Commit f26dda2117

mlugg <mlugg@mlugg.co.uk>
2023-06-22 19:46:56
all: migrate code to new cast builtin syntax
Most of this migration was performed automatically with `zig fmt`. There were a few exceptions which I had to manually fix:

* `@alignCast` and `@addrSpaceCast` cannot be automatically rewritten
* `@truncate`'s fixup is incorrect for vectors
* Test cases are not formatted, and their error locations change
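For context, a minimal sketch of the rewrite this commit applies: the old cast builtins took the result type as their first argument, while the new single-operand forms infer it from the result location, so `zig fmt` wraps each rewritten call in `@as` to keep the type explicit. The sketch below is illustrative only, not part of the commit; the test name and variable names are hypothetical:

```zig
const std = @import("std");

test "new single-operand cast builtins (illustrative)" {
    const x: u64 = 0x1234_5678_9abc_def0;

    // Old form passed the result type as the first argument:
    //     @truncate(u32, x)    @bitCast(i64, x)
    // New form takes only the operand and infers its result type from
    // the result location; `zig fmt` wraps the call in `@as` so the
    // type stays explicit at the call site.
    const lo = @as(u32, @truncate(x));
    const signed = @as(i64, @bitCast(x));
    try std.testing.expectEqual(@as(u32, 0x9abc_def0), lo);
    _ = signed;

    // `@alignCast` had no mechanical rewrite: its target alignment now
    // comes only from the result location, so the old two-argument
    // form `@alignCast(4, ptr)` becomes a typed declaration instead.
    var word: u32 = 0;
    const raw: *anyopaque = &word;
    const p: *u32 = @alignCast(@ptrCast(raw));
    p.* = 42;
    try std.testing.expectEqual(@as(u32, 42), word);
}
```

The `lib/compiler_rt/atomics.zig` hunk below shows one of the manual `@alignCast` rewrites from this commit in practice.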
1 parent 447ca4e
Changed files (651)
lib/compiler_rt/addf3.zig
@@ -24,28 +24,28 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
     const significandMask = (@as(Z, 1) << significandBits) - 1;
 
     const absMask = signBit - 1;
-    const qnanRep = @bitCast(Z, math.nan(T)) | quietBit;
+    const qnanRep = @as(Z, @bitCast(math.nan(T))) | quietBit;
 
-    var aRep = @bitCast(Z, a);
-    var bRep = @bitCast(Z, b);
+    var aRep = @as(Z, @bitCast(a));
+    var bRep = @as(Z, @bitCast(b));
     const aAbs = aRep & absMask;
     const bAbs = bRep & absMask;
 
-    const infRep = @bitCast(Z, math.inf(T));
+    const infRep = @as(Z, @bitCast(math.inf(T)));
 
     // Detect if a or b is zero, infinity, or NaN.
     if (aAbs -% @as(Z, 1) >= infRep - @as(Z, 1) or
         bAbs -% @as(Z, 1) >= infRep - @as(Z, 1))
     {
         // NaN + anything = qNaN
-        if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit);
+        if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit));
         // anything + NaN = qNaN
-        if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit);
+        if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit));
 
         if (aAbs == infRep) {
             // +/-infinity + -/+infinity = qNaN
-            if ((@bitCast(Z, a) ^ @bitCast(Z, b)) == signBit) {
-                return @bitCast(T, qnanRep);
+            if ((@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) == signBit) {
+                return @as(T, @bitCast(qnanRep));
             }
             // +/-infinity + anything remaining = +/- infinity
             else {
@@ -60,7 +60,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
         if (aAbs == 0) {
             // but we need to get the sign right for zero + zero
             if (bAbs == 0) {
-                return @bitCast(T, @bitCast(Z, a) & @bitCast(Z, b));
+                return @as(T, @bitCast(@as(Z, @bitCast(a)) & @as(Z, @bitCast(b))));
             } else {
                 return b;
             }
@@ -78,8 +78,8 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
     }
 
     // Extract the exponent and significand from the (possibly swapped) a and b.
-    var aExponent = @intCast(i32, (aRep >> significandBits) & maxExponent);
-    var bExponent = @intCast(i32, (bRep >> significandBits) & maxExponent);
+    var aExponent = @as(i32, @intCast((aRep >> significandBits) & maxExponent));
+    var bExponent = @as(i32, @intCast((bRep >> significandBits) & maxExponent));
     var aSignificand = aRep & significandMask;
     var bSignificand = bRep & significandMask;
 
@@ -101,11 +101,11 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
 
     // Shift the significand of b by the difference in exponents, with a sticky
     // bottom bit to get rounding correct.
-    const @"align" = @intCast(u32, aExponent - bExponent);
+    const @"align" = @as(u32, @intCast(aExponent - bExponent));
     if (@"align" != 0) {
         if (@"align" < typeWidth) {
-            const sticky = if (bSignificand << @intCast(S, typeWidth - @"align") != 0) @as(Z, 1) else 0;
-            bSignificand = (bSignificand >> @truncate(S, @"align")) | sticky;
+            const sticky = if (bSignificand << @as(S, @intCast(typeWidth - @"align")) != 0) @as(Z, 1) else 0;
+            bSignificand = (bSignificand >> @as(S, @truncate(@"align"))) | sticky;
         } else {
             bSignificand = 1; // sticky; b is known to be non-zero.
         }
@@ -113,13 +113,13 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
     if (subtraction) {
         aSignificand -= bSignificand;
         // If a == -b, return +zero.
-        if (aSignificand == 0) return @bitCast(T, @as(Z, 0));
+        if (aSignificand == 0) return @as(T, @bitCast(@as(Z, 0)));
 
         // If partial cancellation occured, we need to left-shift the result
         // and adjust the exponent:
         if (aSignificand < integerBit << 3) {
-            const shift = @intCast(i32, @clz(aSignificand)) - @intCast(i32, @clz(integerBit << 3));
-            aSignificand <<= @intCast(S, shift);
+            const shift = @as(i32, @intCast(@clz(aSignificand))) - @as(i32, @intCast(@clz(integerBit << 3)));
+            aSignificand <<= @as(S, @intCast(shift));
             aExponent -= shift;
         }
     } else { // addition
@@ -135,13 +135,13 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
     }
 
     // If we have overflowed the type, return +/- infinity:
-    if (aExponent >= maxExponent) return @bitCast(T, infRep | resultSign);
+    if (aExponent >= maxExponent) return @as(T, @bitCast(infRep | resultSign));
 
     if (aExponent <= 0) {
         // Result is denormal; the exponent and round/sticky bits are zero.
         // All we need to do is shift the significand and apply the correct sign.
-        aSignificand >>= @intCast(S, 4 - aExponent);
-        return @bitCast(T, resultSign | aSignificand);
+        aSignificand >>= @as(S, @intCast(4 - aExponent));
+        return @as(T, @bitCast(resultSign | aSignificand));
     }
 
     // Low three bits are round, guard, and sticky.
@@ -151,7 +151,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
     var result = (aSignificand >> 3) & significandMask;
 
     // Insert the exponent and sign.
-    result |= @intCast(Z, aExponent) << significandBits;
+    result |= @as(Z, @intCast(aExponent)) << significandBits;
     result |= resultSign;
 
     // Final rounding.  The result may overflow to infinity, but that is the
@@ -164,7 +164,7 @@ pub inline fn addf3(comptime T: type, a: T, b: T) T {
         if ((result >> significandBits) != 0) result |= integerBit;
     }
 
-    return @bitCast(T, result);
+    return @as(T, @bitCast(result));
 }
 
 test {
lib/compiler_rt/addf3_test.zig
@@ -5,7 +5,7 @@
 
 const std = @import("std");
 const math = std.math;
-const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
+const qnan128 = @as(f128, @bitCast(@as(u128, 0x7fff800000000000) << 64));
 
 const __addtf3 = @import("addtf3.zig").__addtf3;
 const __addxf3 = @import("addxf3.zig").__addxf3;
@@ -14,9 +14,9 @@ const __subtf3 = @import("subtf3.zig").__subtf3;
 fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
     const x = __addtf3(a, b);
 
-    const rep = @bitCast(u128, x);
-    const hi = @intCast(u64, rep >> 64);
-    const lo = @truncate(u64, rep);
+    const rep = @as(u128, @bitCast(x));
+    const hi = @as(u64, @intCast(rep >> 64));
+    const lo = @as(u64, @truncate(rep));
 
     if (hi == expected_hi and lo == expected_lo) {
         return;
@@ -37,7 +37,7 @@ test "addtf3" {
     try test__addtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
 
     // NaN + any = NaN
-    try test__addtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+    try test__addtf3(@as(f128, @bitCast((@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000))), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
 
     // inf + inf = inf
     try test__addtf3(math.inf(f128), math.inf(f128), 0x7fff000000000000, 0x0);
@@ -53,9 +53,9 @@ test "addtf3" {
 fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
     const x = __subtf3(a, b);
 
-    const rep = @bitCast(u128, x);
-    const hi = @intCast(u64, rep >> 64);
-    const lo = @truncate(u64, rep);
+    const rep = @as(u128, @bitCast(x));
+    const hi = @as(u64, @intCast(rep >> 64));
+    const lo = @as(u64, @truncate(rep));
 
     if (hi == expected_hi and lo == expected_lo) {
         return;
@@ -77,7 +77,7 @@ test "subtf3" {
     try test__subtf3(qnan128, 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
 
     // NaN + any = NaN
-    try test__subtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
+    try test__subtf3(@as(f128, @bitCast((@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000))), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
 
     // inf - any = inf
     try test__subtf3(math.inf(f128), 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0);
@@ -87,16 +87,16 @@ test "subtf3" {
     try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c);
 }
 
-const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
+const qnan80 = @as(f80, @bitCast(@as(u80, @bitCast(math.nan(f80))) | (1 << (math.floatFractionalBits(f80) - 1))));
 
 fn test__addxf3(a: f80, b: f80, expected: u80) !void {
     const x = __addxf3(a, b);
-    const rep = @bitCast(u80, x);
+    const rep = @as(u80, @bitCast(x));
 
     if (rep == expected)
         return;
 
-    if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
+    if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x))
         return; // We don't currently test NaN payload propagation
 
     return error.TestFailed;
@@ -104,33 +104,33 @@ fn test__addxf3(a: f80, b: f80, expected: u80) !void {
 
 test "addxf3" {
     // NaN + any = NaN
-    try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
-    try test__addxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
+    try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80)));
+    try test__addxf3(@as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80)));
 
     // any + NaN = NaN
-    try test__addxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80));
-    try test__addxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80));
+    try test__addxf3(0x1.23456789abcdefp+5, qnan80, @as(u80, @bitCast(qnan80)));
+    try test__addxf3(0x1.23456789abcdefp+5, @as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), @as(u80, @bitCast(qnan80)));
 
     // NaN + inf = NaN
-    try test__addxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80));
+    try test__addxf3(qnan80, math.inf(f80), @as(u80, @bitCast(qnan80)));
 
     // inf + NaN = NaN
-    try test__addxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80));
+    try test__addxf3(math.inf(f80), qnan80, @as(u80, @bitCast(qnan80)));
 
     // inf + inf = inf
-    try test__addxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80)));
+    try test__addxf3(math.inf(f80), math.inf(f80), @as(u80, @bitCast(math.inf(f80))));
 
     // inf + -inf = NaN
-    try test__addxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, qnan80));
+    try test__addxf3(math.inf(f80), -math.inf(f80), @as(u80, @bitCast(qnan80)));
 
     // -inf + inf = NaN
-    try test__addxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, qnan80));
+    try test__addxf3(-math.inf(f80), math.inf(f80), @as(u80, @bitCast(qnan80)));
 
     // inf + any = inf
-    try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80)));
+    try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @as(u80, @bitCast(math.inf(f80))));
 
     // any + inf = inf
-    try test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80)));
+    try test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @as(u80, @bitCast(math.inf(f80))));
 
     // any + any
     try test__addxf3(0x1.23456789abcdp+5, 0x1.dcba987654321p+5, 0x4005_BFFFFFFFFFFFC400);
lib/compiler_rt/arm.zig
@@ -192,6 +192,6 @@ pub fn __aeabi_ldivmod() callconv(.Naked) void {
 }
 
 pub fn __aeabi_drsub(a: f64, b: f64) callconv(.AAPCS) f64 {
-    const neg_a = @bitCast(f64, @bitCast(u64, a) ^ (@as(u64, 1) << 63));
+    const neg_a = @as(f64, @bitCast(@as(u64, @bitCast(a)) ^ (@as(u64, 1) << 63)));
     return b + neg_a;
 }
lib/compiler_rt/atomics.zig
@@ -232,16 +232,16 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {
 
     const addr = @intFromPtr(ptr);
     const wide_addr = addr & ~(@as(T, smallest_atomic_fetch_exch_size) - 1);
-    const wide_ptr = @alignCast(smallest_atomic_fetch_exch_size, @ptrFromInt(*WideAtomic, wide_addr));
+    const wide_ptr: *align(smallest_atomic_fetch_exch_size) WideAtomic = @alignCast(@as(*WideAtomic, @ptrFromInt(wide_addr)));
 
     const inner_offset = addr & (@as(T, smallest_atomic_fetch_exch_size) - 1);
-    const inner_shift = @intCast(std.math.Log2Int(T), inner_offset * 8);
+    const inner_shift = @as(std.math.Log2Int(T), @intCast(inner_offset * 8));
 
     const mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift;
 
     var wide_old = @atomicLoad(WideAtomic, wide_ptr, .SeqCst);
     while (true) {
-        const old = @truncate(T, (wide_old & mask) >> inner_shift);
+        const old = @as(T, @truncate((wide_old & mask) >> inner_shift));
         const new = update(val, old);
         const wide_new = wide_old & ~mask | (@as(WideAtomic, new) << inner_shift);
         if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .SeqCst, .SeqCst)) |new_wide_old| {
lib/compiler_rt/aulldiv.zig
@@ -21,9 +21,9 @@ pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
     const an = (a ^ s_a) -% s_a;
     const bn = (b ^ s_b) -% s_b;
 
-    const r = @bitCast(u64, an) / @bitCast(u64, bn);
+    const r = @as(u64, @bitCast(an)) / @as(u64, @bitCast(bn));
     const s = s_a ^ s_b;
-    return (@bitCast(i64, r) ^ s) -% s;
+    return (@as(i64, @bitCast(r)) ^ s) -% s;
 }
 
 pub fn _aulldiv() callconv(.Naked) void {
lib/compiler_rt/aullrem.zig
@@ -21,9 +21,9 @@ pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
     const an = (a ^ s_a) -% s_a;
     const bn = (b ^ s_b) -% s_b;
 
-    const r = @bitCast(u64, an) % @bitCast(u64, bn);
+    const r = @as(u64, @bitCast(an)) % @as(u64, @bitCast(bn));
     const s = s_a ^ s_b;
-    return (@bitCast(i64, r) ^ s) -% s;
+    return (@as(i64, @bitCast(r)) ^ s) -% s;
 }
 
 pub fn _aullrem() callconv(.Naked) void {
lib/compiler_rt/ceil.zig
@@ -27,12 +27,12 @@ comptime {
 
 pub fn __ceilh(x: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, ceilf(x));
+    return @as(f16, @floatCast(ceilf(x)));
 }
 
 pub fn ceilf(x: f32) callconv(.C) f32 {
-    var u = @bitCast(u32, x);
-    var e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
+    var u = @as(u32, @bitCast(x));
+    var e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
     var m: u32 = undefined;
 
     // TODO: Shouldn't need this explicit check.
@@ -43,7 +43,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 {
     if (e >= 23) {
         return x;
     } else if (e >= 0) {
-        m = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
+        m = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e));
         if (u & m == 0) {
             return x;
         }
@@ -52,7 +52,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 {
             u += m;
         }
         u &= ~m;
-        return @bitCast(f32, u);
+        return @as(f32, @bitCast(u));
     } else {
         math.doNotOptimizeAway(x + 0x1.0p120);
         if (u >> 31 != 0) {
@@ -66,7 +66,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 {
 pub fn ceil(x: f64) callconv(.C) f64 {
     const f64_toint = 1.0 / math.floatEps(f64);
 
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
     const e = (u >> 52) & 0x7FF;
     var y: f64 = undefined;
 
@@ -96,13 +96,13 @@ pub fn ceil(x: f64) callconv(.C) f64 {
 
 pub fn __ceilx(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, ceilq(x));
+    return @as(f80, @floatCast(ceilq(x)));
 }
 
 pub fn ceilq(x: f128) callconv(.C) f128 {
     const f128_toint = 1.0 / math.floatEps(f128);
 
-    const u = @bitCast(u128, x);
+    const u = @as(u128, @bitCast(x));
     const e = (u >> 112) & 0x7FFF;
     var y: f128 = undefined;
 
lib/compiler_rt/clear_cache.zig
@@ -102,7 +102,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
         // If CTR_EL0.IDC is set, data cache cleaning to the point of unification
         // is not required for instruction to data coherence.
         if (((ctr_el0 >> 28) & 0x1) == 0x0) {
-            const dcache_line_size: usize = @as(usize, 4) << @intCast(u6, (ctr_el0 >> 16) & 15);
+            const dcache_line_size: usize = @as(usize, 4) << @as(u6, @intCast((ctr_el0 >> 16) & 15));
             addr = start & ~(dcache_line_size - 1);
             while (addr < end) : (addr += dcache_line_size) {
                 asm volatile ("dc cvau, %[addr]"
@@ -115,7 +115,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
         // If CTR_EL0.DIC is set, instruction cache invalidation to the point of
         // unification is not required for instruction to data coherence.
         if (((ctr_el0 >> 29) & 0x1) == 0x0) {
-            const icache_line_size: usize = @as(usize, 4) << @intCast(u6, (ctr_el0 >> 0) & 15);
+            const icache_line_size: usize = @as(usize, 4) << @as(u6, @intCast((ctr_el0 >> 0) & 15));
             addr = start & ~(icache_line_size - 1);
             while (addr < end) : (addr += icache_line_size) {
                 asm volatile ("ic ivau, %[addr]"
lib/compiler_rt/clzdi2_test.zig
@@ -2,7 +2,7 @@ const clz = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__clzdi2(a: u64, expected: i64) !void {
-    var x = @bitCast(i64, a);
+    var x = @as(i64, @bitCast(a));
     var result = clz.__clzdi2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/clzsi2_test.zig
@@ -4,8 +4,8 @@ const testing = @import("std").testing;
 
 fn test__clzsi2(a: u32, expected: i32) !void {
     const nakedClzsi2 = clz.__clzsi2;
-    const actualClzsi2 = @ptrCast(*const fn (a: i32) callconv(.C) i32, &nakedClzsi2);
-    const x = @bitCast(i32, a);
+    const actualClzsi2 = @as(*const fn (a: i32) callconv(.C) i32, @ptrCast(&nakedClzsi2));
+    const x = @as(i32, @bitCast(a));
     const result = actualClzsi2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/clzti2_test.zig
@@ -2,7 +2,7 @@ const clz = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__clzti2(a: u128, expected: i64) !void {
-    var x = @bitCast(i128, a);
+    var x = @as(i128, @bitCast(a));
     var result = clz.__clzti2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/cmptf2.zig
@@ -75,30 +75,30 @@ fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 {
 }
 
 fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool {
-    return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Equal;
+    return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Equal;
 }
 
 fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool {
-    return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) != .Equal;
+    return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) != .Equal;
 }
 
 fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool {
-    return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Less;
+    return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Less;
 }
 
 fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool {
-    return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Greater;
+    return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Greater;
 }
 
 fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool {
-    return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) {
+    return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) {
         .Equal, .Greater => true,
         .Less, .Unordered => false,
     };
 }
 
 fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool {
-    return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) {
+    return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) {
         .Equal, .Less => true,
         .Greater, .Unordered => false,
     };
lib/compiler_rt/common.zig
@@ -102,22 +102,22 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
         u16 => {
             // 16x16 --> 32 bit multiply
             const product = @as(u32, a) * @as(u32, b);
-            hi.* = @intCast(u16, product >> 16);
-            lo.* = @truncate(u16, product);
+            hi.* = @as(u16, @intCast(product >> 16));
+            lo.* = @as(u16, @truncate(product));
         },
         u32 => {
             // 32x32 --> 64 bit multiply
             const product = @as(u64, a) * @as(u64, b);
-            hi.* = @truncate(u32, product >> 32);
-            lo.* = @truncate(u32, product);
+            hi.* = @as(u32, @truncate(product >> 32));
+            lo.* = @as(u32, @truncate(product));
         },
         u64 => {
             const S = struct {
                 fn loWord(x: u64) u64 {
-                    return @truncate(u32, x);
+                    return @as(u32, @truncate(x));
                 }
                 fn hiWord(x: u64) u64 {
-                    return @truncate(u32, x >> 32);
+                    return @as(u32, @truncate(x >> 32));
                 }
             };
             // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
@@ -141,16 +141,16 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
             const Word_FullMask = @as(u64, 0xffffffffffffffff);
             const S = struct {
                 fn Word_1(x: u128) u64 {
-                    return @truncate(u32, x >> 96);
+                    return @as(u32, @truncate(x >> 96));
                 }
                 fn Word_2(x: u128) u64 {
-                    return @truncate(u32, x >> 64);
+                    return @as(u32, @truncate(x >> 64));
                 }
                 fn Word_3(x: u128) u64 {
-                    return @truncate(u32, x >> 32);
+                    return @as(u32, @truncate(x >> 32));
                 }
                 fn Word_4(x: u128) u64 {
-                    return @truncate(u32, x);
+                    return @as(u32, @truncate(x));
                 }
             };
             // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
@@ -216,7 +216,7 @@ pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeIn
     const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
 
     const shift = @clz(significand.*) - @clz(integerBit);
-    significand.* <<= @intCast(std.math.Log2Int(Z), shift);
+    significand.* <<= @as(std.math.Log2Int(Z), @intCast(shift));
     return @as(i32, 1) - shift;
 }
 
@@ -228,8 +228,8 @@ pub inline fn fneg(a: anytype) @TypeOf(a) {
         .bits = bits,
     } });
     const sign_bit_mask = @as(U, 1) << (bits - 1);
-    const negated = @bitCast(U, a) ^ sign_bit_mask;
-    return @bitCast(F, negated);
+    const negated = @as(U, @bitCast(a)) ^ sign_bit_mask;
+    return @as(F, @bitCast(negated));
 }
 
 /// Allows to access underlying bits as two equally sized lower and higher
lib/compiler_rt/comparef.zig
@@ -26,12 +26,12 @@ pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT {
     const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
     const absMask = signBit - 1;
     const infT = comptime std.math.inf(T);
-    const infRep = @bitCast(rep_t, infT);
+    const infRep = @as(rep_t, @bitCast(infT));
 
-    const aInt = @bitCast(srep_t, a);
-    const bInt = @bitCast(srep_t, b);
-    const aAbs = @bitCast(rep_t, aInt) & absMask;
-    const bAbs = @bitCast(rep_t, bInt) & absMask;
+    const aInt = @as(srep_t, @bitCast(a));
+    const bInt = @as(srep_t, @bitCast(b));
+    const aAbs = @as(rep_t, @bitCast(aInt)) & absMask;
+    const bAbs = @as(rep_t, @bitCast(bInt)) & absMask;
 
     // If either a or b is NaN, they are unordered.
     if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
@@ -81,7 +81,7 @@ pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
         return .Equal;
     } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
         // signs are different
-        if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
+        if (@as(i16, @bitCast(a_rep.exp)) < @as(i16, @bitCast(b_rep.exp))) {
             return .Less;
         } else {
             return .Greater;
@@ -104,10 +104,10 @@ pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
     const exponentBits = std.math.floatExponentBits(T);
     const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
     const absMask = signBit - 1;
-    const infRep = @bitCast(rep_t, std.math.inf(T));
+    const infRep = @as(rep_t, @bitCast(std.math.inf(T)));
 
-    const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
-    const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
+    const aAbs: rep_t = @as(rep_t, @bitCast(a)) & absMask;
+    const bAbs: rep_t = @as(rep_t, @bitCast(b)) & absMask;
 
     return @intFromBool(aAbs > infRep or bAbs > infRep);
 }
lib/compiler_rt/cos.zig
@@ -25,7 +25,7 @@ comptime {
 
 pub fn __cosh(a: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, cosf(a));
+    return @as(f16, @floatCast(cosf(a)));
 }
 
 pub fn cosf(x: f32) callconv(.C) f32 {
@@ -35,7 +35,7 @@ pub fn cosf(x: f32) callconv(.C) f32 {
     const c3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
     const c4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18
 
-    var ix = @bitCast(u32, x);
+    var ix = @as(u32, @bitCast(x));
     const sign = ix >> 31 != 0;
     ix &= 0x7fffffff;
 
@@ -86,7 +86,7 @@ pub fn cosf(x: f32) callconv(.C) f32 {
 }
 
 pub fn cos(x: f64) callconv(.C) f64 {
-    var ix = @bitCast(u64, x) >> 32;
+    var ix = @as(u64, @bitCast(x)) >> 32;
     ix &= 0x7fffffff;
 
     // |x| ~< pi/4
@@ -116,12 +116,12 @@ pub fn cos(x: f64) callconv(.C) f64 {
 
 pub fn __cosx(a: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, cosq(a));
+    return @as(f80, @floatCast(cosq(a)));
 }
 
 pub fn cosq(a: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return cos(@floatCast(f64, a));
+    return cos(@as(f64, @floatCast(a)));
 }
 
 pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble {
lib/compiler_rt/count0bits.zig
@@ -32,9 +32,9 @@ comptime {
 
 inline fn clzXi2(comptime T: type, a: T) i32 {
     var x = switch (@bitSizeOf(T)) {
-        32 => @bitCast(u32, a),
-        64 => @bitCast(u64, a),
-        128 => @bitCast(u128, a),
+        32 => @as(u32, @bitCast(a)),
+        64 => @as(u64, @bitCast(a)),
+        128 => @as(u128, @bitCast(a)),
         else => unreachable,
     };
     var n: T = @bitSizeOf(T);
@@ -49,7 +49,7 @@ inline fn clzXi2(comptime T: type, a: T) i32 {
             x = y;
         }
     }
-    return @intCast(i32, n - @bitCast(T, x));
+    return @as(i32, @intCast(n - @as(T, @bitCast(x))));
 }
 
 fn __clzsi2_thumb1() callconv(.Naked) void {
@@ -169,9 +169,9 @@ pub fn __clzti2(a: i128) callconv(.C) i32 {
 
 inline fn ctzXi2(comptime T: type, a: T) i32 {
     var x = switch (@bitSizeOf(T)) {
-        32 => @bitCast(u32, a),
-        64 => @bitCast(u64, a),
-        128 => @bitCast(u128, a),
+        32 => @as(u32, @bitCast(a)),
+        64 => @as(u64, @bitCast(a)),
+        128 => @as(u128, @bitCast(a)),
         else => unreachable,
     };
     var n: T = 1;
@@ -187,7 +187,7 @@ inline fn ctzXi2(comptime T: type, a: T) i32 {
             x = x >> shift;
         }
     }
-    return @intCast(i32, n - @bitCast(T, (x & 1)));
+    return @as(i32, @intCast(n - @as(T, @bitCast((x & 1)))));
 }
 
 pub fn __ctzsi2(a: i32) callconv(.C) i32 {
@@ -204,9 +204,9 @@ pub fn __ctzti2(a: i128) callconv(.C) i32 {
 
 inline fn ffsXi2(comptime T: type, a: T) i32 {
     var x = switch (@bitSizeOf(T)) {
-        32 => @bitCast(u32, a),
-        64 => @bitCast(u64, a),
-        128 => @bitCast(u128, a),
+        32 => @as(u32, @bitCast(a)),
+        64 => @as(u64, @bitCast(a)),
+        128 => @as(u128, @bitCast(a)),
         else => unreachable,
     };
     var n: T = 1;
@@ -224,7 +224,7 @@ inline fn ffsXi2(comptime T: type, a: T) i32 {
         }
     }
     // return ctz + 1
-    return @intCast(i32, n - @bitCast(T, (x & 1))) + @as(i32, 1);
+    return @as(i32, @intCast(n - @as(T, @bitCast((x & 1))))) + @as(i32, 1);
 }
 
 pub fn __ffssi2(a: i32) callconv(.C) i32 {
lib/compiler_rt/ctzdi2_test.zig
@@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__ctzdi2(a: u64, expected: i32) !void {
-    var x = @bitCast(i64, a);
+    var x = @as(i64, @bitCast(a));
     var result = ctz.__ctzdi2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/ctzsi2_test.zig
@@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__ctzsi2(a: u32, expected: i32) !void {
-    var x = @bitCast(i32, a);
+    var x = @as(i32, @bitCast(a));
     var result = ctz.__ctzsi2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/ctzti2_test.zig
@@ -2,7 +2,7 @@ const ctz = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__ctzti2(a: u128, expected: i32) !void {
-    var x = @bitCast(i128, a);
+    var x = @as(i128, @bitCast(a));
     var result = ctz.__ctzti2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/divdf3.zig
@@ -47,52 +47,52 @@ inline fn div(a: f64, b: f64) f64 {
     const absMask = signBit - 1;
     const exponentMask = absMask ^ significandMask;
     const qnanRep = exponentMask | quietBit;
-    const infRep = @bitCast(Z, std.math.inf(f64));
+    const infRep = @as(Z, @bitCast(std.math.inf(f64)));
 
-    const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
-    const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
-    const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
+    const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent));
+    const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent));
+    const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit;
 
-    var aSignificand: Z = @bitCast(Z, a) & significandMask;
-    var bSignificand: Z = @bitCast(Z, b) & significandMask;
+    var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask;
+    var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask;
     var scale: i32 = 0;
 
     // Detect if a or b is zero, denormal, infinity, or NaN.
     if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
-        const aAbs: Z = @bitCast(Z, a) & absMask;
-        const bAbs: Z = @bitCast(Z, b) & absMask;
+        const aAbs: Z = @as(Z, @bitCast(a)) & absMask;
+        const bAbs: Z = @as(Z, @bitCast(b)) & absMask;
 
         // NaN / anything = qNaN
-        if (aAbs > infRep) return @bitCast(f64, @bitCast(Z, a) | quietBit);
+        if (aAbs > infRep) return @as(f64, @bitCast(@as(Z, @bitCast(a)) | quietBit));
         // anything / NaN = qNaN
-        if (bAbs > infRep) return @bitCast(f64, @bitCast(Z, b) | quietBit);
+        if (bAbs > infRep) return @as(f64, @bitCast(@as(Z, @bitCast(b)) | quietBit));
 
         if (aAbs == infRep) {
             // infinity / infinity = NaN
             if (bAbs == infRep) {
-                return @bitCast(f64, qnanRep);
+                return @as(f64, @bitCast(qnanRep));
             }
             // infinity / anything else = +/- infinity
             else {
-                return @bitCast(f64, aAbs | quotientSign);
+                return @as(f64, @bitCast(aAbs | quotientSign));
             }
         }
 
         // anything else / infinity = +/- 0
-        if (bAbs == infRep) return @bitCast(f64, quotientSign);
+        if (bAbs == infRep) return @as(f64, @bitCast(quotientSign));
 
         if (aAbs == 0) {
             // zero / zero = NaN
             if (bAbs == 0) {
-                return @bitCast(f64, qnanRep);
+                return @as(f64, @bitCast(qnanRep));
             }
             // zero / anything else = +/- zero
             else {
-                return @bitCast(f64, quotientSign);
+                return @as(f64, @bitCast(quotientSign));
             }
         }
         // anything else / zero = +/- infinity
-        if (bAbs == 0) return @bitCast(f64, infRep | quotientSign);
+        if (bAbs == 0) return @as(f64, @bitCast(infRep | quotientSign));
 
         // one or both of a or b is denormal, the other (if applicable) is a
         // normal number.  Renormalize one or both of a and b, and set scale to
@@ -106,13 +106,13 @@ inline fn div(a: f64, b: f64) f64 {
     // won't hurt anything.)
     aSignificand |= implicitBit;
     bSignificand |= implicitBit;
-    var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale;
+    var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale;
 
     // Align the significand of b as a Q31 fixed-point number in the range
     // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
     // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2.  This
     // is accurate to about 3.5 binary digits.
-    const q31b: u32 = @truncate(u32, bSignificand >> 21);
+    const q31b: u32 = @as(u32, @truncate(bSignificand >> 21));
     var recip32 = @as(u32, 0x7504f333) -% q31b;
 
     // Now refine the reciprocal estimate using a Newton-Raphson iteration:
@@ -123,12 +123,12 @@ inline fn div(a: f64, b: f64) f64 {
     // with each iteration, so after three iterations, we have about 28 binary
     // digits of accuracy.
     var correction32: u32 = undefined;
-    correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1);
-    recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31);
-    correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1);
-    recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31);
-    correction32 = @truncate(u32, ~(@as(u64, recip32) *% q31b >> 32) +% 1);
-    recip32 = @truncate(u32, @as(u64, recip32) *% correction32 >> 31);
+    correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1));
+    recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31));
+    correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1));
+    recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31));
+    correction32 = @as(u32, @truncate(~(@as(u64, recip32) *% q31b >> 32) +% 1));
+    recip32 = @as(u32, @truncate(@as(u64, recip32) *% correction32 >> 31));
 
     // recip32 might have overflowed to exactly zero in the preceding
     // computation if the high word of b is exactly 1.0.  This would sabotage
@@ -138,12 +138,12 @@ inline fn div(a: f64, b: f64) f64 {
 
     // We need to perform one more iteration to get us to 56 binary digits;
     // The last iteration needs to happen with extra precision.
-    const q63blo: u32 = @truncate(u32, bSignificand << 11);
+    const q63blo: u32 = @as(u32, @truncate(bSignificand << 11));
     var correction: u64 = undefined;
     var reciprocal: u64 = undefined;
     correction = ~(@as(u64, recip32) *% q31b +% (@as(u64, recip32) *% q63blo >> 32)) +% 1;
-    const cHi = @truncate(u32, correction >> 32);
-    const cLo = @truncate(u32, correction);
+    const cHi = @as(u32, @truncate(correction >> 32));
+    const cLo = @as(u32, @truncate(correction));
     reciprocal = @as(u64, recip32) *% cHi +% (@as(u64, recip32) *% cLo >> 32);
 
     // We already adjusted the 32-bit estimate, now we need to adjust the final
@@ -195,7 +195,7 @@ inline fn div(a: f64, b: f64) f64 {
 
     if (writtenExponent >= maxExponent) {
         // If we have overflowed the exponent, return infinity.
-        return @bitCast(f64, infRep | quotientSign);
+        return @as(f64, @bitCast(infRep | quotientSign));
     } else if (writtenExponent < 1) {
         if (writtenExponent == 0) {
             // Check whether the rounded result is normal.
@@ -206,22 +206,22 @@ inline fn div(a: f64, b: f64) f64 {
             absResult += round;
             if ((absResult & ~significandMask) != 0) {
                 // The rounded result is normal; return it.
-                return @bitCast(f64, absResult | quotientSign);
+                return @as(f64, @bitCast(absResult | quotientSign));
             }
         }
         // Flush denormals to zero.  In the future, it would be nice to add
         // code to round them correctly.
-        return @bitCast(f64, quotientSign);
+        return @as(f64, @bitCast(quotientSign));
     } else {
         const round = @intFromBool((residual << 1) > bSignificand);
         // Clear the implicit bit
         var absResult = quotient & significandMask;
         // Insert the exponent
-        absResult |= @bitCast(Z, @as(SignedZ, writtenExponent)) << significandBits;
+        absResult |= @as(Z, @bitCast(@as(SignedZ, writtenExponent))) << significandBits;
         // Round
         absResult +%= round;
         // Insert the sign and return
-        return @bitCast(f64, absResult | quotientSign);
+        return @as(f64, @bitCast(absResult | quotientSign));
     }
 }
 
lib/compiler_rt/divdf3_test.zig
@@ -6,7 +6,7 @@ const __divdf3 = @import("divdf3.zig").__divdf3;
 const testing = @import("std").testing;
 
 fn compareResultD(result: f64, expected: u64) bool {
-    const rep = @bitCast(u64, result);
+    const rep = @as(u64, @bitCast(result));
 
     if (rep == expected) {
         return true;
lib/compiler_rt/divhf3.zig
@@ -7,5 +7,5 @@ comptime {
 
 pub fn __divhf3(a: f16, b: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, divsf3.__divsf3(a, b));
+    return @as(f16, @floatCast(divsf3.__divsf3(a, b)));
 }
lib/compiler_rt/divsf3.zig
@@ -44,52 +44,52 @@ inline fn div(a: f32, b: f32) f32 {
     const absMask = signBit - 1;
     const exponentMask = absMask ^ significandMask;
     const qnanRep = exponentMask | quietBit;
-    const infRep = @bitCast(Z, std.math.inf(f32));
+    const infRep = @as(Z, @bitCast(std.math.inf(f32)));
 
-    const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
-    const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
-    const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
+    const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent));
+    const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent));
+    const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit;
 
-    var aSignificand: Z = @bitCast(Z, a) & significandMask;
-    var bSignificand: Z = @bitCast(Z, b) & significandMask;
+    var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask;
+    var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask;
     var scale: i32 = 0;
 
     // Detect if a or b is zero, denormal, infinity, or NaN.
     if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
-        const aAbs: Z = @bitCast(Z, a) & absMask;
-        const bAbs: Z = @bitCast(Z, b) & absMask;
+        const aAbs: Z = @as(Z, @bitCast(a)) & absMask;
+        const bAbs: Z = @as(Z, @bitCast(b)) & absMask;
 
         // NaN / anything = qNaN
-        if (aAbs > infRep) return @bitCast(f32, @bitCast(Z, a) | quietBit);
+        if (aAbs > infRep) return @as(f32, @bitCast(@as(Z, @bitCast(a)) | quietBit));
         // anything / NaN = qNaN
-        if (bAbs > infRep) return @bitCast(f32, @bitCast(Z, b) | quietBit);
+        if (bAbs > infRep) return @as(f32, @bitCast(@as(Z, @bitCast(b)) | quietBit));
 
         if (aAbs == infRep) {
             // infinity / infinity = NaN
             if (bAbs == infRep) {
-                return @bitCast(f32, qnanRep);
+                return @as(f32, @bitCast(qnanRep));
             }
             // infinity / anything else = +/- infinity
             else {
-                return @bitCast(f32, aAbs | quotientSign);
+                return @as(f32, @bitCast(aAbs | quotientSign));
             }
         }
 
         // anything else / infinity = +/- 0
-        if (bAbs == infRep) return @bitCast(f32, quotientSign);
+        if (bAbs == infRep) return @as(f32, @bitCast(quotientSign));
 
         if (aAbs == 0) {
             // zero / zero = NaN
             if (bAbs == 0) {
-                return @bitCast(f32, qnanRep);
+                return @as(f32, @bitCast(qnanRep));
             }
             // zero / anything else = +/- zero
             else {
-                return @bitCast(f32, quotientSign);
+                return @as(f32, @bitCast(quotientSign));
             }
         }
         // anything else / zero = +/- infinity
-        if (bAbs == 0) return @bitCast(f32, infRep | quotientSign);
+        if (bAbs == 0) return @as(f32, @bitCast(infRep | quotientSign));
 
         // one or both of a or b is denormal, the other (if applicable) is a
         // normal number.  Renormalize one or both of a and b, and set scale to
@@ -103,7 +103,7 @@ inline fn div(a: f32, b: f32) f32 {
     // won't hurt anything.)
     aSignificand |= implicitBit;
     bSignificand |= implicitBit;
-    var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale;
+    var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale;
 
     // Align the significand of b as a Q31 fixed-point number in the range
     // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
@@ -120,12 +120,12 @@ inline fn div(a: f32, b: f32) f32 {
     // with each iteration, so after three iterations, we have about 28 binary
     // digits of accuracy.
     var correction: u32 = undefined;
-    correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1);
-    reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31);
-    correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1);
-    reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31);
-    correction = @truncate(u32, ~(@as(u64, reciprocal) *% q31b >> 32) +% 1);
-    reciprocal = @truncate(u32, @as(u64, reciprocal) *% correction >> 31);
+    correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1));
+    reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31));
+    correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1));
+    reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31));
+    correction = @as(u32, @truncate(~(@as(u64, reciprocal) *% q31b >> 32) +% 1));
+    reciprocal = @as(u32, @truncate(@as(u64, reciprocal) *% correction >> 31));
 
     // Exhaustive testing shows that the error in reciprocal after three steps
     // is in the interval [-0x1.f58108p-31, 0x1.d0e48cp-29], in line with our
@@ -147,7 +147,7 @@ inline fn div(a: f32, b: f32) f32 {
     //       is the error in the reciprocal of b scaled by the maximum
     //       possible value of a.  As a consequence of this error bound,
     //       either q or nextafter(q) is the correctly rounded
-    var quotient: Z = @truncate(u32, @as(u64, reciprocal) *% (aSignificand << 1) >> 32);
+    var quotient: Z = @as(u32, @truncate(@as(u64, reciprocal) *% (aSignificand << 1) >> 32));
 
     // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
     // In either case, we are going to compute a residual of the form
@@ -175,7 +175,7 @@ inline fn div(a: f32, b: f32) f32 {
 
     if (writtenExponent >= maxExponent) {
         // If we have overflowed the exponent, return infinity.
-        return @bitCast(f32, infRep | quotientSign);
+        return @as(f32, @bitCast(infRep | quotientSign));
     } else if (writtenExponent < 1) {
         if (writtenExponent == 0) {
             // Check whether the rounded result is normal.
@@ -186,22 +186,22 @@ inline fn div(a: f32, b: f32) f32 {
             absResult += round;
             if ((absResult & ~significandMask) > 0) {
                 // The rounded result is normal; return it.
-                return @bitCast(f32, absResult | quotientSign);
+                return @as(f32, @bitCast(absResult | quotientSign));
             }
         }
         // Flush denormals to zero.  In the future, it would be nice to add
         // code to round them correctly.
-        return @bitCast(f32, quotientSign);
+        return @as(f32, @bitCast(quotientSign));
     } else {
         const round = @intFromBool((residual << 1) > bSignificand);
         // Clear the implicit bit
         var absResult = quotient & significandMask;
         // Insert the exponent
-        absResult |= @bitCast(Z, writtenExponent) << significandBits;
+        absResult |= @as(Z, @bitCast(writtenExponent)) << significandBits;
         // Round
         absResult +%= round;
         // Insert the sign and return
-        return @bitCast(f32, absResult | quotientSign);
+        return @as(f32, @bitCast(absResult | quotientSign));
     }
 }
 
lib/compiler_rt/divsf3_test.zig
@@ -6,7 +6,7 @@ const __divsf3 = @import("divsf3.zig").__divsf3;
 const testing = @import("std").testing;
 
 fn compareResultF(result: f32, expected: u32) bool {
-    const rep = @bitCast(u32, result);
+    const rep = @as(u32, @bitCast(result));
 
     if (rep == expected) {
         return true;
lib/compiler_rt/divtf3.zig
@@ -41,52 +41,52 @@ inline fn div(a: f128, b: f128) f128 {
     const absMask = signBit - 1;
     const exponentMask = absMask ^ significandMask;
     const qnanRep = exponentMask | quietBit;
-    const infRep = @bitCast(Z, std.math.inf(f128));
+    const infRep = @as(Z, @bitCast(std.math.inf(f128)));
 
-    const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
-    const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
-    const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
+    const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent));
+    const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent));
+    const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit;
 
-    var aSignificand: Z = @bitCast(Z, a) & significandMask;
-    var bSignificand: Z = @bitCast(Z, b) & significandMask;
+    var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask;
+    var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask;
     var scale: i32 = 0;
 
     // Detect if a or b is zero, denormal, infinity, or NaN.
     if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
-        const aAbs: Z = @bitCast(Z, a) & absMask;
-        const bAbs: Z = @bitCast(Z, b) & absMask;
+        const aAbs: Z = @as(Z, @bitCast(a)) & absMask;
+        const bAbs: Z = @as(Z, @bitCast(b)) & absMask;
 
         // NaN / anything = qNaN
-        if (aAbs > infRep) return @bitCast(f128, @bitCast(Z, a) | quietBit);
+        if (aAbs > infRep) return @as(f128, @bitCast(@as(Z, @bitCast(a)) | quietBit));
         // anything / NaN = qNaN
-        if (bAbs > infRep) return @bitCast(f128, @bitCast(Z, b) | quietBit);
+        if (bAbs > infRep) return @as(f128, @bitCast(@as(Z, @bitCast(b)) | quietBit));
 
         if (aAbs == infRep) {
             // infinity / infinity = NaN
             if (bAbs == infRep) {
-                return @bitCast(f128, qnanRep);
+                return @as(f128, @bitCast(qnanRep));
             }
             // infinity / anything else = +/- infinity
             else {
-                return @bitCast(f128, aAbs | quotientSign);
+                return @as(f128, @bitCast(aAbs | quotientSign));
             }
         }
 
         // anything else / infinity = +/- 0
-        if (bAbs == infRep) return @bitCast(f128, quotientSign);
+        if (bAbs == infRep) return @as(f128, @bitCast(quotientSign));
 
         if (aAbs == 0) {
             // zero / zero = NaN
             if (bAbs == 0) {
-                return @bitCast(f128, qnanRep);
+                return @as(f128, @bitCast(qnanRep));
             }
             // zero / anything else = +/- zero
             else {
-                return @bitCast(f128, quotientSign);
+                return @as(f128, @bitCast(quotientSign));
             }
         }
         // anything else / zero = +/- infinity
-        if (bAbs == 0) return @bitCast(f128, infRep | quotientSign);
+        if (bAbs == 0) return @as(f128, @bitCast(infRep | quotientSign));
 
         // one or both of a or b is denormal, the other (if applicable) is a
         // normal number.  Renormalize one or both of a and b, and set scale to
@@ -100,13 +100,13 @@ inline fn div(a: f128, b: f128) f128 {
     // won't hurt anything.
     aSignificand |= implicitBit;
     bSignificand |= implicitBit;
-    var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale;
+    var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale;
 
     // Align the significand of b as a Q63 fixed-point number in the range
     // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax
     // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2.  This
     // is accurate to about 3.5 binary digits.
-    const q63b = @truncate(u64, bSignificand >> 49);
+    const q63b = @as(u64, @truncate(bSignificand >> 49));
     var recip64 = @as(u64, 0x7504f333F9DE6484) -% q63b;
     // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2)
 
@@ -117,16 +117,16 @@ inline fn div(a: f128, b: f128) f128 {
     // This doubles the number of correct binary digits in the approximation
     // with each iteration.
     var correction64: u64 = undefined;
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
 
     // The reciprocal may have overflowed to zero if the upper half of b is
     // exactly 1.0.  This would sabatoge the full-width final stage of the
@@ -135,7 +135,7 @@ inline fn div(a: f128, b: f128) f128 {
 
     // We need to perform one more iteration to get us to 112 binary digits;
     // The last iteration needs to happen with extra precision.
-    const q127blo: u64 = @truncate(u64, bSignificand << 15);
+    const q127blo: u64 = @as(u64, @truncate(bSignificand << 15));
     var correction: u128 = undefined;
     var reciprocal: u128 = undefined;
 
@@ -151,8 +151,8 @@ inline fn div(a: f128, b: f128) f128 {
 
     correction = -%(r64q63 + (r64q127 >> 64));
 
-    const cHi = @truncate(u64, correction >> 64);
-    const cLo = @truncate(u64, correction);
+    const cHi = @as(u64, @truncate(correction >> 64));
+    const cLo = @as(u64, @truncate(correction));
 
     wideMultiply(u128, recip64, cHi, &dummy, &r64cH);
     wideMultiply(u128, recip64, cLo, &dummy, &r64cL);
@@ -210,7 +210,7 @@ inline fn div(a: f128, b: f128) f128 {
 
     if (writtenExponent >= maxExponent) {
         // If we have overflowed the exponent, return infinity.
-        return @bitCast(f128, infRep | quotientSign);
+        return @as(f128, @bitCast(infRep | quotientSign));
     } else if (writtenExponent < 1) {
         if (writtenExponent == 0) {
             // Check whether the rounded result is normal.
@@ -221,22 +221,22 @@ inline fn div(a: f128, b: f128) f128 {
             absResult += round;
             if ((absResult & ~significandMask) > 0) {
                 // The rounded result is normal; return it.
-                return @bitCast(f128, absResult | quotientSign);
+                return @as(f128, @bitCast(absResult | quotientSign));
             }
         }
         // Flush denormals to zero.  In the future, it would be nice to add
         // code to round them correctly.
-        return @bitCast(f128, quotientSign);
+        return @as(f128, @bitCast(quotientSign));
     } else {
         const round = @intFromBool((residual << 1) >= bSignificand);
         // Clear the implicit bit
         var absResult = quotient & significandMask;
         // Insert the exponent
-        absResult |= @intCast(Z, writtenExponent) << significandBits;
+        absResult |= @as(Z, @intCast(writtenExponent)) << significandBits;
         // Round
         absResult +%= round;
         // Insert the sign and return
-        return @bitCast(f128, absResult | quotientSign);
+        return @as(f128, @bitCast(absResult | quotientSign));
     }
 }
 
lib/compiler_rt/divtf3_test.zig
@@ -5,9 +5,9 @@ const testing = std.testing;
 const __divtf3 = @import("divtf3.zig").__divtf3;
 
 fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool {
-    const rep = @bitCast(u128, result);
-    const hi = @truncate(u64, rep >> 64);
-    const lo = @truncate(u64, rep);
+    const rep = @as(u128, @bitCast(result));
+    const hi = @as(u64, @truncate(rep >> 64));
+    const lo = @as(u64, @truncate(rep));
 
     if (hi == expectedHi and lo == expectedLo) {
         return true;
lib/compiler_rt/divti3.zig
@@ -21,7 +21,7 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
 const v128 = @Vector(2, u64);
 
 fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
-    return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b)));
+    return @as(v128, @bitCast(div(@as(i128, @bitCast(a)), @as(i128, @bitCast(b)))));
 }
 
 inline fn div(a: i128, b: i128) i128 {
@@ -31,9 +31,9 @@ inline fn div(a: i128, b: i128) i128 {
     const an = (a ^ s_a) -% s_a;
     const bn = (b ^ s_b) -% s_b;
 
-    const r = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), null);
+    const r = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), null);
     const s = s_a ^ s_b;
-    return (@bitCast(i128, r) ^ s) -% s;
+    return (@as(i128, @bitCast(r)) ^ s) -% s;
 }
 
 test {
lib/compiler_rt/divti3_test.zig
@@ -14,8 +14,8 @@ test "divti3" {
     try test__divti3(-2, 1, -2);
     try test__divti3(-2, -1, 2);
 
-    try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), 1, @bitCast(i128, @as(u128, 0x8 << 124)));
-    try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), -1, @bitCast(i128, @as(u128, 0x8 << 124)));
-    try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), -2, @bitCast(i128, @as(u128, 0x4 << 124)));
-    try test__divti3(@bitCast(i128, @as(u128, 0x8 << 124)), 2, @bitCast(i128, @as(u128, 0xc << 124)));
+    try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), 1, @as(i128, @bitCast(@as(u128, 0x8 << 124))));
+    try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), -1, @as(i128, @bitCast(@as(u128, 0x8 << 124))));
+    try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), -2, @as(i128, @bitCast(@as(u128, 0x4 << 124))));
+    try test__divti3(@as(i128, @bitCast(@as(u128, 0x8 << 124))), 2, @as(i128, @bitCast(@as(u128, 0xc << 124))));
 }
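
The div helper above handles signs without branching: s_a is all-ones exactly when a is negative, so (a ^ s_a) -% s_a conditionally negates. A minimal sketch of the trick (hypothetical helper name, not part of this commit):

    const std = @import("std");

    // xor with the sign mask and subtract it: |a| in two's complement,
    // with no branch. -% gives defined wraparound at the extremes.
    fn branchlessAbs(a: i128) u128 {
        const s = a >> 127; // all ones if negative, all zeros otherwise
        return @as(u128, @bitCast((a ^ s) -% s));
    }

    test "branchless absolute value" {
        try std.testing.expectEqual(@as(u128, 42), branchlessAbs(-42));
        try std.testing.expectEqual(@as(u128, 42), branchlessAbs(42));
    }
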
lib/compiler_rt/divxf3.zig
@@ -29,53 +29,53 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
     const significandMask = (@as(Z, 1) << significandBits) - 1;
 
     const absMask = signBit - 1;
-    const qnanRep = @bitCast(Z, std.math.nan(T)) | quietBit;
-    const infRep = @bitCast(Z, std.math.inf(T));
+    const qnanRep = @as(Z, @bitCast(std.math.nan(T))) | quietBit;
+    const infRep = @as(Z, @bitCast(std.math.inf(T)));
 
-    const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
-    const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
-    const quotientSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
+    const aExponent = @as(u32, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent));
+    const bExponent = @as(u32, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent));
+    const quotientSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit;
 
-    var aSignificand: Z = @bitCast(Z, a) & significandMask;
-    var bSignificand: Z = @bitCast(Z, b) & significandMask;
+    var aSignificand: Z = @as(Z, @bitCast(a)) & significandMask;
+    var bSignificand: Z = @as(Z, @bitCast(b)) & significandMask;
     var scale: i32 = 0;
 
     // Detect if a or b is zero, denormal, infinity, or NaN.
     if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
-        const aAbs: Z = @bitCast(Z, a) & absMask;
-        const bAbs: Z = @bitCast(Z, b) & absMask;
+        const aAbs: Z = @as(Z, @bitCast(a)) & absMask;
+        const bAbs: Z = @as(Z, @bitCast(b)) & absMask;
 
         // NaN / anything = qNaN
-        if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit);
+        if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit));
         // anything / NaN = qNaN
-        if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit);
+        if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit));
 
         if (aAbs == infRep) {
             // infinity / infinity = NaN
             if (bAbs == infRep) {
-                return @bitCast(T, qnanRep);
+                return @as(T, @bitCast(qnanRep));
             }
             // infinity / anything else = +/- infinity
             else {
-                return @bitCast(T, aAbs | quotientSign);
+                return @as(T, @bitCast(aAbs | quotientSign));
             }
         }
 
         // anything else / infinity = +/- 0
-        if (bAbs == infRep) return @bitCast(T, quotientSign);
+        if (bAbs == infRep) return @as(T, @bitCast(quotientSign));
 
         if (aAbs == 0) {
             // zero / zero = NaN
             if (bAbs == 0) {
-                return @bitCast(T, qnanRep);
+                return @as(T, @bitCast(qnanRep));
             }
             // zero / anything else = +/- zero
             else {
-                return @bitCast(T, quotientSign);
+                return @as(T, @bitCast(quotientSign));
             }
         }
         // anything else / zero = +/- infinity
-        if (bAbs == 0) return @bitCast(T, infRep | quotientSign);
+        if (bAbs == 0) return @as(T, @bitCast(infRep | quotientSign));
 
        // One or both of a and b is denormal; the other (if applicable) is a
         // normal number.  Renormalize one or both of a and b, and set scale to
@@ -83,13 +83,13 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
         if (aAbs < integerBit) scale +%= normalize(T, &aSignificand);
         if (bAbs < integerBit) scale -%= normalize(T, &bSignificand);
     }
-    var quotientExponent: i32 = @bitCast(i32, aExponent -% bExponent) +% scale;
+    var quotientExponent: i32 = @as(i32, @bitCast(aExponent -% bExponent)) +% scale;
 
     // Align the significand of b as a Q63 fixed-point number in the range
     // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax
     // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2.  This
     // is accurate to about 3.5 binary digits.
-    const q63b = @intCast(u64, bSignificand);
+    const q63b = @as(u64, @intCast(bSignificand));
     var recip64 = @as(u64, 0x7504f333F9DE6484) -% q63b;
     // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2)
 
@@ -100,16 +100,16 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
     // This doubles the number of correct binary digits in the approximation
     // with each iteration.
     var correction64: u64 = undefined;
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
-    correction64 = @truncate(u64, ~(@as(u128, recip64) *% q63b >> 64) +% 1);
-    recip64 = @truncate(u64, @as(u128, recip64) *% correction64 >> 63);
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
+    correction64 = @as(u64, @truncate(~(@as(u128, recip64) *% q63b >> 64) +% 1));
+    recip64 = @as(u64, @truncate(@as(u128, recip64) *% correction64 >> 63));
 
     // The reciprocal may have overflowed to zero if the upper half of b is
     // exactly 1.0.  This would sabotage the full-width final stage of the
@@ -128,8 +128,8 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
 
     correction = -%correction;
 
-    const cHi = @truncate(u64, correction >> 64);
-    const cLo = @truncate(u64, correction);
+    const cHi = @as(u64, @truncate(correction >> 64));
+    const cLo = @as(u64, @truncate(correction));
 
     var r64cH: u128 = undefined;
     var r64cL: u128 = undefined;
@@ -164,8 +164,8 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
     // exponent accordingly.
     var quotient: u64 = if (quotient128 < (integerBit << 1)) b: {
         quotientExponent -= 1;
-        break :b @intCast(u64, quotient128);
-    } else @intCast(u64, quotient128 >> 1);
+        break :b @as(u64, @intCast(quotient128));
+    } else @as(u64, @intCast(quotient128 >> 1));
 
     // We are going to compute a residual of the form
     //
@@ -182,26 +182,26 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
     const writtenExponent = quotientExponent + exponentBias;
     if (writtenExponent >= maxExponent) {
         // If we have overflowed the exponent, return infinity.
-        return @bitCast(T, infRep | quotientSign);
+        return @as(T, @bitCast(infRep | quotientSign));
     } else if (writtenExponent < 1) {
         if (writtenExponent == 0) {
             // Check whether the rounded result is normal.
             if (residual > (bSignificand >> 1)) { // round
                 if (quotient == (integerBit - 1)) // If the rounded result is normal, return it
-                    return @bitCast(T, @bitCast(Z, std.math.floatMin(T)) | quotientSign);
+                    return @as(T, @bitCast(@as(Z, @bitCast(std.math.floatMin(T))) | quotientSign));
             }
         }
         // Flush denormals to zero.  In the future, it would be nice to add
         // code to round them correctly.
-        return @bitCast(T, quotientSign);
+        return @as(T, @bitCast(quotientSign));
     } else {
         const round = @intFromBool(residual > (bSignificand >> 1));
         // Insert the exponent
-        var absResult = quotient | (@intCast(Z, writtenExponent) << significandBits);
+        var absResult = quotient | (@as(Z, @intCast(writtenExponent)) << significandBits);
         // Round
         absResult +%= round;
         // Insert the sign and return
-        return @bitCast(T, absResult | quotientSign | integerBit);
+        return @as(T, @bitCast(absResult | quotientSign | integerBit));
     }
 }
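
For reference, a minimal f64 sketch of the reciprocal refinement that __divxf3 performs above in Q63/Q64 fixed point: seed with the minimax estimate 3/4 + 1/sqrt(2) - b/2, then iterate r <- r * (2 - b*r), each step roughly doubling the number of correct bits (hypothetical test, not part of this commit):

    const std = @import("std");

    test "Newton-Raphson reciprocal refinement" {
        const b: f64 = 1.7; // assumed in [1.0, 2.0), like the aligned significand
        // minimax seed: 3/4 + 1/sqrt(2) - b/2, good to ~3.5 bits
        var r: f64 = 0.75 + 0.70710678118654752 - b / 2.0;
        var i: usize = 0;
        while (i < 5) : (i += 1) r = r * (2.0 - b * r);
        try std.testing.expect(@fabs(r * b - 1.0) < 1e-15);
    }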
 
lib/compiler_rt/divxf3_test.zig
@@ -5,11 +5,11 @@ const testing = std.testing;
 const __divxf3 = @import("divxf3.zig").__divxf3;
 
 fn compareResult(result: f80, expected: u80) bool {
-    const rep = @bitCast(u80, result);
+    const rep = @as(u80, @bitCast(result));
 
     if (rep == expected) return true;
     // test other possible NaN representations (signaling NaN)
-    if (math.isNan(result) and math.isNan(@bitCast(f80, expected))) return true;
+    if (math.isNan(result) and math.isNan(@as(f80, @bitCast(expected)))) return true;
 
     return false;
 }
@@ -25,9 +25,9 @@ fn test__divxf3(a: f80, b: f80) !void {
     const x = __divxf3(a, b);
 
     // Next float (assuming normal, non-zero result)
-    const x_plus_eps = @bitCast(f80, (@bitCast(u80, x) + 1) | integerBit);
+    const x_plus_eps = @as(f80, @bitCast((@as(u80, @bitCast(x)) + 1) | integerBit));
     // Prev float (assuming normal, non-zero result)
-    const x_minus_eps = @bitCast(f80, (@bitCast(u80, x) - 1) | integerBit);
+    const x_minus_eps = @as(f80, @bitCast((@as(u80, @bitCast(x)) - 1) | integerBit));
 
     // Make sure result is more accurate than the adjacent floats
     const err_x = @fabs(@mulAdd(f80, x, b, -a));
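
The test above checks accuracy by comparing residuals: @mulAdd evaluates x*b - a with a single rounding, so a correctly rounded quotient has a residual no larger than its neighbors'. A minimal f64 sketch of the same idea (hypothetical test, not part of this commit):

    const std = @import("std");

    test "division residual via fused multiply-add" {
        const a: f64 = 1.0;
        const b: f64 = 3.0;
        const x = a / b;
        // next float up from x (x is positive and normal)
        const x_up = @as(f64, @bitCast(@as(u64, @bitCast(x)) + 1));
        const err_x = @fabs(@mulAdd(f64, x, b, -a));
        const err_up = @fabs(@mulAdd(f64, x_up, b, -a));
        try std.testing.expect(err_x <= err_up);
    }
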
lib/compiler_rt/emutls.zig
@@ -33,18 +33,14 @@ pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque {
 const simple_allocator = struct {
     /// Allocate a memory chunk for the requested type. Return a pointer to the data.
     pub fn alloc(comptime T: type) *T {
-        return @ptrCast(*T, @alignCast(
-            @alignOf(T),
-            advancedAlloc(@alignOf(T), @sizeOf(T)),
-        ));
+        return @ptrCast(@alignCast(advancedAlloc(@alignOf(T), @sizeOf(T))));
     }
 
     /// Allocate a slice of T, with len elements.
     pub fn allocSlice(comptime T: type, len: usize) []T {
-        return @ptrCast([*]T, @alignCast(
-            @alignOf(T),
+        return @as([*]T, @ptrCast(@alignCast(
             advancedAlloc(@alignOf(T), @sizeOf(T) * len),
-        ))[0 .. len - 1];
+        )))[0 .. len - 1];
     }
 
     /// Allocate a memory chunk.
@@ -56,22 +52,19 @@ const simple_allocator = struct {
             abort();
         }
 
-        return @ptrCast([*]u8, aligned_ptr);
+        return @as([*]u8, @ptrCast(aligned_ptr));
     }
 
     /// Resize a slice.
     pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T {
-        var c_ptr: *anyopaque = @ptrCast(*anyopaque, slice.ptr);
-        var new_array: [*]T = @ptrCast([*]T, @alignCast(
-            @alignOf(T),
-            std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort(),
-        ));
+        var c_ptr: *anyopaque = @as(*anyopaque, @ptrCast(slice.ptr));
+        var new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort()));
         return new_array[0..len];
     }
 
     /// Free a memory chunk allocated with simple_allocator.
     pub fn free(ptr: anytype) void {
-        std.c.free(@ptrCast(*anyopaque, ptr));
+        std.c.free(@as(*anyopaque, @ptrCast(ptr)));
     }
 };
 
@@ -132,20 +125,20 @@ const ObjectArray = struct {
         if (self.slots[index] == null) {
             // initialize the slot
             const size = control.size;
-            const alignment = @truncate(u29, control.alignment);
+            const alignment = @as(u29, @truncate(control.alignment));
 
             var data = simple_allocator.advancedAlloc(alignment, size);
             errdefer simple_allocator.free(data);
 
             if (control.default_value) |value| {
                 // default value: copy the content to the newly allocated object.
-                @memcpy(data[0..size], @ptrCast([*]const u8, value));
+                @memcpy(data[0..size], @as([*]const u8, @ptrCast(value)));
             } else {
                 // no default: return zeroed memory.
                 @memset(data[0..size], 0);
             }
 
-            self.slots[index] = @ptrCast(*anyopaque, data);
+            self.slots[index] = @as(*anyopaque, @ptrCast(data));
         }
 
         return self.slots[index].?;
@@ -180,18 +173,12 @@ const current_thread_storage = struct {
 
     /// Return casted thread specific value.
     fn getspecific() ?*ObjectArray {
-        return @ptrCast(
-            ?*ObjectArray,
-            @alignCast(
-                @alignOf(ObjectArray),
-                std.c.pthread_getspecific(current_thread_storage.key),
-            ),
-        );
+        return @ptrCast(@alignCast(std.c.pthread_getspecific(current_thread_storage.key)));
     }
 
     /// Set casted thread specific value.
     fn setspecific(new: ?*ObjectArray) void {
-        if (std.c.pthread_setspecific(current_thread_storage.key, @ptrCast(*anyopaque, new)) != 0) {
+        if (std.c.pthread_setspecific(current_thread_storage.key, @as(*anyopaque, @ptrCast(new))) != 0) {
             abort();
         }
     }
@@ -205,10 +192,7 @@ const current_thread_storage = struct {
 
     /// Invoked by the pthread-specific destructor. The passed argument is the ObjectArray pointer.
     fn deinit(arrayPtr: *anyopaque) callconv(.C) void {
-        var array = @ptrCast(
-            *ObjectArray,
-            @alignCast(@alignOf(ObjectArray), arrayPtr),
-        );
+        var array: *ObjectArray = @ptrCast(@alignCast(arrayPtr));
         array.deinit();
     }
 };
@@ -294,7 +278,7 @@ const emutls_control = extern struct {
             .size = @sizeOf(T),
             .alignment = @alignOf(T),
             .object = .{ .index = 0 },
-            .default_value = @ptrCast(?*const anyopaque, default_value),
+            .default_value = @as(?*const anyopaque, @ptrCast(default_value)),
         };
     }
 
@@ -313,10 +297,7 @@ const emutls_control = extern struct {
     pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T {
         assert(self.size == @sizeOf(T));
         assert(self.alignment == @alignOf(T));
-        return @ptrCast(
-            *T,
-            @alignCast(@alignOf(T), self.getPointer()),
-        );
+        return @ptrCast(@alignCast(self.getPointer()));
     }
 };
 
@@ -343,7 +324,7 @@ test "__emutls_get_address zeroed" {
     try expect(ctl.object.index == 0);
 
     // retrieve a variable from ctl
-    var x = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl)));
+    var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
     try expect(ctl.object.index != 0); // index has been allocated for this ctl
     try expect(x.* == 0); // storage has been zeroed
 
@@ -351,7 +332,7 @@ test "__emutls_get_address zeroed" {
     x.* = 1234;
 
     // retrieve a variable from ctl (same ctl)
-    var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl)));
+    var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
 
     try expect(y.* == 1234); // same content as x.*
     try expect(x == y); // same pointer
@@ -364,7 +345,7 @@ test "__emutls_get_address with default_value" {
     var ctl = emutls_control.init(usize, &value);
     try expect(ctl.object.index == 0);
 
-    var x: *usize = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl)));
+    var x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
     try expect(ctl.object.index != 0);
     try expect(x.* == 5678); // storage initialized with default value
 
@@ -373,7 +354,7 @@ test "__emutls_get_address with default_value" {
 
     try expect(value == 5678); // the default value didn't change
 
-    var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl)));
+    var y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
     try expect(y.* == 9012); // the modified storage persists
 }
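
The emutls rewrites above rely on the new inferred-result-type pointer casts: the destination type now comes from the declaration, so @ptrCast and @alignCast take only the operand. A minimal sketch (hypothetical test, not part of this commit):

    const std = @import("std");

    test "inferred @ptrCast/@alignCast" {
        var storage: usize = 0;
        // erase the type, as pthread_getspecific/realloc do above
        const erased: *anyopaque = @ptrCast(&storage);
        // recover it; both result types are inferred from the declaration
        const typed: *usize = @ptrCast(@alignCast(erased));
        typed.* = 1234;
        try std.testing.expectEqual(@as(usize, 1234), storage);
    }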
 
lib/compiler_rt/exp.zig
@@ -27,7 +27,7 @@ comptime {
 
 pub fn __exph(a: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, expf(a));
+    return @as(f16, @floatCast(expf(a)));
 }
 
 pub fn expf(x_: f32) callconv(.C) f32 {
@@ -39,8 +39,8 @@ pub fn expf(x_: f32) callconv(.C) f32 {
     const P2 = -2.7667332906e-3;
 
     var x = x_;
-    var hx = @bitCast(u32, x);
-    const sign = @intCast(i32, hx >> 31);
+    var hx = @as(u32, @bitCast(x));
+    const sign = @as(i32, @intCast(hx >> 31));
     hx &= 0x7FFFFFFF;
 
     if (math.isNan(x)) {
@@ -74,12 +74,12 @@ pub fn expf(x_: f32) callconv(.C) f32 {
     if (hx > 0x3EB17218) {
         // |x| > 1.5 * ln2
         if (hx > 0x3F851592) {
-            k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]);
+            k = @as(i32, @intFromFloat(invln2 * x + half[@as(usize, @intCast(sign))]));
         } else {
             k = 1 - sign - sign;
         }
 
-        const fk = @floatFromInt(f32, k);
+        const fk = @as(f32, @floatFromInt(k));
         hi = x - fk * ln2hi;
         lo = fk * ln2lo;
         x = hi - lo;
@@ -117,9 +117,9 @@ pub fn exp(x_: f64) callconv(.C) f64 {
     const P5: f64 = 4.13813679705723846039e-08;
 
     var x = x_;
-    var ux = @bitCast(u64, x);
+    var ux = @as(u64, @bitCast(x));
     var hx = ux >> 32;
-    const sign = @intCast(i32, hx >> 31);
+    const sign = @as(i32, @intCast(hx >> 31));
     hx &= 0x7FFFFFFF;
 
     if (math.isNan(x)) {
@@ -157,12 +157,12 @@ pub fn exp(x_: f64) callconv(.C) f64 {
     if (hx > 0x3FD62E42) {
         // |x| >= 1.5 * ln2
         if (hx > 0x3FF0A2B2) {
-            k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]);
+            k = @as(i32, @intFromFloat(invln2 * x + half[@as(usize, @intCast(sign))]));
         } else {
             k = 1 - sign - sign;
         }
 
-        const dk = @floatFromInt(f64, k);
+        const dk = @as(f64, @floatFromInt(k));
         hi = x - dk * ln2hi;
         lo = dk * ln2lo;
         x = hi - lo;
@@ -191,12 +191,12 @@ pub fn exp(x_: f64) callconv(.C) f64 {
 
 pub fn __expx(a: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, expq(a));
+    return @as(f80, @floatCast(expq(a)));
 }
 
 pub fn expq(a: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return exp(@floatCast(f64, a));
+    return exp(@as(f64, @floatCast(a)));
 }
 
 pub fn expl(x: c_longdouble) callconv(.C) c_longdouble {
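
The k/hi/lo machinery in expf and exp above is additive range reduction: write x = k*ln2 + r with small r, so exp(x) = 2^k * exp(r) and only exp(r) needs the polynomial. A minimal sketch of the identity, using @exp in place of the polynomial and an illustrative tolerance (hypothetical test, not part of this commit):

    const std = @import("std");

    test "exp range reduction identity" {
        const x: f64 = 10.3;
        const k = @round(x / std.math.ln2); // integer multiple of ln2
        const r = x - k * std.math.ln2; // reduced argument, |r| <= ln2/2
        const reconstructed = std.math.scalbn(@exp(r), @as(i32, @intFromFloat(k)));
        try std.testing.expect(@fabs(reconstructed - @exp(x)) < 1e-12 * @exp(x));
    }
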
lib/compiler_rt/exp2.zig
@@ -27,18 +27,18 @@ comptime {
 
 pub fn __exp2h(x: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, exp2f(x));
+    return @as(f16, @floatCast(exp2f(x)));
 }
 
 pub fn exp2f(x: f32) callconv(.C) f32 {
-    const tblsiz = @intCast(u32, exp2ft.len);
-    const redux: f32 = 0x1.8p23 / @floatFromInt(f32, tblsiz);
+    const tblsiz = @as(u32, @intCast(exp2ft.len));
+    const redux: f32 = 0x1.8p23 / @as(f32, @floatFromInt(tblsiz));
     const P1: f32 = 0x1.62e430p-1;
     const P2: f32 = 0x1.ebfbe0p-3;
     const P3: f32 = 0x1.c6b348p-5;
     const P4: f32 = 0x1.3b2c9cp-7;
 
-    var u = @bitCast(u32, x);
+    var u = @as(u32, @bitCast(x));
     const ix = u & 0x7FFFFFFF;
 
     // |x| > 126
@@ -72,32 +72,32 @@ pub fn exp2f(x: f32) callconv(.C) f32 {
     // intended result, but we should confirm how GCC/Clang handle this to be sure.
 
     var uf = x + redux;
-    var i_0 = @bitCast(u32, uf);
+    var i_0 = @as(u32, @bitCast(uf));
     i_0 +%= tblsiz / 2;
 
     const k = i_0 / tblsiz;
-    const uk = @bitCast(f64, @as(u64, 0x3FF + k) << 52);
+    const uk = @as(f64, @bitCast(@as(u64, 0x3FF + k) << 52));
     i_0 &= tblsiz - 1;
     uf -= redux;
 
     const z: f64 = x - uf;
-    var r: f64 = exp2ft[@intCast(usize, i_0)];
+    var r: f64 = exp2ft[@as(usize, @intCast(i_0))];
     const t: f64 = r * z;
     r = r + t * (P1 + z * P2) + t * (z * z) * (P3 + z * P4);
-    return @floatCast(f32, r * uk);
+    return @as(f32, @floatCast(r * uk));
 }
 
 pub fn exp2(x: f64) callconv(.C) f64 {
-    const tblsiz: u32 = @intCast(u32, exp2dt.len / 2);
-    const redux: f64 = 0x1.8p52 / @floatFromInt(f64, tblsiz);
+    const tblsiz: u32 = @as(u32, @intCast(exp2dt.len / 2));
+    const redux: f64 = 0x1.8p52 / @as(f64, @floatFromInt(tblsiz));
     const P1: f64 = 0x1.62e42fefa39efp-1;
     const P2: f64 = 0x1.ebfbdff82c575p-3;
     const P3: f64 = 0x1.c6b08d704a0a6p-5;
     const P4: f64 = 0x1.3b2ab88f70400p-7;
     const P5: f64 = 0x1.5d88003875c74p-10;
 
-    const ux = @bitCast(u64, x);
-    const ix = @intCast(u32, ux >> 32) & 0x7FFFFFFF;
+    const ux = @as(u64, @bitCast(x));
+    const ix = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF;
 
     // TODO: This should be handled below.
     if (math.isNan(x)) {
@@ -119,7 +119,7 @@ pub fn exp2(x: f64) callconv(.C) f64 {
         if (ux >> 63 != 0) {
             // underflow
             if (x <= -1075 or x - 0x1.0p52 + 0x1.0p52 != x) {
-                math.doNotOptimizeAway(@floatCast(f32, -0x1.0p-149 / x));
+                math.doNotOptimizeAway(@as(f32, @floatCast(-0x1.0p-149 / x)));
             }
             if (x <= -1075) {
                 return 0;
@@ -139,18 +139,18 @@ pub fn exp2(x: f64) callconv(.C) f64 {
     // reduce x
     var uf: f64 = x + redux;
     // NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here
-    var i_0: u32 = @truncate(u32, @bitCast(u64, uf));
+    var i_0: u32 = @as(u32, @truncate(@as(u64, @bitCast(uf))));
     i_0 +%= tblsiz / 2;
 
     const k: u32 = i_0 / tblsiz * tblsiz;
-    const ik: i32 = @divTrunc(@bitCast(i32, k), tblsiz);
+    const ik: i32 = @divTrunc(@as(i32, @bitCast(k)), tblsiz);
     i_0 %= tblsiz;
     uf -= redux;
 
     // r = exp2(y) = exp2t[i_0] * p(z - eps[i])
     var z: f64 = x - uf;
-    const t: f64 = exp2dt[@intCast(usize, 2 * i_0)];
-    z -= exp2dt[@intCast(usize, 2 * i_0 + 1)];
+    const t: f64 = exp2dt[@as(usize, @intCast(2 * i_0))];
+    z -= exp2dt[@as(usize, @intCast(2 * i_0 + 1))];
     const r: f64 = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5))));
 
     return math.scalbn(r, ik);
@@ -158,12 +158,12 @@ pub fn exp2(x: f64) callconv(.C) f64 {
 
 pub fn __exp2x(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, exp2q(x));
+    return @as(f80, @floatCast(exp2q(x)));
 }
 
 pub fn exp2q(x: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return exp2(@floatCast(f64, x));
+    return exp2(@as(f64, @floatCast(x)));
 }
 
 pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble {
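
The uk computation in exp2f above builds 2^k directly from bits: writing a biased exponent into the f64 exponent field produces an exact power of two with no arithmetic. A minimal sketch (hypothetical test, not part of this commit):

    const std = @import("std");

    test "construct a power of two from exponent bits" {
        const k: u64 = 7;
        // biased exponent 0x3FF + k shifted into bits 52..62, mantissa zero
        const two_pow_k = @as(f64, @bitCast((@as(u64, 0x3FF) + k) << 52));
        try std.testing.expectEqual(@as(f64, 128.0), two_pow_k);
    }
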
lib/compiler_rt/extenddftf2.zig
@@ -13,9 +13,9 @@ comptime {
 }
 
 pub fn __extenddftf2(a: f64) callconv(.C) f128 {
-    return extendf(f128, f64, @bitCast(u64, a));
+    return extendf(f128, f64, @as(u64, @bitCast(a)));
 }
 
 fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
-    c.* = extendf(f128, f64, @bitCast(u64, a));
+    c.* = extendf(f128, f64, @as(u64, @bitCast(a)));
 }
lib/compiler_rt/extenddfxf2.zig
@@ -8,5 +8,5 @@ comptime {
 }
 
 pub fn __extenddfxf2(a: f64) callconv(.C) f80 {
-    return extend_f80(f64, @bitCast(u64, a));
+    return extend_f80(f64, @as(u64, @bitCast(a)));
 }
lib/compiler_rt/extendf.zig
@@ -33,7 +33,7 @@ pub inline fn extendf(
     const dstMinNormal: dst_rep_t = @as(dst_rep_t, 1) << dstSigBits;
 
     // Break a into a sign and representation of the absolute value
-    const aRep: src_rep_t = @bitCast(src_rep_t, a);
+    const aRep: src_rep_t = @as(src_rep_t, @bitCast(a));
     const aAbs: src_rep_t = aRep & srcAbsMask;
     const sign: src_rep_t = aRep & srcSignMask;
     var absResult: dst_rep_t = undefined;
@@ -58,10 +58,10 @@ pub inline fn extendf(
         // the correct adjusted exponent in the destination type.
         const scale: u32 = @clz(aAbs) -
             @clz(@as(src_rep_t, srcMinNormal));
-        absResult = @as(dst_rep_t, aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
+        absResult = @as(dst_rep_t, aAbs) << @as(DstShift, @intCast(dstSigBits - srcSigBits + scale));
         absResult ^= dstMinNormal;
         const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1;
-        absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits;
+        absResult |= @as(dst_rep_t, @intCast(resultExponent)) << dstSigBits;
     } else {
         // a is zero.
         absResult = 0;
@@ -69,7 +69,7 @@ pub inline fn extendf(
 
     // Apply the signbit to (dst_t)abs(a).
     const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @as(dst_rep_t, sign) << (dstBits - srcBits);
-    return @bitCast(dst_t, result);
+    return @as(dst_t, @bitCast(result));
 }
 
 pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
@@ -104,7 +104,7 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI
         // a is a normal number.
         // Extend to the destination type by shifting the significand and
         // exponent into the proper position and rebiasing the exponent.
-        dst.exp = @intCast(u16, a_abs >> src_sig_bits);
+        dst.exp = @as(u16, @intCast(a_abs >> src_sig_bits));
         dst.exp += dst_exp_bias - src_exp_bias;
         dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
         dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
@@ -124,9 +124,9 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI
         const scale: u16 = @clz(a_abs) -
             @clz(@as(src_rep_t, src_min_normal));
 
-        dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
+        dst.fraction = @as(u64, a_abs) << @as(u6, @intCast(dst_sig_bits - src_sig_bits + scale));
         dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
-        dst.exp = @truncate(u16, a_abs >> @intCast(SrcShift, src_sig_bits - scale));
+        dst.exp = @as(u16, @truncate(a_abs >> @as(SrcShift, @intCast(src_sig_bits - scale))));
         dst.exp ^= 1;
         dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
     } else {
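
For subnormal inputs, extendf and extend_f80 renormalize with @clz: the significand is shifted left until its leading bit reaches the implicit-one position, and the shift count folds into the exponent adjustment. A minimal sketch of the shift computation (hypothetical test, not part of this commit):

    const std = @import("std");

    test "renormalize a subnormal significand" {
        const src_sig_bits = 23; // f32 has 23 stored significand bits
        var sig: u32 = 0x40; // subnormal: leading bit far below the implicit-one position
        // how far the leading bit sits below the implicit-one position
        const scale = @clz(sig) - @clz(@as(u32, 1) << src_sig_bits);
        sig <<= @as(u5, @intCast(scale));
        try std.testing.expectEqual(@as(u32, 1) << src_sig_bits, sig);
    }
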
lib/compiler_rt/extendf_test.zig
@@ -11,12 +11,12 @@ const F16T = @import("./common.zig").F16T;
 fn test__extenddfxf2(a: f64, expected: u80) !void {
     const x = __extenddfxf2(a);
 
-    const rep = @bitCast(u80, x);
+    const rep = @as(u80, @bitCast(x));
     if (rep == expected)
         return;
 
     // test other possible NaN representations (signaling NaN)
-    if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
+    if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x))
         return;
 
     @panic("__extenddfxf2 test failure");
@@ -25,9 +25,9 @@ fn test__extenddfxf2(a: f64, expected: u80) !void {
 fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void {
     const x = __extenddftf2(a);
 
-    const rep = @bitCast(u128, x);
-    const hi = @intCast(u64, rep >> 64);
-    const lo = @truncate(u64, rep);
+    const rep = @as(u128, @bitCast(x));
+    const hi = @as(u64, @intCast(rep >> 64));
+    const lo = @as(u64, @truncate(rep));
 
     if (hi == expected_hi and lo == expected_lo)
         return;
@@ -45,14 +45,14 @@ fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void {
 }
 
 fn test__extendhfsf2(a: u16, expected: u32) !void {
-    const x = __extendhfsf2(@bitCast(F16T(f32), a));
-    const rep = @bitCast(u32, x);
+    const x = __extendhfsf2(@as(F16T(f32), @bitCast(a)));
+    const rep = @as(u32, @bitCast(x));
 
     if (rep == expected) {
         if (rep & 0x7fffffff > 0x7f800000) {
             return; // NaN is always unequal.
         }
-        if (x == @bitCast(f32, expected)) {
+        if (x == @as(f32, @bitCast(expected))) {
             return;
         }
     }
@@ -63,9 +63,9 @@ fn test__extendhfsf2(a: u16, expected: u32) !void {
 fn test__extendsftf2(a: f32, expected_hi: u64, expected_lo: u64) !void {
     const x = __extendsftf2(a);
 
-    const rep = @bitCast(u128, x);
-    const hi = @intCast(u64, rep >> 64);
-    const lo = @truncate(u64, rep);
+    const rep = @as(u128, @bitCast(x));
+    const hi = @as(u64, @intCast(rep >> 64));
+    const lo = @as(u64, @truncate(rep));
 
     if (hi == expected_hi and lo == expected_lo)
         return;
@@ -184,35 +184,35 @@ test "extendsftf2" {
 }
 
 fn makeQNaN64() f64 {
-    return @bitCast(f64, @as(u64, 0x7ff8000000000000));
+    return @as(f64, @bitCast(@as(u64, 0x7ff8000000000000)));
 }
 
 fn makeInf64() f64 {
-    return @bitCast(f64, @as(u64, 0x7ff0000000000000));
+    return @as(f64, @bitCast(@as(u64, 0x7ff0000000000000)));
 }
 
 fn makeNaN64(rand: u64) f64 {
-    return @bitCast(f64, 0x7ff0000000000000 | (rand & 0xfffffffffffff));
+    return @as(f64, @bitCast(0x7ff0000000000000 | (rand & 0xfffffffffffff)));
 }
 
 fn makeQNaN32() f32 {
-    return @bitCast(f32, @as(u32, 0x7fc00000));
+    return @as(f32, @bitCast(@as(u32, 0x7fc00000)));
 }
 
 fn makeNaN32(rand: u32) f32 {
-    return @bitCast(f32, 0x7f800000 | (rand & 0x7fffff));
+    return @as(f32, @bitCast(0x7f800000 | (rand & 0x7fffff)));
 }
 
 fn makeInf32() f32 {
-    return @bitCast(f32, @as(u32, 0x7f800000));
+    return @as(f32, @bitCast(@as(u32, 0x7f800000)));
 }
 
 fn test__extendhftf2(a: u16, expected_hi: u64, expected_lo: u64) !void {
-    const x = __extendhftf2(@bitCast(F16T(f128), a));
+    const x = __extendhftf2(@as(F16T(f128), @bitCast(a)));
 
-    const rep = @bitCast(u128, x);
-    const hi = @intCast(u64, rep >> 64);
-    const lo = @truncate(u64, rep);
+    const rep = @as(u128, @bitCast(x));
+    const hi = @as(u64, @intCast(rep >> 64));
+    const lo = @as(u64, @truncate(rep));
 
     if (hi == expected_hi and lo == expected_lo)
         return;
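
The tests above compare 128-bit representations as two 64-bit halves: a right shift extracts the high half, and @truncate keeps the low one. A minimal sketch (hypothetical test, not part of this commit):

    const std = @import("std");

    test "split a u128 into 64-bit halves" {
        const rep: u128 = 0x0123456789ABCDEF_FEDCBA9876543210;
        const hi = @as(u64, @intCast(rep >> 64)); // shift keeps it in u64 range
        const lo = @as(u64, @truncate(rep)); // discard the upper 64 bits
        try std.testing.expectEqual(@as(u64, 0x0123456789ABCDEF), hi);
        try std.testing.expectEqual(@as(u64, 0xFEDCBA9876543210), lo);
    }
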
lib/compiler_rt/extendhfdf2.zig
@@ -8,5 +8,5 @@ comptime {
 }
 
 pub fn __extendhfdf2(a: common.F16T(f64)) callconv(.C) f64 {
-    return extendf(f64, f16, @bitCast(u16, a));
+    return extendf(f64, f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendhfsf2.zig
@@ -13,13 +13,13 @@ comptime {
 }
 
 pub fn __extendhfsf2(a: common.F16T(f32)) callconv(.C) f32 {
-    return extendf(f32, f16, @bitCast(u16, a));
+    return extendf(f32, f16, @as(u16, @bitCast(a)));
 }
 
 fn __gnu_h2f_ieee(a: common.F16T(f32)) callconv(.C) f32 {
-    return extendf(f32, f16, @bitCast(u16, a));
+    return extendf(f32, f16, @as(u16, @bitCast(a)));
 }
 
 fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
-    return extendf(f32, f16, @bitCast(u16, a));
+    return extendf(f32, f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendhftf2.zig
@@ -8,5 +8,5 @@ comptime {
 }
 
 pub fn __extendhftf2(a: common.F16T(f128)) callconv(.C) f128 {
-    return extendf(f128, f16, @bitCast(u16, a));
+    return extendf(f128, f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendhfxf2.zig
@@ -8,5 +8,5 @@ comptime {
 }
 
 fn __extendhfxf2(a: common.F16T(f80)) callconv(.C) f80 {
-    return extend_f80(f16, @bitCast(u16, a));
+    return extend_f80(f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendsfdf2.zig
@@ -12,9 +12,9 @@ comptime {
 }
 
 fn __extendsfdf2(a: f32) callconv(.C) f64 {
-    return extendf(f64, f32, @bitCast(u32, a));
+    return extendf(f64, f32, @as(u32, @bitCast(a)));
 }
 
 fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 {
-    return extendf(f64, f32, @bitCast(u32, a));
+    return extendf(f64, f32, @as(u32, @bitCast(a)));
 }
lib/compiler_rt/extendsftf2.zig
@@ -13,9 +13,9 @@ comptime {
 }
 
 pub fn __extendsftf2(a: f32) callconv(.C) f128 {
-    return extendf(f128, f32, @bitCast(u32, a));
+    return extendf(f128, f32, @as(u32, @bitCast(a)));
 }
 
 fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
-    c.* = extendf(f128, f32, @bitCast(u32, a));
+    c.* = extendf(f128, f32, @as(u32, @bitCast(a)));
 }
lib/compiler_rt/extendsfxf2.zig
@@ -8,5 +8,5 @@ comptime {
 }
 
 fn __extendsfxf2(a: f32) callconv(.C) f80 {
-    return extend_f80(f32, @bitCast(u32, a));
+    return extend_f80(f32, @as(u32, @bitCast(a)));
 }
lib/compiler_rt/extendxftf2.zig
@@ -39,12 +39,12 @@ fn __extendxftf2(a: f80) callconv(.C) f128 {
         // renormalize the significand and clear the leading bit and integer part,
         // then insert the correct adjusted exponent in the destination type.
         const scale: u32 = @clz(a_rep.fraction);
-        abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
+        abs_result = @as(u128, a_rep.fraction) << @as(u7, @intCast(dst_sig_bits - src_sig_bits + scale + 1));
         abs_result ^= dst_min_normal;
         abs_result |= @as(u128, scale + 1) << dst_sig_bits;
     }
 
     // Apply the signbit to (dst_t)abs(a).
     const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
-    return @bitCast(f128, result);
+    return @as(f128, @bitCast(result));
 }
lib/compiler_rt/fabs.zig
@@ -51,7 +51,7 @@ pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble {
 inline fn generic_fabs(x: anytype) @TypeOf(x) {
     const T = @TypeOf(x);
     const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
-    const float_bits = @bitCast(TBits, x);
+    const float_bits = @as(TBits, @bitCast(x));
     const remove_sign = ~@as(TBits, 0) >> 1;
-    return @bitCast(T, float_bits & remove_sign);
+    return @as(T, @bitCast(float_bits & remove_sign));
 }
lib/compiler_rt/ffsdi2_test.zig
@@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__ffsdi2(a: u64, expected: i32) !void {
-    var x = @bitCast(i64, a);
+    var x = @as(i64, @bitCast(a));
     var result = ffs.__ffsdi2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/ffssi2_test.zig
@@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__ffssi2(a: u32, expected: i32) !void {
-    var x = @bitCast(i32, a);
+    var x = @as(i32, @bitCast(a));
     var result = ffs.__ffssi2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/ffsti2_test.zig
@@ -2,7 +2,7 @@ const ffs = @import("count0bits.zig");
 const testing = @import("std").testing;
 
 fn test__ffsti2(a: u128, expected: i32) !void {
-    var x = @bitCast(i128, a);
+    var x = @as(i128, @bitCast(a));
     var result = ffs.__ffsti2(x);
     try testing.expectEqual(expected, result);
 }
lib/compiler_rt/fixdfti.zig
@@ -19,5 +19,5 @@ pub fn __fixdfti(a: f64) callconv(.C) i128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(i128, a));
+    return @as(v2u64, @bitCast(intFromFloat(i128, a)));
 }
lib/compiler_rt/fixhfti.zig
@@ -19,5 +19,5 @@ pub fn __fixhfti(a: f16) callconv(.C) i128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixhfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(i128, a));
+    return @as(v2u64, @bitCast(intFromFloat(i128, a)));
 }
lib/compiler_rt/fixsfti.zig
@@ -19,5 +19,5 @@ pub fn __fixsfti(a: f32) callconv(.C) i128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(i128, a));
+    return @as(v2u64, @bitCast(intFromFloat(i128, a)));
 }
lib/compiler_rt/fixtfti.zig
@@ -21,5 +21,5 @@ pub fn __fixtfti(a: f128) callconv(.C) i128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixtfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(i128, a));
+    return @as(v2u64, @bitCast(intFromFloat(i128, a)));
 }
lib/compiler_rt/fixunsdfti.zig
@@ -19,5 +19,5 @@ pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(u128, a));
+    return @as(v2u64, @bitCast(intFromFloat(u128, a)));
 }
lib/compiler_rt/fixunshfti.zig
@@ -19,5 +19,5 @@ pub fn __fixunshfti(a: f16) callconv(.C) u128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(u128, a));
+    return @as(v2u64, @bitCast(intFromFloat(u128, a)));
 }
lib/compiler_rt/fixunssfti.zig
@@ -19,5 +19,5 @@ pub fn __fixunssfti(a: f32) callconv(.C) u128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(u128, a));
+    return @as(v2u64, @bitCast(intFromFloat(u128, a)));
 }
lib/compiler_rt/fixunstfti.zig
@@ -21,5 +21,5 @@ pub fn __fixunstfti(a: f128) callconv(.C) u128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixunstfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(u128, a));
+    return @as(v2u64, @bitCast(intFromFloat(u128, a)));
 }
lib/compiler_rt/fixunsxfti.zig
@@ -19,5 +19,5 @@ pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixunsxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(u128, a));
+    return @as(v2u64, @bitCast(intFromFloat(u128, a)));
 }
lib/compiler_rt/fixxfti.zig
@@ -19,5 +19,5 @@ pub fn __fixxfti(a: f80) callconv(.C) i128 {
 const v2u64 = @Vector(2, u64);
 
 fn __fixxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
-    return @bitCast(v2u64, intFromFloat(i128, a));
+    return @as(v2u64, @bitCast(intFromFloat(i128, a)));
 }
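
The _windows_x86_64 wrappers in the fix* files above all follow one ABI shim pattern: on that target, 128-bit integers are passed and returned as @Vector(2, u64), and @bitCast converts losslessly between the two same-sized representations. A minimal sketch (hypothetical test, not part of this commit):

    const std = @import("std");

    const v128 = @Vector(2, u64);

    test "i128 round-trips through @Vector(2, u64)" {
        const x: i128 = -123456789123456789123456789;
        const v = @as(v128, @bitCast(x)); // same 128 bits, vector shape
        try std.testing.expectEqual(x, @as(i128, @bitCast(v)));
    }
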
lib/compiler_rt/float_from_int.zig
@@ -25,17 +25,17 @@ pub fn floatFromInt(comptime T: type, x: anytype) T {
     // Compute significand
     var exp = int_bits - @clz(abs_val) - 1;
     if (int_bits <= fractional_bits or exp <= fractional_bits) {
-        const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
+        const shift_amt = fractional_bits - @as(math.Log2Int(uT), @intCast(exp));
 
         // Shift up result to line up with the significand - no rounding required
-        result = (@intCast(uT, abs_val) << shift_amt);
+        result = (@as(uT, @intCast(abs_val)) << shift_amt);
         result ^= implicit_bit; // Remove implicit integer bit
     } else {
-        var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
+        var shift_amt = @as(math.Log2Int(Z), @intCast(exp - fractional_bits));
         const exact_tie: bool = @ctz(abs_val) == shift_amt - 1;
 
         // Shift down result and remove implicit integer bit
-        result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);
+        result = @as(uT, @intCast((abs_val >> (shift_amt - 1)))) ^ (implicit_bit << 1);
 
         // Round result, including round-to-even for exact ties
         result = ((result + 1) >> 1) & ~@as(uT, @intFromBool(exact_tie));
@@ -43,14 +43,14 @@ pub fn floatFromInt(comptime T: type, x: anytype) T {
 
     // Compute exponent
     if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity
-        return @bitCast(T, sign_bit | @bitCast(uT, inf));
+        return @as(T, @bitCast(sign_bit | @as(uT, @bitCast(inf))));
 
     result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T);
 
     // If the result included a carry, we need to restore the explicit integer bit
     if (T == f80) result |= 1 << fractional_bits;
 
-    return @bitCast(T, sign_bit | result);
+    return @as(T, @bitCast(sign_bit | result));
 }
 
 test {
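
The rounding step in floatFromInt above keeps one guard bit: (result + 1) >> 1 rounds half away from zero, and clearing the low bit on an exact tie turns that into round-to-nearest-even. A minimal sketch with a hypothetical helper (not part of this commit):

    const std = @import("std");

    // x carries one guard bit, so it represents x/2; exact_tie means the
    // bits discarded before the guard bit were all zero.
    fn roundHalfEven(x: u32, exact_tie: bool) u32 {
        return ((x + 1) >> 1) & ~@as(u32, @intFromBool(exact_tie));
    }

    test "round to nearest, ties to even" {
        try std.testing.expectEqual(@as(u32, 2), roundHalfEven(5, true)); // 2.5 -> 2
        try std.testing.expectEqual(@as(u32, 2), roundHalfEven(3, true)); // 1.5 -> 2
        try std.testing.expectEqual(@as(u32, 4), roundHalfEven(7, false)); // just above 3.5 -> 4
    }
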
lib/compiler_rt/float_from_int_test.zig
@@ -30,12 +30,12 @@ const __floatuntitf = @import("floatuntitf.zig").__floatuntitf;
 
 fn test__floatsisf(a: i32, expected: u32) !void {
     const r = __floatsisf(a);
-    try std.testing.expect(@bitCast(u32, r) == expected);
+    try std.testing.expect(@as(u32, @bitCast(r)) == expected);
 }
 
 fn test_one_floatunsisf(a: u32, expected: u32) !void {
     const r = __floatunsisf(a);
-    try std.testing.expect(@bitCast(u32, r) == expected);
+    try std.testing.expect(@as(u32, @bitCast(r)) == expected);
 }
 
 test "floatsisf" {
@@ -43,7 +43,7 @@ test "floatsisf" {
     try test__floatsisf(1, 0x3f800000);
     try test__floatsisf(-1, 0xbf800000);
     try test__floatsisf(0x7FFFFFFF, 0x4f000000);
-    try test__floatsisf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xcf000000);
+    try test__floatsisf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xcf000000);
 }
 
 test "floatunsisf" {
@@ -72,10 +72,10 @@ test "floatdisf" {
     try test__floatdisf(-2, -2.0);
     try test__floatdisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
     try test__floatdisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
-    try test__floatdisf(@bitCast(i64, @as(u64, 0x8000008000000000)), -0x1.FFFFFEp+62);
-    try test__floatdisf(@bitCast(i64, @as(u64, 0x8000010000000000)), -0x1.FFFFFCp+62);
-    try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000000)), -0x1.000000p+63);
-    try test__floatdisf(@bitCast(i64, @as(u64, 0x8000000000000001)), -0x1.000000p+63);
+    try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000008000000000))), -0x1.FFFFFEp+62);
+    try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000010000000000))), -0x1.FFFFFCp+62);
+    try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -0x1.000000p+63);
+    try test__floatdisf(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -0x1.000000p+63);
     try test__floatdisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
     try test__floatdisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
     try test__floatdisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
@@ -228,17 +228,17 @@ test "floatuntisf" {
     try test__floatuntisf(make_uti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76);
 
     // Test overflow to infinity
-    try test__floatuntisf(@as(u128, math.maxInt(u128)), @bitCast(f32, math.inf(f32)));
+    try test__floatuntisf(@as(u128, math.maxInt(u128)), @as(f32, @bitCast(math.inf(f32))));
 }
 
 fn test_one_floatsidf(a: i32, expected: u64) !void {
     const r = __floatsidf(a);
-    try std.testing.expect(@bitCast(u64, r) == expected);
+    try std.testing.expect(@as(u64, @bitCast(r)) == expected);
 }
 
 fn test_one_floatunsidf(a: u32, expected: u64) !void {
     const r = __floatunsidf(a);
-    try std.testing.expect(@bitCast(u64, r) == expected);
+    try std.testing.expect(@as(u64, @bitCast(r)) == expected);
 }
 
 test "floatsidf" {
@@ -246,15 +246,15 @@ test "floatsidf" {
     try test_one_floatsidf(1, 0x3ff0000000000000);
     try test_one_floatsidf(-1, 0xbff0000000000000);
     try test_one_floatsidf(0x7FFFFFFF, 0x41dfffffffc00000);
-    try test_one_floatsidf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc1e0000000000000);
+    try test_one_floatsidf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xc1e0000000000000);
 }
 
 test "floatunsidf" {
     try test_one_floatunsidf(0, 0x0000000000000000);
     try test_one_floatunsidf(1, 0x3ff0000000000000);
     try test_one_floatunsidf(0x7FFFFFFF, 0x41dfffffffc00000);
-    try test_one_floatunsidf(@intCast(u32, 0x80000000), 0x41e0000000000000);
-    try test_one_floatunsidf(@intCast(u32, 0xFFFFFFFF), 0x41efffffffe00000);
+    try test_one_floatunsidf(@as(u32, @intCast(0x80000000)), 0x41e0000000000000);
+    try test_one_floatunsidf(@as(u32, @intCast(0xFFFFFFFF)), 0x41efffffffe00000);
 }
 
 fn test__floatdidf(a: i64, expected: f64) !void {
@@ -279,12 +279,12 @@ test "floatdidf" {
     try test__floatdidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
     try test__floatdidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
     try test__floatdidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
-    try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000008000000000)), -0x1.FFFFFEp+62);
-    try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000800)), -0x1.FFFFFFFFFFFFEp+62);
-    try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000010000000000)), -0x1.FFFFFCp+62);
-    try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000001000)), -0x1.FFFFFFFFFFFFCp+62);
-    try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000000)), -0x1.000000p+63);
-    try test__floatdidf(@bitCast(i64, @intCast(u64, 0x8000000000000001)), -0x1.000000p+63); // 0x8000000000000001
+    try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000008000000000)))), -0x1.FFFFFEp+62);
+    try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000800)))), -0x1.FFFFFFFFFFFFEp+62);
+    try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000010000000000)))), -0x1.FFFFFCp+62);
+    try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000001000)))), -0x1.FFFFFFFFFFFFCp+62);
+    try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))), -0x1.000000p+63);
+    try test__floatdidf(@as(i64, @bitCast(@as(u64, @intCast(0x8000000000000001)))), -0x1.000000p+63); // 0x8000000000000001
     try test__floatdidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
     try test__floatdidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
     try test__floatdidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
@@ -505,7 +505,7 @@ test "floatuntidf" {
 
 fn test__floatsitf(a: i32, expected: u128) !void {
     const r = __floatsitf(a);
-    try std.testing.expect(@bitCast(u128, r) == expected);
+    try std.testing.expect(@as(u128, @bitCast(r)) == expected);
 }
 
 test "floatsitf" {
@@ -513,16 +513,16 @@ test "floatsitf" {
     try test__floatsitf(0x7FFFFFFF, 0x401dfffffffc00000000000000000000);
     try test__floatsitf(0x12345678, 0x401b2345678000000000000000000000);
     try test__floatsitf(-0x12345678, 0xc01b2345678000000000000000000000);
-    try test__floatsitf(@bitCast(i32, @intCast(u32, 0xffffffff)), 0xbfff0000000000000000000000000000);
-    try test__floatsitf(@bitCast(i32, @intCast(u32, 0x80000000)), 0xc01e0000000000000000000000000000);
+    try test__floatsitf(@as(i32, @bitCast(@as(u32, @intCast(0xffffffff)))), 0xbfff0000000000000000000000000000);
+    try test__floatsitf(@as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 0xc01e0000000000000000000000000000);
 }
 
 fn test__floatunsitf(a: u32, expected_hi: u64, expected_lo: u64) !void {
     const x = __floatunsitf(a);
 
-    const x_repr = @bitCast(u128, x);
-    const x_hi = @intCast(u64, x_repr >> 64);
-    const x_lo = @truncate(u64, x_repr);
+    const x_repr = @as(u128, @bitCast(x));
+    const x_hi = @as(u64, @intCast(x_repr >> 64));
+    const x_lo = @as(u64, @truncate(x_repr));
 
     if (x_hi == expected_hi and x_lo == expected_lo) {
         return;
@@ -552,9 +552,9 @@ fn test__floatditf(a: i64, expected: f128) !void {
 fn test__floatunditf(a: u64, expected_hi: u64, expected_lo: u64) !void {
     const x = __floatunditf(a);
 
-    const x_repr = @bitCast(u128, x);
-    const x_hi = @intCast(u64, x_repr >> 64);
-    const x_lo = @truncate(u64, x_repr);
+    const x_repr = @as(u128, @bitCast(x));
+    const x_hi = @as(u64, @intCast(x_repr >> 64));
+    const x_lo = @as(u64, @truncate(x_repr));
 
     if (x_hi == expected_hi and x_lo == expected_lo) {
         return;
@@ -575,10 +575,10 @@ test "floatditf" {
     try test__floatditf(0x2, make_tf(0x4000000000000000, 0x0));
     try test__floatditf(0x1, make_tf(0x3fff000000000000, 0x0));
     try test__floatditf(0x0, make_tf(0x0, 0x0));
-    try test__floatditf(@bitCast(i64, @as(u64, 0xffffffffffffffff)), make_tf(0xbfff000000000000, 0x0));
-    try test__floatditf(@bitCast(i64, @as(u64, 0xfffffffffffffffe)), make_tf(0xc000000000000000, 0x0));
+    try test__floatditf(@as(i64, @bitCast(@as(u64, 0xffffffffffffffff))), make_tf(0xbfff000000000000, 0x0));
+    try test__floatditf(@as(i64, @bitCast(@as(u64, 0xfffffffffffffffe))), make_tf(0xc000000000000000, 0x0));
     try test__floatditf(-0x123456789abcdef1, make_tf(0xc03b23456789abcd, 0xef10000000000000));
-    try test__floatditf(@bitCast(i64, @as(u64, 0x8000000000000000)), make_tf(0xc03e000000000000, 0x0));
+    try test__floatditf(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), make_tf(0xc03e000000000000, 0x0));
 }
 
 test "floatunditf" {
@@ -773,7 +773,7 @@ fn make_ti(high: u64, low: u64) i128 {
     var result: u128 = high;
     result <<= 64;
     result |= low;
-    return @bitCast(i128, result);
+    return @as(i128, @bitCast(result));
 }
 
 fn make_uti(high: u64, low: u64) u128 {
@@ -787,7 +787,7 @@ fn make_tf(high: u64, low: u64) f128 {
     var result: u128 = high;
     result <<= 64;
     result |= low;
-    return @bitCast(f128, result);
+    return @as(f128, @bitCast(result));
 }
 
 test "conversion to f16" {
@@ -815,22 +815,22 @@ test "conversion to f80" {
     const floatFromInt = @import("./float_from_int.zig").floatFromInt;
 
     try testing.expect(floatFromInt(f80, @as(i80, -12)) == -12);
-    try testing.expect(@intFromFloat(u80, floatFromInt(f80, @as(u64, math.maxInt(u64)) + 0)) == math.maxInt(u64) + 0);
-    try testing.expect(@intFromFloat(u80, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1);
+    try testing.expect(@as(u80, @intFromFloat(floatFromInt(f80, @as(u64, math.maxInt(u64)) + 0))) == math.maxInt(u64) + 0);
+    try testing.expect(@as(u80, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1))) == math.maxInt(u64) + 1);
 
     try testing.expect(floatFromInt(f80, @as(u32, 0)) == 0.0);
     try testing.expect(floatFromInt(f80, @as(u32, 1)) == 1.0);
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u32, math.maxInt(u24)) + 0)) == math.maxInt(u24));
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 0)) == math.maxInt(u64));
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1)) == math.maxInt(u64) + 1); // Exact
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 2)) == math.maxInt(u64) + 1); // Rounds down
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 3)) == math.maxInt(u64) + 3); // Tie - Exact
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u64)) + 4)) == math.maxInt(u64) + 5); // Rounds up
-
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 0)) == math.maxInt(u65) + 1); // Rounds up
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 1)) == math.maxInt(u65) + 1); // Exact
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 2)) == math.maxInt(u65) + 1); // Rounds down
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 3)) == math.maxInt(u65) + 1); // Tie - Rounds down
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 4)) == math.maxInt(u65) + 5); // Rounds up
-    try testing.expect(@intFromFloat(u128, floatFromInt(f80, @as(u80, math.maxInt(u65)) + 5)) == math.maxInt(u65) + 5); // Exact
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u32, math.maxInt(u24)) + 0))) == math.maxInt(u24));
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 0))) == math.maxInt(u64));
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 1))) == math.maxInt(u64) + 1); // Exact
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 2))) == math.maxInt(u64) + 1); // Rounds down
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 3))) == math.maxInt(u64) + 3); // Tie - Exact
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u64)) + 4))) == math.maxInt(u64) + 5); // Rounds up
+
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 0))) == math.maxInt(u65) + 1); // Rounds up
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 1))) == math.maxInt(u65) + 1); // Exact
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 2))) == math.maxInt(u65) + 1); // Rounds down
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 3))) == math.maxInt(u65) + 1); // Tie - Rounds down
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 4))) == math.maxInt(u65) + 5); // Rounds up
+    try testing.expect(@as(u128, @intFromFloat(floatFromInt(f80, @as(u80, math.maxInt(u65)) + 5))) == math.maxInt(u65) + 5); // Exact
 }
lib/compiler_rt/floattidf.zig
@@ -17,5 +17,5 @@ pub fn __floattidf(a: i128) callconv(.C) f64 {
 }
 
 fn __floattidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 {
-    return floatFromInt(f64, @bitCast(i128, a));
+    return floatFromInt(f64, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattihf.zig
@@ -17,5 +17,5 @@ pub fn __floattihf(a: i128) callconv(.C) f16 {
 }
 
 fn __floattihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 {
-    return floatFromInt(f16, @bitCast(i128, a));
+    return floatFromInt(f16, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattisf.zig
@@ -17,5 +17,5 @@ pub fn __floattisf(a: i128) callconv(.C) f32 {
 }
 
 fn __floattisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 {
-    return floatFromInt(f32, @bitCast(i128, a));
+    return floatFromInt(f32, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattitf.zig
@@ -19,5 +19,5 @@ pub fn __floattitf(a: i128) callconv(.C) f128 {
 }
 
 fn __floattitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 {
-    return floatFromInt(f128, @bitCast(i128, a));
+    return floatFromInt(f128, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattixf.zig
@@ -17,5 +17,5 @@ pub fn __floattixf(a: i128) callconv(.C) f80 {
 }
 
 fn __floattixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 {
-    return floatFromInt(f80, @bitCast(i128, a));
+    return floatFromInt(f80, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floatuntidf.zig
@@ -17,5 +17,5 @@ pub fn __floatuntidf(a: u128) callconv(.C) f64 {
 }
 
 fn __floatuntidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 {
-    return floatFromInt(f64, @bitCast(u128, a));
+    return floatFromInt(f64, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntihf.zig
@@ -17,5 +17,5 @@ pub fn __floatuntihf(a: u128) callconv(.C) f16 {
 }
 
 fn __floatuntihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 {
-    return floatFromInt(f16, @bitCast(u128, a));
+    return floatFromInt(f16, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntisf.zig
@@ -17,5 +17,5 @@ pub fn __floatuntisf(a: u128) callconv(.C) f32 {
 }
 
 fn __floatuntisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 {
-    return floatFromInt(f32, @bitCast(u128, a));
+    return floatFromInt(f32, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntitf.zig
@@ -19,5 +19,5 @@ pub fn __floatuntitf(a: u128) callconv(.C) f128 {
 }
 
 fn __floatuntitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 {
-    return floatFromInt(f128, @bitCast(u128, a));
+    return floatFromInt(f128, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntixf.zig
@@ -17,5 +17,5 @@ pub fn __floatuntixf(a: u128) callconv(.C) f80 {
 }
 
 fn __floatuntixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 {
-    return floatFromInt(f80, @bitCast(u128, a));
+    return floatFromInt(f80, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floor.zig
@@ -26,8 +26,8 @@ comptime {
 }
 
 pub fn __floorh(x: f16) callconv(.C) f16 {
-    var u = @bitCast(u16, x);
-    const e = @intCast(i16, (u >> 10) & 31) - 15;
+    var u = @as(u16, @bitCast(x));
+    const e = @as(i16, @intCast((u >> 10) & 31)) - 15;
     var m: u16 = undefined;
 
     // TODO: Shouldn't need this explicit check.
@@ -40,7 +40,7 @@ pub fn __floorh(x: f16) callconv(.C) f16 {
     }
 
     if (e >= 0) {
-        m = @as(u16, 1023) >> @intCast(u4, e);
+        m = @as(u16, 1023) >> @as(u4, @intCast(e));
         if (u & m == 0) {
             return x;
         }
@@ -48,7 +48,7 @@ pub fn __floorh(x: f16) callconv(.C) f16 {
         if (u >> 15 != 0) {
             u += m;
         }
-        return @bitCast(f16, u & ~m);
+        return @as(f16, @bitCast(u & ~m));
     } else {
         math.doNotOptimizeAway(x + 0x1.0p120);
         if (u >> 15 == 0) {
@@ -60,8 +60,8 @@ pub fn __floorh(x: f16) callconv(.C) f16 {
 }
 
 pub fn floorf(x: f32) callconv(.C) f32 {
-    var u = @bitCast(u32, x);
-    const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
+    var u = @as(u32, @bitCast(x));
+    const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
     var m: u32 = undefined;
 
     // TODO: Shouldn't need this explicit check.
@@ -74,7 +74,7 @@ pub fn floorf(x: f32) callconv(.C) f32 {
     }
 
     if (e >= 0) {
-        m = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
+        m = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e));
         if (u & m == 0) {
             return x;
         }
@@ -82,7 +82,7 @@ pub fn floorf(x: f32) callconv(.C) f32 {
         if (u >> 31 != 0) {
             u += m;
         }
-        return @bitCast(f32, u & ~m);
+        return @as(f32, @bitCast(u & ~m));
     } else {
         math.doNotOptimizeAway(x + 0x1.0p120);
         if (u >> 31 == 0) {
@@ -96,7 +96,7 @@ pub fn floorf(x: f32) callconv(.C) f32 {
 pub fn floor(x: f64) callconv(.C) f64 {
     const f64_toint = 1.0 / math.floatEps(f64);
 
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
     const e = (u >> 52) & 0x7FF;
     var y: f64 = undefined;
 
@@ -126,13 +126,13 @@ pub fn floor(x: f64) callconv(.C) f64 {
 
 pub fn __floorx(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, floorq(x));
+    return @as(f80, @floatCast(floorq(x)));
 }
 
 pub fn floorq(x: f128) callconv(.C) f128 {
     const f128_toint = 1.0 / math.floatEps(f128);
 
-    const u = @bitCast(u128, x);
+    const u = @as(u128, @bitCast(x));
     const e = (u >> 112) & 0x7FFF;
     var y: f128 = undefined;
 
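
The floor implementations above all begin by extracting the biased exponent from the raw bit pattern. For reference, a self-contained sketch of that extraction for f32 under the new syntax (function name hypothetical):

    const std = @import("std");

    fn unbiasedExponent(x: f32) i32 {
        const bits = @as(u32, @bitCast(x));
        return @as(i32, @intCast((bits >> 23) & 0xFF)) - 0x7F;
    }

    test "unbiasedExponent" {
        try std.testing.expectEqual(@as(i32, 1), unbiasedExponent(2.0));
    }
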
lib/compiler_rt/fma.zig
@@ -28,20 +28,20 @@ comptime {
 
 pub fn __fmah(x: f16, y: f16, z: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, fmaf(x, y, z));
+    return @as(f16, @floatCast(fmaf(x, y, z)));
 }
 
 pub fn fmaf(x: f32, y: f32, z: f32) callconv(.C) f32 {
     const xy = @as(f64, x) * y;
     const xy_z = xy + z;
-    const u = @bitCast(u64, xy_z);
+    const u = @as(u64, @bitCast(xy_z));
     const e = (u >> 52) & 0x7FF;
 
     if ((u & 0x1FFFFFFF) != 0x10000000 or e == 0x7FF or (xy_z - xy == z and xy_z - z == xy)) {
-        return @floatCast(f32, xy_z);
+        return @as(f32, @floatCast(xy_z));
     } else {
         // TODO: Handle inexact case with double-rounding
-        return @floatCast(f32, xy_z);
+        return @as(f32, @floatCast(xy_z));
     }
 }
 
@@ -95,7 +95,7 @@ pub fn fma(x: f64, y: f64, z: f64) callconv(.C) f64 {
 
 pub fn __fmax(a: f80, b: f80, c: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, fmaq(a, b, c));
+    return @as(f80, @floatCast(fmaq(a, b, c)));
 }
 
 /// Fused multiply-add: Compute x * y + z with a single rounding error.
@@ -201,12 +201,12 @@ fn dd_mul(a: f64, b: f64) dd {
 fn add_adjusted(a: f64, b: f64) f64 {
     var sum = dd_add(a, b);
     if (sum.lo != 0) {
-        var uhii = @bitCast(u64, sum.hi);
+        var uhii = @as(u64, @bitCast(sum.hi));
         if (uhii & 1 == 0) {
             // hibits += copysign(1.0, sum.hi, sum.lo)
-            const uloi = @bitCast(u64, sum.lo);
+            const uloi = @as(u64, @bitCast(sum.lo));
             uhii += 1 - ((uhii ^ uloi) >> 62);
-            sum.hi = @bitCast(f64, uhii);
+            sum.hi = @as(f64, @bitCast(uhii));
         }
     }
     return sum.hi;
@@ -215,12 +215,12 @@ fn add_adjusted(a: f64, b: f64) f64 {
 fn add_and_denorm(a: f64, b: f64, scale: i32) f64 {
     var sum = dd_add(a, b);
     if (sum.lo != 0) {
-        var uhii = @bitCast(u64, sum.hi);
-        const bits_lost = -@intCast(i32, (uhii >> 52) & 0x7FF) - scale + 1;
+        var uhii = @as(u64, @bitCast(sum.hi));
+        const bits_lost = -@as(i32, @intCast((uhii >> 52) & 0x7FF)) - scale + 1;
         if ((bits_lost != 1) == (uhii & 1 != 0)) {
-            const uloi = @bitCast(u64, sum.lo);
+            const uloi = @as(u64, @bitCast(sum.lo));
             uhii += 1 - (((uhii ^ uloi) >> 62) & 2);
-            sum.hi = @bitCast(f64, uhii);
+            sum.hi = @as(f64, @bitCast(uhii));
         }
     }
     return math.scalbn(sum.hi, scale);
@@ -257,12 +257,12 @@ fn dd_add128(a: f128, b: f128) dd128 {
 fn add_adjusted128(a: f128, b: f128) f128 {
     var sum = dd_add128(a, b);
     if (sum.lo != 0) {
-        var uhii = @bitCast(u128, sum.hi);
+        var uhii = @as(u128, @bitCast(sum.hi));
         if (uhii & 1 == 0) {
             // hibits += copysign(1.0, sum.hi, sum.lo)
-            const uloi = @bitCast(u128, sum.lo);
+            const uloi = @as(u128, @bitCast(sum.lo));
             uhii += 1 - ((uhii ^ uloi) >> 126);
-            sum.hi = @bitCast(f128, uhii);
+            sum.hi = @as(f128, @bitCast(uhii));
         }
     }
     return sum.hi;
@@ -282,12 +282,12 @@ fn add_and_denorm128(a: f128, b: f128, scale: i32) f128 {
     // If we are losing only one bit to denormalization, however, we must
     // break the ties manually.
     if (sum.lo != 0) {
-        var uhii = @bitCast(u128, sum.hi);
-        const bits_lost = -@intCast(i32, (uhii >> 112) & 0x7FFF) - scale + 1;
+        var uhii = @as(u128, @bitCast(sum.hi));
+        const bits_lost = -@as(i32, @intCast((uhii >> 112) & 0x7FFF)) - scale + 1;
         if ((bits_lost != 1) == (uhii & 1 != 0)) {
-            const uloi = @bitCast(u128, sum.lo);
+            const uloi = @as(u128, @bitCast(sum.lo));
             uhii += 1 - (((uhii ^ uloi) >> 126) & 2);
-            sum.hi = @bitCast(f128, uhii);
+            sum.hi = @as(f128, @bitCast(uhii));
         }
     }
     return math.scalbn(sum.hi, scale);
lib/compiler_rt/fmod.zig
@@ -22,7 +22,7 @@ comptime {
 
 pub fn __fmodh(x: f16, y: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, fmodf(x, y));
+    return @as(f16, @floatCast(fmodf(x, y)));
 }
 
 pub fn fmodf(x: f32, y: f32) callconv(.C) f32 {
@@ -46,12 +46,12 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
     const signBit = (@as(Z, 1) << (significandBits + exponentBits));
     const maxExponent = ((1 << exponentBits) - 1);
 
-    var aRep = @bitCast(Z, a);
-    var bRep = @bitCast(Z, b);
+    var aRep = @as(Z, @bitCast(a));
+    var bRep = @as(Z, @bitCast(b));
 
     const signA = aRep & signBit;
-    var expA = @intCast(i32, (@bitCast(Z, a) >> significandBits) & maxExponent);
-    var expB = @intCast(i32, (@bitCast(Z, b) >> significandBits) & maxExponent);
+    var expA = @as(i32, @intCast((@as(Z, @bitCast(a)) >> significandBits) & maxExponent));
+    var expB = @as(i32, @intCast((@as(Z, @bitCast(b)) >> significandBits) & maxExponent));
 
     // There are 3 cases where the answer is undefined; check for:
     //   - fmodx(val, 0)
@@ -82,8 +82,8 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
 
     var highA: u64 = 0;
     var highB: u64 = 0;
-    var lowA: u64 = @truncate(u64, aRep);
-    var lowB: u64 = @truncate(u64, bRep);
+    var lowA: u64 = @as(u64, @truncate(aRep));
+    var lowB: u64 = @as(u64, @truncate(bRep));
 
     while (expA > expB) : (expA -= 1) {
         var high = highA -% highB;
@@ -123,11 +123,11 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
 
     // Combine the exponent with the sign and significand; normalize if it happened to be denormalized
     if (expA < -fractionalBits) {
-        return @bitCast(T, signA);
+        return @as(T, @bitCast(signA));
     } else if (expA <= 0) {
-        return @bitCast(T, (lowA >> @intCast(math.Log2Int(u64), 1 - expA)) | signA);
+        return @as(T, @bitCast((lowA >> @as(math.Log2Int(u64), @intCast(1 - expA))) | signA));
     } else {
-        return @bitCast(T, lowA | (@as(Z, @intCast(u16, expA)) << significandBits) | signA);
+        return @as(T, @bitCast(lowA | (@as(Z, @as(u16, @intCast(expA))) << significandBits) | signA));
     }
 }
 
@@ -136,10 +136,10 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
 pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
     var amod = a;
     var bmod = b;
-    const aPtr_u64 = @ptrCast([*]u64, &amod);
-    const bPtr_u64 = @ptrCast([*]u64, &bmod);
-    const aPtr_u16 = @ptrCast([*]u16, &amod);
-    const bPtr_u16 = @ptrCast([*]u16, &bmod);
+    const aPtr_u64 = @as([*]u64, @ptrCast(&amod));
+    const bPtr_u64 = @as([*]u64, @ptrCast(&bmod));
+    const aPtr_u16 = @as([*]u16, @ptrCast(&amod));
+    const bPtr_u16 = @as([*]u16, @ptrCast(&bmod));
 
     const exp_and_sign_index = comptime switch (builtin.target.cpu.arch.endian()) {
         .Little => 7,
@@ -155,8 +155,8 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
     };
 
     const signA = aPtr_u16[exp_and_sign_index] & 0x8000;
-    var expA = @intCast(i32, (aPtr_u16[exp_and_sign_index] & 0x7fff));
-    var expB = @intCast(i32, (bPtr_u16[exp_and_sign_index] & 0x7fff));
+    var expA = @as(i32, @intCast((aPtr_u16[exp_and_sign_index] & 0x7fff)));
+    var expB = @as(i32, @intCast((bPtr_u16[exp_and_sign_index] & 0x7fff)));
 
     // There are 3 cases where the answer is undefined; check for:
     //   - fmodq(val, 0)
@@ -173,8 +173,8 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
     }
 
     // Remove the sign from both
-    aPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expA));
-    bPtr_u16[exp_and_sign_index] = @bitCast(u16, @intCast(i16, expB));
+    aPtr_u16[exp_and_sign_index] = @as(u16, @bitCast(@as(i16, @intCast(expA))));
+    bPtr_u16[exp_and_sign_index] = @as(u16, @bitCast(@as(i16, @intCast(expB))));
     if (amod <= bmod) {
         if (amod == bmod) {
             return 0 * a;
@@ -241,10 +241,10 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
 
     // Combine the exponent with the sign; normalize if it happened to be denormalized
     if (expA <= 0) {
-        aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, (expA +% 120))) | signA;
+        aPtr_u16[exp_and_sign_index] = @as(u16, @truncate(@as(u32, @bitCast((expA +% 120))))) | signA;
         amod *= 0x1p-120;
     } else {
-        aPtr_u16[exp_and_sign_index] = @truncate(u16, @bitCast(u32, expA)) | signA;
+        aPtr_u16[exp_and_sign_index] = @as(u16, @truncate(@as(u32, @bitCast(expA)))) | signA;
     }
 
     return amod;
@@ -270,14 +270,14 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T {
     const exp_bits = if (T == f32) 9 else 12;
     const bits_minus_1 = bits - 1;
     const mask = if (T == f32) 0xff else 0x7ff;
-    var ux = @bitCast(uint, x);
-    var uy = @bitCast(uint, y);
-    var ex = @intCast(i32, (ux >> digits) & mask);
-    var ey = @intCast(i32, (uy >> digits) & mask);
-    const sx = if (T == f32) @intCast(u32, ux & 0x80000000) else @intCast(i32, ux >> bits_minus_1);
+    var ux = @as(uint, @bitCast(x));
+    var uy = @as(uint, @bitCast(y));
+    var ex = @as(i32, @intCast((ux >> digits) & mask));
+    var ey = @as(i32, @intCast((uy >> digits) & mask));
+    const sx = if (T == f32) @as(u32, @intCast(ux & 0x80000000)) else @as(i32, @intCast(ux >> bits_minus_1));
     var i: uint = undefined;
 
-    if (uy << 1 == 0 or math.isNan(@bitCast(T, uy)) or ex == mask)
+    if (uy << 1 == 0 or math.isNan(@as(T, @bitCast(uy))) or ex == mask)
         return (x * y) / (x * y);
 
     if (ux << 1 <= uy << 1) {
@@ -293,7 +293,7 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T {
             ex -= 1;
             i <<= 1;
         }) {}
-        ux <<= @intCast(log2uint, @bitCast(u32, -ex + 1));
+        ux <<= @as(log2uint, @intCast(@as(u32, @bitCast(-ex + 1))));
     } else {
         ux &= math.maxInt(uint) >> exp_bits;
         ux |= 1 << digits;
@@ -304,7 +304,7 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T {
             ey -= 1;
             i <<= 1;
         }) {}
-        uy <<= @intCast(log2uint, @bitCast(u32, -ey + 1));
+        uy <<= @as(log2uint, @intCast(@as(u32, @bitCast(-ey + 1))));
     } else {
         uy &= math.maxInt(uint) >> exp_bits;
         uy |= 1 << digits;
@@ -334,16 +334,16 @@ inline fn generic_fmod(comptime T: type, x: T, y: T) T {
     // scale result up
     if (ex > 0) {
         ux -%= 1 << digits;
-        ux |= @as(uint, @bitCast(u32, ex)) << digits;
+        ux |= @as(uint, @as(u32, @bitCast(ex))) << digits;
     } else {
-        ux >>= @intCast(log2uint, @bitCast(u32, -ex + 1));
+        ux >>= @as(log2uint, @intCast(@as(u32, @bitCast(-ex + 1))));
     }
     if (T == f32) {
         ux |= sx;
     } else {
-        ux |= @intCast(uint, sx) << bits_minus_1;
+        ux |= @as(uint, @intCast(sx)) << bits_minus_1;
     }
-    return @bitCast(T, ux);
+    return @as(T, @bitCast(ux));
 }
 
 test "fmodf" {
lib/compiler_rt/int.zig
@@ -52,8 +52,8 @@ test "test_divmodti4" {
         [_]i128{ -7, 5, -1, -2 },
         [_]i128{ 19, 5, 3, 4 },
         [_]i128{ 19, -5, -3, 4 },
-        [_]i128{ @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 8, @bitCast(i128, @as(u128, 0xf0000000000000000000000000000000)), 0 },
-        [_]i128{ @bitCast(i128, @as(u128, 0x80000000000000000000000000000007)), 8, @bitCast(i128, @as(u128, 0xf0000000000000000000000000000001)), -1 },
+        [_]i128{ @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 8, @as(i128, @bitCast(@as(u128, 0xf0000000000000000000000000000000))), 0 },
+        [_]i128{ @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000007))), 8, @as(i128, @bitCast(@as(u128, 0xf0000000000000000000000000000001))), -1 },
     };
 
     for (cases) |case| {
@@ -85,8 +85,8 @@ test "test_divmoddi4" {
         [_]i64{ -7, 5, -1, -2 },
         [_]i64{ 19, 5, 3, 4 },
         [_]i64{ 19, -5, -3, 4 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 8, @bitCast(i64, @as(u64, 0xf000000000000000)), 0 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000007)), 8, @bitCast(i64, @as(u64, 0xf000000000000001)), -1 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 8, @as(i64, @bitCast(@as(u64, 0xf000000000000000))), 0 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000007))), 8, @as(i64, @bitCast(@as(u64, 0xf000000000000001))), -1 },
     };
 
     for (cases) |case| {
@@ -110,14 +110,14 @@ test "test_udivmoddi4" {
 
 pub fn __divdi3(a: i64, b: i64) callconv(.C) i64 {
     // Set aside the sign of the quotient.
-    const sign = @bitCast(u64, (a ^ b) >> 63);
+    const sign = @as(u64, @bitCast((a ^ b) >> 63));
     // Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63).
     const abs_a = (a ^ (a >> 63)) -% (a >> 63);
     const abs_b = (b ^ (b >> 63)) -% (b >> 63);
     // Unsigned division
-    const res = __udivmoddi4(@bitCast(u64, abs_a), @bitCast(u64, abs_b), null);
+    const res = __udivmoddi4(@as(u64, @bitCast(abs_a)), @as(u64, @bitCast(abs_b)), null);
     // Apply sign of quotient to result and return.
-    return @bitCast(i64, (res ^ sign) -% sign);
+    return @as(i64, @bitCast((res ^ sign) -% sign));
 }
 
 test "test_divdi3" {
@@ -129,10 +129,10 @@ test "test_divdi3" {
         [_]i64{ -2, 1, -2 },
         [_]i64{ -2, -1, 2 },
 
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 1, @bitCast(i64, @as(u64, 0x8000000000000000)) },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -1, @bitCast(i64, @as(u64, 0x8000000000000000)) },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -2, 0x4000000000000000 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 2, @bitCast(i64, @as(u64, 0xC000000000000000)) },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))) },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))) },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, 0x4000000000000000 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, @as(i64, @bitCast(@as(u64, 0xC000000000000000))) },
     };
 
     for (cases) |case| {
@@ -151,9 +151,9 @@ pub fn __moddi3(a: i64, b: i64) callconv(.C) i64 {
     const abs_b = (b ^ (b >> 63)) -% (b >> 63);
     // Unsigned division
     var r: u64 = undefined;
-    _ = __udivmoddi4(@bitCast(u64, abs_a), @bitCast(u64, abs_b), &r);
+    _ = __udivmoddi4(@as(u64, @bitCast(abs_a)), @as(u64, @bitCast(abs_b)), &r);
     // Apply the sign of the dividend and return.
-    return (@bitCast(i64, r) ^ (a >> 63)) -% (a >> 63);
+    return (@as(i64, @bitCast(r)) ^ (a >> 63)) -% (a >> 63);
 }
 
 test "test_moddi3" {
@@ -165,12 +165,12 @@ test "test_moddi3" {
         [_]i64{ -5, 3, -2 },
         [_]i64{ -5, -3, -2 },
 
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 1, 0 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -1, 0 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 2, 0 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -2, 0 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), 3, -2 },
-        [_]i64{ @bitCast(i64, @as(u64, 0x8000000000000000)), -3, -2 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, 0 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, 0 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, 0 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, 0 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 3, -2 },
+        [_]i64{ @as(i64, @bitCast(@as(u64, 0x8000000000000000))), -3, -2 },
     };
 
     for (cases) |case| {
@@ -225,8 +225,8 @@ test "test_divmodsi4" {
         [_]i32{ 19, 5, 3, 4 },
         [_]i32{ 19, -5, -3, 4 },
 
-        [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 8, @bitCast(i32, @as(u32, 0xf0000000)), 0 },
-        [_]i32{ @bitCast(i32, @as(u32, 0x80000007)), 8, @bitCast(i32, @as(u32, 0xf0000001)), -1 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 8, @as(i32, @bitCast(@as(u32, 0xf0000000))), 0 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000007))), 8, @as(i32, @bitCast(@as(u32, 0xf0000001))), -1 },
     };
 
     for (cases) |case| {
@@ -242,7 +242,7 @@ fn test_one_divmodsi4(a: i32, b: i32, expected_q: i32, expected_r: i32) !void {
 
 pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.C) u32 {
     const d = __udivsi3(a, b);
-    rem.* = @bitCast(u32, @bitCast(i32, a) -% (@bitCast(i32, d) * @bitCast(i32, b)));
+    rem.* = @as(u32, @bitCast(@as(i32, @bitCast(a)) -% (@as(i32, @bitCast(d)) * @as(i32, @bitCast(b)))));
     return d;
 }
 
@@ -256,14 +256,14 @@ fn __aeabi_idiv(n: i32, d: i32) callconv(.AAPCS) i32 {
 
 inline fn div_i32(n: i32, d: i32) i32 {
     // Set aside the sign of the quotient.
-    const sign = @bitCast(u32, (n ^ d) >> 31);
+    const sign = @as(u32, @bitCast((n ^ d) >> 31));
     // Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
     const abs_n = (n ^ (n >> 31)) -% (n >> 31);
     const abs_d = (d ^ (d >> 31)) -% (d >> 31);
     // abs(a) / abs(b)
-    const res = @bitCast(u32, abs_n) / @bitCast(u32, abs_d);
+    const res = @as(u32, @bitCast(abs_n)) / @as(u32, @bitCast(abs_d));
     // Apply sign of quotient to result and return.
-    return @bitCast(i32, (res ^ sign) -% sign);
+    return @as(i32, @bitCast((res ^ sign) -% sign));
 }
 
 test "test_divsi3" {
@@ -275,10 +275,10 @@ test "test_divsi3" {
         [_]i32{ -2, 1, -2 },
         [_]i32{ -2, -1, 2 },
 
-        [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 1, @bitCast(i32, @as(u32, 0x80000000)) },
-        [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), -1, @bitCast(i32, @as(u32, 0x80000000)) },
-        [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), -2, 0x40000000 },
-        [_]i32{ @bitCast(i32, @as(u32, 0x80000000)), 2, @bitCast(i32, @as(u32, 0xC0000000)) },
+        [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 1, @as(i32, @bitCast(@as(u32, 0x80000000))) },
+        [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), -1, @as(i32, @bitCast(@as(u32, 0x80000000))) },
+        [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), -2, 0x40000000 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, 0x80000000))), 2, @as(i32, @bitCast(@as(u32, 0xC0000000))) },
     };
 
     for (cases) |case| {
@@ -304,7 +304,7 @@ inline fn div_u32(n: u32, d: u32) u32 {
     // special cases
     if (d == 0) return 0; // ?!
     if (n == 0) return 0;
-    var sr = @bitCast(c_uint, @as(c_int, @clz(d)) - @as(c_int, @clz(n)));
+    var sr = @as(c_uint, @bitCast(@as(c_int, @clz(d)) - @as(c_int, @clz(n))));
     // 0 <= sr <= n_uword_bits - 1 or sr large
     if (sr > n_uword_bits - 1) {
         // d > r
@@ -317,12 +317,12 @@ inline fn div_u32(n: u32, d: u32) u32 {
     sr += 1;
     // 1 <= sr <= n_uword_bits - 1
     // Not a special case
-    var q: u32 = n << @intCast(u5, n_uword_bits - sr);
-    var r: u32 = n >> @intCast(u5, sr);
+    var q: u32 = n << @as(u5, @intCast(n_uword_bits - sr));
+    var r: u32 = n >> @as(u5, @intCast(sr));
     var carry: u32 = 0;
     while (sr > 0) : (sr -= 1) {
         // r:q = ((r:q)  << 1) | carry
-        r = (r << 1) | (q >> @intCast(u5, n_uword_bits - 1));
+        r = (r << 1) | (q >> @as(u5, @intCast(n_uword_bits - 1)));
         q = (q << 1) | carry;
         // carry = 0;
         // if (r.all >= d.all)
@@ -330,9 +330,9 @@ inline fn div_u32(n: u32, d: u32) u32 {
         //      r.all -= d.all;
         //      carry = 1;
         // }
-        const s = @bitCast(i32, d -% r -% 1) >> @intCast(u5, n_uword_bits - 1);
-        carry = @intCast(u32, s & 1);
-        r -= d & @bitCast(u32, s);
+        const s = @as(i32, @bitCast(d -% r -% 1)) >> @as(u5, @intCast(n_uword_bits - 1));
+        carry = @as(u32, @intCast(s & 1));
+        r -= d & @as(u32, @bitCast(s));
     }
     q = (q << 1) | carry;
     return q;
@@ -496,11 +496,11 @@ test "test_modsi3" {
         [_]i32{ 5, -3, 2 },
         [_]i32{ -5, 3, -2 },
         [_]i32{ -5, -3, -2 },
-        [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 1, 0x0 },
-        [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 2, 0x0 },
-        [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), -2, 0x0 },
-        [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), 3, -2 },
-        [_]i32{ @bitCast(i32, @intCast(u32, 0x80000000)), -3, -2 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 1, 0x0 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 2, 0x0 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), -2, 0x0 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), 3, -2 },
+        [_]i32{ @as(i32, @bitCast(@as(u32, @intCast(0x80000000)))), -3, -2 },
     };
 
     for (cases) |case| {
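
The test tables above spell values such as 0x80000000 through @bitCast because the literal does not fit in the signed type directly. A quick sanity-check sketch of the equivalence being relied on:

    const std = @import("std");

    test "0x80000000 reinterprets as minInt(i32)" {
        const x = @as(i32, @bitCast(@as(u32, 0x80000000)));
        try std.testing.expectEqual(@as(i32, std.math.minInt(i32)), x);
    }
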
lib/compiler_rt/int_from_float.zig
@@ -17,9 +17,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I {
     const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
 
     // Break a into sign, exponent, significand
-    const a_rep: rep_t = @bitCast(rep_t, a);
+    const a_rep: rep_t = @as(rep_t, @bitCast(a));
     const negative = (a_rep >> (float_bits - 1)) != 0;
-    const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
+    const exponent = @as(i32, @intCast((a_rep << 1) >> (sig_bits + 1))) - exp_bias;
     const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
 
     // If the exponent is negative, the result rounds to zero.
@@ -29,9 +29,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I {
     switch (@typeInfo(I).Int.signedness) {
         .unsigned => {
             if (negative) return 0;
-            if (@intCast(c_uint, exponent) >= @min(int_bits, max_exp)) return math.maxInt(I);
+            if (@as(c_uint, @intCast(exponent)) >= @min(int_bits, max_exp)) return math.maxInt(I);
         },
-        .signed => if (@intCast(c_uint, exponent) >= @min(int_bits - 1, max_exp)) {
+        .signed => if (@as(c_uint, @intCast(exponent)) >= @min(int_bits - 1, max_exp)) {
             return if (negative) math.minInt(I) else math.maxInt(I);
         },
     }
@@ -40,9 +40,9 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I {
     // Otherwise, shift left.
     var result: I = undefined;
     if (exponent < fractional_bits) {
-        result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
+        result = @as(I, @intCast(significand >> @as(Log2Int(rep_t), @intCast(fractional_bits - exponent))));
     } else {
-        result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
+        result = @as(I, @intCast(significand)) << @as(Log2Int(I), @intCast(exponent - fractional_bits));
     }
 
     if ((@typeInfo(I).Int.signedness == .signed) and negative)
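
intFromFloat shifts the significand by a runtime amount, so the shift count must first be narrowed to the operand's log2 integer type (u6 for a u64 operand). A minimal sketch of that idiom (helper name hypothetical):

    const std = @import("std");

    fn shr64(x: u64, n: u32) u64 {
        // the shift count must fit in std.math.Log2Int(u64), i.e. u6
        return x >> @as(std.math.Log2Int(u64), @intCast(n));
    }

    test "shr64" {
        try std.testing.expectEqual(@as(u64, 1), shr64(4, 2));
    }
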
lib/compiler_rt/log.zig
@@ -27,7 +27,7 @@ comptime {
 
 pub fn __logh(a: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, logf(a));
+    return @as(f16, @floatCast(logf(a)));
 }
 
 pub fn logf(x_: f32) callconv(.C) f32 {
@@ -39,7 +39,7 @@ pub fn logf(x_: f32) callconv(.C) f32 {
     const Lg4: f32 = 0xf89e26.0p-26;
 
     var x = x_;
-    var ix = @bitCast(u32, x);
+    var ix = @as(u32, @bitCast(x));
     var k: i32 = 0;
 
     // x < 2^(-126)
@@ -56,7 +56,7 @@ pub fn logf(x_: f32) callconv(.C) f32 {
         // subnormal, scale x
         k -= 25;
         x *= 0x1.0p25;
-        ix = @bitCast(u32, x);
+        ix = @as(u32, @bitCast(x));
     } else if (ix >= 0x7F800000) {
         return x;
     } else if (ix == 0x3F800000) {
@@ -65,9 +65,9 @@ pub fn logf(x_: f32) callconv(.C) f32 {
 
     // x into [sqrt(2) / 2, sqrt(2)]
     ix += 0x3F800000 - 0x3F3504F3;
-    k += @intCast(i32, ix >> 23) - 0x7F;
+    k += @as(i32, @intCast(ix >> 23)) - 0x7F;
     ix = (ix & 0x007FFFFF) + 0x3F3504F3;
-    x = @bitCast(f32, ix);
+    x = @as(f32, @bitCast(ix));
 
     const f = x - 1.0;
     const s = f / (2.0 + f);
@@ -77,7 +77,7 @@ pub fn logf(x_: f32) callconv(.C) f32 {
     const t2 = z * (Lg1 + w * Lg3);
     const R = t2 + t1;
     const hfsq = 0.5 * f * f;
-    const dk = @floatFromInt(f32, k);
+    const dk = @as(f32, @floatFromInt(k));
 
     return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
 }
@@ -94,8 +94,8 @@ pub fn log(x_: f64) callconv(.C) f64 {
     const Lg7: f64 = 1.479819860511658591e-01;
 
     var x = x_;
-    var ix = @bitCast(u64, x);
-    var hx = @intCast(u32, ix >> 32);
+    var ix = @as(u64, @bitCast(x));
+    var hx = @as(u32, @intCast(ix >> 32));
     var k: i32 = 0;
 
     if (hx < 0x00100000 or hx >> 31 != 0) {
@@ -111,7 +111,7 @@ pub fn log(x_: f64) callconv(.C) f64 {
         // subnormal, scale x
         k -= 54;
         x *= 0x1.0p54;
-        hx = @intCast(u32, @bitCast(u64, ix) >> 32);
+        hx = @as(u32, @intCast(@as(u64, @bitCast(ix)) >> 32));
     } else if (hx >= 0x7FF00000) {
         return x;
     } else if (hx == 0x3FF00000 and ix << 32 == 0) {
@@ -120,10 +120,10 @@ pub fn log(x_: f64) callconv(.C) f64 {
 
     // x into [sqrt(2) / 2, sqrt(2)]
     hx += 0x3FF00000 - 0x3FE6A09E;
-    k += @intCast(i32, hx >> 20) - 0x3FF;
+    k += @as(i32, @intCast(hx >> 20)) - 0x3FF;
     hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
     ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF);
-    x = @bitCast(f64, ix);
+    x = @as(f64, @bitCast(ix));
 
     const f = x - 1.0;
     const hfsq = 0.5 * f * f;
@@ -133,19 +133,19 @@ pub fn log(x_: f64) callconv(.C) f64 {
     const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
     const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
     const R = t2 + t1;
-    const dk = @floatFromInt(f64, k);
+    const dk = @as(f64, @floatFromInt(k));
 
     return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
 }
 
 pub fn __logx(a: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, logq(a));
+    return @as(f80, @floatCast(logq(a)));
 }
 
 pub fn logq(a: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return log(@floatCast(f64, a));
+    return log(@as(f64, @floatCast(a)));
 }
 
 pub fn logl(x: c_longdouble) callconv(.C) c_longdouble {
lib/compiler_rt/log10.zig
@@ -28,7 +28,7 @@ comptime {
 
 pub fn __log10h(a: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, log10f(a));
+    return @as(f16, @floatCast(log10f(a)));
 }
 
 pub fn log10f(x_: f32) callconv(.C) f32 {
@@ -42,7 +42,7 @@ pub fn log10f(x_: f32) callconv(.C) f32 {
     const Lg4: f32 = 0xf89e26.0p-26;
 
     var x = x_;
-    var u = @bitCast(u32, x);
+    var u = @as(u32, @bitCast(x));
     var ix = u;
     var k: i32 = 0;
 
@@ -59,7 +59,7 @@ pub fn log10f(x_: f32) callconv(.C) f32 {
 
         k -= 25;
         x *= 0x1.0p25;
-        ix = @bitCast(u32, x);
+        ix = @as(u32, @bitCast(x));
     } else if (ix >= 0x7F800000) {
         return x;
     } else if (ix == 0x3F800000) {
@@ -68,9 +68,9 @@ pub fn log10f(x_: f32) callconv(.C) f32 {
 
     // x into [sqrt(2) / 2, sqrt(2)]
     ix += 0x3F800000 - 0x3F3504F3;
-    k += @intCast(i32, ix >> 23) - 0x7F;
+    k += @as(i32, @intCast(ix >> 23)) - 0x7F;
     ix = (ix & 0x007FFFFF) + 0x3F3504F3;
-    x = @bitCast(f32, ix);
+    x = @as(f32, @bitCast(ix));
 
     const f = x - 1.0;
     const s = f / (2.0 + f);
@@ -82,11 +82,11 @@ pub fn log10f(x_: f32) callconv(.C) f32 {
     const hfsq = 0.5 * f * f;
 
     var hi = f - hfsq;
-    u = @bitCast(u32, hi);
+    u = @as(u32, @bitCast(hi));
     u &= 0xFFFFF000;
-    hi = @bitCast(f32, u);
+    hi = @as(f32, @bitCast(u));
     const lo = f - hi - hfsq + s * (hfsq + R);
-    const dk = @floatFromInt(f32, k);
+    const dk = @as(f32, @floatFromInt(k));
 
     return dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi + hi * ivln10hi + dk * log10_2hi;
 }
@@ -105,8 +105,8 @@ pub fn log10(x_: f64) callconv(.C) f64 {
     const Lg7: f64 = 1.479819860511658591e-01;
 
     var x = x_;
-    var ix = @bitCast(u64, x);
-    var hx = @intCast(u32, ix >> 32);
+    var ix = @as(u64, @bitCast(x));
+    var hx = @as(u32, @intCast(ix >> 32));
     var k: i32 = 0;
 
     if (hx < 0x00100000 or hx >> 31 != 0) {
@@ -122,7 +122,7 @@ pub fn log10(x_: f64) callconv(.C) f64 {
         // subnormal, scale x
         k -= 54;
         x *= 0x1.0p54;
-        hx = @intCast(u32, @bitCast(u64, x) >> 32);
+        hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32));
     } else if (hx >= 0x7FF00000) {
         return x;
     } else if (hx == 0x3FF00000 and ix << 32 == 0) {
@@ -131,10 +131,10 @@ pub fn log10(x_: f64) callconv(.C) f64 {
 
     // x into [sqrt(2) / 2, sqrt(2)]
     hx += 0x3FF00000 - 0x3FE6A09E;
-    k += @intCast(i32, hx >> 20) - 0x3FF;
+    k += @as(i32, @intCast(hx >> 20)) - 0x3FF;
     hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
     ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF);
-    x = @bitCast(f64, ix);
+    x = @as(f64, @bitCast(ix));
 
     const f = x - 1.0;
     const hfsq = 0.5 * f * f;
@@ -147,14 +147,14 @@ pub fn log10(x_: f64) callconv(.C) f64 {
 
     // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f)
     var hi = f - hfsq;
-    var hii = @bitCast(u64, hi);
+    var hii = @as(u64, @bitCast(hi));
     hii &= @as(u64, maxInt(u64)) << 32;
-    hi = @bitCast(f64, hii);
+    hi = @as(f64, @bitCast(hii));
     const lo = f - hi - hfsq + s * (hfsq + R);
 
     // val_hi + val_lo ~ log10(1 + f) + k * log10(2)
     var val_hi = hi * ivln10hi;
-    const dk = @floatFromInt(f64, k);
+    const dk = @as(f64, @floatFromInt(k));
     const y = dk * log10_2hi;
     var val_lo = dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi;
 
@@ -168,12 +168,12 @@ pub fn log10(x_: f64) callconv(.C) f64 {
 
 pub fn __log10x(a: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, log10q(a));
+    return @as(f80, @floatCast(log10q(a)));
 }
 
 pub fn log10q(a: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return log10(@floatCast(f64, a));
+    return log10(@as(f64, @floatCast(a)));
 }
 
 pub fn log10l(x: c_longdouble) callconv(.C) c_longdouble {
lib/compiler_rt/log2.zig
@@ -28,7 +28,7 @@ comptime {
 
 pub fn __log2h(a: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, log2f(a));
+    return @as(f16, @floatCast(log2f(a)));
 }
 
 pub fn log2f(x_: f32) callconv(.C) f32 {
@@ -40,7 +40,7 @@ pub fn log2f(x_: f32) callconv(.C) f32 {
     const Lg4: f32 = 0xf89e26.0p-26;
 
     var x = x_;
-    var u = @bitCast(u32, x);
+    var u = @as(u32, @bitCast(x));
     var ix = u;
     var k: i32 = 0;
 
@@ -57,7 +57,7 @@ pub fn log2f(x_: f32) callconv(.C) f32 {
 
         k -= 25;
         x *= 0x1.0p25;
-        ix = @bitCast(u32, x);
+        ix = @as(u32, @bitCast(x));
     } else if (ix >= 0x7F800000) {
         return x;
     } else if (ix == 0x3F800000) {
@@ -66,9 +66,9 @@ pub fn log2f(x_: f32) callconv(.C) f32 {
 
     // x into [sqrt(2) / 2, sqrt(2)]
     ix += 0x3F800000 - 0x3F3504F3;
-    k += @intCast(i32, ix >> 23) - 0x7F;
+    k += @as(i32, @intCast(ix >> 23)) - 0x7F;
     ix = (ix & 0x007FFFFF) + 0x3F3504F3;
-    x = @bitCast(f32, ix);
+    x = @as(f32, @bitCast(ix));
 
     const f = x - 1.0;
     const s = f / (2.0 + f);
@@ -80,11 +80,11 @@ pub fn log2f(x_: f32) callconv(.C) f32 {
     const hfsq = 0.5 * f * f;
 
     var hi = f - hfsq;
-    u = @bitCast(u32, hi);
+    u = @as(u32, @bitCast(hi));
     u &= 0xFFFFF000;
-    hi = @bitCast(f32, u);
+    hi = @as(f32, @bitCast(u));
     const lo = f - hi - hfsq + s * (hfsq + R);
-    return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @floatFromInt(f32, k);
+    return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @as(f32, @floatFromInt(k));
 }
 
 pub fn log2(x_: f64) callconv(.C) f64 {
@@ -99,8 +99,8 @@ pub fn log2(x_: f64) callconv(.C) f64 {
     const Lg7: f64 = 1.479819860511658591e-01;
 
     var x = x_;
-    var ix = @bitCast(u64, x);
-    var hx = @intCast(u32, ix >> 32);
+    var ix = @as(u64, @bitCast(x));
+    var hx = @as(u32, @intCast(ix >> 32));
     var k: i32 = 0;
 
     if (hx < 0x00100000 or hx >> 31 != 0) {
@@ -116,7 +116,7 @@ pub fn log2(x_: f64) callconv(.C) f64 {
         // subnormal, scale x
         k -= 54;
         x *= 0x1.0p54;
-        hx = @intCast(u32, @bitCast(u64, x) >> 32);
+        hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32));
     } else if (hx >= 0x7FF00000) {
         return x;
     } else if (hx == 0x3FF00000 and ix << 32 == 0) {
@@ -125,10 +125,10 @@ pub fn log2(x_: f64) callconv(.C) f64 {
 
     // x into [sqrt(2) / 2, sqrt(2)]
     hx += 0x3FF00000 - 0x3FE6A09E;
-    k += @intCast(i32, hx >> 20) - 0x3FF;
+    k += @as(i32, @intCast(hx >> 20)) - 0x3FF;
     hx = (hx & 0x000FFFFF) + 0x3FE6A09E;
     ix = (@as(u64, hx) << 32) | (ix & 0xFFFFFFFF);
-    x = @bitCast(f64, ix);
+    x = @as(f64, @bitCast(ix));
 
     const f = x - 1.0;
     const hfsq = 0.5 * f * f;
@@ -141,16 +141,16 @@ pub fn log2(x_: f64) callconv(.C) f64 {
 
     // hi + lo = f - hfsq + s * (hfsq + R) ~ log(1 + f)
     var hi = f - hfsq;
-    var hii = @bitCast(u64, hi);
+    var hii = @as(u64, @bitCast(hi));
     hii &= @as(u64, maxInt(u64)) << 32;
-    hi = @bitCast(f64, hii);
+    hi = @as(f64, @bitCast(hii));
     const lo = f - hi - hfsq + s * (hfsq + R);
 
     var val_hi = hi * ivln2hi;
     var val_lo = (lo + hi) * ivln2lo + lo * ivln2hi;
 
     // spadd(val_hi, val_lo, y)
-    const y = @floatFromInt(f64, k);
+    const y = @as(f64, @floatFromInt(k));
     const ww = y + val_hi;
     val_lo += (y - ww) + val_hi;
     val_hi = ww;
@@ -160,12 +160,12 @@ pub fn log2(x_: f64) callconv(.C) f64 {
 
 pub fn __log2x(a: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, log2q(a));
+    return @as(f80, @floatCast(log2q(a)));
 }
 
 pub fn log2q(a: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return log2(@floatCast(f64, a));
+    return log2(@as(f64, @floatCast(a)));
 }
 
 pub fn log2l(x: c_longdouble) callconv(.C) c_longdouble {
lib/compiler_rt/modti3.zig
@@ -24,7 +24,7 @@ pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
 const v2u64 = @Vector(2, u64);
 
 fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
-    return @bitCast(v2u64, mod(@bitCast(i128, a), @bitCast(i128, b)));
+    return @as(v2u64, @bitCast(mod(@as(i128, @bitCast(a)), @as(i128, @bitCast(b)))));
 }
 
 inline fn mod(a: i128, b: i128) i128 {
@@ -35,8 +35,8 @@ inline fn mod(a: i128, b: i128) i128 {
     const bn = (b ^ s_b) -% s_b; // negate if s == -1
 
     var r: u128 = undefined;
-    _ = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), &r);
-    return (@bitCast(i128, r) ^ s_a) -% s_a; // negate if s == -1
+    _ = udivmod(u128, @as(u128, @bitCast(an)), @as(u128, @bitCast(bn)), &r);
+    return (@as(i128, @bitCast(r)) ^ s_a) -% s_a; // negate if s == -1
 }
 
 test {
lib/compiler_rt/modti3_test.zig
@@ -33,5 +33,5 @@ fn make_ti(high: u64, low: u64) i128 {
     var result: u128 = high;
     result <<= 64;
     result |= low;
-    return @bitCast(i128, result);
+    return @as(i128, @bitCast(result));
 }
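
make_ti packs two u64 halves into an i128; the inverse direction goes through @intCast for the high half and @truncate for the low half, as in the mulf3 tests below. A round-trip sketch (helper name hypothetical):

    const std = @import("std");

    fn split_ti(v: i128) struct { high: u64, low: u64 } {
        const bits = @as(u128, @bitCast(v));
        return .{
            .high = @as(u64, @intCast(bits >> 64)),
            .low = @as(u64, @truncate(bits)),
        };
    }

    test "split_ti" {
        const v = @as(i128, @bitCast(@as(u128, 0xf000000000000000) << 64));
        const parts = split_ti(v);
        try std.testing.expectEqual(@as(u64, 0xf000000000000000), parts.high);
        try std.testing.expectEqual(@as(u64, 0), parts.low);
    }
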
lib/compiler_rt/mulf3.zig
@@ -28,53 +28,53 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
     const significandMask = (@as(Z, 1) << significandBits) - 1;
 
     const absMask = signBit - 1;
-    const qnanRep = @bitCast(Z, math.nan(T)) | quietBit;
-    const infRep = @bitCast(Z, math.inf(T));
-    const minNormalRep = @bitCast(Z, math.floatMin(T));
+    const qnanRep = @as(Z, @bitCast(math.nan(T))) | quietBit;
+    const infRep = @as(Z, @bitCast(math.inf(T)));
+    const minNormalRep = @as(Z, @bitCast(math.floatMin(T)));
 
     const ZExp = if (typeWidth >= 32) u32 else Z;
-    const aExponent = @truncate(ZExp, (@bitCast(Z, a) >> significandBits) & maxExponent);
-    const bExponent = @truncate(ZExp, (@bitCast(Z, b) >> significandBits) & maxExponent);
-    const productSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
+    const aExponent = @as(ZExp, @truncate((@as(Z, @bitCast(a)) >> significandBits) & maxExponent));
+    const bExponent = @as(ZExp, @truncate((@as(Z, @bitCast(b)) >> significandBits) & maxExponent));
+    const productSign: Z = (@as(Z, @bitCast(a)) ^ @as(Z, @bitCast(b))) & signBit;
 
-    var aSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, a) & significandMask);
-    var bSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, b) & significandMask);
+    var aSignificand: ZSignificand = @as(ZSignificand, @intCast(@as(Z, @bitCast(a)) & significandMask));
+    var bSignificand: ZSignificand = @as(ZSignificand, @intCast(@as(Z, @bitCast(b)) & significandMask));
     var scale: i32 = 0;
 
     // Detect if a or b is zero, denormal, infinity, or NaN.
     if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
-        const aAbs: Z = @bitCast(Z, a) & absMask;
-        const bAbs: Z = @bitCast(Z, b) & absMask;
+        const aAbs: Z = @as(Z, @bitCast(a)) & absMask;
+        const bAbs: Z = @as(Z, @bitCast(b)) & absMask;
 
         // NaN * anything = qNaN
-        if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit);
+        if (aAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(a)) | quietBit));
         // anything * NaN = qNaN
-        if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit);
+        if (bAbs > infRep) return @as(T, @bitCast(@as(Z, @bitCast(b)) | quietBit));
 
         if (aAbs == infRep) {
             // infinity * non-zero = +/- infinity
             if (bAbs != 0) {
-                return @bitCast(T, aAbs | productSign);
+                return @as(T, @bitCast(aAbs | productSign));
             } else {
                 // infinity * zero = NaN
-                return @bitCast(T, qnanRep);
+                return @as(T, @bitCast(qnanRep));
             }
         }
 
         if (bAbs == infRep) {
             // non-zero * infinity = +/- infinity
             if (aAbs != 0) {
-                return @bitCast(T, bAbs | productSign);
+                return @as(T, @bitCast(bAbs | productSign));
             } else {
                 // zero * infinity = NaN
-                return @bitCast(T, qnanRep);
+                return @as(T, @bitCast(qnanRep));
             }
         }
 
         // zero * anything = +/- zero
-        if (aAbs == 0) return @bitCast(T, productSign);
+        if (aAbs == 0) return @as(T, @bitCast(productSign));
         // anything * zero = +/- zero
-        if (bAbs == 0) return @bitCast(T, productSign);
+        if (bAbs == 0) return @as(T, @bitCast(productSign));
 
         // one or both of a or b is denormal, the other (if applicable) is a
         // normal number.  Renormalize one or both of a and b, and set scale to
@@ -99,7 +99,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
     const left_align_shift = ZSignificandBits - fractionalBits - 1;
     common.wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo);
 
-    var productExponent: i32 = @intCast(i32, aExponent + bExponent) - exponentBias + scale;
+    var productExponent: i32 = @as(i32, @intCast(aExponent + bExponent)) - exponentBias + scale;
 
     // Normalize the significand, adjust exponent if needed.
     if ((productHi & integerBit) != 0) {
@@ -110,7 +110,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
     }
 
     // If we have overflowed the type, return +/- infinity.
-    if (productExponent >= maxExponent) return @bitCast(T, infRep | productSign);
+    if (productExponent >= maxExponent) return @as(T, @bitCast(infRep | productSign));
 
     var result: Z = undefined;
     if (productExponent <= 0) {
@@ -120,8 +120,8 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
         // a zero of the appropriate sign.  Mathematically there is no need to
         // handle this case separately, but we make it a special case to
         // simplify the shift logic.
-        const shift: u32 = @truncate(u32, @as(Z, 1) -% @bitCast(u32, productExponent));
-        if (shift >= ZSignificandBits) return @bitCast(T, productSign);
+        const shift: u32 = @as(u32, @truncate(@as(Z, 1) -% @as(u32, @bitCast(productExponent))));
+        if (shift >= ZSignificandBits) return @as(T, @bitCast(productSign));
 
         // Otherwise, shift the significand of the result so that the round
         // bit is the high bit of productLo.
@@ -135,7 +135,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
     } else {
         // Result is normal before rounding; insert the exponent.
         result = productHi & significandMask;
-        result |= @intCast(Z, productExponent) << significandBits;
+        result |= @as(Z, @intCast(productExponent)) << significandBits;
     }
 
     // Final rounding.  The final result may overflow to infinity, or underflow
@@ -156,7 +156,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
     // Insert the sign of the result:
     result |= productSign;
 
-    return @bitCast(T, result);
+    return @as(T, @bitCast(result));
 }
 
 /// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero)
@@ -168,12 +168,12 @@ fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
     const S = math.Log2Int(Z);
     var inexact = false;
     if (count < typeWidth) {
-        inexact = (lo.* << @intCast(S, typeWidth -% count)) != 0;
-        lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count));
-        hi.* = hi.* >> @intCast(S, count);
+        inexact = (lo.* << @as(S, @intCast(typeWidth -% count))) != 0;
+        lo.* = (hi.* << @as(S, @intCast(typeWidth -% count))) | (lo.* >> @as(S, @intCast(count)));
+        hi.* = hi.* >> @as(S, @intCast(count));
     } else if (count < 2 * typeWidth) {
-        inexact = (hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*) != 0;
-        lo.* = hi.* >> @intCast(S, count -% typeWidth);
+        inexact = (hi.* << @as(S, @intCast(2 * typeWidth -% count)) | lo.*) != 0;
+        lo.* = hi.* >> @as(S, @intCast(count -% typeWidth));
         hi.* = 0;
     } else {
         inexact = (hi.* | lo.*) != 0;
@@ -188,7 +188,7 @@ fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
     const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
 
     const shift = @clz(significand.*) - @clz(integerBit);
-    significand.* <<= @intCast(math.Log2Int(Z), shift);
+    significand.* <<= @as(math.Log2Int(Z), @intCast(shift));
     return @as(i32, 1) - shift;
 }
 
lib/compiler_rt/mulf3_test.zig
@@ -4,8 +4,8 @@
 
 const std = @import("std");
 const math = std.math;
-const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
-const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64);
+const qnan128 = @as(f128, @bitCast(@as(u128, 0x7fff800000000000) << 64));
+const inf128 = @as(f128, @bitCast(@as(u128, 0x7fff000000000000) << 64));
 
 const __multf3 = @import("multf3.zig").__multf3;
 const __mulxf3 = @import("mulxf3.zig").__mulxf3;
@@ -16,9 +16,9 @@ const __mulsf3 = @import("mulsf3.zig").__mulsf3;
 // use two 64-bit integers instead of one 128-bit integer
 // because a 128-bit integer constant can't be assigned directly
 fn compareResultLD(result: f128, expectedHi: u64, expectedLo: u64) bool {
-    const rep = @bitCast(u128, result);
-    const hi = @intCast(u64, rep >> 64);
-    const lo = @truncate(u64, rep);
+    const rep = @as(u128, @bitCast(result));
+    const hi = @as(u64, @intCast(rep >> 64));
+    const lo = @as(u64, @truncate(rep));
 
     if (hi == expectedHi and lo == expectedLo) {
         return true;
@@ -45,7 +45,7 @@ fn test__multf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
 
 fn makeNaN128(rand: u64) f128 {
     const int_result = @as(u128, 0x7fff000000000000 | (rand & 0xffffffffffff)) << 64;
-    const float_result = @bitCast(f128, int_result);
+    const float_result = @as(f128, @bitCast(int_result));
     return float_result;
 }
 test "multf3" {
@@ -60,15 +60,15 @@ test "multf3" {
 
     // any * any
     try test__multf3(
-        @bitCast(f128, @as(u128, 0x40042eab345678439abcdefea5678234)),
-        @bitCast(f128, @as(u128, 0x3ffeedcb34a235253948765432134675)),
+        @as(f128, @bitCast(@as(u128, 0x40042eab345678439abcdefea5678234))),
+        @as(f128, @bitCast(@as(u128, 0x3ffeedcb34a235253948765432134675))),
         0x400423e7f9e3c9fc,
         0xd906c2c2a85777c4,
     );
 
     try test__multf3(
-        @bitCast(f128, @as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50)),
-        @bitCast(f128, @as(u128, 0x3ff6ed8764648369535adf4be3214568)),
+        @as(f128, @bitCast(@as(u128, 0x3fcd353e45674d89abacc3a2ebf3ff50))),
+        @as(f128, @bitCast(@as(u128, 0x3ff6ed8764648369535adf4be3214568))),
         0x3fc52a163c6223fc,
         0xc94c4bf0430768b4,
     );
@@ -81,8 +81,8 @@ test "multf3" {
     );
 
     try test__multf3(
-        @bitCast(f128, @as(u128, 0x3f154356473c82a9fabf2d22ace345df)),
-        @bitCast(f128, @as(u128, 0x3e38eda98765476743ab21da23d45679)),
+        @as(f128, @bitCast(@as(u128, 0x3f154356473c82a9fabf2d22ace345df))),
+        @as(f128, @bitCast(@as(u128, 0x3e38eda98765476743ab21da23d45679))),
         0x3d4f37c1a3137cae,
         0xfc6807048bc2836a,
     );
@@ -108,16 +108,16 @@ test "multf3" {
     try test__multf3(2.0, math.floatTrueMin(f128), 0x0000_0000_0000_0000, 0x0000_0000_0000_0002);
 }
 
-const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
+const qnan80 = @as(f80, @bitCast(@as(u80, @bitCast(math.nan(f80))) | (1 << (math.floatFractionalBits(f80) - 1))));
 
 fn test__mulxf3(a: f80, b: f80, expected: u80) !void {
     const x = __mulxf3(a, b);
-    const rep = @bitCast(u80, x);
+    const rep = @as(u80, @bitCast(x));
 
     if (rep == expected)
         return;
 
-    if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
+    if (math.isNan(@as(f80, @bitCast(expected))) and math.isNan(x))
         return; // We don't currently test NaN payload propagation
 
     return error.TestFailed;
@@ -125,33 +125,33 @@ fn test__mulxf3(a: f80, b: f80, expected: u80) !void {
 
 test "mulxf3" {
     // NaN * any = NaN
-    try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
-    try test__mulxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
+    try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80)));
+    try test__mulxf3(@as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), 0x1.23456789abcdefp+5, @as(u80, @bitCast(qnan80)));
 
     // any * NaN = NaN
-    try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80));
-    try test__mulxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80));
+    try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @as(u80, @bitCast(qnan80)));
+    try test__mulxf3(0x1.23456789abcdefp+5, @as(f80, @bitCast(@as(u80, 0x7fff_8000_8000_3000_0000))), @as(u80, @bitCast(qnan80)));
 
     // NaN * inf = NaN
-    try test__mulxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80));
+    try test__mulxf3(qnan80, math.inf(f80), @as(u80, @bitCast(qnan80)));
 
     // inf * NaN = NaN
-    try test__mulxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80));
+    try test__mulxf3(math.inf(f80), qnan80, @as(u80, @bitCast(qnan80)));
 
     // inf * inf = inf
-    try test__mulxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80)));
+    try test__mulxf3(math.inf(f80), math.inf(f80), @as(u80, @bitCast(math.inf(f80))));
 
     // inf * -inf = -inf
-    try test__mulxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, -math.inf(f80)));
+    try test__mulxf3(math.inf(f80), -math.inf(f80), @as(u80, @bitCast(-math.inf(f80))));
 
     // -inf * inf = -inf
-    try test__mulxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, -math.inf(f80)));
+    try test__mulxf3(-math.inf(f80), math.inf(f80), @as(u80, @bitCast(-math.inf(f80))));
 
     // inf * any = inf
-    try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80)));
+    try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @as(u80, @bitCast(math.inf(f80))));
 
     // any * inf = inf
-    try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80)));
+    try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @as(u80, @bitCast(math.inf(f80))));
 
     // any * any
     try test__mulxf3(0x1.0p+0, 0x1.dcba987654321p+5, 0x4004_ee5d_4c3b_2a19_0800);
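
The qnan80 and qnan128 constants above are built by bit-casting a quiet-NaN bit pattern into the float type. A quick sanity-check sketch:

    const std = @import("std");
    const qnan128 = @as(f128, @bitCast(@as(u128, 0x7fff800000000000) << 64));

    test "qnan128 is a NaN" {
        try std.testing.expect(std.math.isNan(qnan128));
    }
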
lib/compiler_rt/mulo.zig
@@ -45,7 +45,7 @@ inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int)
     // invariant: -2^{bitwidth(EST)} < res < 2^{bitwidth(EST)-1}
     if (res < min or max < res)
         overflow.* = 1;
-    return @truncate(ST, res);
+    return @as(ST, @truncate(res));
 }
 
 pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
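
The generic fast path above computes the product in a type twice as wide, flags overflow with a range check, and only then truncates. Specialized to i32 purely for illustration (function name hypothetical):

    const std = @import("std");

    fn mulo32(a: i32, b: i32, overflow: *c_int) i32 {
        overflow.* = 0;
        const res = @as(i64, a) * @as(i64, b);
        if (res < std.math.minInt(i32) or res > std.math.maxInt(i32))
            overflow.* = 1;
        return @as(i32, @truncate(res));
    }

    test "mulo32" {
        var o: c_int = undefined;
        try std.testing.expectEqual(@as(i32, std.math.minInt(i32)), mulo32(0x40000000, 2, &o));
        try std.testing.expectEqual(@as(c_int, 1), o);
    }
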
lib/compiler_rt/mulodi4_test.zig
@@ -54,34 +54,34 @@ test "mulodi4" {
 
     try test__mulodi4(0x7FFFFFFFFFFFFFFF, -2, 2, 1);
     try test__mulodi4(-2, 0x7FFFFFFFFFFFFFFF, 2, 1);
-    try test__mulodi4(0x7FFFFFFFFFFFFFFF, -1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0);
-    try test__mulodi4(-1, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 0);
+    try test__mulodi4(0x7FFFFFFFFFFFFFFF, -1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0);
+    try test__mulodi4(-1, 0x7FFFFFFFFFFFFFFF, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0);
     try test__mulodi4(0x7FFFFFFFFFFFFFFF, 0, 0, 0);
     try test__mulodi4(0, 0x7FFFFFFFFFFFFFFF, 0, 0);
     try test__mulodi4(0x7FFFFFFFFFFFFFFF, 1, 0x7FFFFFFFFFFFFFFF, 0);
     try test__mulodi4(1, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0);
-    try test__mulodi4(0x7FFFFFFFFFFFFFFF, 2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1);
-    try test__mulodi4(2, 0x7FFFFFFFFFFFFFFF, @bitCast(i64, @as(u64, 0x8000000000000001)), 1);
+    try test__mulodi4(0x7FFFFFFFFFFFFFFF, 2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1);
+    try test__mulodi4(2, 0x7FFFFFFFFFFFFFFF, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1);
 
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
-    try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), -1, @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
-    try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0, 0);
-    try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000000)), 0, 0);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 1, @bitCast(i64, @as(u64, 0x8000000000000000)), 0);
-    try test__mulodi4(1, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 0);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000000)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
-    try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000000)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
+    try test__mulodi4(-2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), -1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
+    try test__mulodi4(-1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0, 0, 0);
+    try test__mulodi4(0, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0, 0);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0);
+    try test__mulodi4(1, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 0);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000000))), 2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
+    try test__mulodi4(2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
 
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -2, @bitCast(i64, @as(u64, 0x8000000000000001)), 1);
-    try test__mulodi4(-2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 1);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), -1, 0x7FFFFFFFFFFFFFFF, 0);
-    try test__mulodi4(-1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0x7FFFFFFFFFFFFFFF, 0);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0, 0);
-    try test__mulodi4(0, @bitCast(i64, @as(u64, 0x8000000000000001)), 0, 0);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 1, @bitCast(i64, @as(u64, 0x8000000000000001)), 0);
-    try test__mulodi4(1, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000001)), 0);
-    try test__mulodi4(@bitCast(i64, @as(u64, 0x8000000000000001)), 2, @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
-    try test__mulodi4(2, @bitCast(i64, @as(u64, 0x8000000000000001)), @bitCast(i64, @as(u64, 0x8000000000000000)), 1);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1);
+    try test__mulodi4(-2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), -1, 0x7FFFFFFFFFFFFFFF, 0);
+    try test__mulodi4(-1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0x7FFFFFFFFFFFFFFF, 0);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0, 0, 0);
+    try test__mulodi4(0, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0, 0);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0);
+    try test__mulodi4(1, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000001))), 0);
+    try test__mulodi4(@as(i64, @bitCast(@as(u64, 0x8000000000000001))), 2, @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
+    try test__mulodi4(2, @as(i64, @bitCast(@as(u64, 0x8000000000000001))), @as(i64, @bitCast(@as(u64, 0x8000000000000000))), 1);
 }
lib/compiler_rt/mulosi4_test.zig
@@ -37,36 +37,36 @@ test "mulosi4" {
     try test__mulosi4(1, -0x1234567, -0x1234567, 0);
     try test__mulosi4(-0x1234567, 1, -0x1234567, 0);
 
-    try test__mulosi4(0x7FFFFFFF, -2, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__mulosi4(-2, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__mulosi4(0x7FFFFFFF, -1, @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__mulosi4(-1, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 0);
+    try test__mulosi4(0x7FFFFFFF, -2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__mulosi4(-2, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__mulosi4(0x7FFFFFFF, -1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__mulosi4(-1, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
     try test__mulosi4(0x7FFFFFFF, 0, 0, 0);
     try test__mulosi4(0, 0x7FFFFFFF, 0, 0);
     try test__mulosi4(0x7FFFFFFF, 1, 0x7FFFFFFF, 0);
     try test__mulosi4(1, 0x7FFFFFFF, 0x7FFFFFFF, 0);
-    try test__mulosi4(0x7FFFFFFF, 2, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__mulosi4(2, 0x7FFFFFFF, @bitCast(i32, @as(u32, 0x80000001)), 1);
+    try test__mulosi4(0x7FFFFFFF, 2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__mulosi4(2, 0x7FFFFFFF, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
 
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), -2, @bitCast(i32, @as(u32, 0x80000000)), 1);
-    try test__mulosi4(-2, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), -1, @bitCast(i32, @as(u32, 0x80000000)), 1);
-    try test__mulosi4(-1, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 0, 0, 0);
-    try test__mulosi4(0, @bitCast(i32, @as(u32, 0x80000000)), 0, 0);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 1, @bitCast(i32, @as(u32, 0x80000000)), 0);
-    try test__mulosi4(1, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 0);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000000)), 2, @bitCast(i32, @as(u32, 0x80000000)), 1);
-    try test__mulosi4(2, @bitCast(i32, @as(u32, 0x80000000)), @bitCast(i32, @as(u32, 0x80000000)), 1);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), -2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
+    try test__mulosi4(-2, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), -1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
+    try test__mulosi4(-1, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 0, 0, 0);
+    try test__mulosi4(0, @as(i32, @bitCast(@as(u32, 0x80000000))), 0, 0);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 1, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
+    try test__mulosi4(1, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000000))), 2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
+    try test__mulosi4(2, @as(i32, @bitCast(@as(u32, 0x80000000))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
 
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), -2, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__mulosi4(-2, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), -1, 0x7FFFFFFF, 0);
-    try test__mulosi4(-1, @bitCast(i32, @as(u32, 0x80000001)), 0x7FFFFFFF, 0);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 0, 0, 0);
-    try test__mulosi4(0, @bitCast(i32, @as(u32, 0x80000001)), 0, 0);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 1, @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__mulosi4(1, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__mulosi4(@bitCast(i32, @as(u32, 0x80000001)), 2, @bitCast(i32, @as(u32, 0x80000000)), 1);
-    try test__mulosi4(2, @bitCast(i32, @as(u32, 0x80000001)), @bitCast(i32, @as(u32, 0x80000000)), 1);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), -2, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__mulosi4(-2, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), -1, 0x7FFFFFFF, 0);
+    try test__mulosi4(-1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0x7FFFFFFF, 0);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 0, 0, 0);
+    try test__mulosi4(0, @as(i32, @bitCast(@as(u32, 0x80000001))), 0, 0);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 1, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__mulosi4(1, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__mulosi4(@as(i32, @bitCast(@as(u32, 0x80000001))), 2, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
+    try test__mulosi4(2, @as(i32, @bitCast(@as(u32, 0x80000001))), @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
 }
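
In each row above, the third argument is the expected product and the fourth the expected overflow flag. A rough analogue with the language builtin (a sketch only; the builtin specifies the wrapped product, which need not match what `__mulosi4` returns on overflow):

    const std = @import("std");

    test "overflow flag analogue" {
        // @mulWithOverflow returns .{ wrapped_product, overflow_bit }.
        const r = @mulWithOverflow(@as(i32, 0x7FFFFFFF), 2);
        try std.testing.expectEqual(@as(u1, 1), r[1]); // overflowed
        try std.testing.expectEqual(@as(i32, -2), r[0]); // 0xFFFFFFFE reinterpreted as i32
    }
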
lib/compiler_rt/muloti4_test.zig
@@ -52,38 +52,38 @@ test "muloti4" {
     try test__muloti4(2097152, -4398046511103, -9223372036852678656, 0);
     try test__muloti4(-2097152, -4398046511103, 9223372036852678656, 0);
 
-    try test__muloti4(@bitCast(i128, @as(u128, 0x00000000000000B504F333F9DE5BE000)), @bitCast(i128, @as(u128, 0x000000000000000000B504F333F9DE5B)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFF328DF915DA296E8A000)), 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1);
-    try test__muloti4(-2, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x00000000000000B504F333F9DE5BE000))), @as(i128, @bitCast(@as(u128, 0x000000000000000000B504F333F9DE5B))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFF328DF915DA296E8A000))), 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1);
+    try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1);
 
-    try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0);
-    try test__muloti4(-1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0, 0);
-    try test__muloti4(0, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
-    try test__muloti4(1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1);
-    try test__muloti4(2, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), -1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0);
+    try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0, 0, 0);
+    try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0, 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0);
+    try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1);
+    try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1);
 
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
-    try test__muloti4(-2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), -1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
-    try test__muloti4(-1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0, 0, 0);
-    try test__muloti4(0, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0, 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0);
-    try test__muloti4(1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
-    try test__muloti4(2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
+    try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), -1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
+    try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0, 0, 0);
+    try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0, 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0);
+    try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
+    try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
 
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), -2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1);
-    try test__muloti4(-2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), -1, @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
-    try test__muloti4(-1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0, 0, 0);
-    try test__muloti4(0, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0, 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0);
-    try test__muloti4(1, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 0);
-    try test__muloti4(@bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), 2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
-    try test__muloti4(2, @bitCast(i128, @as(u128, 0x80000000000000000000000000000001)), @bitCast(i128, @as(u128, 0x80000000000000000000000000000000)), 1);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), -2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1);
+    try test__muloti4(-2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), -1, @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0);
+    try test__muloti4(-1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))), 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0, 0, 0);
+    try test__muloti4(0, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0, 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0);
+    try test__muloti4(1, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 0);
+    try test__muloti4(@as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), 2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
+    try test__muloti4(2, @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000001))), @as(i128, @bitCast(@as(u128, 0x80000000000000000000000000000000))), 1);
 }
lib/compiler_rt/mulXi3.zig
@@ -21,8 +21,8 @@ comptime {
 }
 
 pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
-    var ua = @bitCast(u32, a);
-    var ub = @bitCast(u32, b);
+    var ua = @as(u32, @bitCast(a));
+    var ub = @as(u32, @bitCast(b));
     var r: u32 = 0;
 
     while (ua > 0) {
@@ -31,7 +31,7 @@ pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
         ub <<= 1;
     }
 
-    return @bitCast(i32, r);
+    return @as(i32, @bitCast(r));
 }
 
 pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
@@ -93,7 +93,7 @@ pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
 const v2u64 = @Vector(2, u64);
 
 fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
-    return @bitCast(v2u64, mulX(i128, @bitCast(i128, a), @bitCast(i128, b)));
+    return @as(v2u64, @bitCast(mulX(i128, @as(i128, @bitCast(a)), @as(i128, @bitCast(b)))));
 }
 
 test {
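
`__mulsi3` above is plain shift-and-add multiplication on the unsigned bit patterns: add `b` into the accumulator wherever `a` has a set bit, shifting both along. A hypothetical u8-sized helper tracing the same loop:

    const std = @import("std");

    fn shiftAddMul(a: u8, b: u8) u8 {
        var ua = a;
        var ub = b;
        var r: u8 = 0;
        while (ua > 0) {
            if (ua & 1 != 0) r +%= ub; // wrapping add, like the u32 version
            ua >>= 1;
            ub <<= 1; // bits shifted off the top are discarded
        }
        return r;
    }

    test "shift-and-add multiply" {
        try std.testing.expectEqual(@as(u8, 42), shiftAddMul(6, 7));
    }
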
lib/compiler_rt/mulXi3_test.zig
@@ -46,14 +46,14 @@ test "mulsi3" {
     try test_one_mulsi3(-46340, 46340, -2147395600);
     try test_one_mulsi3(46340, -46340, -2147395600);
     try test_one_mulsi3(-46340, -46340, 2147395600);
-    try test_one_mulsi3(4194303, 8192, @truncate(i32, 34359730176));
-    try test_one_mulsi3(-4194303, 8192, @truncate(i32, -34359730176));
-    try test_one_mulsi3(4194303, -8192, @truncate(i32, -34359730176));
-    try test_one_mulsi3(-4194303, -8192, @truncate(i32, 34359730176));
-    try test_one_mulsi3(8192, 4194303, @truncate(i32, 34359730176));
-    try test_one_mulsi3(-8192, 4194303, @truncate(i32, -34359730176));
-    try test_one_mulsi3(8192, -4194303, @truncate(i32, -34359730176));
-    try test_one_mulsi3(-8192, -4194303, @truncate(i32, 34359730176));
+    try test_one_mulsi3(4194303, 8192, @as(i32, @truncate(34359730176)));
+    try test_one_mulsi3(-4194303, 8192, @as(i32, @truncate(-34359730176)));
+    try test_one_mulsi3(4194303, -8192, @as(i32, @truncate(-34359730176)));
+    try test_one_mulsi3(-4194303, -8192, @as(i32, @truncate(34359730176)));
+    try test_one_mulsi3(8192, 4194303, @as(i32, @truncate(34359730176)));
+    try test_one_mulsi3(-8192, 4194303, @as(i32, @truncate(-34359730176)));
+    try test_one_mulsi3(8192, -4194303, @as(i32, @truncate(-34359730176)));
+    try test_one_mulsi3(-8192, -4194303, @as(i32, @truncate(34359730176)));
 }
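
The `@truncate` rows above encode a concrete fact about discarding high bits: 4194303 * 8192 == 2^35 - 2^13, whose low 32 bits are 0xFFFFE000, i.e. -8192 as an i32. A standalone check:

    const std = @import("std");

    test "@truncate keeps only the low bits" {
        const wide: i64 = 34359730176; // 2^35 - 2^13
        const narrow = @as(i32, @truncate(wide));
        try std.testing.expectEqual(@as(i32, -8192), narrow);
    }
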
 
 test "muldi3" {
lib/compiler_rt/negv.zig
@@ -33,7 +33,7 @@ inline fn negvXi(comptime ST: type, a: ST) ST {
         else => unreachable,
     };
     const N: UT = @bitSizeOf(ST);
-    const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
+    const min: ST = @as(ST, @bitCast((@as(UT, 1) << (N - 1))));
     if (a == min)
         @panic("compiler_rt negv: overflow");
     return -a;
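
`negvXi` builds `min` by shifting a 1 into the sign bit and bit-casting, because minInt(ST) is the one value whose negation does not fit. A sketch of both facts:

    const std = @import("std");

    test "only minInt overflows under negation" {
        const min = @as(i32, @bitCast(@as(u32, 1) << 31));
        try std.testing.expectEqual(@as(i32, std.math.minInt(i32)), min);
        // Negation as 0 - min: the overflow bit fires.
        const r = @subWithOverflow(@as(i32, 0), min);
        try std.testing.expectEqual(@as(u1, 1), r[1]);
    }
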
lib/compiler_rt/parity.zig
@@ -27,9 +27,9 @@ pub fn __parityti2(a: i128) callconv(.C) i32 {
 
 inline fn parityXi2(comptime T: type, a: T) i32 {
     var x = switch (@bitSizeOf(T)) {
-        32 => @bitCast(u32, a),
-        64 => @bitCast(u64, a),
-        128 => @bitCast(u128, a),
+        32 => @as(u32, @bitCast(a)),
+        64 => @as(u64, @bitCast(a)),
+        128 => @as(u128, @bitCast(a)),
         else => unreachable,
     };
     // Bit Twiddling Hacks: Compute parity in parallel
@@ -39,7 +39,7 @@ inline fn parityXi2(comptime T: type, a: T) i32 {
         shift = shift >> 1;
     }
     x &= 0xf;
-    return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1
+    return (@as(u16, @intCast(0x6996)) >> @as(u4, @intCast(x))) & 1; // optimization for >>2 and >>1
 }
 
 test {
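
The magic 0x6996 in `parityXi2` is a 16-entry lookup table packed into one u16: bit i of 0x6996 is the parity of i, so once the argument has been folded down to a nibble, a single shift reads off the answer. A standalone check against `@popCount`:

    const std = @import("std");

    test "0x6996 packs the 4-bit parity table" {
        for (0..16) |n| {
            const i: u4 = @intCast(n);
            const expected: u1 = @truncate(@popCount(i)); // low bit of popcount = parity
            const got: u1 = @truncate(@as(u16, 0x6996) >> i);
            try std.testing.expectEqual(expected, got);
        }
    }
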
lib/compiler_rt/paritydi2_test.zig
@@ -3,13 +3,13 @@ const parity = @import("parity.zig");
 const testing = std.testing;
 
 fn paritydi2Naive(a: i64) i32 {
-    var x = @bitCast(u64, a);
+    var x = @as(u64, @bitCast(a));
     var has_parity: bool = false;
     while (x > 0) {
         has_parity = !has_parity;
         x = x & (x - 1);
     }
-    return @intCast(i32, @intFromBool(has_parity));
+    return @as(i32, @intCast(@intFromBool(has_parity)));
 }
 
 fn test__paritydi2(a: i64) !void {
@@ -22,9 +22,9 @@ test "paritydi2" {
     try test__paritydi2(0);
     try test__paritydi2(1);
     try test__paritydi2(2);
-    try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffd)));
-    try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe)));
-    try test__paritydi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff)));
+    try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffd))));
+    try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffe))));
+    try test__paritydi2(@as(i64, @bitCast(@as(u64, 0xffffffff_ffffffff))));
 
     const RndGen = std.rand.DefaultPrng;
     var rnd = RndGen.init(42);
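
The Naive reference loops in these parity tests rely on the Kernighan step `x & (x - 1)`, which clears exactly the lowest set bit, so `has_parity` toggles once per set bit. In isolation:

    const std = @import("std");

    test "x & (x - 1) clears the lowest set bit" {
        var x: u8 = 0b1011_0100;
        x &= x - 1;
        try std.testing.expectEqual(@as(u8, 0b1011_0000), x);
    }
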
lib/compiler_rt/paritysi2_test.zig
@@ -3,13 +3,13 @@ const parity = @import("parity.zig");
 const testing = std.testing;
 
 fn paritysi2Naive(a: i32) i32 {
-    var x = @bitCast(u32, a);
+    var x = @as(u32, @bitCast(a));
     var has_parity: bool = false;
     while (x > 0) {
         has_parity = !has_parity;
         x = x & (x - 1);
     }
-    return @intCast(i32, @intFromBool(has_parity));
+    return @as(i32, @intCast(@intFromBool(has_parity)));
 }
 
 fn test__paritysi2(a: i32) !void {
@@ -22,9 +22,9 @@ test "paritysi2" {
     try test__paritysi2(0);
     try test__paritysi2(1);
     try test__paritysi2(2);
-    try test__paritysi2(@bitCast(i32, @as(u32, 0xfffffffd)));
-    try test__paritysi2(@bitCast(i32, @as(u32, 0xfffffffe)));
-    try test__paritysi2(@bitCast(i32, @as(u32, 0xffffffff)));
+    try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xfffffffd))));
+    try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xfffffffe))));
+    try test__paritysi2(@as(i32, @bitCast(@as(u32, 0xffffffff))));
 
     const RndGen = std.rand.DefaultPrng;
     var rnd = RndGen.init(42);
lib/compiler_rt/parityti2_test.zig
@@ -3,13 +3,13 @@ const parity = @import("parity.zig");
 const testing = std.testing;
 
 fn parityti2Naive(a: i128) i32 {
-    var x = @bitCast(u128, a);
+    var x = @as(u128, @bitCast(a));
     var has_parity: bool = false;
     while (x > 0) {
         has_parity = !has_parity;
         x = x & (x - 1);
     }
-    return @intCast(i32, @intFromBool(has_parity));
+    return @as(i32, @intCast(@intFromBool(has_parity)));
 }
 
 fn test__parityti2(a: i128) !void {
@@ -22,9 +22,9 @@ test "parityti2" {
     try test__parityti2(0);
     try test__parityti2(1);
     try test__parityti2(2);
-    try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd)));
-    try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)));
-    try test__parityti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)));
+    try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd))));
+    try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe))));
+    try test__parityti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff))));
 
     const RndGen = std.rand.DefaultPrng;
     var rnd = RndGen.init(42);
lib/compiler_rt/popcount.zig
@@ -37,7 +37,7 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 {
         i128 => u128,
         else => unreachable,
     };
-    var x = @bitCast(UT, a);
+    var x = @as(UT, @bitCast(a));
     x -= (x >> 1) & (~@as(UT, 0) / 3); // 0x55...55, aggregate duos
     x = ((x >> 2) & (~@as(UT, 0) / 5)) // 0x33...33, aggregate nibbles
     + (x & (~@as(UT, 0) / 5));
@@ -46,7 +46,7 @@ inline fn popcountXi2(comptime ST: type, a: ST) i32 {
     // 8 most significant bits of x + (x<<8) + (x<<16) + ..
     x *%= ~@as(UT, 0) / 255; // 0x01...01
     x >>= (@bitSizeOf(ST) - 8);
-    return @intCast(i32, x);
+    return @as(i32, @intCast(x));
 }
 
 test {
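
`popcountXi2` above is the classic SWAR bit count: sum adjacent pairs, then nibbles, then bytes, and let one wrapping multiply accumulate every byte's count into the top byte. The same scheme with the u32 masks written out (a hypothetical helper, not the library routine):

    const std = @import("std");

    fn swarPopCount(v: u32) u32 {
        var x = v;
        x -= (x >> 1) & 0x55555555; // 2-bit pair counts
        x = (x & 0x33333333) + ((x >> 2) & 0x33333333); // nibble counts
        x = (x + (x >> 4)) & 0x0F0F0F0F; // byte counts
        return (x *% 0x01010101) >> 24; // byte sum lands in the top byte
    }

    test "SWAR popcount" {
        try std.testing.expectEqual(@as(u32, 24), swarPopCount(0xDEADBEEF));
    }
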
lib/compiler_rt/popcountdi2_test.zig
@@ -5,8 +5,8 @@ const testing = std.testing;
 fn popcountdi2Naive(a: i64) i32 {
     var x = a;
     var r: i32 = 0;
-    while (x != 0) : (x = @bitCast(i64, @bitCast(u64, x) >> 1)) {
-        r += @intCast(i32, x & 1);
+    while (x != 0) : (x = @as(i64, @bitCast(@as(u64, @bitCast(x)) >> 1))) {
+        r += @as(i32, @intCast(x & 1));
     }
     return r;
 }
@@ -21,9 +21,9 @@ test "popcountdi2" {
     try test__popcountdi2(0);
     try test__popcountdi2(1);
     try test__popcountdi2(2);
-    try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffd)));
-    try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_fffffffe)));
-    try test__popcountdi2(@bitCast(i64, @as(u64, 0xffffffff_ffffffff)));
+    try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffd))));
+    try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_fffffffe))));
+    try test__popcountdi2(@as(i64, @bitCast(@as(u64, 0xffffffff_ffffffff))));
 
     const RndGen = std.rand.DefaultPrng;
     var rnd = RndGen.init(42);
lib/compiler_rt/popcountsi2_test.zig
@@ -5,8 +5,8 @@ const testing = std.testing;
 fn popcountsi2Naive(a: i32) i32 {
     var x = a;
     var r: i32 = 0;
-    while (x != 0) : (x = @bitCast(i32, @bitCast(u32, x) >> 1)) {
-        r += @intCast(i32, x & 1);
+    while (x != 0) : (x = @as(i32, @bitCast(@as(u32, @bitCast(x)) >> 1))) {
+        r += @as(i32, @intCast(x & 1));
     }
     return r;
 }
@@ -21,9 +21,9 @@ test "popcountsi2" {
     try test__popcountsi2(0);
     try test__popcountsi2(1);
     try test__popcountsi2(2);
-    try test__popcountsi2(@bitCast(i32, @as(u32, 0xfffffffd)));
-    try test__popcountsi2(@bitCast(i32, @as(u32, 0xfffffffe)));
-    try test__popcountsi2(@bitCast(i32, @as(u32, 0xffffffff)));
+    try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xfffffffd))));
+    try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xfffffffe))));
+    try test__popcountsi2(@as(i32, @bitCast(@as(u32, 0xffffffff))));
 
     const RndGen = std.rand.DefaultPrng;
     var rnd = RndGen.init(42);
lib/compiler_rt/popcountti2_test.zig
@@ -5,8 +5,8 @@ const testing = std.testing;
 fn popcountti2Naive(a: i128) i32 {
     var x = a;
     var r: i32 = 0;
-    while (x != 0) : (x = @bitCast(i128, @bitCast(u128, x) >> 1)) {
-        r += @intCast(i32, x & 1);
+    while (x != 0) : (x = @as(i128, @bitCast(@as(u128, @bitCast(x)) >> 1))) {
+        r += @as(i32, @intCast(x & 1));
     }
     return r;
 }
@@ -21,9 +21,9 @@ test "popcountti2" {
     try test__popcountti2(0);
     try test__popcountti2(1);
     try test__popcountti2(2);
-    try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd)));
-    try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe)));
-    try test__popcountti2(@bitCast(i128, @as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff)));
+    try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffd))));
+    try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_fffffffe))));
+    try test__popcountti2(@as(i128, @bitCast(@as(u128, 0xffffffff_ffffffff_ffffffff_ffffffff))));
 
     const RndGen = std.rand.DefaultPrng;
     var rnd = RndGen.init(42);
lib/compiler_rt/powiXf2.zig
@@ -25,7 +25,7 @@ inline fn powiXf2(comptime FT: type, a: FT, b: i32) FT {
     const is_recip: bool = b < 0;
     var r: FT = 1.0;
     while (true) {
-        if (@bitCast(u32, x_b) & @as(u32, 1) != 0) {
+        if (@as(u32, @bitCast(x_b)) & @as(u32, 1) != 0) {
             r *= x_a;
         }
         x_b = @divTrunc(x_b, @as(i32, 2));
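
`powiXf2` is binary exponentiation: the loop consumes the exponent bit by bit (the `@bitCast` line above tests the low bit even when `x_b` is negative), squaring the base at each step and multiplying it in on set bits. The integer skeleton, as a hypothetical helper:

    const std = @import("std");

    fn powi(base: u64, exp: u32) u64 {
        var b = base;
        var e = exp;
        var r: u64 = 1;
        while (e != 0) : (e >>= 1) {
            if (e & 1 != 0) r *%= b; // multiply in on set bits
            b *%= b; // square once per bit
        }
        return r;
    }

    test "square-and-multiply" {
        try std.testing.expectEqual(@as(u64, 1024), powi(2, 10));
    }
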
lib/compiler_rt/powiXf2_test.zig
@@ -49,76 +49,76 @@ test "powihf2" {
     try test__powihf2(0, 2, 0);
     try test__powihf2(0, 3, 0);
     try test__powihf2(0, 4, 0);
-    try test__powihf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powihf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0);
+    try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0);
 
     try test__powihf2(-0.0, 1, -0.0);
     try test__powihf2(-0.0, 2, 0);
     try test__powihf2(-0.0, 3, -0.0);
     try test__powihf2(-0.0, 4, 0);
-    try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0);
+    try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0);
 
     try test__powihf2(1, 1, 1);
     try test__powihf2(1, 2, 1);
     try test__powihf2(1, 3, 1);
     try test__powihf2(1, 4, 1);
-    try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1);
-    try test__powihf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1);
+    try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1);
+    try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1);
 
     try test__powihf2(inf_f16, 1, inf_f16);
     try test__powihf2(inf_f16, 2, inf_f16);
     try test__powihf2(inf_f16, 3, inf_f16);
     try test__powihf2(inf_f16, 4, inf_f16);
-    try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16);
-    try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f16);
+    try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f16);
+    try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f16);
 
     try test__powihf2(-inf_f16, 1, -inf_f16);
     try test__powihf2(-inf_f16, 2, inf_f16);
     try test__powihf2(-inf_f16, 3, -inf_f16);
     try test__powihf2(-inf_f16, 4, inf_f16);
-    try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f16);
-    try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f16);
+    try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f16);
+    try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f16);
     //
     try test__powihf2(0, -1, inf_f16);
     try test__powihf2(0, -2, inf_f16);
     try test__powihf2(0, -3, inf_f16);
     try test__powihf2(0, -4, inf_f16);
-    try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // 0 ^ anything = +inf
-    try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f16);
-    try test__powihf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16);
+    try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f16); // 0 ^ anything = +inf
+    try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f16);
+    try test__powihf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f16);
 
     try test__powihf2(-0.0, -1, -inf_f16);
     try test__powihf2(-0.0, -2, inf_f16);
     try test__powihf2(-0.0, -3, -inf_f16);
     try test__powihf2(-0.0, -4, inf_f16);
-    try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f16); // -0 ^ anything even = +inf
-    try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f16); // -0 ^ anything odd = -inf
-    try test__powihf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f16);
+    try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f16); // -0 ^ anything even = +inf
+    try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f16); // -0 ^ anything odd = -inf
+    try test__powihf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f16);
 
     try test__powihf2(1, -1, 1);
     try test__powihf2(1, -2, 1);
     try test__powihf2(1, -3, 1);
     try test__powihf2(1, -4, 1);
-    try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1); // 1.0 ^ anything = 1
-    try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__powihf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1);
+    try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1); // 1.0 ^ anything = 1
+    try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__powihf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
 
     try test__powihf2(inf_f16, -1, 0);
     try test__powihf2(inf_f16, -2, 0);
     try test__powihf2(inf_f16, -3, 0);
     try test__powihf2(inf_f16, -4, 0);
-    try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__powihf2(inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__powihf2(inf_f16, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
     //
     try test__powihf2(-inf_f16, -1, -0.0);
     try test__powihf2(-inf_f16, -2, 0);
     try test__powihf2(-inf_f16, -3, -0.0);
     try test__powihf2(-inf_f16, -4, 0);
-    try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000001)), -0.0);
-    try test__powihf2(-inf_f16, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0);
+    try test__powihf2(-inf_f16, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powihf2(2, 10, 1024.0);
     try test__powihf2(-2, 10, 1024.0);
@@ -158,76 +158,76 @@ test "powisf2" {
     try test__powisf2(0, 2, 0);
     try test__powisf2(0, 3, 0);
     try test__powisf2(0, 4, 0);
-    try test__powisf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powisf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0);
+    try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0);
 
     try test__powisf2(-0.0, 1, -0.0);
     try test__powisf2(-0.0, 2, 0);
     try test__powisf2(-0.0, 3, -0.0);
     try test__powisf2(-0.0, 4, 0);
-    try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0);
+    try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0);
 
     try test__powisf2(1, 1, 1);
     try test__powisf2(1, 2, 1);
     try test__powisf2(1, 3, 1);
     try test__powisf2(1, 4, 1);
-    try test__powisf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1);
-    try test__powisf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1);
+    try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1);
+    try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1);
 
     try test__powisf2(inf_f32, 1, inf_f32);
     try test__powisf2(inf_f32, 2, inf_f32);
     try test__powisf2(inf_f32, 3, inf_f32);
     try test__powisf2(inf_f32, 4, inf_f32);
-    try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32);
-    try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f32);
+    try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f32);
+    try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f32);
 
     try test__powisf2(-inf_f32, 1, -inf_f32);
     try test__powisf2(-inf_f32, 2, inf_f32);
     try test__powisf2(-inf_f32, 3, -inf_f32);
     try test__powisf2(-inf_f32, 4, inf_f32);
-    try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f32);
-    try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f32);
+    try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f32);
+    try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f32);
 
     try test__powisf2(0, -1, inf_f32);
     try test__powisf2(0, -2, inf_f32);
     try test__powisf2(0, -3, inf_f32);
     try test__powisf2(0, -4, inf_f32);
-    try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32);
-    try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f32);
-    try test__powisf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32);
+    try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f32);
+    try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f32);
+    try test__powisf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f32);
 
     try test__powisf2(-0.0, -1, -inf_f32);
     try test__powisf2(-0.0, -2, inf_f32);
     try test__powisf2(-0.0, -3, -inf_f32);
     try test__powisf2(-0.0, -4, inf_f32);
-    try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f32);
-    try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f32);
-    try test__powisf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f32);
+    try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f32);
+    try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f32);
+    try test__powisf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f32);
 
     try test__powisf2(1, -1, 1);
     try test__powisf2(1, -2, 1);
     try test__powisf2(1, -3, 1);
     try test__powisf2(1, -4, 1);
-    try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1);
-    try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__powisf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1);
+    try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1);
+    try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__powisf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
 
     try test__powisf2(inf_f32, -1, 0);
     try test__powisf2(inf_f32, -2, 0);
     try test__powisf2(inf_f32, -3, 0);
     try test__powisf2(inf_f32, -4, 0);
-    try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__powisf2(inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__powisf2(inf_f32, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powisf2(-inf_f32, -1, -0.0);
     try test__powisf2(-inf_f32, -2, 0);
     try test__powisf2(-inf_f32, -3, -0.0);
     try test__powisf2(-inf_f32, -4, 0);
-    try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000001)), -0.0);
-    try test__powisf2(-inf_f32, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0);
+    try test__powisf2(-inf_f32, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powisf2(2.0, 10, 1024.0);
     try test__powisf2(-2, 10, 1024.0);
@@ -263,76 +263,76 @@ test "powidf2" {
     try test__powidf2(0, 2, 0);
     try test__powidf2(0, 3, 0);
     try test__powidf2(0, 4, 0);
-    try test__powidf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powidf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0);
+    try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0);
 
     try test__powidf2(-0.0, 1, -0.0);
     try test__powidf2(-0.0, 2, 0);
     try test__powidf2(-0.0, 3, -0.0);
     try test__powidf2(-0.0, 4, 0);
-    try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0);
+    try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0);
 
     try test__powidf2(1, 1, 1);
     try test__powidf2(1, 2, 1);
     try test__powidf2(1, 3, 1);
     try test__powidf2(1, 4, 1);
-    try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1);
-    try test__powidf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1);
+    try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1);
+    try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1);
 
     try test__powidf2(inf_f64, 1, inf_f64);
     try test__powidf2(inf_f64, 2, inf_f64);
     try test__powidf2(inf_f64, 3, inf_f64);
     try test__powidf2(inf_f64, 4, inf_f64);
-    try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64);
-    try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f64);
+    try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f64);
+    try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f64);
 
     try test__powidf2(-inf_f64, 1, -inf_f64);
     try test__powidf2(-inf_f64, 2, inf_f64);
     try test__powidf2(-inf_f64, 3, -inf_f64);
     try test__powidf2(-inf_f64, 4, inf_f64);
-    try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f64);
-    try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f64);
+    try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f64);
+    try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f64);
 
     try test__powidf2(0, -1, inf_f64);
     try test__powidf2(0, -2, inf_f64);
     try test__powidf2(0, -3, inf_f64);
     try test__powidf2(0, -4, inf_f64);
-    try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64);
-    try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f64);
-    try test__powidf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f64);
+    try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f64);
+    try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f64);
+    try test__powidf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f64);
 
     try test__powidf2(-0.0, -1, -inf_f64);
     try test__powidf2(-0.0, -2, inf_f64);
     try test__powidf2(-0.0, -3, -inf_f64);
     try test__powidf2(-0.0, -4, inf_f64);
-    try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f64);
-    try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f64);
-    try test__powidf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f64);
+    try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f64);
+    try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f64);
+    try test__powidf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f64);
 
     try test__powidf2(1, -1, 1);
     try test__powidf2(1, -2, 1);
     try test__powidf2(1, -3, 1);
     try test__powidf2(1, -4, 1);
-    try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1);
-    try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__powidf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1);
+    try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1);
+    try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__powidf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
 
     try test__powidf2(inf_f64, -1, 0);
     try test__powidf2(inf_f64, -2, 0);
     try test__powidf2(inf_f64, -3, 0);
     try test__powidf2(inf_f64, -4, 0);
-    try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__powidf2(inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__powidf2(inf_f64, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powidf2(-inf_f64, -1, -0.0);
     try test__powidf2(-inf_f64, -2, 0);
     try test__powidf2(-inf_f64, -3, -0.0);
     try test__powidf2(-inf_f64, -4, 0);
-    try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000001)), -0.0);
-    try test__powidf2(-inf_f64, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0);
+    try test__powidf2(-inf_f64, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powidf2(2, 10, 1024.0);
     try test__powidf2(-2, 10, 1024.0);
@@ -368,76 +368,76 @@ test "powitf2" {
     try test__powitf2(0, 2, 0);
     try test__powitf2(0, 3, 0);
     try test__powitf2(0, 4, 0);
-    try test__powitf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
+    try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
     try test__powitf2(0, 0x7FFFFFFF, 0);
 
     try test__powitf2(-0.0, 1, -0.0);
     try test__powitf2(-0.0, 2, 0);
     try test__powitf2(-0.0, 3, -0.0);
     try test__powitf2(-0.0, 4, 0);
-    try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0);
+    try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0);
 
     try test__powitf2(1, 1, 1);
     try test__powitf2(1, 2, 1);
     try test__powitf2(1, 3, 1);
     try test__powitf2(1, 4, 1);
-    try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1);
-    try test__powitf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1);
+    try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1);
+    try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1);
 
     try test__powitf2(inf_f128, 1, inf_f128);
     try test__powitf2(inf_f128, 2, inf_f128);
     try test__powitf2(inf_f128, 3, inf_f128);
     try test__powitf2(inf_f128, 4, inf_f128);
-    try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128);
-    try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f128);
+    try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f128);
+    try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f128);
 
     try test__powitf2(-inf_f128, 1, -inf_f128);
     try test__powitf2(-inf_f128, 2, inf_f128);
     try test__powitf2(-inf_f128, 3, -inf_f128);
     try test__powitf2(-inf_f128, 4, inf_f128);
-    try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f128);
-    try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f128);
+    try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f128);
+    try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f128);
 
     try test__powitf2(0, -1, inf_f128);
     try test__powitf2(0, -2, inf_f128);
     try test__powitf2(0, -3, inf_f128);
     try test__powitf2(0, -4, inf_f128);
-    try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128);
-    try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f128);
-    try test__powitf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128);
+    try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f128);
+    try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f128);
+    try test__powitf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f128);
 
     try test__powitf2(-0.0, -1, -inf_f128);
     try test__powitf2(-0.0, -2, inf_f128);
     try test__powitf2(-0.0, -3, -inf_f128);
     try test__powitf2(-0.0, -4, inf_f128);
-    try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f128);
-    try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f128);
-    try test__powitf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f128);
+    try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f128);
+    try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f128);
+    try test__powitf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f128);
 
     try test__powitf2(1, -1, 1);
     try test__powitf2(1, -2, 1);
     try test__powitf2(1, -3, 1);
     try test__powitf2(1, -4, 1);
-    try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1);
-    try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__powitf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1);
+    try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1);
+    try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__powitf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
 
     try test__powitf2(inf_f128, -1, 0);
     try test__powitf2(inf_f128, -2, 0);
     try test__powitf2(inf_f128, -3, 0);
     try test__powitf2(inf_f128, -4, 0);
-    try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__powitf2(inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__powitf2(inf_f128, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powitf2(-inf_f128, -1, -0.0);
     try test__powitf2(-inf_f128, -2, 0);
     try test__powitf2(-inf_f128, -3, -0.0);
     try test__powitf2(-inf_f128, -4, 0);
-    try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000001)), -0.0);
-    try test__powitf2(-inf_f128, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0);
+    try test__powitf2(-inf_f128, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powitf2(2, 10, 1024.0);
     try test__powitf2(-2, 10, 1024.0);
@@ -473,76 +473,76 @@ test "powixf2" {
     try test__powixf2(0, 2, 0);
     try test__powixf2(0, 3, 0);
     try test__powixf2(0, 4, 0);
-    try test__powixf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powixf2(0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 0);
+    try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 0);
 
     try test__powixf2(-0.0, 1, -0.0);
     try test__powixf2(-0.0, 2, 0);
     try test__powixf2(-0.0, 3, -0.0);
     try test__powixf2(-0.0, 4, 0);
-    try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 0);
-    try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -0.0);
+    try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 0);
+    try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -0.0);
 
     try test__powixf2(1, 1, 1);
     try test__powixf2(1, 2, 1);
     try test__powixf2(1, 3, 1);
     try test__powixf2(1, 4, 1);
-    try test__powixf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFE)), 1);
-    try test__powixf2(1, @bitCast(i32, @as(u32, 0x7FFFFFFF)), 1);
+    try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), 1);
+    try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), 1);
 
     try test__powixf2(inf_f80, 1, inf_f80);
     try test__powixf2(inf_f80, 2, inf_f80);
     try test__powixf2(inf_f80, 3, inf_f80);
     try test__powixf2(inf_f80, 4, inf_f80);
-    try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80);
-    try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), inf_f80);
+    try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f80);
+    try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), inf_f80);
 
     try test__powixf2(-inf_f80, 1, -inf_f80);
     try test__powixf2(-inf_f80, 2, inf_f80);
     try test__powixf2(-inf_f80, 3, -inf_f80);
     try test__powixf2(-inf_f80, 4, inf_f80);
-    try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFE)), inf_f80);
-    try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x7FFFFFFF)), -inf_f80);
+    try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFE))), inf_f80);
+    try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x7FFFFFFF))), -inf_f80);
 
     try test__powixf2(0, -1, inf_f80);
     try test__powixf2(0, -2, inf_f80);
     try test__powixf2(0, -3, inf_f80);
     try test__powixf2(0, -4, inf_f80);
-    try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80);
-    try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000001)), inf_f80);
-    try test__powixf2(0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80);
+    try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f80);
+    try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000001))), inf_f80);
+    try test__powixf2(0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f80);
 
     try test__powixf2(-0.0, -1, -inf_f80);
     try test__powixf2(-0.0, -2, inf_f80);
     try test__powixf2(-0.0, -3, -inf_f80);
     try test__powixf2(-0.0, -4, inf_f80);
-    try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000002)), inf_f80);
-    try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000001)), -inf_f80);
-    try test__powixf2(-0.0, @bitCast(i32, @as(u32, 0x80000000)), inf_f80);
+    try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000002))), inf_f80);
+    try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000001))), -inf_f80);
+    try test__powixf2(-0.0, @as(i32, @bitCast(@as(u32, 0x80000000))), inf_f80);
 
     try test__powixf2(1, -1, 1);
     try test__powixf2(1, -2, 1);
     try test__powixf2(1, -3, 1);
     try test__powixf2(1, -4, 1);
-    try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000002)), 1);
-    try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000001)), 1);
-    try test__powixf2(1, @bitCast(i32, @as(u32, 0x80000000)), 1);
+    try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000002))), 1);
+    try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000001))), 1);
+    try test__powixf2(1, @as(i32, @bitCast(@as(u32, 0x80000000))), 1);
 
     try test__powixf2(inf_f80, -1, 0);
     try test__powixf2(inf_f80, -2, 0);
     try test__powixf2(inf_f80, -3, 0);
     try test__powixf2(inf_f80, -4, 0);
-    try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000001)), 0);
-    try test__powixf2(inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000001))), 0);
+    try test__powixf2(inf_f80, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powixf2(-inf_f80, -1, -0.0);
     try test__powixf2(-inf_f80, -2, 0);
     try test__powixf2(-inf_f80, -3, -0.0);
     try test__powixf2(-inf_f80, -4, 0);
-    try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000002)), 0);
-    try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000001)), -0.0);
-    try test__powixf2(-inf_f80, @bitCast(i32, @as(u32, 0x80000000)), 0);
+    try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000002))), 0);
+    try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000001))), -0.0);
+    try test__powixf2(-inf_f80, @as(i32, @bitCast(@as(u32, 0x80000000))), 0);
 
     try test__powixf2(2, 10, 1024.0);
     try test__powixf2(-2, 10, 1024.0);
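
The exponent extremes used throughout these tables decode as follows: 0x7FFFFFFF is maxInt(i32) (odd) and 0x7FFFFFFE the largest even i32, while the 0x800000xx values bit-cast to the most negative exponents; the odd/even split is what pins down the sign of the result for negative bases. For instance:

    const std = @import("std");

    test "extreme exponent encodings" {
        // Even and odd near the bottom of the i32 range.
        try std.testing.expectEqual(@as(i32, -2147483646), @as(i32, @bitCast(@as(u32, 0x80000002))));
        try std.testing.expectEqual(@as(i32, -2147483647), @as(i32, @bitCast(@as(u32, 0x80000001))));
    }
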
lib/compiler_rt/rem_pio2.zig
@@ -26,7 +26,7 @@ const pio2_3 = 2.02226624871116645580e-21; // 0x3BA3198A, 0x2E000000
 const pio2_3t = 8.47842766036889956997e-32; // 0x397B839A, 0x252049C1
 
 fn U(x: anytype) usize {
-    return @intCast(usize, x);
+    return @as(usize, @intCast(x));
 }
 
 fn medium(ix: u32, x: f64, y: *[2]f64) i32 {
@@ -41,7 +41,7 @@ fn medium(ix: u32, x: f64, y: *[2]f64) i32 {
 
     // rint(x/(pi/2))
     @"fn" = x * invpio2 + toint - toint;
-    n = @intFromFloat(i32, @"fn");
+    n = @as(i32, @intFromFloat(@"fn"));
     r = x - @"fn" * pio2_1;
     w = @"fn" * pio2_1t; // 1st round, good to 85 bits
     // Matters with directed rounding.
@@ -57,17 +57,17 @@ fn medium(ix: u32, x: f64, y: *[2]f64) i32 {
         w = @"fn" * pio2_1t;
     }
     y[0] = r - w;
-    ui = @bitCast(u64, y[0]);
-    ey = @intCast(i32, (ui >> 52) & 0x7ff);
-    ex = @intCast(i32, ix >> 20);
+    ui = @as(u64, @bitCast(y[0]));
+    ey = @as(i32, @intCast((ui >> 52) & 0x7ff));
+    ex = @as(i32, @intCast(ix >> 20));
     if (ex - ey > 16) { // 2nd round, good to 118 bits
         t = r;
         w = @"fn" * pio2_2;
         r = t - w;
         w = @"fn" * pio2_2t - ((t - r) - w);
         y[0] = r - w;
-        ui = @bitCast(u64, y[0]);
-        ey = @intCast(i32, (ui >> 52) & 0x7ff);
+        ui = @as(u64, @bitCast(y[0]));
+        ey = @as(i32, @intCast((ui >> 52) & 0x7ff));
         if (ex - ey > 49) { // 3rd round, good to 151 bits, covers all cases
             t = r;
             w = @"fn" * pio2_3;
@@ -95,9 +95,9 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 {
     var i: i32 = undefined;
     var ui: u64 = undefined;
 
-    ui = @bitCast(u64, x);
+    ui = @as(u64, @bitCast(x));
     sign = ui >> 63 != 0;
-    ix = @truncate(u32, (ui >> 32) & 0x7fffffff);
+    ix = @as(u32, @truncate((ui >> 32) & 0x7fffffff));
     if (ix <= 0x400f6a7a) { // |x| ~<= 5pi/4
         if ((ix & 0xfffff) == 0x921fb) { // |x| ~= pi/2 or 2pi/2
             return medium(ix, x, y);
@@ -171,14 +171,14 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 {
         return 0;
     }
     // set z = scalbn(|x|,-ilogb(x)+23)
-    ui = @bitCast(u64, x);
+    ui = @as(u64, @bitCast(x));
     ui &= std.math.maxInt(u64) >> 12;
     ui |= @as(u64, 0x3ff + 23) << 52;
-    z = @bitCast(f64, ui);
+    z = @as(f64, @bitCast(ui));
 
     i = 0;
     while (i < 2) : (i += 1) {
-        tx[U(i)] = @floatFromInt(f64, @intFromFloat(i32, z));
+        tx[U(i)] = @as(f64, @floatFromInt(@as(i32, @intFromFloat(z))));
         z = (z - tx[U(i)]) * 0x1p24;
     }
     tx[U(i)] = z;
@@ -186,7 +186,7 @@ pub fn rem_pio2(x: f64, y: *[2]f64) i32 {
     while (tx[U(i)] == 0.0) {
         i -= 1;
     }
-    n = rem_pio2_large(tx[0..], ty[0..], @intCast(i32, (ix >> 20)) - (0x3ff + 23), i + 1, 1);
+    n = rem_pio2_large(tx[0..], ty[0..], @as(i32, @intCast((ix >> 20))) - (0x3ff + 23), i + 1, 1);
     if (sign) {
         y[0] = -ty[0];
         y[1] = -ty[1];
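
The reduction in `medium()` is two-stage Cody-Waite: pi/2 is stored as a head `pio2_1` whose low mantissa bits are zero, so multiplying it by a small integer `n` and subtracting from `x` is exact, and only then is the tiny tail `pio2_1t` folded in, with `pio2_2`/`pio2_3` extending the same idea for further rounds. A sketch with the musl-derived constants (quoted from memory, so treat the exact digits as an assumption):

    const std = @import("std");

    test "two-stage pi/2 reduction" {
        const pio2_1: f64 = 1.57079632673412561417e+00; // head of pi/2, low bits zero
        const pio2_1t: f64 = 6.07710050650619224932e-11; // its tail
        const x: f64 = 3.0;
        const n: f64 = 2.0; // rint(x / (pi/2))
        const y = (x - n * pio2_1) - n * pio2_1t; // exact head step, then the tail
        try std.testing.expect(@fabs(y - (3.0 - std.math.pi)) < 1e-15);
    }
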
lib/compiler_rt/rem_pio2_large.zig
@@ -150,7 +150,7 @@ const PIo2 = [_]f64{
 };
 
 fn U(x: anytype) usize {
-    return @intCast(usize, x);
+    return @as(usize, @intCast(x));
 }
 
 /// Returns the last three digits of N with y = x - N*pi/2 so that |y| < pi/2.
@@ -295,7 +295,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
         i += 1;
         j += 1;
     }) {
-        f[U(i)] = if (j < 0) 0.0 else @floatFromInt(f64, ipio2[U(j)]);
+        f[U(i)] = if (j < 0) 0.0 else @as(f64, @floatFromInt(ipio2[U(j)]));
     }
 
     // compute q[0],q[1],...q[jk]
@@ -322,22 +322,22 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
             i += 1;
             j -= 1;
         }) {
-            fw = @floatFromInt(f64, @intFromFloat(i32, 0x1p-24 * z));
-            iq[U(i)] = @intFromFloat(i32, z - 0x1p24 * fw);
+            fw = @as(f64, @floatFromInt(@as(i32, @intFromFloat(0x1p-24 * z))));
+            iq[U(i)] = @as(i32, @intFromFloat(z - 0x1p24 * fw));
             z = q[U(j - 1)] + fw;
         }
 
         // compute n
         z = math.scalbn(z, q0); // actual value of z
         z -= 8.0 * @floor(z * 0.125); // trim off integer >= 8
-        n = @intFromFloat(i32, z);
-        z -= @floatFromInt(f64, n);
+        n = @as(i32, @intFromFloat(z));
+        z -= @as(f64, @floatFromInt(n));
         ih = 0;
         if (q0 > 0) { // need iq[jz-1] to determine n
-            i = iq[U(jz - 1)] >> @intCast(u5, 24 - q0);
+            i = iq[U(jz - 1)] >> @as(u5, @intCast(24 - q0));
             n += i;
-            iq[U(jz - 1)] -= i << @intCast(u5, 24 - q0);
-            ih = iq[U(jz - 1)] >> @intCast(u5, 23 - q0);
+            iq[U(jz - 1)] -= i << @as(u5, @intCast(24 - q0));
+            ih = iq[U(jz - 1)] >> @as(u5, @intCast(23 - q0));
         } else if (q0 == 0) {
             ih = iq[U(jz - 1)] >> 23;
         } else if (z >= 0.5) {
@@ -390,7 +390,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
 
                 i = jz + 1;
                 while (i <= jz + k) : (i += 1) { // add q[jz+1] to q[jz+k]
-                    f[U(jx + i)] = @floatFromInt(f64, ipio2[U(jv + i)]);
+                    f[U(jx + i)] = @as(f64, @floatFromInt(ipio2[U(jv + i)]));
                     j = 0;
                     fw = 0;
                     while (j <= jx) : (j += 1) {
@@ -414,13 +414,13 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
         } else { // break z into 24-bit if necessary
             z = math.scalbn(z, -q0);
             if (z >= 0x1p24) {
-                fw = @floatFromInt(f64, @intFromFloat(i32, 0x1p-24 * z));
-                iq[U(jz)] = @intFromFloat(i32, z - 0x1p24 * fw);
+                fw = @as(f64, @floatFromInt(@as(i32, @intFromFloat(0x1p-24 * z))));
+                iq[U(jz)] = @as(i32, @intFromFloat(z - 0x1p24 * fw));
                 jz += 1;
                 q0 += 24;
-                iq[U(jz)] = @intFromFloat(i32, fw);
+                iq[U(jz)] = @as(i32, @intFromFloat(fw));
             } else {
-                iq[U(jz)] = @intFromFloat(i32, z);
+                iq[U(jz)] = @as(i32, @intFromFloat(z));
             }
         }
 
@@ -428,7 +428,7 @@ pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
         fw = math.scalbn(@as(f64, 1.0), q0);
         i = jz;
         while (i >= 0) : (i -= 1) {
-            q[U(i)] = fw * @floatFromInt(f64, iq[U(i)]);
+            q[U(i)] = fw * @as(f64, @floatFromInt(iq[U(i)]));
             fw *= 0x1p-24;
         }
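Note: `rem_pio2_large` repeatedly splits a double into 24-bit pieces with paired int/float conversions; under the new syntax each direction takes one operand and reads its result type from the wrapping `@as`. A small sketch of the round trip (hypothetical test, not from this commit):

const std = @import("std");

test "float <-> int casts under the new syntax" {
    const z: f64 = 42.75;
    // Old: @intFromFloat(i32, z). Truncates toward zero.
    const n = @as(i32, @intFromFloat(z));
    try std.testing.expectEqual(@as(i32, 42), n);
    // Old: @floatFromInt(f64, n).
    const fw = @as(f64, @floatFromInt(n));
    try std.testing.expectEqual(@as(f64, 42.0), fw);
}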
 
lib/compiler_rt/rem_pio2f.zig
@@ -30,14 +30,14 @@ pub fn rem_pio2f(x: f32, y: *f64) i32 {
     var e0: u32 = undefined;
     var ui: u32 = undefined;
 
-    ui = @bitCast(u32, x);
+    ui = @as(u32, @bitCast(x));
     ix = ui & 0x7fffffff;
 
     // 25+53 bit pi is good enough for medium size
     if (ix < 0x4dc90fdb) { // |x| ~< 2^28*(pi/2), medium size
         // Use a specialized rint() to get fn.
-        @"fn" = @floatCast(f64, x) * invpio2 + toint - toint;
-        n = @intFromFloat(i32, @"fn");
+        @"fn" = @as(f64, @floatCast(x)) * invpio2 + toint - toint;
+        n = @as(i32, @intFromFloat(@"fn"));
         y.* = x - @"fn" * pio2_1 - @"fn" * pio2_1t;
         // Matters with directed rounding.
         if (y.* < -pio4) {
@@ -59,8 +59,8 @@ pub fn rem_pio2f(x: f32, y: *f64) i32 {
     sign = ui >> 31 != 0;
     e0 = (ix >> 23) - (0x7f + 23); // e0 = ilogb(|x|)-23, positive
     ui = ix - (e0 << 23);
-    tx[0] = @bitCast(f32, ui);
-    n = rem_pio2_large(&tx, &ty, @intCast(i32, e0), 1, 0);
+    tx[0] = @as(f32, @bitCast(ui));
+    n = rem_pio2_large(&tx, &ty, @as(i32, @intCast(e0)), 1, 0);
     if (sign) {
         y.* = -ty[0];
         return -n;
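Note: the `f32` variant also exercises `@floatCast` (widening `x` to `f64`) and `@intCast` (converting the exponent `e0`); both follow the same `@as`-wrapping shape. A sketch (hypothetical test, not from this commit):

const std = @import("std");

test "@floatCast and @intCast under the new syntax" {
    const x: f32 = 0.5;
    // Old: @floatCast(f64, x). Widening f32 -> f64 is exact.
    const wide = @as(f64, @floatCast(x));
    try std.testing.expectEqual(@as(f64, 0.5), wide);
    // Old: @intCast(i32, e0). Safety-checked to fit the target type.
    const e0: u32 = 105;
    try std.testing.expectEqual(@as(i32, 105), @as(i32, @intCast(e0)));
}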
lib/compiler_rt/round.zig
@@ -27,14 +27,14 @@ comptime {
 
 pub fn __roundh(x: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, roundf(x));
+    return @as(f16, @floatCast(roundf(x)));
 }
 
 pub fn roundf(x_: f32) callconv(.C) f32 {
     const f32_toint = 1.0 / math.floatEps(f32);
 
     var x = x_;
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     const e = (u >> 23) & 0xFF;
     var y: f32 = undefined;
 
@@ -46,7 +46,7 @@ pub fn roundf(x_: f32) callconv(.C) f32 {
     }
     if (e < 0x7F - 1) {
         math.doNotOptimizeAway(x + f32_toint);
-        return 0 * @bitCast(f32, u);
+        return 0 * @as(f32, @bitCast(u));
     }
 
     y = x + f32_toint - f32_toint - x;
@@ -69,7 +69,7 @@ pub fn round(x_: f64) callconv(.C) f64 {
     const f64_toint = 1.0 / math.floatEps(f64);
 
     var x = x_;
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
     const e = (u >> 52) & 0x7FF;
     var y: f64 = undefined;
 
@@ -81,7 +81,7 @@ pub fn round(x_: f64) callconv(.C) f64 {
     }
     if (e < 0x3ff - 1) {
         math.doNotOptimizeAway(x + f64_toint);
-        return 0 * @bitCast(f64, u);
+        return 0 * @as(f64, @bitCast(u));
     }
 
     y = x + f64_toint - f64_toint - x;
@@ -102,14 +102,14 @@ pub fn round(x_: f64) callconv(.C) f64 {
 
 pub fn __roundx(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, roundq(x));
+    return @as(f80, @floatCast(roundq(x)));
 }
 
 pub fn roundq(x_: f128) callconv(.C) f128 {
     const f128_toint = 1.0 / math.floatEps(f128);
 
     var x = x_;
-    const u = @bitCast(u128, x);
+    const u = @as(u128, @bitCast(x));
     const e = (u >> 112) & 0x7FFF;
     var y: f128 = undefined;
 
@@ -121,7 +121,7 @@ pub fn roundq(x_: f128) callconv(.C) f128 {
     }
     if (e < 0x3FFF - 1) {
         math.doNotOptimizeAway(x + f128_toint);
-        return 0 * @bitCast(f128, u);
+        return 0 * @as(f128, @bitCast(u));
     }
 
     y = x + f128_toint - f128_toint - x;
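Note: in each `round` variant the small-|x| path returns `0 * @bitCast(...)` so the result keeps the sign of `x` (round(-0.3) is -0.0). A sketch of that sign-preserving trick under the new syntax (hypothetical test, not from this commit; relies on IEEE `0.0 * -0.3 == -0.0`):

const std = @import("std");

test "0 * @bitCast keeps the sign for |x| < 0.5" {
    const u = @as(u32, @bitCast(@as(f32, -0.3)));
    const r = 0 * @as(f32, @bitCast(u));
    try std.testing.expect(std.math.signbit(r)); // negative zero
    try std.testing.expectEqual(@as(f32, 0.0), r); // and -0.0 == 0.0
}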
lib/compiler_rt/shift.zig
@@ -37,13 +37,13 @@ inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
 
     if (b >= word_t.bits) {
         output.s.low = 0;
-        output.s.high = input.s.low << @intCast(S, b - word_t.bits);
+        output.s.high = input.s.low << @as(S, @intCast(b - word_t.bits));
     } else if (b == 0) {
         return a;
     } else {
-        output.s.low = input.s.low << @intCast(S, b);
-        output.s.high = input.s.high << @intCast(S, b);
-        output.s.high |= input.s.low >> @intCast(S, word_t.bits - b);
+        output.s.low = input.s.low << @as(S, @intCast(b));
+        output.s.high = input.s.high << @as(S, @intCast(b));
+        output.s.high |= input.s.low >> @as(S, @intCast(word_t.bits - b));
     }
 
     return output.all;
@@ -60,16 +60,16 @@ inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
 
     if (b >= word_t.bits) {
         output.s.high = input.s.high >> (word_t.bits - 1);
-        output.s.low = input.s.high >> @intCast(S, b - word_t.bits);
+        output.s.low = input.s.high >> @as(S, @intCast(b - word_t.bits));
     } else if (b == 0) {
         return a;
     } else {
-        output.s.high = input.s.high >> @intCast(S, b);
-        output.s.low = input.s.high << @intCast(S, word_t.bits - b);
+        output.s.high = input.s.high >> @as(S, @intCast(b));
+        output.s.low = input.s.high << @as(S, @intCast(word_t.bits - b));
         // Avoid sign-extension here
-        output.s.low |= @bitCast(
+        output.s.low |= @as(
             word_t.HalfT,
-            @bitCast(word_t.HalfTU, input.s.low) >> @intCast(S, b),
+            @bitCast(@as(word_t.HalfTU, @bitCast(input.s.low)) >> @as(S, @intCast(b))),
         );
     }
 
@@ -87,13 +87,13 @@ inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
 
     if (b >= word_t.bits) {
         output.s.high = 0;
-        output.s.low = input.s.high >> @intCast(S, b - word_t.bits);
+        output.s.low = input.s.high >> @as(S, @intCast(b - word_t.bits));
     } else if (b == 0) {
         return a;
     } else {
-        output.s.high = input.s.high >> @intCast(S, b);
-        output.s.low = input.s.high << @intCast(S, word_t.bits - b);
-        output.s.low |= input.s.low >> @intCast(S, b);
+        output.s.high = input.s.high >> @as(S, @intCast(b));
+        output.s.low = input.s.high << @as(S, @intCast(word_t.bits - b));
+        output.s.low |= input.s.low >> @as(S, @intCast(b));
     }
 
     return output.all;
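Note: all three shift helpers cast the `i32` shift amount to the narrow shift-amount type `S` before shifting; the rewrite wraps each `@intCast` in `@as(S, ...)` so the inferred cast still has a result type. A sketch for `u64`, whose shift type is `u6` (hypothetical test, not from this commit):

const std = @import("std");

test "shift amounts are cast to the log2 type" {
    const b: i32 = 12;
    const v: u64 = 1;
    // Old: v << @intCast(u6, b). u6 == std.math.Log2Int(u64).
    const r = v << @as(u6, @intCast(b));
    try std.testing.expectEqual(@as(u64, 0x1000), r);
}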
lib/compiler_rt/shift_test.zig
@@ -18,346 +18,346 @@ const __lshrti3 = shift.__lshrti3;
 
 fn test__ashlsi3(a: i32, b: i32, expected: u32) !void {
     const x = __ashlsi3(a, b);
-    try testing.expectEqual(expected, @bitCast(u32, x));
+    try testing.expectEqual(expected, @as(u32, @bitCast(x)));
 }
 fn test__ashldi3(a: i64, b: i32, expected: u64) !void {
     const x = __ashldi3(a, b);
-    try testing.expectEqual(expected, @bitCast(u64, x));
+    try testing.expectEqual(expected, @as(u64, @bitCast(x)));
 }
 fn test__ashlti3(a: i128, b: i32, expected: u128) !void {
     const x = __ashlti3(a, b);
-    try testing.expectEqual(expected, @bitCast(u128, x));
+    try testing.expectEqual(expected, @as(u128, @bitCast(x)));
 }
 
 test "ashlsi3" {
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 0, 0x12ABCDEF);
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 1, 0x25579BDE);
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 2, 0x4AAF37BC);
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 3, 0x955E6F78);
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 4, 0x2ABCDEF0);
-
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 28, 0xF0000000);
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 29, 0xE0000000);
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 30, 0xC0000000);
-    try test__ashlsi3(@bitCast(i32, @as(u32, 0x12ABCDEF)), 31, 0x80000000);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 0, 0x12ABCDEF);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 1, 0x25579BDE);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 2, 0x4AAF37BC);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 3, 0x955E6F78);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 4, 0x2ABCDEF0);
+
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 28, 0xF0000000);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 29, 0xE0000000);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 30, 0xC0000000);
+    try test__ashlsi3(@as(i32, @bitCast(@as(u32, 0x12ABCDEF))), 31, 0x80000000);
 }
 
 test "ashldi3" {
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x2468ACF13579BDE);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37BC);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x91A2B3C4D5E6F78);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDEF0);
-
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x789ABCDEF0000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0xF13579BDE0000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0xE26AF37BC0000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0xC4D5E6F780000000);
-
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x89ABCDEF00000000);
-
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x13579BDE00000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x26AF37BC00000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x4D5E6F7800000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x9ABCDEF000000000);
-
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0xF000000000000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0xE000000000000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0xC000000000000000);
-    try test__ashldi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0x8000000000000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x2468ACF13579BDE);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37BC);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x91A2B3C4D5E6F78);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDEF0);
+
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x789ABCDEF0000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0xF13579BDE0000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0xE26AF37BC0000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0xC4D5E6F780000000);
+
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x89ABCDEF00000000);
+
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x13579BDE00000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x26AF37BC00000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x4D5E6F7800000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x9ABCDEF000000000);
+
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0xF000000000000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0xE000000000000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0xC000000000000000);
+    try test__ashldi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0x8000000000000000);
 }
 
 test "ashlti3" {
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 0, 0xFEDCBA9876543215FEDCBA9876543215);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 1, 0xFDB97530ECA8642BFDB97530ECA8642A);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 2, 0xFB72EA61D950C857FB72EA61D950C854);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 3, 0xF6E5D4C3B2A190AFF6E5D4C3B2A190A8);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 4, 0xEDCBA9876543215FEDCBA98765432150);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 28, 0x876543215FEDCBA98765432150000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 29, 0x0ECA8642BFDB97530ECA8642A0000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 30, 0x1D950C857FB72EA61D950C8540000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 31, 0x3B2A190AFF6E5D4C3B2A190A80000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 32, 0x76543215FEDCBA987654321500000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 33, 0xECA8642BFDB97530ECA8642A00000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 34, 0xD950C857FB72EA61D950C85400000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 35, 0xB2A190AFF6E5D4C3B2A190A800000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 36, 0x6543215FEDCBA9876543215000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 60, 0x5FEDCBA9876543215000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 61, 0xBFDB97530ECA8642A000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 62, 0x7FB72EA61D950C854000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 63, 0xFF6E5D4C3B2A190A8000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 64, 0xFEDCBA98765432150000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 65, 0xFDB97530ECA8642A0000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 66, 0xFB72EA61D950C8540000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 67, 0xF6E5D4C3B2A190A80000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 68, 0xEDCBA987654321500000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 92, 0x87654321500000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 93, 0x0ECA8642A00000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 94, 0x1D950C85400000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 95, 0x3B2A190A800000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 96, 0x76543215000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 97, 0xECA8642A000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 98, 0xD950C854000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 99, 0xB2A190A8000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 100, 0x65432150000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 124, 0x50000000000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 125, 0xA0000000000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 126, 0x40000000000000000000000000000000);
-    try test__ashlti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 127, 0x80000000000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 0, 0xFEDCBA9876543215FEDCBA9876543215);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 1, 0xFDB97530ECA8642BFDB97530ECA8642A);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 2, 0xFB72EA61D950C857FB72EA61D950C854);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 3, 0xF6E5D4C3B2A190AFF6E5D4C3B2A190A8);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 4, 0xEDCBA9876543215FEDCBA98765432150);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 28, 0x876543215FEDCBA98765432150000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 29, 0x0ECA8642BFDB97530ECA8642A0000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 30, 0x1D950C857FB72EA61D950C8540000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 31, 0x3B2A190AFF6E5D4C3B2A190A80000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 32, 0x76543215FEDCBA987654321500000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 33, 0xECA8642BFDB97530ECA8642A00000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 34, 0xD950C857FB72EA61D950C85400000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 35, 0xB2A190AFF6E5D4C3B2A190A800000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 36, 0x6543215FEDCBA9876543215000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 60, 0x5FEDCBA9876543215000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 61, 0xBFDB97530ECA8642A000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 62, 0x7FB72EA61D950C854000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 63, 0xFF6E5D4C3B2A190A8000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 64, 0xFEDCBA98765432150000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 65, 0xFDB97530ECA8642A0000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 66, 0xFB72EA61D950C8540000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 67, 0xF6E5D4C3B2A190A80000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 68, 0xEDCBA987654321500000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 92, 0x87654321500000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 93, 0x0ECA8642A00000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 94, 0x1D950C85400000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 95, 0x3B2A190A800000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 96, 0x76543215000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 97, 0xECA8642A000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 98, 0xD950C854000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 99, 0xB2A190A8000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 100, 0x65432150000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 124, 0x50000000000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 125, 0xA0000000000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 126, 0x40000000000000000000000000000000);
+    try test__ashlti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 127, 0x80000000000000000000000000000000);
 }
 
 fn test__ashrsi3(a: i32, b: i32, expected: u32) !void {
     const x = __ashrsi3(a, b);
-    try testing.expectEqual(expected, @bitCast(u32, x));
+    try testing.expectEqual(expected, @as(u32, @bitCast(x)));
 }
 fn test__ashrdi3(a: i64, b: i32, expected: u64) !void {
     const x = __ashrdi3(a, b);
-    try testing.expectEqual(expected, @bitCast(u64, x));
+    try testing.expectEqual(expected, @as(u64, @bitCast(x)));
 }
 fn test__ashrti3(a: i128, b: i32, expected: u128) !void {
     const x = __ashrti3(a, b);
-    try testing.expectEqual(expected, @bitCast(u128, x));
+    try testing.expectEqual(expected, @as(u128, @bitCast(x)));
 }
 
 test "ashrsi3" {
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 0, 0xFEDBCA98);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 1, 0xFF6DE54C);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 2, 0xFFB6F2A6);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 3, 0xFFDB7953);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 4, 0xFFEDBCA9);
-
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 28, 0xFFFFFFFF);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 31, 0xFFFFFFFF);
-
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 0, 0x8CEF8CEF);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 1, 0xC677C677);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 2, 0xE33BE33B);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 3, 0xF19DF19D);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 4, 0xF8CEF8CE);
-
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 28, 0xFFFFFFF8);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 29, 0xFFFFFFFC);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 30, 0xFFFFFFFE);
-    try test__ashrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 31, 0xFFFFFFFF);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 0, 0xFEDBCA98);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 1, 0xFF6DE54C);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 2, 0xFFB6F2A6);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 3, 0xFFDB7953);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 4, 0xFFEDBCA9);
+
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 28, 0xFFFFFFFF);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 31, 0xFFFFFFFF);
+
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 0, 0x8CEF8CEF);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 1, 0xC677C677);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 2, 0xE33BE33B);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 3, 0xF19DF19D);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 4, 0xF8CEF8CE);
+
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 28, 0xFFFFFFF8);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 29, 0xFFFFFFFC);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 30, 0xFFFFFFFE);
+    try test__ashrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 31, 0xFFFFFFFF);
 }
 
 test "ashrdi3" {
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x91A2B3C4D5E6F7);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37B);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x2468ACF13579BD);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDE);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x12345678);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0x91A2B3C);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0x48D159E);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0x2468ACF);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x1234567);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x91A2B3);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x48D159);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x2468AC);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x123456);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 0, 0xFEDCBA9876543210);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 1, 0xFF6E5D4C3B2A1908);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 2, 0xFFB72EA61D950C84);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 3, 0xFFDB97530ECA8642);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 4, 0xFFEDCBA987654321);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 28, 0xFFFFFFFFEDCBA987);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 29, 0xFFFFFFFFF6E5D4C3);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 30, 0xFFFFFFFFFB72EA61);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 31, 0xFFFFFFFFFDB97530);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 32, 0xFFFFFFFFFEDCBA98);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 33, 0xFFFFFFFFFF6E5D4C);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 34, 0xFFFFFFFFFFB72EA6);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 35, 0xFFFFFFFFFFDB9753);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 36, 0xFFFFFFFFFFEDCBA9);
-
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 60, 0xFFFFFFFFFFFFFFFA);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 61, 0xFFFFFFFFFFFFFFFD);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 62, 0xFFFFFFFFFFFFFFFE);
-    try test__ashrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 63, 0xFFFFFFFFFFFFFFFF);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x91A2B3C4D5E6F7);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37B);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x2468ACF13579BD);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDE);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x12345678);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0x91A2B3C);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0x48D159E);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0x2468ACF);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x1234567);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x91A2B3);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x48D159);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x2468AC);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x123456);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 0, 0xFEDCBA9876543210);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 1, 0xFF6E5D4C3B2A1908);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 2, 0xFFB72EA61D950C84);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 3, 0xFFDB97530ECA8642);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 4, 0xFFEDCBA987654321);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 28, 0xFFFFFFFFEDCBA987);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 29, 0xFFFFFFFFF6E5D4C3);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 30, 0xFFFFFFFFFB72EA61);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 31, 0xFFFFFFFFFDB97530);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 32, 0xFFFFFFFFFEDCBA98);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 33, 0xFFFFFFFFFF6E5D4C);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 34, 0xFFFFFFFFFFB72EA6);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 35, 0xFFFFFFFFFFDB9753);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 36, 0xFFFFFFFFFFEDCBA9);
+
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 60, 0xFFFFFFFFFFFFFFFA);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 61, 0xFFFFFFFFFFFFFFFD);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 62, 0xFFFFFFFFFFFFFFFE);
+    try test__ashrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 63, 0xFFFFFFFFFFFFFFFF);
 }
 
 test "ashrti3" {
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 0, 0xFEDCBA9876543215FEDCBA9876543215);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 1, 0xFF6E5D4C3B2A190AFF6E5D4C3B2A190A);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 2, 0xFFB72EA61D950C857FB72EA61D950C85);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 3, 0xFFDB97530ECA8642BFDB97530ECA8642);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 4, 0xFFEDCBA9876543215FEDCBA987654321);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 28, 0xFFFFFFFFEDCBA9876543215FEDCBA987);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 29, 0xFFFFFFFFF6E5D4C3B2A190AFF6E5D4C3);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 30, 0xFFFFFFFFFB72EA61D950C857FB72EA61);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 31, 0xFFFFFFFFFDB97530ECA8642BFDB97530);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 32, 0xFFFFFFFFFEDCBA9876543215FEDCBA98);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 33, 0xFFFFFFFFFF6E5D4C3B2A190AFF6E5D4C);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 34, 0xFFFFFFFFFFB72EA61D950C857FB72EA6);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 35, 0xFFFFFFFFFFDB97530ECA8642BFDB9753);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 36, 0xFFFFFFFFFFEDCBA9876543215FEDCBA9);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 60, 0xFFFFFFFFFFFFFFFFEDCBA9876543215F);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 61, 0xFFFFFFFFFFFFFFFFF6E5D4C3B2A190AF);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 62, 0xFFFFFFFFFFFFFFFFFB72EA61D950C857);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 63, 0xFFFFFFFFFFFFFFFFFDB97530ECA8642B);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 64, 0xFFFFFFFFFFFFFFFFFEDCBA9876543215);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 65, 0xFFFFFFFFFFFFFFFFFF6E5D4C3B2A190A);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 66, 0xFFFFFFFFFFFFFFFFFFB72EA61D950C85);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 67, 0xFFFFFFFFFFFFFFFFFFDB97530ECA8642);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 68, 0xFFFFFFFFFFFFFFFFFFEDCBA987654321);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 92, 0xFFFFFFFFFFFFFFFFFFFFFFFFEDCBA987);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 93, 0xFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C3);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 94, 0xFFFFFFFFFFFFFFFFFFFFFFFFFB72EA61);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 95, 0xFFFFFFFFFFFFFFFFFFFFFFFFFDB97530);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 96, 0xFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA98);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 97, 0xFFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 98, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFB72EA6);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 99, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFDB9753);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 100, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA9);
-
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 124, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 125, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 126, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
-    try test__ashrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA9876543215)), 127, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 0, 0xFEDCBA9876543215FEDCBA9876543215);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 1, 0xFF6E5D4C3B2A190AFF6E5D4C3B2A190A);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 2, 0xFFB72EA61D950C857FB72EA61D950C85);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 3, 0xFFDB97530ECA8642BFDB97530ECA8642);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 4, 0xFFEDCBA9876543215FEDCBA987654321);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 28, 0xFFFFFFFFEDCBA9876543215FEDCBA987);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 29, 0xFFFFFFFFF6E5D4C3B2A190AFF6E5D4C3);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 30, 0xFFFFFFFFFB72EA61D950C857FB72EA61);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 31, 0xFFFFFFFFFDB97530ECA8642BFDB97530);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 32, 0xFFFFFFFFFEDCBA9876543215FEDCBA98);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 33, 0xFFFFFFFFFF6E5D4C3B2A190AFF6E5D4C);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 34, 0xFFFFFFFFFFB72EA61D950C857FB72EA6);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 35, 0xFFFFFFFFFFDB97530ECA8642BFDB9753);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 36, 0xFFFFFFFFFFEDCBA9876543215FEDCBA9);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 60, 0xFFFFFFFFFFFFFFFFEDCBA9876543215F);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 61, 0xFFFFFFFFFFFFFFFFF6E5D4C3B2A190AF);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 62, 0xFFFFFFFFFFFFFFFFFB72EA61D950C857);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 63, 0xFFFFFFFFFFFFFFFFFDB97530ECA8642B);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 64, 0xFFFFFFFFFFFFFFFFFEDCBA9876543215);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 65, 0xFFFFFFFFFFFFFFFFFF6E5D4C3B2A190A);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 66, 0xFFFFFFFFFFFFFFFFFFB72EA61D950C85);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 67, 0xFFFFFFFFFFFFFFFFFFDB97530ECA8642);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 68, 0xFFFFFFFFFFFFFFFFFFEDCBA987654321);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 92, 0xFFFFFFFFFFFFFFFFFFFFFFFFEDCBA987);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 93, 0xFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C3);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 94, 0xFFFFFFFFFFFFFFFFFFFFFFFFFB72EA61);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 95, 0xFFFFFFFFFFFFFFFFFFFFFFFFFDB97530);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 96, 0xFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA98);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 97, 0xFFFFFFFFFFFFFFFFFFFFFFFFFF6E5D4C);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 98, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFB72EA6);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 99, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFDB9753);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 100, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFEDCBA9);
+
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 124, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 125, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 126, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
+    try test__ashrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA9876543215))), 127, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
 }
 
 fn test__lshrsi3(a: i32, b: i32, expected: u32) !void {
     const x = __lshrsi3(a, b);
-    try testing.expectEqual(expected, @bitCast(u32, x));
+    try testing.expectEqual(expected, @as(u32, @bitCast(x)));
 }
 fn test__lshrdi3(a: i64, b: i32, expected: u64) !void {
     const x = __lshrdi3(a, b);
-    try testing.expectEqual(expected, @bitCast(u64, x));
+    try testing.expectEqual(expected, @as(u64, @bitCast(x)));
 }
 fn test__lshrti3(a: i128, b: i32, expected: u128) !void {
     const x = __lshrti3(a, b);
-    try testing.expectEqual(expected, @bitCast(u128, x));
+    try testing.expectEqual(expected, @as(u128, @bitCast(x)));
 }
 
 test "lshrsi3" {
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 0, 0xFEDBCA98);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 1, 0x7F6DE54C);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 2, 0x3FB6F2A6);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 3, 0x1FDB7953);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 4, 0xFEDBCA9);
-
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 28, 0xF);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 29, 0x7);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 30, 0x3);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0xFEDBCA98)), 31, 0x1);
-
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 0, 0x8CEF8CEF);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 1, 0x4677C677);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 2, 0x233BE33B);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 3, 0x119DF19D);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 4, 0x8CEF8CE);
-
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 28, 0x8);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 29, 0x4);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 30, 0x2);
-    try test__lshrsi3(@bitCast(i32, @as(u32, 0x8CEF8CEF)), 31, 0x1);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 0, 0xFEDBCA98);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 1, 0x7F6DE54C);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 2, 0x3FB6F2A6);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 3, 0x1FDB7953);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 4, 0xFEDBCA9);
+
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 28, 0xF);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 29, 0x7);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 30, 0x3);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0xFEDBCA98))), 31, 0x1);
+
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 0, 0x8CEF8CEF);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 1, 0x4677C677);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 2, 0x233BE33B);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 3, 0x119DF19D);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 4, 0x8CEF8CE);
+
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 28, 0x8);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 29, 0x4);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 30, 0x2);
+    try test__lshrsi3(@as(i32, @bitCast(@as(u32, 0x8CEF8CEF))), 31, 0x1);
 }
 
 test "lshrdi3" {
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 0, 0x123456789ABCDEF);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 1, 0x91A2B3C4D5E6F7);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 2, 0x48D159E26AF37B);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 3, 0x2468ACF13579BD);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 4, 0x123456789ABCDE);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 28, 0x12345678);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 29, 0x91A2B3C);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 30, 0x48D159E);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 31, 0x2468ACF);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 32, 0x1234567);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 33, 0x91A2B3);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 34, 0x48D159);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 35, 0x2468AC);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 36, 0x123456);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 60, 0);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 61, 0);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 62, 0);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0x0123456789ABCDEF)), 63, 0);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 0, 0xFEDCBA9876543210);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 1, 0x7F6E5D4C3B2A1908);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 2, 0x3FB72EA61D950C84);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 3, 0x1FDB97530ECA8642);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 4, 0xFEDCBA987654321);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 28, 0xFEDCBA987);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 29, 0x7F6E5D4C3);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 30, 0x3FB72EA61);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 31, 0x1FDB97530);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 32, 0xFEDCBA98);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 33, 0x7F6E5D4C);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 34, 0x3FB72EA6);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 35, 0x1FDB9753);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xFEDCBA9876543210)), 36, 0xFEDCBA9);
-
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 60, 0xA);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 61, 0x5);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 62, 0x2);
-    try test__lshrdi3(@bitCast(i64, @as(u64, 0xAEDCBA9876543210)), 63, 0x1);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 0, 0x123456789ABCDEF);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 1, 0x91A2B3C4D5E6F7);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 2, 0x48D159E26AF37B);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 3, 0x2468ACF13579BD);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 4, 0x123456789ABCDE);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 28, 0x12345678);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 29, 0x91A2B3C);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 30, 0x48D159E);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 31, 0x2468ACF);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 32, 0x1234567);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 33, 0x91A2B3);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 34, 0x48D159);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 35, 0x2468AC);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 36, 0x123456);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 60, 0);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 61, 0);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 62, 0);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0x0123456789ABCDEF))), 63, 0);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 0, 0xFEDCBA9876543210);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 1, 0x7F6E5D4C3B2A1908);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 2, 0x3FB72EA61D950C84);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 3, 0x1FDB97530ECA8642);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 4, 0xFEDCBA987654321);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 28, 0xFEDCBA987);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 29, 0x7F6E5D4C3);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 30, 0x3FB72EA61);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 31, 0x1FDB97530);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 32, 0xFEDCBA98);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 33, 0x7F6E5D4C);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 34, 0x3FB72EA6);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 35, 0x1FDB9753);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xFEDCBA9876543210))), 36, 0xFEDCBA9);
+
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 60, 0xA);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 61, 0x5);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 62, 0x2);
+    try test__lshrdi3(@as(i64, @bitCast(@as(u64, 0xAEDCBA9876543210))), 63, 0x1);
 }
 
 test "lshrti3" {
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 0, 0xFEDCBA9876543215FEDCBA987654321F);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 1, 0x7F6E5D4C3B2A190AFF6E5D4C3B2A190F);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 2, 0x3FB72EA61D950C857FB72EA61D950C87);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 3, 0x1FDB97530ECA8642BFDB97530ECA8643);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 4, 0xFEDCBA9876543215FEDCBA987654321);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 28, 0xFEDCBA9876543215FEDCBA987);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 29, 0x7F6E5D4C3B2A190AFF6E5D4C3);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 30, 0x3FB72EA61D950C857FB72EA61);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 31, 0x1FDB97530ECA8642BFDB97530);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 32, 0xFEDCBA9876543215FEDCBA98);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 33, 0x7F6E5D4C3B2A190AFF6E5D4C);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 34, 0x3FB72EA61D950C857FB72EA6);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 35, 0x1FDB97530ECA8642BFDB9753);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 36, 0xFEDCBA9876543215FEDCBA9);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 60, 0xFEDCBA9876543215F);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 61, 0x7F6E5D4C3B2A190AF);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 62, 0x3FB72EA61D950C857);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 63, 0x1FDB97530ECA8642B);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 64, 0xFEDCBA9876543215);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 65, 0x7F6E5D4C3B2A190A);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 66, 0x3FB72EA61D950C85);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 67, 0x1FDB97530ECA8642);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 68, 0xFEDCBA987654321);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 92, 0xFEDCBA987);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 93, 0x7F6E5D4C3);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 94, 0x3FB72EA61);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 95, 0x1FDB97530);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 96, 0xFEDCBA98);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 97, 0x7F6E5D4C);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 98, 0x3FB72EA6);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 99, 0x1FDB9753);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 100, 0xFEDCBA9);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 124, 0xF);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 125, 0x7);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 126, 0x3);
-    try test__lshrti3(@bitCast(i128, @as(u128, 0xFEDCBA9876543215FEDCBA987654321F)), 127, 0x1);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 0, 0xFEDCBA9876543215FEDCBA987654321F);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 1, 0x7F6E5D4C3B2A190AFF6E5D4C3B2A190F);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 2, 0x3FB72EA61D950C857FB72EA61D950C87);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 3, 0x1FDB97530ECA8642BFDB97530ECA8643);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 4, 0xFEDCBA9876543215FEDCBA987654321);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 28, 0xFEDCBA9876543215FEDCBA987);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 29, 0x7F6E5D4C3B2A190AFF6E5D4C3);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 30, 0x3FB72EA61D950C857FB72EA61);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 31, 0x1FDB97530ECA8642BFDB97530);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 32, 0xFEDCBA9876543215FEDCBA98);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 33, 0x7F6E5D4C3B2A190AFF6E5D4C);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 34, 0x3FB72EA61D950C857FB72EA6);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 35, 0x1FDB97530ECA8642BFDB9753);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 36, 0xFEDCBA9876543215FEDCBA9);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 60, 0xFEDCBA9876543215F);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 61, 0x7F6E5D4C3B2A190AF);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 62, 0x3FB72EA61D950C857);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 63, 0x1FDB97530ECA8642B);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 64, 0xFEDCBA9876543215);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 65, 0x7F6E5D4C3B2A190A);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 66, 0x3FB72EA61D950C85);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 67, 0x1FDB97530ECA8642);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 68, 0xFEDCBA987654321);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 92, 0xFEDCBA987);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 93, 0x7F6E5D4C3);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 94, 0x3FB72EA61);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 95, 0x1FDB97530);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 96, 0xFEDCBA98);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 97, 0x7F6E5D4C);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 98, 0x3FB72EA6);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 99, 0x1FDB9753);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 100, 0xFEDCBA9);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 124, 0xF);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 125, 0x7);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 126, 0x3);
+    try test__lshrti3(@as(i128, @bitCast(@as(u128, 0xFEDCBA9876543215FEDCBA987654321F))), 127, 0x1);
 }
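The removed and added lines above differ by one mechanical pattern: the old two-argument cast `@bitCast(T, x)` becomes `@as(T, @bitCast(x))`, moving the result type out of the builtin. A minimal sketch of the pattern, reusing the constant from the test above:

    const raw: u128 = 0xFEDCBA9876543215FEDCBA987654321F;
    const a = @as(i128, @bitCast(raw)); // the form used on the `+` lines
    const b: i128 = @bitCast(raw); // equivalent: the new cast builtins infer
    // the result type from the result location when one is available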
lib/compiler_rt/sin.zig
@@ -31,7 +31,7 @@ comptime {
 
 pub fn __sinh(x: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, sinf(x));
+    return @as(f16, @floatCast(sinf(x)));
 }
 
 pub fn sinf(x: f32) callconv(.C) f32 {
@@ -41,7 +41,7 @@ pub fn sinf(x: f32) callconv(.C) f32 {
     const s3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
     const s4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18
 
-    var ix = @bitCast(u32, x);
+    var ix = @as(u32, @bitCast(x));
     const sign = ix >> 31 != 0;
     ix &= 0x7fffffff;
 
@@ -90,7 +90,7 @@ pub fn sinf(x: f32) callconv(.C) f32 {
 }
 
 pub fn sin(x: f64) callconv(.C) f64 {
-    var ix = @bitCast(u64, x) >> 32;
+    var ix = @as(u64, @bitCast(x)) >> 32;
     ix &= 0x7fffffff;
 
     // |x| ~< pi/4
@@ -120,12 +120,12 @@ pub fn sin(x: f64) callconv(.C) f64 {
 
 pub fn __sinx(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, sinq(x));
+    return @as(f80, @floatCast(sinq(x)));
 }
 
 pub fn sinq(x: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return sin(@floatCast(f64, x));
+    return sin(@as(f64, @floatCast(x)));
 }
 
 pub fn sinl(x: c_longdouble) callconv(.C) c_longdouble {
@@ -180,11 +180,11 @@ test "sin64.special" {
 }
 
 test "sin32 #9901" {
-    const float = @bitCast(f32, @as(u32, 0b11100011111111110000000000000000));
+    const float = @as(f32, @bitCast(@as(u32, 0b11100011111111110000000000000000)));
     _ = sinf(float);
 }
 
 test "sin64 #9901" {
-    const float = @bitCast(f64, @as(u64, 0b1111111101000001000000001111110111111111100000000000000000000001));
+    const float = @as(f64, @bitCast(@as(u64, 0b1111111101000001000000001111110111111111100000000000000000000001)));
     _ = sin(float);
 }
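Both `sinf` and `sin` begin by reinterpreting the argument as raw bits so the sign and magnitude can be tested with integer comparisons. A short sketch of the idiom under the new syntax (hypothetical value, not from this file):

    const x: f32 = -1.5;
    const ix = @as(u32, @bitCast(x)); // 0xBFC00000
    const sign = ix >> 31 != 0; // IEEE-754 sign bit
    const abs = ix & 0x7fffffff; // magnitudes order correctly as unsigned ints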
lib/compiler_rt/sincos.zig
@@ -26,8 +26,8 @@ pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void {
     var big_sin: f32 = undefined;
     var big_cos: f32 = undefined;
     sincosf(x, &big_sin, &big_cos);
-    r_sin.* = @floatCast(f16, big_sin);
-    r_cos.* = @floatCast(f16, big_cos);
+    r_sin.* = @as(f16, @floatCast(big_sin));
+    r_cos.* = @as(f16, @floatCast(big_cos));
 }
 
 pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void {
@@ -36,7 +36,7 @@ pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void {
     const sc3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
     const sc4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18
 
-    const pre_ix = @bitCast(u32, x);
+    const pre_ix = @as(u32, @bitCast(x));
     const sign = pre_ix >> 31 != 0;
     const ix = pre_ix & 0x7fffffff;
 
@@ -126,7 +126,7 @@ pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void {
 }
 
 pub fn sincos(x: f64, r_sin: *f64, r_cos: *f64) callconv(.C) void {
-    const ix = @truncate(u32, @bitCast(u64, x) >> 32) & 0x7fffffff;
+    const ix = @as(u32, @truncate(@as(u64, @bitCast(x)) >> 32)) & 0x7fffffff;
 
     // |x| ~< pi/4
     if (ix <= 0x3fe921fb) {
@@ -182,8 +182,8 @@ pub fn __sincosx(x: f80, r_sin: *f80, r_cos: *f80) callconv(.C) void {
     var big_sin: f128 = undefined;
     var big_cos: f128 = undefined;
     sincosq(x, &big_sin, &big_cos);
-    r_sin.* = @floatCast(f80, big_sin);
-    r_cos.* = @floatCast(f80, big_cos);
+    r_sin.* = @as(f80, @floatCast(big_sin));
+    r_cos.* = @as(f80, @floatCast(big_cos));
 }
 
 pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void {
@@ -191,7 +191,7 @@ pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void {
     //return sincos_generic(f128, x, r_sin, r_cos);
     var small_sin: f64 = undefined;
     var small_cos: f64 = undefined;
-    sincos(@floatCast(f64, x), &small_sin, &small_cos);
+    sincos(@as(f64, @floatCast(x)), &small_sin, &small_cos);
     r_sin.* = small_sin;
     r_cos.* = small_cos;
 }
@@ -217,8 +217,8 @@ inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void {
     const sc1pio4: F = 1.0 * math.pi / 4.0;
     const bits = @typeInfo(F).Float.bits;
     const I = std.meta.Int(.unsigned, bits);
-    const ix = @bitCast(I, x) & (math.maxInt(I) >> 1);
-    const se = @truncate(u16, ix >> (bits - 16));
+    const ix = @as(I, @bitCast(x)) & (math.maxInt(I) >> 1);
+    const se = @as(u16, @truncate(ix >> (bits - 16)));
 
     if (se == 0x7fff) {
         const result = x - x;
@@ -227,7 +227,7 @@ inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void {
         return;
     }
 
-    if (@bitCast(F, ix) < sc1pio4) {
+    if (@as(F, @bitCast(ix)) < sc1pio4) {
         if (se < 0x3fff - math.floatFractionalBits(F) - 1) {
             // raise underflow if subnormal
             if (se == 0) {
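The f64 paths only need the high word, which holds the sign, all 11 exponent bits, and the top of the mantissa; the new syntax reads inside-out, bitcast first, then shift, then truncate. A sketch (hypothetical value):

    const x: f64 = 2.0; // bits 0x4000000000000000
    const hi = @as(u32, @truncate(@as(u64, @bitCast(x)) >> 32)); // 0x40000000
    const ix = hi & 0x7fffffff; // clear the sign for magnitude tests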
lib/compiler_rt/sqrt.zig
@@ -20,13 +20,13 @@ comptime {
 
 pub fn __sqrth(x: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, sqrtf(x));
+    return @as(f16, @floatCast(sqrtf(x)));
 }
 
 pub fn sqrtf(x: f32) callconv(.C) f32 {
     const tiny: f32 = 1.0e-30;
-    const sign: i32 = @bitCast(i32, @as(u32, 0x80000000));
-    var ix: i32 = @bitCast(i32, x);
+    const sign: i32 = @as(i32, @bitCast(@as(u32, 0x80000000)));
+    var ix: i32 = @as(i32, @bitCast(x));
 
     if ((ix & 0x7F800000) == 0x7F800000) {
         return x * x + x; // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = snan
@@ -96,7 +96,7 @@ pub fn sqrtf(x: f32) callconv(.C) f32 {
 
     ix = (q >> 1) + 0x3f000000;
     ix += m << 23;
-    return @bitCast(f32, ix);
+    return @as(f32, @bitCast(ix));
 }
 
 /// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
@@ -105,10 +105,10 @@ pub fn sqrtf(x: f32) callconv(.C) f32 {
 pub fn sqrt(x: f64) callconv(.C) f64 {
     const tiny: f64 = 1.0e-300;
     const sign: u32 = 0x80000000;
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
 
-    var ix0 = @intCast(u32, u >> 32);
-    var ix1 = @intCast(u32, u & 0xFFFFFFFF);
+    var ix0 = @as(u32, @intCast(u >> 32));
+    var ix1 = @as(u32, @intCast(u & 0xFFFFFFFF));
 
     // sqrt(nan) = nan, sqrt(+inf) = +inf, sqrt(-inf) = nan
     if (ix0 & 0x7FF00000 == 0x7FF00000) {
@@ -125,7 +125,7 @@ pub fn sqrt(x: f64) callconv(.C) f64 {
     }
 
     // normalize x
-    var m = @intCast(i32, ix0 >> 20);
+    var m = @as(i32, @intCast(ix0 >> 20));
     if (m == 0) {
         // subnormal
         while (ix0 == 0) {
@@ -139,9 +139,9 @@ pub fn sqrt(x: f64) callconv(.C) f64 {
         while (ix0 & 0x00100000 == 0) : (i += 1) {
             ix0 <<= 1;
         }
-        m -= @intCast(i32, i) - 1;
-        ix0 |= ix1 >> @intCast(u5, 32 - i);
-        ix1 <<= @intCast(u5, i);
+        m -= @as(i32, @intCast(i)) - 1;
+        ix0 |= ix1 >> @as(u5, @intCast(32 - i));
+        ix1 <<= @as(u5, @intCast(i));
     }
 
     // unbias exponent
@@ -225,21 +225,21 @@ pub fn sqrt(x: f64) callconv(.C) f64 {
 
     // NOTE: musl here appears to rely on signed twos-complement wraparound. +% has the same
     // behaviour at least.
-    var iix0 = @intCast(i32, ix0);
+    var iix0 = @as(i32, @intCast(ix0));
     iix0 = iix0 +% (m << 20);
 
-    const uz = (@intCast(u64, iix0) << 32) | ix1;
-    return @bitCast(f64, uz);
+    const uz = (@as(u64, @intCast(iix0)) << 32) | ix1;
+    return @as(f64, @bitCast(uz));
 }
 
 pub fn __sqrtx(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, sqrtq(x));
+    return @as(f80, @floatCast(sqrtq(x)));
 }
 
 pub fn sqrtq(x: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return sqrt(@floatCast(f64, x));
+    return sqrt(@as(f64, @floatCast(x)));
 }
 
 pub fn sqrtl(x: c_longdouble) callconv(.C) c_longdouble {
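Several of the casts above exist only to satisfy Zig's shift-amount typing: a u32 may be shifted by at most 31, so the count must be a u5 (and a u64's count a u6). A sketch of the rule (hypothetical helper):

    fn shr(x: u32, n: u32) u32 {
        // @intCast narrows the runtime count into u5's 0..31 range;
        // safe builds panic if n is out of range
        return x >> @as(u5, @intCast(n));
    }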
lib/compiler_rt/subdf3.zig
@@ -11,11 +11,11 @@ comptime {
 }
 
 fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
-    const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+    const neg_b = @as(f64, @bitCast(@as(u64, @bitCast(b)) ^ (@as(u64, 1) << 63)));
     return a + neg_b;
 }
 
 fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
-    const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+    const neg_b = @as(f64, @bitCast(@as(u64, @bitCast(b)) ^ (@as(u64, 1) << 63)));
     return a + neg_b;
 }
lib/compiler_rt/subhf3.zig
@@ -7,6 +7,6 @@ comptime {
 }
 
 fn __subhf3(a: f16, b: f16) callconv(.C) f16 {
-    const neg_b = @bitCast(f16, @bitCast(u16, b) ^ (@as(u16, 1) << 15));
+    const neg_b = @as(f16, @bitCast(@as(u16, @bitCast(b)) ^ (@as(u16, 1) << 15)));
     return a + neg_b;
 }
lib/compiler_rt/subsf3.zig
@@ -11,11 +11,11 @@ comptime {
 }
 
 fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
-    const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
+    const neg_b = @as(f32, @bitCast(@as(u32, @bitCast(b)) ^ (@as(u32, 1) << 31)));
     return a + neg_b;
 }
 
 fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
-    const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
+    const neg_b = @as(f32, @bitCast(@as(u32, @bitCast(b)) ^ (@as(u32, 1) << 31)));
     return a + neg_b;
 }
lib/compiler_rt/subtf3.zig
@@ -20,6 +20,6 @@ fn _Qp_sub(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
 }
 
 inline fn sub(a: f128, b: f128) f128 {
-    const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
+    const neg_b = @as(f128, @bitCast(@as(u128, @bitCast(b)) ^ (@as(u128, 1) << 127)));
     return a + neg_b;
 }
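subdf3, subhf3, subsf3, and subtf3 all use the same trick: `a - b` is computed as `a + (-b)`, and `-b` is produced by XOR-ing the IEEE sign bit through a bitcast rather than by a floating-point negation. A runnable sketch at f32 width (hypothetical test mirroring `__subsf3`):

    const std = @import("std");

    test "subtract via sign-bit flip" {
        const a: f32 = 5.0;
        const b: f32 = 3.0;
        const neg_b = @as(f32, @bitCast(@as(u32, @bitCast(b)) ^ (@as(u32, 1) << 31)));
        try std.testing.expectEqual(@as(f32, 2.0), a + neg_b);
    }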
lib/compiler_rt/tan.zig
@@ -33,7 +33,7 @@ comptime {
 
 pub fn __tanh(x: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, tanf(x));
+    return @as(f16, @floatCast(tanf(x)));
 }
 
 pub fn tanf(x: f32) callconv(.C) f32 {
@@ -43,7 +43,7 @@ pub fn tanf(x: f32) callconv(.C) f32 {
     const t3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
     const t4pio2: f64 = 4.0 * math.pi / 2.0; // 0x401921FB, 0x54442D18
 
-    var ix = @bitCast(u32, x);
+    var ix = @as(u32, @bitCast(x));
     const sign = ix >> 31 != 0;
     ix &= 0x7fffffff;
 
@@ -81,7 +81,7 @@ pub fn tanf(x: f32) callconv(.C) f32 {
 }
 
 pub fn tan(x: f64) callconv(.C) f64 {
-    var ix = @bitCast(u64, x) >> 32;
+    var ix = @as(u64, @bitCast(x)) >> 32;
     ix &= 0x7fffffff;
 
     // |x| ~< pi/4
@@ -106,12 +106,12 @@ pub fn tan(x: f64) callconv(.C) f64 {
 
 pub fn __tanx(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, tanq(x));
+    return @as(f80, @floatCast(tanq(x)));
 }
 
 pub fn tanq(x: f128) callconv(.C) f128 {
     // TODO: more correct implementation
-    return tan(@floatCast(f64, x));
+    return tan(@as(f64, @floatCast(x)));
 }
 
 pub fn tanl(x: c_longdouble) callconv(.C) c_longdouble {
lib/compiler_rt/trig.zig
@@ -70,7 +70,7 @@ pub fn __cosdf(x: f64) f32 {
     const z = x * x;
     const w = z * z;
     const r = C2 + z * C3;
-    return @floatCast(f32, ((1.0 + z * C0) + w * C1) + (w * z) * r);
+    return @as(f32, @floatCast(((1.0 + z * C0) + w * C1) + (w * z) * r));
 }
 
 /// kernel sin function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854
@@ -131,7 +131,7 @@ pub fn __sindf(x: f64) f32 {
     const w = z * z;
     const r = S3 + z * S4;
     const s = z * x;
-    return @floatCast(f32, (x + s * (S1 + z * S2)) + s * w * r);
+    return @as(f32, @floatCast((x + s * (S1 + z * S2)) + s * w * r));
 }
 
 /// kernel tan function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854
@@ -199,7 +199,7 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 {
     var hx: u32 = undefined;
     var sign: bool = undefined;
 
-    hx = @intCast(u32, @bitCast(u64, x) >> 32);
+    hx = @as(u32, @intCast(@as(u64, @bitCast(x)) >> 32));
     const big = (hx & 0x7fffffff) >= 0x3FE59428; // |x| >= 0.6744
     if (big) {
         sign = hx >> 31 != 0;
@@ -222,7 +222,7 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 {
     r = y + z * (s * (r + v) + y) + s * T[0];
     w = x + r;
     if (big) {
-        s = 1 - 2 * @floatFromInt(f64, @intFromBool(odd));
+        s = 1 - 2 * @as(f64, @floatFromInt(@intFromBool(odd)));
         v = s - 2.0 * (x + (r - w * w / (w + s)));
         return if (sign) -v else v;
     }
@@ -231,11 +231,11 @@ pub fn __tan(x_: f64, y_: f64, odd: bool) f64 {
     }
     // -1.0/(x+r) has up to 2ulp error, so compute it accurately
     w0 = w;
-    w0 = @bitCast(f64, @bitCast(u64, w0) & 0xffffffff00000000);
+    w0 = @as(f64, @bitCast(@as(u64, @bitCast(w0)) & 0xffffffff00000000));
     v = r - (w0 - x); // w0+v = r+x
     a = -1.0 / w;
     a0 = a;
-    a0 = @bitCast(f64, @bitCast(u64, a0) & 0xffffffff00000000);
+    a0 = @as(f64, @bitCast(@as(u64, @bitCast(a0)) & 0xffffffff00000000));
     return a0 + a * (1.0 + a0 * w0 + a0 * v);
 }
 
@@ -269,5 +269,5 @@ pub fn __tandf(x: f64, odd: bool) f32 {
     const s = z * x;
     const u = T[0] + z * T[1];
     const r0 = (x + s * u) + (s * w) * (t + w * r);
-    return @floatCast(f32, if (odd) -1.0 / r0 else r0);
+    return @as(f32, @floatCast(if (odd) -1.0 / r0 else r0));
 }
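The w0/a0 masking in `__tan` is a head/tail split: clearing the low 32 bits of the representation leaves a head with at most 20 mantissa bits, so sums and products against it stay exact and the discarded part can be recovered as an exact tail. A sketch of the invariant (hypothetical test):

    const std = @import("std");

    test "high/low split of an f64 loses nothing" {
        const w: f64 = 1.0 / 3.0;
        const w0 = @as(f64, @bitCast(@as(u64, @bitCast(w)) & 0xffffffff00000000));
        const tail = w - w0; // exact: w and w0 agree in sign and exponent
        try std.testing.expect(w0 + tail == w);
    }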
lib/compiler_rt/trunc.zig
@@ -27,12 +27,12 @@ comptime {
 
 pub fn __trunch(x: f16) callconv(.C) f16 {
     // TODO: more efficient implementation
-    return @floatCast(f16, truncf(x));
+    return @as(f16, @floatCast(truncf(x)));
 }
 
 pub fn truncf(x: f32) callconv(.C) f32 {
-    const u = @bitCast(u32, x);
-    var e = @intCast(i32, ((u >> 23) & 0xFF)) - 0x7F + 9;
+    const u = @as(u32, @bitCast(x));
+    var e = @as(i32, @intCast(((u >> 23) & 0xFF))) - 0x7F + 9;
     var m: u32 = undefined;
 
     if (e >= 23 + 9) {
@@ -42,18 +42,18 @@ pub fn truncf(x: f32) callconv(.C) f32 {
         e = 1;
     }
 
-    m = @as(u32, math.maxInt(u32)) >> @intCast(u5, e);
+    m = @as(u32, math.maxInt(u32)) >> @as(u5, @intCast(e));
     if (u & m == 0) {
         return x;
     } else {
         math.doNotOptimizeAway(x + 0x1p120);
-        return @bitCast(f32, u & ~m);
+        return @as(f32, @bitCast(u & ~m));
     }
 }
 
 pub fn trunc(x: f64) callconv(.C) f64 {
-    const u = @bitCast(u64, x);
-    var e = @intCast(i32, ((u >> 52) & 0x7FF)) - 0x3FF + 12;
+    const u = @as(u64, @bitCast(x));
+    var e = @as(i32, @intCast(((u >> 52) & 0x7FF))) - 0x3FF + 12;
     var m: u64 = undefined;
 
     if (e >= 52 + 12) {
@@ -63,23 +63,23 @@ pub fn trunc(x: f64) callconv(.C) f64 {
         e = 1;
     }
 
-    m = @as(u64, math.maxInt(u64)) >> @intCast(u6, e);
+    m = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(e));
     if (u & m == 0) {
         return x;
     } else {
         math.doNotOptimizeAway(x + 0x1p120);
-        return @bitCast(f64, u & ~m);
+        return @as(f64, @bitCast(u & ~m));
     }
 }
 
 pub fn __truncx(x: f80) callconv(.C) f80 {
     // TODO: more efficient implementation
-    return @floatCast(f80, truncq(x));
+    return @as(f80, @floatCast(truncq(x)));
 }
 
 pub fn truncq(x: f128) callconv(.C) f128 {
-    const u = @bitCast(u128, x);
-    var e = @intCast(i32, ((u >> 112) & 0x7FFF)) - 0x3FFF + 16;
+    const u = @as(u128, @bitCast(x));
+    var e = @as(i32, @intCast(((u >> 112) & 0x7FFF))) - 0x3FFF + 16;
     var m: u128 = undefined;
 
     if (e >= 112 + 16) {
@@ -89,12 +89,12 @@ pub fn truncq(x: f128) callconv(.C) f128 {
         e = 1;
     }
 
-    m = @as(u128, math.maxInt(u128)) >> @intCast(u7, e);
+    m = @as(u128, math.maxInt(u128)) >> @as(u7, @intCast(e));
     if (u & m == 0) {
         return x;
     } else {
         math.doNotOptimizeAway(x + 0x1p120);
-        return @bitCast(f128, u & ~m);
+        return @as(f128, @bitCast(u & ~m));
     }
 }
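truncf, trunc, and truncq share one mechanism: derive from the exponent how many mantissa bits lie below the binary point, build a mask of exactly those bits, and clear them. A worked f32 case (hypothetical test):

    const std = @import("std");
    const math = std.math;

    test "trunc by masking fraction bits" {
        const u = @as(u32, @bitCast(@as(f32, 2.5))); // 0x40200000
        const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F + 9; // 10 here
        const m = @as(u32, math.maxInt(u32)) >> @as(u5, @intCast(e)); // low 22 bits
        try std.testing.expectEqual(@as(f32, 2.0), @as(f32, @bitCast(u & ~m)));
    }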
 
lib/compiler_rt/truncdfhf2.zig
@@ -12,9 +12,9 @@ comptime {
 }
 
 pub fn __truncdfhf2(a: f64) callconv(.C) common.F16T(f64) {
-    return @bitCast(common.F16T(f64), truncf(f16, f64, a));
+    return @as(common.F16T(f64), @bitCast(truncf(f16, f64, a)));
 }
 
 fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 {
-    return @bitCast(common.F16T(f64), truncf(f16, f64, a));
+    return @as(common.F16T(f64), @bitCast(truncf(f16, f64, a)));
 }
lib/compiler_rt/truncf.zig
@@ -38,7 +38,7 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
     const dstNaNCode = dstQNaN - 1;
 
     // Break a into a sign and representation of the absolute value
-    const aRep: src_rep_t = @bitCast(src_rep_t, a);
+    const aRep: src_rep_t = @as(src_rep_t, @bitCast(a));
     const aAbs: src_rep_t = aRep & srcAbsMask;
     const sign: src_rep_t = aRep & srcSignMask;
     var absResult: dst_rep_t = undefined;
@@ -47,7 +47,7 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
         // The exponent of a is within the range of normal numbers in the
         // destination format.  We can convert by simply right-shifting with
         // rounding and adjusting the exponent.
-        absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits));
+        absResult = @as(dst_rep_t, @truncate(aAbs >> (srcSigBits - dstSigBits)));
         absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits;
 
         const roundBits: src_rep_t = aAbs & roundMask;
@@ -62,18 +62,18 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
         // a is NaN.
         // Conjure the result by beginning with infinity, setting the qNaN
         // bit and inserting the (truncated) trailing NaN field.
-        absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
+        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
         absResult |= dstQNaN;
-        absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
+        absResult |= @as(dst_rep_t, @intCast(((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode));
     } else if (aAbs >= overflow) {
         // a overflows to infinity.
-        absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
+        absResult = @as(dst_rep_t, @intCast(dstInfExp)) << dstSigBits;
     } else {
         // a underflows on conversion to the destination type or is an exact
         // zero.  The result may be a denormal or zero.  Extract the exponent
         // to get the shift amount for the denormalization.
-        const aExp = @intCast(u32, aAbs >> srcSigBits);
-        const shift = @intCast(u32, srcExpBias - dstExpBias - aExp + 1);
+        const aExp = @as(u32, @intCast(aAbs >> srcSigBits));
+        const shift = @as(u32, @intCast(srcExpBias - dstExpBias - aExp + 1));
 
         const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
 
@@ -81,9 +81,9 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
         if (shift > srcSigBits) {
             absResult = 0;
         } else {
-            const sticky: src_rep_t = @intFromBool(significand << @intCast(SrcShift, srcBits - shift) != 0);
-            const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky;
-            absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits));
+            const sticky: src_rep_t = @intFromBool(significand << @as(SrcShift, @intCast(srcBits - shift)) != 0);
+            const denormalizedSignificand: src_rep_t = significand >> @as(SrcShift, @intCast(shift)) | sticky;
+            absResult = @as(dst_rep_t, @intCast(denormalizedSignificand >> (srcSigBits - dstSigBits)));
             const roundBits: src_rep_t = denormalizedSignificand & roundMask;
             if (roundBits > halfway) {
                 // Round to nearest
@@ -96,8 +96,8 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
     }
 
     const result: dst_rep_t align(@alignOf(dst_t)) = absResult |
-        @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits));
-    return @bitCast(dst_t, result);
+        @as(dst_rep_t, @truncate(sign >> @as(SrcShift, @intCast(srcBits - dstBits))));
+    return @as(dst_t, @bitCast(result));
 }
 
 pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
@@ -133,7 +133,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
         // destination format.  We can convert by simply right-shifting with
         // rounding and adjusting the exponent.
         abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
-        abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits));
+        abs_result |= @as(dst_rep_t, @truncate(a_rep.fraction >> (src_sig_bits - dst_sig_bits)));
         abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
 
         const round_bits = a_rep.fraction & round_mask;
@@ -148,12 +148,12 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
         // a is NaN.
         // Conjure the result by beginning with infinity, setting the qNaN
         // bit and inserting the (truncated) trailing NaN field.
-        abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
         abs_result |= dst_qnan;
-        abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
+        abs_result |= @as(dst_rep_t, @intCast((a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask));
     } else if (a_rep.exp >= overflow) {
         // a overflows to infinity.
-        abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+        abs_result = @as(dst_rep_t, @intCast(dst_inf_exp)) << dst_sig_bits;
     } else {
         // a underflows on conversion to the destination type or is an exact
         // zero.  The result may be a denormal or zero.  Extract the exponent
@@ -164,9 +164,9 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
         if (shift > src_sig_bits) {
             abs_result = 0;
         } else {
-            const sticky = @intFromBool(a_rep.fraction << @intCast(u6, shift) != 0);
-            const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky;
-            abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits));
+            const sticky = @intFromBool(a_rep.fraction << @as(u6, @intCast(shift)) != 0);
+            const denormalized_significand = a_rep.fraction >> @as(u6, @intCast(shift)) | sticky;
+            abs_result = @as(dst_rep_t, @intCast(denormalized_significand >> (src_sig_bits - dst_sig_bits)));
             const round_bits = denormalized_significand & round_mask;
             if (round_bits > halfway) {
                 // Round to nearest
@@ -179,7 +179,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
     }
 
     const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
-    return @bitCast(dst_t, result);
+    return @as(dst_t, @bitCast(result));
 }
 
 test {
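The roundBits/halfway comparisons implement round-to-nearest with ties to even: discarded bits above the halfway point bump the result, and an exact tie bumps it only when the kept significand is odd. A sketch at f32 -> f16, where 13 mantissa bits are discarded and 0x1000 is the exact halfway point (hypothetical test; `@floatCast` applies the same default IEEE rounding):

    const std = @import("std");

    test "narrowing ties round to even" {
        const lo = @as(f32, @bitCast(@as(u32, 0x3F801000))); // kept bits even: tie stays
        const hi = @as(f32, @bitCast(@as(u32, 0x3F803000))); // kept bits odd: tie bumps
        try std.testing.expectEqual(@as(u16, 0x3C00), @as(u16, @bitCast(@as(f16, @floatCast(lo)))));
        try std.testing.expectEqual(@as(u16, 0x3C02), @as(u16, @bitCast(@as(f16, @floatCast(hi)))));
    }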
lib/compiler_rt/truncf_test.zig
@@ -10,7 +10,7 @@ const __trunctfdf2 = @import("trunctfdf2.zig").__trunctfdf2;
 const __trunctfxf2 = @import("trunctfxf2.zig").__trunctfxf2;
 
 fn test__truncsfhf2(a: u32, expected: u16) !void {
-    const actual = @bitCast(u16, __truncsfhf2(@bitCast(f32, a)));
+    const actual = @as(u16, @bitCast(__truncsfhf2(@as(f32, @bitCast(a)))));
 
     if (actual == expected) {
         return;
@@ -73,7 +73,7 @@ test "truncsfhf2" {
 }
 
 fn test__truncdfhf2(a: f64, expected: u16) void {
-    const rep = @bitCast(u16, __truncdfhf2(a));
+    const rep = @as(u16, @bitCast(__truncdfhf2(a)));
 
     if (rep == expected) {
         return;
@@ -89,7 +89,7 @@ fn test__truncdfhf2(a: f64, expected: u16) void {
 }
 
 fn test__truncdfhf2_raw(a: u64, expected: u16) void {
-    const actual = @bitCast(u16, __truncdfhf2(@bitCast(f64, a)));
+    const actual = @as(u16, @bitCast(__truncdfhf2(@as(f64, @bitCast(a)))));
 
     if (actual == expected) {
         return;
@@ -141,7 +141,7 @@ test "truncdfhf2" {
 fn test__trunctfsf2(a: f128, expected: u32) void {
     const x = __trunctfsf2(a);
 
-    const rep = @bitCast(u32, x);
+    const rep = @as(u32, @bitCast(x));
     if (rep == expected) {
         return;
     }
@@ -157,11 +157,11 @@ fn test__trunctfsf2(a: f128, expected: u32) void {
 
 test "trunctfsf2" {
     // qnan
-    test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7fc00000);
+    test__trunctfsf2(@as(f128, @bitCast(@as(u128, 0x7fff800000000000 << 64))), 0x7fc00000);
     // nan
-    test__trunctfsf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7fc08000);
+    test__trunctfsf2(@as(f128, @bitCast(@as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64))), 0x7fc08000);
     // inf
-    test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7f800000);
+    test__trunctfsf2(@as(f128, @bitCast(@as(u128, 0x7fff000000000000 << 64))), 0x7f800000);
     // zero
     test__trunctfsf2(0.0, 0x0);
 
@@ -174,7 +174,7 @@ test "trunctfsf2" {
 fn test__trunctfdf2(a: f128, expected: u64) void {
     const x = __trunctfdf2(a);
 
-    const rep = @bitCast(u64, x);
+    const rep = @as(u64, @bitCast(x));
     if (rep == expected) {
         return;
     }
@@ -190,11 +190,11 @@ fn test__trunctfdf2(a: f128, expected: u64) void {
 
 test "trunctfdf2" {
     // qnan
-    test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7ff8000000000000);
+    test__trunctfdf2(@as(f128, @bitCast(@as(u128, 0x7fff800000000000 << 64))), 0x7ff8000000000000);
     // nan
-    test__trunctfdf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7ff8100000000000);
+    test__trunctfdf2(@as(f128, @bitCast(@as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64))), 0x7ff8100000000000);
     // inf
-    test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7ff0000000000000);
+    test__trunctfdf2(@as(f128, @bitCast(@as(u128, 0x7fff000000000000 << 64))), 0x7ff0000000000000);
     // zero
     test__trunctfdf2(0.0, 0x0);
 
@@ -207,7 +207,7 @@ test "trunctfdf2" {
 fn test__truncdfsf2(a: f64, expected: u32) void {
     const x = __truncdfsf2(a);
 
-    const rep = @bitCast(u32, x);
+    const rep = @as(u32, @bitCast(x));
     if (rep == expected) {
         return;
     }
@@ -225,11 +225,11 @@ fn test__truncdfsf2(a: f64, expected: u32) void {
 
 test "truncdfsf2" {
     // nan & qnan
-    test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff8000000000000)), 0x7fc00000);
-    test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000001)), 0x7fc00000);
+    test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff8000000000000))), 0x7fc00000);
+    test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff0000000000001))), 0x7fc00000);
     // inf
-    test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000000)), 0x7f800000);
-    test__truncdfsf2(@bitCast(f64, @as(u64, 0xfff0000000000000)), 0xff800000);
+    test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0x7ff0000000000000))), 0x7f800000);
+    test__truncdfsf2(@as(f64, @bitCast(@as(u64, 0xfff0000000000000))), 0xff800000);
 
     test__truncdfsf2(0.0, 0x0);
     test__truncdfsf2(1.0, 0x3f800000);
@@ -242,7 +242,7 @@ test "truncdfsf2" {
 fn test__trunctfhf2(a: f128, expected: u16) void {
     const x = __trunctfhf2(a);
 
-    const rep = @bitCast(u16, x);
+    const rep = @as(u16, @bitCast(x));
     if (rep == expected) {
         return;
     }
@@ -254,12 +254,12 @@ fn test__trunctfhf2(a: f128, expected: u16) void {
 
 test "trunctfhf2" {
     // qNaN
-    test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff8000000000000000000000000000)), 0x7e00);
+    test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff8000000000000000000000000000))), 0x7e00);
     // NaN
-    test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000001)), 0x7e00);
+    test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000001))), 0x7e00);
     // inf
-    test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000000)), 0x7c00);
-    test__trunctfhf2(-@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000000)), 0xfc00);
+    test__trunctfhf2(@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000000))), 0x7c00);
+    test__trunctfhf2(-@as(f128, @bitCast(@as(u128, 0x7fff0000000000000000000000000000))), 0xfc00);
     // zero
     test__trunctfhf2(0.0, 0x0);
     test__trunctfhf2(-0.0, 0x8000);
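The magic numbers in these tests spell out IEEE-754 special values in bits: an all-ones exponent with a zero mantissa is infinity, any nonzero mantissa makes a NaN, and the top mantissa bit marks it quiet. At f64 width (hypothetical constants):

    const inf = @as(f64, @bitCast(@as(u64, 0x7ff0000000000000))); // exponent all ones
    const qnan = @as(f64, @bitCast(@as(u64, 0x7ff8000000000000))); // quiet bit set
    // std.math.isInf(inf) and std.math.isNan(qnan) both hold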
lib/compiler_rt/truncsfhf2.zig
@@ -13,13 +13,13 @@ comptime {
 }
 
 pub fn __truncsfhf2(a: f32) callconv(.C) common.F16T(f32) {
-    return @bitCast(common.F16T(f32), truncf(f16, f32, a));
+    return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a)));
 }
 
 fn __gnu_f2h_ieee(a: f32) callconv(.C) common.F16T(f32) {
-    return @bitCast(common.F16T(f32), truncf(f16, f32, a));
+    return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a)));
 }
 
 fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
-    return @bitCast(common.F16T(f32), truncf(f16, f32, a));
+    return @as(common.F16T(f32), @bitCast(truncf(f16, f32, a)));
 }
lib/compiler_rt/trunctfhf2.zig
@@ -8,5 +8,5 @@ comptime {
 }
 
 pub fn __trunctfhf2(a: f128) callconv(.C) common.F16T(f128) {
-    return @bitCast(common.F16T(f128), truncf(f16, f128, a));
+    return @as(common.F16T(f128), @bitCast(truncf(f16, f128, a)));
 }
lib/compiler_rt/trunctfxf2.zig
@@ -25,7 +25,7 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
     const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
 
     // Break a into a sign and representation of the absolute value
-    const a_rep = @bitCast(u128, a);
+    const a_rep = @as(u128, @bitCast(a));
     const a_abs = a_rep & src_abs_mask;
     const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0;
     const integer_bit = 1 << 63;
@@ -38,13 +38,13 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
         // bit and inserting the (truncated) trailing NaN field.
         res.exp = 0x7fff;
         res.fraction = 0x8000000000000000;
-        res.fraction |= @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits));
+        res.fraction |= @as(u64, @truncate(a_abs >> (src_sig_bits - dst_sig_bits)));
     } else {
         // The exponent of a is within the range of normal numbers in the
         // destination format.  We can convert by simply right-shifting with
         // rounding, adding the explicit integer bit, and adjusting the exponent
-        res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)) | integer_bit;
-        res.exp = @truncate(u16, a_abs >> src_sig_bits);
+        res.fraction = @as(u64, @truncate(a_abs >> (src_sig_bits - dst_sig_bits))) | integer_bit;
+        res.exp = @as(u16, @truncate(a_abs >> src_sig_bits));
 
         const round_bits = a_abs & round_mask;
         if (round_bits > halfway) {
lib/compiler_rt/truncxfhf2.zig
@@ -8,5 +8,5 @@ comptime {
 }
 
 fn __truncxfhf2(a: f80) callconv(.C) common.F16T(f80) {
-    return @bitCast(common.F16T(f80), trunc_f80(f16, a));
+    return @as(common.F16T(f80), @bitCast(trunc_f80(f16, a)));
 }
lib/compiler_rt/udivmod.zig
@@ -21,11 +21,11 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
     var un64: T = undefined;
     var un10: T = undefined;
 
-    const s = @intCast(Log2Int(T), @clz(v));
+    const s = @as(Log2Int(T), @intCast(@clz(v)));
     if (s > 0) {
         // Normalize divisor
         v <<= s;
-        un64 = (_u1 << s) | (_u0 >> @intCast(Log2Int(T), (@bitSizeOf(T) - @intCast(T, s))));
+        un64 = (_u1 << s) | (_u0 >> @as(Log2Int(T), @intCast((@bitSizeOf(T) - @as(T, @intCast(s))))));
         un10 = _u0 << s;
     } else {
         // Avoid undefined behavior of (u0 >> @bitSizeOf(T))
@@ -101,8 +101,8 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
         return 0;
     }
 
-    var a = @bitCast([2]HalfT, a_);
-    var b = @bitCast([2]HalfT, b_);
+    var a = @as([2]HalfT, @bitCast(a_));
+    var b = @as([2]HalfT, @bitCast(b_));
     var q: [2]HalfT = undefined;
     var r: [2]HalfT = undefined;
 
@@ -119,16 +119,16 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
             q[lo] = divwide(HalfT, a[hi] % b[lo], a[lo], b[lo], &r[lo]);
         }
         if (maybe_rem) |rem| {
-            rem.* = @bitCast(T, r);
+            rem.* = @as(T, @bitCast(r));
         }
-        return @bitCast(T, q);
+        return @as(T, @bitCast(q));
     }
 
     // 0 <= shift <= 63
     var shift: Log2Int(T) = @clz(b[hi]) - @clz(a[hi]);
-    var af = @bitCast(T, a);
-    var bf = @bitCast(T, b) << shift;
-    q = @bitCast([2]HalfT, @as(T, 0));
+    var af = @as(T, @bitCast(a));
+    var bf = @as(T, @bitCast(b)) << shift;
+    q = @as([2]HalfT, @bitCast(@as(T, 0)));
 
     for (0..shift + 1) |_| {
         q[lo] <<= 1;
@@ -137,13 +137,13 @@ pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
         //     af -= bf;
         //     q[lo] |= 1;
         // }
-        const s = @bitCast(SignedT, bf -% af -% 1) >> (@bitSizeOf(T) - 1);
-        q[lo] |= @intCast(HalfT, s & 1);
-        af -= bf & @bitCast(T, s);
+        const s = @as(SignedT, @bitCast(bf -% af -% 1)) >> (@bitSizeOf(T) - 1);
+        q[lo] |= @as(HalfT, @intCast(s & 1));
+        af -= bf & @as(T, @bitCast(s));
         bf >>= 1;
     }
     if (maybe_rem) |rem| {
-        rem.* = @bitCast(T, af);
+        rem.* = @as(T, @bitCast(af));
     }
-    return @bitCast(T, q);
+    return @as(T, @bitCast(q));
 }
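The commented-out branch inside the quotient loop documents what the branchless lines compute: `s` becomes all ones exactly when `af >= bf` (the sign of `bf - af - 1`), so the subtraction and the quotient bit are applied through a mask instead of a conditional. A u32-width sketch (hypothetical helper):

    fn divStep(af: *u32, bf: u32, q: *u32) void {
        // s == 0xFFFFFFFF when af.* >= bf, else 0
        const s = @as(u32, @bitCast(@as(i32, @bitCast(bf -% af.* -% 1)) >> 31));
        q.* |= s & 1;
        af.* -%= bf & s;
    }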
lib/compiler_rt/udivmodei4.zig
@@ -83,23 +83,23 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
         i = 0;
         while (i <= n) : (i += 1) {
             const p = qhat * limb(&vn, i);
-            const t = limb(&un, i + j) - carry - @truncate(u32, p);
-            limb_set(&un, i + j, @truncate(u32, @bitCast(u64, t)));
-            carry = @intCast(i64, p >> 32) - @intCast(i64, t >> 32);
+            const t = limb(&un, i + j) - carry - @as(u32, @truncate(p));
+            limb_set(&un, i + j, @as(u32, @truncate(@as(u64, @bitCast(t)))));
+            carry = @as(i64, @intCast(p >> 32)) - @as(i64, @intCast(t >> 32));
         }
         const t = limb(&un, j + n + 1) -% carry;
-        limb_set(&un, j + n + 1, @truncate(u32, @bitCast(u64, t)));
-        if (q) |q_| limb_set(q_, j, @truncate(u32, qhat));
+        limb_set(&un, j + n + 1, @as(u32, @truncate(@as(u64, @bitCast(t)))));
+        if (q) |q_| limb_set(q_, j, @as(u32, @truncate(qhat)));
         if (t < 0) {
             if (q) |q_| limb_set(q_, j, limb(q_, j) - 1);
             var carry2: u64 = 0;
             i = 0;
             while (i <= n) : (i += 1) {
                 const t2 = @as(u64, limb(&un, i + j)) + @as(u64, limb(&vn, i)) + carry2;
-                limb_set(&un, i + j, @truncate(u32, t2));
+                limb_set(&un, i + j, @as(u32, @truncate(t2)));
                 carry2 = t2 >> 32;
             }
-            limb_set(&un, j + n + 1, @truncate(u32, limb(&un, j + n + 1) + carry2));
+            limb_set(&un, j + n + 1, @as(u32, @truncate(limb(&un, j + n + 1) + carry2)));
         }
         if (j == 0) break;
     }
lib/compiler_rt/udivmodti4.zig
@@ -20,7 +20,7 @@ pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.C) u128 {
 const v2u64 = @Vector(2, u64);
 
 fn __udivmodti4_windows_x86_64(a: v2u64, b: v2u64, maybe_rem: ?*u128) callconv(.C) v2u64 {
-    return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem));
+    return @as(v2u64, @bitCast(udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), maybe_rem)));
 }
 
 test {
lib/compiler_rt/udivti3.zig
@@ -20,5 +20,5 @@ pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 {
 const v2u64 = @Vector(2, u64);
 
 fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
-    return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null));
+    return @as(v2u64, @bitCast(udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), null)));
 }
lib/compiler_rt/umodti3.zig
@@ -23,6 +23,6 @@ const v2u64 = @Vector(2, u64);
 
 fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
     var r: u128 = undefined;
-    _ = udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), &r);
-    return @bitCast(v2u64, r);
+    _ = udivmod(u128, @as(u128, @bitCast(a)), @as(u128, @bitCast(b)), &r);
+    return @as(v2u64, @bitCast(r));
 }
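The `_windows_x86_64` variants exist because that calling convention treats 128-bit integers differently, so the exported symbols take and return `@Vector(2, u64)`, which has the same 128-bit layout, and convert at the boundary. The conversion is a free bitcast (sketch):

    const v2u64 = @Vector(2, u64);

    fn toInt(v: v2u64) u128 {
        return @as(u128, @bitCast(v)); // same size and layout; no runtime work
    }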
lib/std/atomic/Atomic.zig
@@ -46,7 +46,7 @@ pub fn Atomic(comptime T: type) type {
                     extern "c" fn __tsan_release(addr: *anyopaque) void;
                 };
 
-                const addr = @ptrCast(*anyopaque, self);
+                const addr = @as(*anyopaque, @ptrCast(self));
                 return switch (ordering) {
                     .Unordered, .Monotonic => @compileError(@tagName(ordering) ++ " only applies to atomic loads and stores"),
                     .Acquire => tsan.__tsan_acquire(addr),
@@ -307,7 +307,7 @@ pub fn Atomic(comptime T: type) type {
                 // TODO: emit appropriate tsan fence if compiling with tsan
                 _ = ordering;
 
-                return @intCast(u1, old_bit);
+                return @as(u1, @intCast(old_bit));
             }
         });
     };
@@ -392,8 +392,8 @@ test "Atomic.swap" {
         try testing.expectEqual(a.load(.SeqCst), true);
 
         var b = Atomic(?*u8).init(null);
-        try testing.expectEqual(b.swap(@ptrFromInt(?*u8, @alignOf(u8)), ordering), null);
-        try testing.expectEqual(b.load(.SeqCst), @ptrFromInt(?*u8, @alignOf(u8)));
+        try testing.expectEqual(b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), ordering), null);
+        try testing.expectEqual(b.load(.SeqCst), @as(?*u8, @ptrFromInt(@alignOf(u8))));
     }
 }
 
@@ -544,7 +544,7 @@ test "Atomic.bitSet" {
             var x = Atomic(Int).init(0);
 
             for (0..@bitSizeOf(Int)) |bit_index| {
-                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const bit = @as(std.math.Log2Int(Int), @intCast(bit_index));
                 const mask = @as(Int, 1) << bit;
 
                 // setting the bit should change the bit
@@ -558,7 +558,7 @@ test "Atomic.bitSet" {
 
                // all the previous bits should not have changed (still be set)
                 for (0..bit_index) |prev_bit_index| {
-                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index));
                     const prev_mask = @as(Int, 1) << prev_bit;
                     try testing.expect(x.load(.SeqCst) & prev_mask != 0);
                 }
@@ -573,7 +573,7 @@ test "Atomic.bitReset" {
             var x = Atomic(Int).init(0);
 
             for (0..@bitSizeOf(Int)) |bit_index| {
-                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const bit = @as(std.math.Log2Int(Int), @intCast(bit_index));
                 const mask = @as(Int, 1) << bit;
                 x.storeUnchecked(x.loadUnchecked() | mask);
 
@@ -588,7 +588,7 @@ test "Atomic.bitReset" {
 
                // all the previous bits should not have changed (still be reset)
                 for (0..bit_index) |prev_bit_index| {
-                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index));
                     const prev_mask = @as(Int, 1) << prev_bit;
                     try testing.expect(x.load(.SeqCst) & prev_mask == 0);
                 }
@@ -603,7 +603,7 @@ test "Atomic.bitToggle" {
             var x = Atomic(Int).init(0);
 
             for (0..@bitSizeOf(Int)) |bit_index| {
-                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const bit = @as(std.math.Log2Int(Int), @intCast(bit_index));
                 const mask = @as(Int, 1) << bit;
 
                 // toggling the bit should change the bit
@@ -617,7 +617,7 @@ test "Atomic.bitToggle" {
 
                // all the previous bits should not have changed (still be toggled back)
                 for (0..bit_index) |prev_bit_index| {
-                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_bit = @as(std.math.Log2Int(Int), @intCast(prev_bit_index));
                     const prev_mask = @as(Int, 1) << prev_bit;
                     try testing.expect(x.load(.SeqCst) & prev_mask == 0);
                 }
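`bit_index` is a usize supplied by the loop, but a shift count for `Int` must have type `std.math.Log2Int(Int)`, the unsigned integer exactly wide enough to name every bit position. The repeated cast is the standard narrowing (hypothetical helper):

    const std = @import("std");

    fn maskOf(comptime Int: type, bit_index: usize) Int {
        const bit = @as(std.math.Log2Int(Int), @intCast(bit_index));
        return @as(Int, 1) << bit;
    }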
lib/std/atomic/queue.zig
@@ -248,7 +248,7 @@ fn startPuts(ctx: *Context) u8 {
     const random = prng.random();
     while (put_count != 0) : (put_count -= 1) {
         std.time.sleep(1); // let the os scheduler be our fuzz
-        const x = @bitCast(i32, random.int(u32));
+        const x = @as(i32, @bitCast(random.int(u32)));
         const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
         node.* = .{
             .prev = undefined,
lib/std/atomic/stack.zig
@@ -151,7 +151,7 @@ fn startPuts(ctx: *Context) u8 {
     const random = prng.random();
     while (put_count != 0) : (put_count -= 1) {
         std.time.sleep(1); // let the os scheduler be our fuzz
-        const x = @bitCast(i32, random.int(u32));
+        const x = @as(i32, @bitCast(random.int(u32)));
         const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
         node.* = Stack(i32).Node{
             .next = undefined,
lib/std/Build/Step/CheckObject.zig
@@ -449,9 +449,9 @@ const MachODumper = struct {
                 },
                 .SYMTAB => if (opts.dump_symtab) {
                     const lc = cmd.cast(macho.symtab_command).?;
-                    symtab = @ptrCast(
+                    symtab = @as(
                         [*]const macho.nlist_64,
-                        @alignCast(@alignOf(macho.nlist_64), &bytes[lc.symoff]),
+                        @ptrCast(@alignCast(&bytes[lc.symoff])),
                     )[0..lc.nsyms];
                     strtab = bytes[lc.stroff..][0..lc.strsize];
                 },
@@ -474,7 +474,7 @@ const MachODumper = struct {
             try writer.print("{s}\n", .{symtab_label});
             for (symtab) |sym| {
                 if (sym.stab()) continue;
-                const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0);
+                const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0);
                 if (sym.sect()) {
                     const sect = sections.items[sym.n_sect - 1];
                     try writer.print("{x} ({s},{s})", .{
@@ -487,7 +487,7 @@ const MachODumper = struct {
                     }
                     try writer.print(" {s}\n", .{sym_name});
                 } else if (sym.undf()) {
-                    const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+                    const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
                     const import_name = blk: {
                         if (ordinal <= 0) {
                             if (ordinal == macho.BIND_SPECIAL_DYLIB_SELF)
@@ -498,7 +498,7 @@ const MachODumper = struct {
                                 break :blk "flat lookup";
                             unreachable;
                         }
-                        const full_path = imports.items[@bitCast(u16, ordinal) - 1];
+                        const full_path = imports.items[@as(u16, @bitCast(ordinal)) - 1];
                         const basename = fs.path.basename(full_path);
                         assert(basename.len > 0);
                         const ext = mem.lastIndexOfScalar(u8, basename, '.') orelse basename.len;
@@ -950,8 +950,8 @@ const WasmDumper = struct {
         switch (opcode) {
             .i32_const => try writer.print("i32.const {x}\n", .{try std.leb.readILEB128(i32, reader)}),
             .i64_const => try writer.print("i64.const {x}\n", .{try std.leb.readILEB128(i64, reader)}),
-            .f32_const => try writer.print("f32.const {x}\n", .{@bitCast(f32, try reader.readIntLittle(u32))}),
-            .f64_const => try writer.print("f64.const {x}\n", .{@bitCast(f64, try reader.readIntLittle(u64))}),
+            .f32_const => try writer.print("f32.const {x}\n", .{@as(f32, @bitCast(try reader.readIntLittle(u32)))}),
+            .f64_const => try writer.print("f64.const {x}\n", .{@as(f64, @bitCast(try reader.readIntLittle(u64)))}),
             .global_get => try writer.print("global.get {x}\n", .{try std.leb.readULEB128(u32, reader)}),
             else => unreachable,
         }
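The string-table casts in the MachODumper hunk follow a common pattern: symbol names live at byte offsets inside `strtab` and are NUL-terminated, so the pointer is cast to a sentinel-terminated many-pointer and sliced up to the terminator. A sketch (hypothetical helper):

    const std = @import("std");

    fn nameAt(strtab: []const u8, offset: u32) [:0]const u8 {
        // adopt the 0 sentinel, then walk to it
        return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + offset)), 0);
    }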
lib/std/Build/Step/Compile.zig
@@ -321,7 +321,7 @@ pub const BuildId = union(enum) {
     pub fn initHexString(bytes: []const u8) BuildId {
         var result: BuildId = .{ .hexstring = .{
             .bytes = undefined,
-            .len = @intCast(u8, bytes.len),
+            .len = @as(u8, @intCast(bytes.len)),
         } };
         @memcpy(result.hexstring.bytes[0..bytes.len], bytes);
         return result;
@@ -342,7 +342,7 @@ pub const BuildId = union(enum) {
         } else if (mem.startsWith(u8, text, "0x")) {
             var result: BuildId = .{ .hexstring = undefined };
             const slice = try std.fmt.hexToBytes(&result.hexstring.bytes, text[2..]);
-            result.hexstring.len = @intCast(u8, slice.len);
+            result.hexstring.len = @as(u8, @intCast(slice.len));
             return result;
         }
         return error.InvalidBuildIdStyle;
@@ -2059,7 +2059,7 @@ fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
     const file = fs.cwd().openFile(path_file, .{}) catch return null;
     defer file.close();
 
-    const size = @intCast(usize, try file.getEndPos());
+    const size = @as(usize, @intCast(try file.getEndPos()));
     const vcpkg_path = try allocator.alloc(u8, size);
     const size_read = try file.read(vcpkg_path);
     std.debug.assert(size == size_read);
lib/std/Build/Step/Run.zig
@@ -998,7 +998,7 @@ fn evalZigTest(
             },
             .test_metadata => {
                 const TmHdr = std.zig.Server.Message.TestMetadata;
-                const tm_hdr = @ptrCast(*align(1) const TmHdr, body);
+                const tm_hdr = @as(*align(1) const TmHdr, @ptrCast(body));
                 test_count = tm_hdr.tests_len;
 
                 const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)];
@@ -1034,7 +1034,7 @@ fn evalZigTest(
                 const md = metadata.?;
 
                 const TrHdr = std.zig.Server.Message.TestResults;
-                const tr_hdr = @ptrCast(*align(1) const TrHdr, body);
+                const tr_hdr = @as(*align(1) const TrHdr, @ptrCast(body));
                 fail_count += @intFromBool(tr_hdr.flags.fail);
                 skip_count += @intFromBool(tr_hdr.flags.skip);
                 leak_count += @intFromBool(tr_hdr.flags.leak);
lib/std/Build/Cache.zig
@@ -128,7 +128,7 @@ fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath {
             const sub_path = try gpa.dupe(u8, resolved_path[p.len + 1 ..]);
             gpa.free(resolved_path);
             return PrefixedPath{
-                .prefix = @intCast(u8, i),
+                .prefix = @as(u8, @intCast(i)),
                 .sub_path = sub_path,
             };
         }
@@ -653,7 +653,7 @@ pub const Manifest = struct {
                 return error.FileTooBig;
             }
 
-            const contents = try self.cache.gpa.alloc(u8, @intCast(usize, ch_file.stat.size));
+            const contents = try self.cache.gpa.alloc(u8, @as(usize, @intCast(ch_file.stat.size)));
             errdefer self.cache.gpa.free(contents);
 
             // Hash while reading from disk, to keep the contents in the cpu cache while
lib/std/Build/Step.zig
@@ -355,7 +355,7 @@ pub fn evalZigProcess(
             },
             .error_bundle => {
                 const EbHdr = std.zig.Server.Message.ErrorBundle;
-                const eb_hdr = @ptrCast(*align(1) const EbHdr, body);
+                const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
                 const extra_bytes =
                     body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
                 const string_bytes =
@@ -377,7 +377,7 @@ pub fn evalZigProcess(
             },
             .emit_bin_path => {
                 const EbpHdr = std.zig.Server.Message.EmitBinPath;
-                const ebp_hdr = @ptrCast(*align(1) const EbpHdr, body);
+                const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body));
                 s.result_cached = ebp_hdr.flags.cache_hit;
                 result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
             },
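The server message body is a bare byte slice with no alignment guarantee, so the header structs are read through `*align(1)` pointers: `@ptrCast` changes the pointee type, and the explicit `align(1)` keeps the load legal at any address. A sketch (hypothetical helper; assumes `body` is at least `@sizeOf(Hdr)` long):

    fn readHeader(comptime Hdr: type, body: []const u8) Hdr {
        // body.ptr may be unaligned for Hdr; align(1) makes the load byte-wise
        return @as(*align(1) const Hdr, @ptrCast(body.ptr)).*;
    }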
lib/std/c/darwin.zig
@@ -1177,10 +1177,10 @@ pub const sigset_t = u32;
 pub const empty_sigset: sigset_t = 0;
 
 pub const SIG = struct {
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
-    pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 5);
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+    pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(5));
 
     /// block specified signal set
     pub const _BLOCK = 1;
@@ -1411,7 +1411,7 @@ pub const MAP = struct {
     pub const NOCACHE = 0x0400;
     /// don't reserve needed swap area
     pub const NORESERVE = 0x0040;
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
 };
 
 pub const MSF = struct {
@@ -1879,7 +1879,7 @@ pub const W = struct {
     pub const UNTRACED = 0x00000002;
 
     pub fn EXITSTATUS(x: u32) u8 {
-        return @intCast(u8, x >> 8);
+        return @as(u8, @intCast(x >> 8));
     }
     pub fn TERMSIG(x: u32) u32 {
         return status(x);
@@ -2463,7 +2463,7 @@ pub const KernE = enum(u32) {
 pub const mach_msg_return_t = kern_return_t;
 
 pub fn getMachMsgError(err: mach_msg_return_t) MachMsgE {
-    return @enumFromInt(MachMsgE, @truncate(u32, @intCast(usize, err)));
+    return @as(MachMsgE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err))))));
 }
 
 /// All special error code bits defined below.
@@ -2665,10 +2665,10 @@ pub const RTLD = struct {
     pub const NODELETE = 0x80;
     pub const FIRST = 0x100;
 
-    pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
-    pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
-    pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
-    pub const MAIN_ONLY = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -5)));
+    pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+    pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+    pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
+    pub const MAIN_ONLY = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -5)))));
 };
 
 pub const F = struct {
@@ -3238,14 +3238,14 @@ pub const PosixSpawn = struct {
         pub fn get(self: Attr) Error!u16 {
             var flags: c_short = undefined;
             switch (errno(posix_spawnattr_getflags(&self.attr, &flags))) {
-                .SUCCESS => return @bitCast(u16, flags),
+                .SUCCESS => return @as(u16, @bitCast(flags)),
                 .INVAL => unreachable,
                 else => |err| return unexpectedErrno(err),
             }
         }
 
         pub fn set(self: *Attr, flags: u16) Error!void {
-            switch (errno(posix_spawnattr_setflags(&self.attr, @bitCast(c_short, flags)))) {
+            switch (errno(posix_spawnattr_setflags(&self.attr, @as(c_short, @bitCast(flags))))) {
                 .SUCCESS => return,
                 .INVAL => unreachable,
                 else => |err| return unexpectedErrno(err),
@@ -3281,7 +3281,7 @@ pub const PosixSpawn = struct {
         }
 
         pub fn openZ(self: *Actions, fd: fd_t, path: [*:0]const u8, flags: u32, mode: mode_t) Error!void {
-            switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @bitCast(c_int, flags), mode))) {
+            switch (errno(posix_spawn_file_actions_addopen(&self.actions, fd, path, @as(c_int, @bitCast(flags)), mode))) {
                 .SUCCESS => return,
                 .BADF => return error.InvalidFileDescriptor,
                 .NOMEM => return error.SystemResources,
@@ -3402,11 +3402,11 @@ pub const PosixSpawn = struct {
     pub fn waitpid(pid: pid_t, flags: u32) Error!std.os.WaitPidResult {
         var status: c_int = undefined;
         while (true) {
-            const rc = waitpid(pid, &status, @intCast(c_int, flags));
+            const rc = waitpid(pid, &status, @as(c_int, @intCast(flags)));
             switch (errno(rc)) {
                 .SUCCESS => return std.os.WaitPidResult{
-                    .pid = @intCast(pid_t, rc),
-                    .status = @bitCast(u32, status),
+                    .pid = @as(pid_t, @intCast(rc)),
+                    .status = @as(u32, @bitCast(status)),
                 },
                 .INTR => continue,
                 .CHILD => return error.ChildExecFailed,
@@ -3418,7 +3418,7 @@ pub const PosixSpawn = struct {
 };
 
 pub fn getKernError(err: kern_return_t) KernE {
-    return @enumFromInt(KernE, @truncate(u32, @intCast(usize, err)));
+    return @as(KernE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err))))));
 }
 
 pub fn unexpectedKernError(err: KernE) std.os.UnexpectedError {
@@ -3585,9 +3585,9 @@ pub const MachTask = extern struct {
                 .top => VM_REGION_TOP_INFO,
             },
             switch (tag) {
-                .basic => @ptrCast(vm_region_info_t, &info.info.basic),
-                .extended => @ptrCast(vm_region_info_t, &info.info.extended),
-                .top => @ptrCast(vm_region_info_t, &info.info.top),
+                .basic => @as(vm_region_info_t, @ptrCast(&info.info.basic)),
+                .extended => @as(vm_region_info_t, @ptrCast(&info.info.extended)),
+                .top => @as(vm_region_info_t, @ptrCast(&info.info.top)),
             },
             &count,
             &objname,
@@ -3640,8 +3640,8 @@ pub const MachTask = extern struct {
             &base_len,
             &nesting,
             switch (tag) {
-                .short => @ptrCast(vm_region_recurse_info_t, &info.info.short),
-                .full => @ptrCast(vm_region_recurse_info_t, &info.info.full),
+                .short => @as(vm_region_recurse_info_t, @ptrCast(&info.info.short)),
+                .full => @as(vm_region_recurse_info_t, @ptrCast(&info.info.full)),
             },
             &count,
         ))) {
@@ -3701,7 +3701,7 @@ pub const MachTask = extern struct {
                 task.port,
                 curr_addr,
                 @intFromPtr(out_buf.ptr),
-                @intCast(mach_msg_type_number_t, curr_size),
+                @as(mach_msg_type_number_t, @intCast(curr_size)),
             ))) {
                 .SUCCESS => {},
                 .FAILURE => return error.PermissionDenied,
@@ -3752,7 +3752,7 @@ pub const MachTask = extern struct {
                 else => |err| return unexpectedKernError(err),
             }
 
-            @memcpy(out_buf[0..curr_bytes_read], @ptrFromInt([*]const u8, vm_memory));
+            @memcpy(out_buf[0..curr_bytes_read], @as([*]const u8, @ptrFromInt(vm_memory)));
             _ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
 
             out_buf = out_buf[curr_bytes_read..];
@@ -3782,10 +3782,10 @@ pub const MachTask = extern struct {
             switch (getKernError(task_info(
                 task.port,
                 TASK_VM_INFO,
-                @ptrCast(task_info_t, &vm_info),
+                @as(task_info_t, @ptrCast(&vm_info)),
                 &info_count,
             ))) {
-                .SUCCESS => return @intCast(usize, vm_info.page_size),
+                .SUCCESS => return @as(usize, @intCast(vm_info.page_size)),
                 else => {},
             }
         }
@@ -3802,7 +3802,7 @@ pub const MachTask = extern struct {
         switch (getKernError(task_info(
             task.port,
             MACH_TASK_BASIC_INFO,
-            @ptrCast(task_info_t, &info),
+            @as(task_info_t, @ptrCast(&info)),
             &count,
         ))) {
             .SUCCESS => return info,
@@ -3832,7 +3832,7 @@ pub const MachTask = extern struct {
             _ = vm_deallocate(
                 self_task.port,
                 @intFromPtr(list.buf.ptr),
-                @intCast(vm_size_t, list.buf.len * @sizeOf(mach_port_t)),
+                @as(vm_size_t, @intCast(list.buf.len * @sizeOf(mach_port_t))),
             );
         }
     };
@@ -3841,7 +3841,7 @@ pub const MachTask = extern struct {
         var thread_list: mach_port_array_t = undefined;
         var thread_count: mach_msg_type_number_t = undefined;
         switch (getKernError(task_threads(task.port, &thread_list, &thread_count))) {
-            .SUCCESS => return ThreadList{ .buf = @ptrCast([*]MachThread, thread_list)[0..thread_count] },
+            .SUCCESS => return ThreadList{ .buf = @as([*]MachThread, @ptrCast(thread_list))[0..thread_count] },
             else => |err| return unexpectedKernError(err),
         }
     }
@@ -3860,7 +3860,7 @@ pub const MachThread = extern struct {
         switch (getKernError(thread_info(
             thread.port,
             THREAD_BASIC_INFO,
-            @ptrCast(thread_info_t, &info),
+            @as(thread_info_t, @ptrCast(&info)),
             &count,
         ))) {
             .SUCCESS => return info,
@@ -3874,7 +3874,7 @@ pub const MachThread = extern struct {
         switch (getKernError(thread_info(
             thread.port,
             THREAD_IDENTIFIER_INFO,
-            @ptrCast(thread_info_t, &info),
+            @as(thread_info_t, @ptrCast(&info)),
             &count,
         ))) {
             .SUCCESS => return info,
@@ -3962,7 +3962,7 @@ pub const thread_affinity_policy_t = [*]thread_affinity_policy;
 
 pub const THREAD_AFFINITY = struct {
     pub const POLICY = 0;
-    pub const POLICY_COUNT = @intCast(mach_msg_type_number_t, @sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t));
+    pub const POLICY_COUNT = @as(mach_msg_type_number_t, @intCast(@sizeOf(thread_affinity_policy_data_t) / @sizeOf(integer_t)));
 };
 
 /// cpu affinity api
@@ -4041,7 +4041,7 @@ pub const host_preferred_user_arch_data_t = host_preferred_user_arch;
 pub const host_preferred_user_arch_t = *host_preferred_user_arch;
 
 fn HostCount(comptime HT: type) mach_msg_type_number_t {
-    return @intCast(mach_msg_type_number_t, @sizeOf(HT) / @sizeOf(integer_t));
+    return @as(mach_msg_type_number_t, @intCast(@sizeOf(HT) / @sizeOf(integer_t)));
 }
 
 pub const HOST = struct {
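
The rewrite rule applied throughout these files: each cast builtin loses its explicit result-type argument and instead infers the destination type from its result location, so the automated migration pins that type by wrapping every call in `@as`. A minimal sketch of the pattern, mirroring the status-decoding hunks above (hypothetical function name):

    fn highByte(x: u32) u8 {
        // old two-argument form: return @intCast(u8, x >> 8);
        return @as(u8, @intCast(x >> 8)); // new: operand-only builtin, destination type via @as
    }

Chained casts such as the `getKernError` hunks migrate inside-out, with each former two-argument builtin gaining its own `@as` wrapper.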
lib/std/c/dragonfly.zig
@@ -172,7 +172,7 @@ pub const PROT = struct {
 
 pub const MAP = struct {
     pub const FILE = 0;
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
     pub const ANONYMOUS = ANON;
     pub const COPY = PRIVATE;
     pub const SHARED = 1;
@@ -208,7 +208,7 @@ pub const W = struct {
     pub const TRAPPED = 0x0020;
 
     pub fn EXITSTATUS(s: u32) u8 {
-        return @intCast(u8, (s & 0xff00) >> 8);
+        return @as(u8, @intCast((s & 0xff00) >> 8));
     }
     pub fn TERMSIG(s: u32) u32 {
         return s & 0x7f;
@@ -220,7 +220,7 @@ pub const W = struct {
         return TERMSIG(s) == 0;
     }
     pub fn IFSTOPPED(s: u32) bool {
-        return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
+        return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00;
     }
     pub fn IFSIGNALED(s: u32) bool {
         return (s & 0xffff) -% 1 < 0xff;
@@ -620,9 +620,9 @@ pub const S = struct {
 pub const BADSIG = SIG.ERR;
 
 pub const SIG = struct {
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
 
     pub const BLOCK = 1;
     pub const UNBLOCK = 2;
@@ -871,10 +871,10 @@ pub const RTLD = struct {
     pub const NODELETE = 0x01000;
     pub const NOLOAD = 0x02000;
 
-    pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
-    pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
-    pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
-    pub const ALL = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4)));
+    pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+    pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+    pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
+    pub const ALL = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4)))));
 };
 
 pub const dl_phdr_info = extern struct {
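
The `RTLD` sentinel constants show how nested casts migrate: the rewrite is applied inside-out, so `@ptrFromInt(*anyopaque, @bitCast(usize, v))` becomes `@as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(v))))`. A self-contained sketch of the `-1` sentinel, the same shape as the hunks above:

    // All-ones usize, reinterpreted as an opaque pointer value.
    const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));

The innermost `@as(isize, -1)` is untouched; only the two-argument builtins around it are rewritten.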
lib/std/c/freebsd.zig
@@ -20,11 +20,11 @@ fn __BIT_COUNT(bits: []const c_long) c_long {
 
 fn __BIT_MASK(s: usize) c_long {
     var x = s % CPU_SETSIZE;
-    return @bitCast(c_long, @intCast(c_ulong, 1) << @intCast(u6, x));
+    return @as(c_long, @bitCast(@as(c_ulong, @intCast(1)) << @as(u6, @intCast(x))));
 }
 
 pub fn CPU_COUNT(set: cpuset_t) c_int {
-    return @intCast(c_int, __BIT_COUNT(set.__bits[0..]));
+    return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..])));
 }
 
 pub fn CPU_ZERO(set: *cpuset_t) void {
@@ -529,7 +529,7 @@ pub const cap_rights_t = extern struct {
 
 pub const CAP = struct {
     pub fn RIGHT(idx: u6, bit: u64) u64 {
-        return (@intCast(u64, 1) << (57 + idx)) | bit;
+        return (@as(u64, @intCast(1)) << (57 + idx)) | bit;
     }
     pub const READ = CAP.RIGHT(0, 0x0000000000000001);
     pub const WRITE = CAP.RIGHT(0, 0x0000000000000002);
@@ -961,7 +961,7 @@ pub const CLOCK = struct {
 };
 
 pub const MAP = struct {
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
     pub const SHARED = 0x0001;
     pub const PRIVATE = 0x0002;
     pub const FIXED = 0x0010;
@@ -1013,7 +1013,7 @@ pub const W = struct {
     pub const TRAPPED = 32;
 
     pub fn EXITSTATUS(s: u32) u8 {
-        return @intCast(u8, (s & 0xff00) >> 8);
+        return @as(u8, @intCast((s & 0xff00) >> 8));
     }
     pub fn TERMSIG(s: u32) u32 {
         return s & 0x7f;
@@ -1025,7 +1025,7 @@ pub const W = struct {
         return TERMSIG(s) == 0;
     }
     pub fn IFSTOPPED(s: u32) bool {
-        return @truncate(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
+        return @as(u16, @truncate((((s & 0xffff) *% 0x10001) >> 8))) > 0x7f00;
     }
     pub fn IFSIGNALED(s: u32) bool {
         return (s & 0xffff) -% 1 < 0xff;
@@ -1086,9 +1086,9 @@ pub const SIG = struct {
     pub const UNBLOCK = 2;
     pub const SETMASK = 3;
 
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
 
     pub const WORDS = 4;
     pub const MAXSIG = 128;
@@ -2626,7 +2626,7 @@ pub const domainset_t = extern struct {
 };
 
 pub fn DOMAINSET_COUNT(set: domainset_t) c_int {
-    return @intCast(c_int, __BIT_COUNT(set.__bits[0..]));
+    return @as(c_int, @intCast(__BIT_COUNT(set.__bits[0..])));
 }
 
 pub const domainset = extern struct {
@@ -2650,7 +2650,7 @@ const ioctl_cmd = enum(u32) {
 };
 
 fn ioImpl(cmd: ioctl_cmd, op: u8, nr: u8, comptime IT: type) u32 {
-    return @bitCast(u32, @intFromEnum(cmd) | @intCast(u32, @truncate(u8, @sizeOf(IT))) << 16 | @intCast(u32, op) << 8 | nr);
+    return @as(u32, @bitCast(@intFromEnum(cmd) | @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IT))))) << 16 | @as(u32, @intCast(op)) << 8 | nr));
 }
 
 pub fn IO(op: u8, nr: u8) u32 {
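
One semantic point worth keeping in mind while auditing the `ioImpl`-style hunks: `@truncate` still discards high bits, while `@intCast` is safety-checked to fit; the migration preserves that distinction and only moves the type out of the argument list. A sketch with a hypothetical `n: u32`:

    fn lowByte(n: u32) u8 {
        // old: @truncate(u8, n) -- discards the high 24 bits rather than asserting they are zero
        return @truncate(n); // here the result type comes from the return type
    }

The formatter performs no type analysis, so it conservatively emits `@as(u8, @truncate(n))` even where, as above, the result type would already be inferable.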
lib/std/c/haiku.zig
@@ -414,7 +414,7 @@ pub const CLOCK = struct {
 
 pub const MAP = struct {
     /// mmap() error return code
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
     /// changes are seen by others
     pub const SHARED = 0x01;
     /// changes are only seen by caller
@@ -443,7 +443,7 @@ pub const W = struct {
     pub const NOWAIT = 0x20;
 
     pub fn EXITSTATUS(s: u32) u8 {
-        return @intCast(u8, s & 0xff);
+        return @as(u8, @intCast(s & 0xff));
     }
 
     pub fn TERMSIG(s: u32) u32 {
@@ -481,9 +481,9 @@ pub const SA = struct {
 };
 
 pub const SIG = struct {
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
 
     pub const HUP = 1;
     pub const INT = 2;
lib/std/c/linux.zig
@@ -32,7 +32,7 @@ pub const MADV = linux.MADV;
 pub const MAP = struct {
     pub usingnamespace linux.MAP;
     /// Only used by libc to communicate failure.
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
 };
 pub const MSF = linux.MSF;
 pub const MMAP2_UNIT = linux.MMAP2_UNIT;
lib/std/c/netbsd.zig
@@ -172,9 +172,9 @@ pub const RTLD = struct {
     pub const NODELETE = 0x01000;
     pub const NOLOAD = 0x02000;
 
-    pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
-    pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
-    pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
+    pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+    pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+    pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
 };
 
 pub const dl_phdr_info = extern struct {
@@ -597,7 +597,7 @@ pub const CLOCK = struct {
 };
 
 pub const MAP = struct {
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
     pub const SHARED = 0x0001;
     pub const PRIVATE = 0x0002;
     pub const REMAPDUP = 0x0004;
@@ -653,7 +653,7 @@ pub const W = struct {
     pub const TRAPPED = 0x00000040;
 
     pub fn EXITSTATUS(s: u32) u8 {
-        return @intCast(u8, (s >> 8) & 0xff);
+        return @as(u8, @intCast((s >> 8) & 0xff));
     }
     pub fn TERMSIG(s: u32) u32 {
         return s & 0x7f;
@@ -1106,9 +1106,9 @@ pub const winsize = extern struct {
 const NSIG = 32;
 
 pub const SIG = struct {
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
 
     pub const WORDS = 4;
     pub const MAXSIG = 128;
lib/std/c/openbsd.zig
@@ -449,7 +449,7 @@ pub const CLOCK = struct {
 };
 
 pub const MAP = struct {
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
     pub const SHARED = 0x0001;
     pub const PRIVATE = 0x0002;
     pub const FIXED = 0x0010;
@@ -488,7 +488,7 @@ pub const W = struct {
     pub const CONTINUED = 8;
 
     pub fn EXITSTATUS(s: u32) u8 {
-        return @intCast(u8, (s >> 8) & 0xff);
+        return @as(u8, @intCast((s >> 8) & 0xff));
     }
     pub fn TERMSIG(s: u32) u32 {
         return (s & 0x7f);
@@ -1000,11 +1000,11 @@ pub const winsize = extern struct {
 const NSIG = 33;
 
 pub const SIG = struct {
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
-    pub const CATCH = @ptrFromInt(?Sigaction.handler_fn, 2);
-    pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 3);
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+    pub const CATCH = @as(?Sigaction.handler_fn, @ptrFromInt(2));
+    pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(3));
 
     pub const HUP = 1;
     pub const INT = 2;
lib/std/c/solaris.zig
@@ -111,10 +111,10 @@ pub const RTLD = struct {
     pub const FIRST = 0x02000;
     pub const CONFGEN = 0x10000;
 
-    pub const NEXT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1)));
-    pub const DEFAULT = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -2)));
-    pub const SELF = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -3)));
-    pub const PROBE = @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -4)));
+    pub const NEXT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1)))));
+    pub const DEFAULT = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -2)))));
+    pub const SELF = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -3)))));
+    pub const PROBE = @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -4)))));
 };
 
 pub const Flock = extern struct {
@@ -524,7 +524,7 @@ pub const CLOCK = struct {
 };
 
 pub const MAP = struct {
-    pub const FAILED = @ptrFromInt(*anyopaque, maxInt(usize));
+    pub const FAILED = @as(*anyopaque, @ptrFromInt(maxInt(usize)));
     pub const SHARED = 0x0001;
     pub const PRIVATE = 0x0002;
     pub const TYPE = 0x000f;
@@ -583,7 +583,7 @@ pub const W = struct {
     pub const NOWAIT = 0o200;
 
     pub fn EXITSTATUS(s: u32) u8 {
-        return @intCast(u8, (s >> 8) & 0xff);
+        return @as(u8, @intCast((s >> 8) & 0xff));
     }
     pub fn TERMSIG(s: u32) u32 {
         return s & 0x7f;
@@ -886,10 +886,10 @@ pub const winsize = extern struct {
 const NSIG = 75;
 
 pub const SIG = struct {
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
-    pub const HOLD = @ptrFromInt(?Sigaction.handler_fn, 2);
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
+    pub const HOLD = @as(?Sigaction.handler_fn, @ptrFromInt(2));
 
     pub const WORDS = 4;
     pub const MAXSIG = 75;
@@ -1441,7 +1441,7 @@ pub const AT = struct {
     /// Magic value that specify the use of the current working directory
     /// to determine the target of relative file paths in the openat() and
     /// similar syscalls.
-    pub const FDCWD = @bitCast(fd_t, @as(u32, 0xffd19553));
+    pub const FDCWD = @as(fd_t, @bitCast(@as(u32, 0xffd19553)));
 
     /// Do not follow symbolic links
     pub const SYMLINK_NOFOLLOW = 0x1000;
@@ -1907,9 +1907,9 @@ const IoCtlCommand = enum(u32) {
 };
 
 fn ioImpl(cmd: IoCtlCommand, io_type: u8, nr: u8, comptime IOT: type) i32 {
-    const size = @intCast(u32, @truncate(u8, @sizeOf(IOT))) << 16;
-    const t = @intCast(u32, io_type) << 8;
-    return @bitCast(i32, @intFromEnum(cmd) | size | t | nr);
+    const size = @as(u32, @intCast(@as(u8, @truncate(@sizeOf(IOT))))) << 16;
+    const t = @as(u32, @intCast(io_type)) << 8;
+    return @as(i32, @bitCast(@intFromEnum(cmd) | size | t | nr));
 }
 
 pub fn IO(io_type: u8, nr: u8) i32 {
lib/std/compress/deflate/bits_utils.zig
@@ -3,7 +3,7 @@ const math = @import("std").math;
 // Reverse bit-by-bit a N-bit code.
 pub fn bitReverse(comptime T: type, value: T, N: usize) T {
     const r = @bitReverse(value);
-    return r >> @intCast(math.Log2Int(T), @typeInfo(T).Int.bits - N);
+    return r >> @as(math.Log2Int(T), @intCast(@typeInfo(T).Int.bits - N));
 }
 
 test "bitReverse" {
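
The `bitReverse` hunk is typical of shift sites: a shift amount for a `T`-typed operand must have type `math.Log2Int(T)`, so the former `@intCast(math.Log2Int(T), ...)` becomes the same `@intCast` nested inside `@as` with that type. A runnable sketch with hypothetical values, reversing the low 4 bits of a `u8`:

    const std = @import("std");

    test "bitReverse sketch" {
        // 0b0001 reversed within 4 bits is 0b1000.
        const r = @bitReverse(@as(u8, 0b0001)) >> @as(std.math.Log2Int(u8), @intCast(8 - 4));
        try std.testing.expectEqual(@as(u8, 0b1000), r);
    }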
lib/std/compress/deflate/compressor.zig
@@ -160,7 +160,7 @@ fn matchLen(a: []u8, b: []u8, max: u32) u32 {
     var bounded_b = b[0..max];
     for (bounded_a, 0..) |av, i| {
         if (bounded_b[i] != av) {
-            return @intCast(u32, i);
+            return @as(u32, @intCast(i));
         }
     }
     return max;
@@ -313,14 +313,14 @@ pub fn Compressor(comptime WriterType: anytype) type {
                     // the entire table onto the stack (https://golang.org/issue/18625).
                     for (self.hash_prev, 0..) |v, i| {
                         if (v > delta) {
-                            self.hash_prev[i] = @intCast(u32, v - delta);
+                            self.hash_prev[i] = @as(u32, @intCast(v - delta));
                         } else {
                             self.hash_prev[i] = 0;
                         }
                     }
                     for (self.hash_head, 0..) |v, i| {
                         if (v > delta) {
-                            self.hash_head[i] = @intCast(u32, v - delta);
+                            self.hash_head[i] = @as(u32, @intCast(v - delta));
                         } else {
                             self.hash_head[i] = 0;
                         }
@@ -329,7 +329,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
             }
             const n = std.compress.deflate.copy(self.window[self.window_end..], b);
             self.window_end += n;
-            return @intCast(u32, n);
+            return @as(u32, @intCast(n));
         }
 
         fn writeBlock(self: *Self, tokens: []token.Token, index: usize) !void {
@@ -398,13 +398,13 @@ pub fn Compressor(comptime WriterType: anytype) type {
                     // Our chain should point to the previous value.
                     self.hash_prev[di & window_mask] = hh.*;
                     // Set the head of the hash chain to us.
-                    hh.* = @intCast(u32, di + self.hash_offset);
+                    hh.* = @as(u32, @intCast(di + self.hash_offset));
                 }
                 self.hash = new_h;
             }
             // Update window information.
             self.window_end = n;
-            self.index = @intCast(u32, n);
+            self.index = @as(u32, @intCast(n));
         }
 
         const Match = struct {
@@ -471,11 +471,11 @@ pub fn Compressor(comptime WriterType: anytype) type {
                     break;
                 }
 
-                if (@intCast(u32, self.hash_prev[i & window_mask]) < self.hash_offset) {
+                if (@as(u32, @intCast(self.hash_prev[i & window_mask])) < self.hash_offset) {
                     break;
                 }
 
-                i = @intCast(u32, self.hash_prev[i & window_mask]) - self.hash_offset;
+                i = @as(u32, @intCast(self.hash_prev[i & window_mask])) - self.hash_offset;
                 if (i < min_index) {
                     break;
                 }
@@ -576,7 +576,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
                         // Flush current output block if any.
                         if (self.byte_available) {
                             // There is still one pending token that needs to be flushed
-                            self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[self.index - 1]));
+                            self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[self.index - 1])));
                             self.tokens_count += 1;
                             self.byte_available = false;
                         }
@@ -591,9 +591,9 @@ pub fn Compressor(comptime WriterType: anytype) type {
                     // Update the hash
                     self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
                     var hh = &self.hash_head[self.hash & hash_mask];
-                    self.chain_head = @intCast(u32, hh.*);
-                    self.hash_prev[self.index & window_mask] = @intCast(u32, self.chain_head);
-                    hh.* = @intCast(u32, self.index + self.hash_offset);
+                    self.chain_head = @as(u32, @intCast(hh.*));
+                    self.hash_prev[self.index & window_mask] = @as(u32, @intCast(self.chain_head));
+                    hh.* = @as(u32, @intCast(self.index + self.hash_offset));
                 }
                 var prev_length = self.length;
                 var prev_offset = self.offset;
@@ -614,7 +614,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
                             self.index,
                             self.chain_head -| self.hash_offset,
                             min_match_length - 1,
-                            @intCast(u32, lookahead),
+                            @as(u32, @intCast(lookahead)),
                         );
                         if (fmatch.ok) {
                             self.length = fmatch.length;
@@ -631,12 +631,12 @@ pub fn Compressor(comptime WriterType: anytype) type {
                     // There was a match at the previous step, and the current match is
                     // not better. Output the previous match.
                     if (self.compression_level.fast_skip_hashshing != skip_never) {
-                        self.tokens[self.tokens_count] = token.matchToken(@intCast(u32, self.length - base_match_length), @intCast(u32, self.offset - base_match_offset));
+                        self.tokens[self.tokens_count] = token.matchToken(@as(u32, @intCast(self.length - base_match_length)), @as(u32, @intCast(self.offset - base_match_offset)));
                         self.tokens_count += 1;
                     } else {
                         self.tokens[self.tokens_count] = token.matchToken(
-                            @intCast(u32, prev_length - base_match_length),
-                            @intCast(u32, prev_offset -| base_match_offset),
+                            @as(u32, @intCast(prev_length - base_match_length)),
+                            @as(u32, @intCast(prev_offset -| base_match_offset)),
                         );
                         self.tokens_count += 1;
                     }
@@ -661,7 +661,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
                                 var hh = &self.hash_head[self.hash & hash_mask];
                                 self.hash_prev[index & window_mask] = hh.*;
                                 // Set the head of the hash chain to us.
-                                hh.* = @intCast(u32, index + self.hash_offset);
+                                hh.* = @as(u32, @intCast(index + self.hash_offset));
                             }
                         }
                         self.index = index;
@@ -689,7 +689,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
                         if (self.compression_level.fast_skip_hashshing != skip_never) {
                             i = self.index;
                         }
-                        self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[i]));
+                        self.tokens[self.tokens_count] = token.literalToken(@as(u32, @intCast(self.window[i])));
                         self.tokens_count += 1;
                         if (self.tokens_count == max_flate_block_tokens) {
                             try self.writeBlock(self.tokens[0..self.tokens_count], i + 1);
@@ -707,7 +707,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
         fn fillStore(self: *Self, b: []const u8) u32 {
             const n = std.compress.deflate.copy(self.window[self.window_end..], b);
             self.window_end += n;
-            return @intCast(u32, n);
+            return @as(u32, @intCast(n));
         }
 
         fn store(self: *Self) !void {
lib/std/compress/deflate/compressor_test.zig
@@ -172,7 +172,7 @@ test "deflate/inflate" {
     defer testing.allocator.free(large_data_chunk);
     // fill with random data
     for (large_data_chunk, 0..) |_, i| {
-        large_data_chunk[i] = @truncate(u8, i) *% @truncate(u8, i);
+        large_data_chunk[i] = @as(u8, @truncate(i)) *% @as(u8, @truncate(i));
     }
     try testToFromWithLimit(large_data_chunk, limits);
 }
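
A fair question about the compressor hunks: why wrap in `@as` at all instead of simply dropping the type argument? The new builtins take their destination type from the result location, and the automated rewrite cannot know whether one exists at every call site, so it always supplies one explicitly. Where a result type is already present the wrapper is redundant but harmless, as this sketch (hypothetical names) shows:

    const std = @import("std");

    fn widen(i: usize) u32 {
        return @intCast(i); // fine without @as: the return type is the result type
    }

    test "widen" {
        const n = @as(u32, @intCast(@as(usize, 7))); // @as required: a bare const provides no result type
        try std.testing.expectEqual(widen(7), n);
    }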
lib/std/compress/deflate/decompressor.zig
@@ -130,30 +130,30 @@ const HuffmanDecoder = struct {
         // Exception: To be compatible with zlib, we also need to
         // accept degenerate single-code codings. See also
         // TestDegenerateHuffmanCoding.
-        if (code != @as(u32, 1) << @intCast(u5, max) and !(code == 1 and max == 1)) {
+        if (code != @as(u32, 1) << @as(u5, @intCast(max)) and !(code == 1 and max == 1)) {
             return false;
         }
 
         self.min = min;
         if (max > huffman_chunk_bits) {
-            var num_links = @as(u32, 1) << @intCast(u5, max - huffman_chunk_bits);
-            self.link_mask = @intCast(u32, num_links - 1);
+            var num_links = @as(u32, 1) << @as(u5, @intCast(max - huffman_chunk_bits));
+            self.link_mask = @as(u32, @intCast(num_links - 1));
 
             // create link tables
             var link = next_code[huffman_chunk_bits + 1] >> 1;
             self.links = try self.allocator.alloc([]u16, huffman_num_chunks - link);
             self.sub_chunks = ArrayList(u32).init(self.allocator);
             self.initialized = true;
-            var j = @intCast(u32, link);
+            var j = @as(u32, @intCast(link));
             while (j < huffman_num_chunks) : (j += 1) {
-                var reverse = @intCast(u32, bu.bitReverse(u16, @intCast(u16, j), 16));
-                reverse >>= @intCast(u32, 16 - huffman_chunk_bits);
-                var off = j - @intCast(u32, link);
+                var reverse = @as(u32, @intCast(bu.bitReverse(u16, @as(u16, @intCast(j)), 16)));
+                reverse >>= @as(u32, @intCast(16 - huffman_chunk_bits));
+                var off = j - @as(u32, @intCast(link));
                 if (sanity) {
                     // check we are not overwriting an existing chunk
                     assert(self.chunks[reverse] == 0);
                 }
-                self.chunks[reverse] = @intCast(u16, off << huffman_value_shift | (huffman_chunk_bits + 1));
+                self.chunks[reverse] = @as(u16, @intCast(off << huffman_value_shift | (huffman_chunk_bits + 1)));
                 self.links[off] = try self.allocator.alloc(u16, num_links);
                 if (sanity) {
                     // initialize to a known invalid chunk code (0) to see if we overwrite
@@ -170,12 +170,12 @@ const HuffmanDecoder = struct {
             }
             var ncode = next_code[n];
             next_code[n] += 1;
-            var chunk = @intCast(u16, (li << huffman_value_shift) | n);
-            var reverse = @intCast(u16, bu.bitReverse(u16, @intCast(u16, ncode), 16));
-            reverse >>= @intCast(u4, 16 - n);
+            var chunk = @as(u16, @intCast((li << huffman_value_shift) | n));
+            var reverse = @as(u16, @intCast(bu.bitReverse(u16, @as(u16, @intCast(ncode)), 16)));
+            reverse >>= @as(u4, @intCast(16 - n));
             if (n <= huffman_chunk_bits) {
                 var off = reverse;
-                while (off < self.chunks.len) : (off += @as(u16, 1) << @intCast(u4, n)) {
+                while (off < self.chunks.len) : (off += @as(u16, 1) << @as(u4, @intCast(n))) {
                     // We should never need to overwrite
                     // an existing chunk. Also, 0 is
                     // never a valid chunk, because the
@@ -198,12 +198,12 @@ const HuffmanDecoder = struct {
                 var link_tab = self.links[value];
                 reverse >>= huffman_chunk_bits;
                 var off = reverse;
-                while (off < link_tab.len) : (off += @as(u16, 1) << @intCast(u4, n - huffman_chunk_bits)) {
+                while (off < link_tab.len) : (off += @as(u16, 1) << @as(u4, @intCast(n - huffman_chunk_bits))) {
                     if (sanity) {
                         // check we are not overwriting an existing chunk
                         assert(link_tab[off] == 0);
                     }
-                    link_tab[off] = @intCast(u16, chunk);
+                    link_tab[off] = @as(u16, @intCast(chunk));
                 }
             }
         }
@@ -494,21 +494,21 @@ pub fn Decompressor(comptime ReaderType: type) type {
             while (self.nb < 5 + 5 + 4) {
                 try self.moreBits();
             }
-            var nlit = @intCast(u32, self.b & 0x1F) + 257;
+            var nlit = @as(u32, @intCast(self.b & 0x1F)) + 257;
             if (nlit > max_num_lit) {
                 corrupt_input_error_offset = self.roffset;
                 self.err = InflateError.CorruptInput;
                 return InflateError.CorruptInput;
             }
             self.b >>= 5;
-            var ndist = @intCast(u32, self.b & 0x1F) + 1;
+            var ndist = @as(u32, @intCast(self.b & 0x1F)) + 1;
             if (ndist > max_num_dist) {
                 corrupt_input_error_offset = self.roffset;
                 self.err = InflateError.CorruptInput;
                 return InflateError.CorruptInput;
             }
             self.b >>= 5;
-            var nclen = @intCast(u32, self.b & 0xF) + 4;
+            var nclen = @as(u32, @intCast(self.b & 0xF)) + 4;
             // num_codes is 19, so nclen is always valid.
             self.b >>= 4;
             self.nb -= 5 + 5 + 4;
@@ -519,7 +519,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
                 while (self.nb < 3) {
                     try self.moreBits();
                 }
-                self.codebits[code_order[i]] = @intCast(u32, self.b & 0x7);
+                self.codebits[code_order[i]] = @as(u32, @intCast(self.b & 0x7));
                 self.b >>= 3;
                 self.nb -= 3;
             }
@@ -575,8 +575,8 @@ pub fn Decompressor(comptime ReaderType: type) type {
                 while (self.nb < nb) {
                     try self.moreBits();
                 }
-                rep += @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1);
-                self.b >>= @intCast(u5, nb);
+                rep += @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1));
+                self.b >>= @as(u5, @intCast(nb));
                 self.nb -= nb;
                 if (i + rep > n) {
                     corrupt_input_error_offset = self.roffset;
@@ -623,7 +623,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
                         var length: u32 = 0;
                         switch (v) {
                             0...255 => {
-                                self.dict.writeByte(@intCast(u8, v));
+                                self.dict.writeByte(@as(u8, @intCast(v)));
                                 if (self.dict.availWrite() == 0) {
                                     self.to_read = self.dict.readFlush();
                                     self.step = huffmanBlock;
@@ -676,8 +676,8 @@ pub fn Decompressor(comptime ReaderType: type) type {
                             while (self.nb < n) {
                                 try self.moreBits();
                             }
-                            length += @intCast(u32, self.b) & ((@as(u32, 1) << @intCast(u5, n)) - 1);
-                            self.b >>= @intCast(u5, n);
+                            length += @as(u32, @intCast(self.b)) & ((@as(u32, 1) << @as(u5, @intCast(n))) - 1);
+                            self.b >>= @as(u5, @intCast(n));
                             self.nb -= n;
                         }
 
@@ -686,9 +686,9 @@ pub fn Decompressor(comptime ReaderType: type) type {
                             while (self.nb < 5) {
                                 try self.moreBits();
                             }
-                            dist = @intCast(
+                            dist = @as(
                                 u32,
-                                bu.bitReverse(u8, @intCast(u8, (self.b & 0x1F) << 3), 8),
+                                @intCast(bu.bitReverse(u8, @as(u8, @intCast((self.b & 0x1F) << 3)), 8)),
                             );
                             self.b >>= 5;
                             self.nb -= 5;
@@ -699,16 +699,16 @@ pub fn Decompressor(comptime ReaderType: type) type {
                         switch (dist) {
                             0...3 => dist += 1,
                             4...max_num_dist - 1 => { // 4...29
-                                var nb = @intCast(u32, dist - 2) >> 1;
+                                var nb = @as(u32, @intCast(dist - 2)) >> 1;
                                 // have 1 bit in bottom of dist, need nb more.
-                                var extra = (dist & 1) << @intCast(u5, nb);
+                                var extra = (dist & 1) << @as(u5, @intCast(nb));
                                 while (self.nb < nb) {
                                     try self.moreBits();
                                 }
-                                extra |= @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1);
-                                self.b >>= @intCast(u5, nb);
+                                extra |= @as(u32, @intCast(self.b & (@as(u32, 1) << @as(u5, @intCast(nb))) - 1));
+                                self.b >>= @as(u5, @intCast(nb));
                                 self.nb -= nb;
-                                dist = (@as(u32, 1) << @intCast(u5, nb + 1)) + 1 + extra;
+                                dist = (@as(u32, 1) << @as(u5, @intCast(nb + 1))) + 1 + extra;
                             },
                             else => {
                                 corrupt_input_error_offset = self.roffset;
@@ -762,10 +762,10 @@ pub fn Decompressor(comptime ReaderType: type) type {
                 self.err = InflateError.UnexpectedEndOfStream;
                 return InflateError.UnexpectedEndOfStream;
             };
-            self.roffset += @intCast(u64, nr);
-            var n = @intCast(u32, self.buf[0]) | @intCast(u32, self.buf[1]) << 8;
-            var nn = @intCast(u32, self.buf[2]) | @intCast(u32, self.buf[3]) << 8;
-            if (@intCast(u16, nn) != @truncate(u16, ~n)) {
+            self.roffset += @as(u64, @intCast(nr));
+            var n = @as(u32, @intCast(self.buf[0])) | @as(u32, @intCast(self.buf[1])) << 8;
+            var nn = @as(u32, @intCast(self.buf[2])) | @as(u32, @intCast(self.buf[3])) << 8;
+            if (@as(u16, @intCast(nn)) != @as(u16, @truncate(~n))) {
                 corrupt_input_error_offset = self.roffset;
                 self.err = InflateError.CorruptInput;
                 return InflateError.CorruptInput;
@@ -793,9 +793,9 @@ pub fn Decompressor(comptime ReaderType: type) type {
             if (cnt < buf.len) {
                 self.err = InflateError.UnexpectedEndOfStream;
             }
-            self.roffset += @intCast(u64, cnt);
-            self.copy_len -= @intCast(u32, cnt);
-            self.dict.writeMark(@intCast(u32, cnt));
+            self.roffset += @as(u64, @intCast(cnt));
+            self.copy_len -= @as(u32, @intCast(cnt));
+            self.dict.writeMark(@as(u32, @intCast(cnt)));
             if (self.err != null) {
                 return InflateError.UnexpectedEndOfStream;
             }
@@ -826,7 +826,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
                 return InflateError.BadReaderState;
             };
             self.roffset += 1;
-            self.b |= @as(u32, c) << @intCast(u5, self.nb);
+            self.b |= @as(u32, c) << @as(u5, @intCast(self.nb));
             self.nb += 8;
             return;
         }
@@ -854,14 +854,14 @@ pub fn Decompressor(comptime ReaderType: type) type {
                         return InflateError.BadReaderState;
                     };
                     self.roffset += 1;
-                    b |= @intCast(u32, c) << @intCast(u5, nb & 31);
+                    b |= @as(u32, @intCast(c)) << @as(u5, @intCast(nb & 31));
                     nb += 8;
                 }
                 var chunk = h.chunks[b & (huffman_num_chunks - 1)];
-                n = @intCast(u32, chunk & huffman_count_mask);
+                n = @as(u32, @intCast(chunk & huffman_count_mask));
                 if (n > huffman_chunk_bits) {
                     chunk = h.links[chunk >> huffman_value_shift][(b >> huffman_chunk_bits) & h.link_mask];
-                    n = @intCast(u32, chunk & huffman_count_mask);
+                    n = @as(u32, @intCast(chunk & huffman_count_mask));
                 }
                 if (n <= nb) {
                     if (n == 0) {
@@ -871,9 +871,9 @@ pub fn Decompressor(comptime ReaderType: type) type {
                         self.err = InflateError.CorruptInput;
                         return InflateError.CorruptInput;
                     }
-                    self.b = b >> @intCast(u5, n & 31);
+                    self.b = b >> @as(u5, @intCast(n & 31));
                     self.nb = nb - n;
-                    return @intCast(u32, chunk >> huffman_value_shift);
+                    return @as(u32, @intCast(chunk >> huffman_value_shift));
                 }
             }
         }
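
In the bit-accumulation hunks above (see the `moreBits` calls and `b |= ... << @as(u5, @intCast(nb & 31))`), the operand is masked before the cast: `nb & 31` is at most 31, so the checked `@intCast` to `u5` can never fail at runtime. A sketch of that idiom (hypothetical names):

    const std = @import("std");

    fn accumulate(b: u32, c: u8, nb: u32) u32 {
        // nb & 31 fits in u5 by construction, so the safety check cannot trip.
        return b | (@as(u32, c) << @as(u5, @intCast(nb & 31)));
    }

    test "accumulate" {
        try std.testing.expectEqual(@as(u32, 0xAB00), accumulate(0, 0xAB, 8));
    }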
lib/std/compress/deflate/deflate_fast.zig
@@ -30,23 +30,23 @@ const table_size = 1 << table_bits; // Size of the table.
 const buffer_reset = math.maxInt(i32) - max_store_block_size * 2;
 
 fn load32(b: []u8, i: i32) u32 {
-    var s = b[@intCast(usize, i) .. @intCast(usize, i) + 4];
-    return @intCast(u32, s[0]) |
-        @intCast(u32, s[1]) << 8 |
-        @intCast(u32, s[2]) << 16 |
-        @intCast(u32, s[3]) << 24;
+    var s = b[@as(usize, @intCast(i)) .. @as(usize, @intCast(i)) + 4];
+    return @as(u32, @intCast(s[0])) |
+        @as(u32, @intCast(s[1])) << 8 |
+        @as(u32, @intCast(s[2])) << 16 |
+        @as(u32, @intCast(s[3])) << 24;
 }
 
 fn load64(b: []u8, i: i32) u64 {
-    var s = b[@intCast(usize, i)..@intCast(usize, i + 8)];
-    return @intCast(u64, s[0]) |
-        @intCast(u64, s[1]) << 8 |
-        @intCast(u64, s[2]) << 16 |
-        @intCast(u64, s[3]) << 24 |
-        @intCast(u64, s[4]) << 32 |
-        @intCast(u64, s[5]) << 40 |
-        @intCast(u64, s[6]) << 48 |
-        @intCast(u64, s[7]) << 56;
+    var s = b[@as(usize, @intCast(i))..@as(usize, @intCast(i + 8))];
+    return @as(u64, @intCast(s[0])) |
+        @as(u64, @intCast(s[1])) << 8 |
+        @as(u64, @intCast(s[2])) << 16 |
+        @as(u64, @intCast(s[3])) << 24 |
+        @as(u64, @intCast(s[4])) << 32 |
+        @as(u64, @intCast(s[5])) << 40 |
+        @as(u64, @intCast(s[6])) << 48 |
+        @as(u64, @intCast(s[7])) << 56;
 }
 
 fn hash(u: u32) u32 {
@@ -117,7 +117,7 @@ pub const DeflateFast = struct {
         // s_limit is when to stop looking for offset/length copies. The input_margin
         // lets us use a fast path for emitLiteral in the main loop, while we are
         // looking for copies.
-        var s_limit = @intCast(i32, src.len - input_margin);
+        var s_limit = @as(i32, @intCast(src.len - input_margin));
 
         // next_emit is where in src the next emitLiteral should start from.
         var next_emit: i32 = 0;
@@ -170,7 +170,7 @@ pub const DeflateFast = struct {
             // A 4-byte match has been found. We'll later see if more than 4 bytes
             // match. But, prior to the match, src[next_emit..s] are unmatched. Emit
             // them as literal bytes.
-            emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..@intCast(usize, s)]);
+            emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..@as(usize, @intCast(s))]);
 
             // Call emitCopy, and then see if another emitCopy could be our next
             // move. Repeat until we find no match for the input immediately after
@@ -192,8 +192,8 @@ pub const DeflateFast = struct {
 
                 // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
                 dst[tokens_count.*] = token.matchToken(
-                    @intCast(u32, l + 4 - base_match_length),
-                    @intCast(u32, s - t - base_match_offset),
+                    @as(u32, @intCast(l + 4 - base_match_length)),
+                    @as(u32, @intCast(s - t - base_match_offset)),
                 );
                 tokens_count.* += 1;
                 s += l;
@@ -209,22 +209,22 @@ pub const DeflateFast = struct {
                 // are faster as one load64 call (with some shifts) instead of
                 // three load32 calls.
                 var x = load64(src, s - 1);
-                var prev_hash = hash(@truncate(u32, x));
+                var prev_hash = hash(@as(u32, @truncate(x)));
                 self.table[prev_hash & table_mask] = TableEntry{
                     .offset = self.cur + s - 1,
-                    .val = @truncate(u32, x),
+                    .val = @as(u32, @truncate(x)),
                 };
                 x >>= 8;
-                var curr_hash = hash(@truncate(u32, x));
+                var curr_hash = hash(@as(u32, @truncate(x)));
                 candidate = self.table[curr_hash & table_mask];
                 self.table[curr_hash & table_mask] = TableEntry{
                     .offset = self.cur + s,
-                    .val = @truncate(u32, x),
+                    .val = @as(u32, @truncate(x)),
                 };
 
                 var offset = s - (candidate.offset - self.cur);
-                if (offset > max_match_offset or @truncate(u32, x) != candidate.val) {
-                    cv = @truncate(u32, x >> 8);
+                if (offset > max_match_offset or @as(u32, @truncate(x)) != candidate.val) {
+                    cv = @as(u32, @truncate(x >> 8));
                     next_hash = hash(cv);
                     s += 1;
                     break;
@@ -232,18 +232,18 @@ pub const DeflateFast = struct {
             }
         }
 
-        if (@intCast(u32, next_emit) < src.len) {
-            emitLiteral(dst, tokens_count, src[@intCast(usize, next_emit)..]);
+        if (@as(u32, @intCast(next_emit)) < src.len) {
+            emitLiteral(dst, tokens_count, src[@as(usize, @intCast(next_emit))..]);
         }
-        self.cur += @intCast(i32, src.len);
-        self.prev_len = @intCast(u32, src.len);
+        self.cur += @as(i32, @intCast(src.len));
+        self.prev_len = @as(u32, @intCast(src.len));
         @memcpy(self.prev[0..self.prev_len], src);
         return;
     }
 
     fn emitLiteral(dst: []token.Token, tokens_count: *u16, lit: []u8) void {
         for (lit) |v| {
-            dst[tokens_count.*] = token.literalToken(@intCast(u32, v));
+            dst[tokens_count.*] = token.literalToken(@as(u32, @intCast(v)));
             tokens_count.* += 1;
         }
         return;
@@ -253,60 +253,60 @@ pub const DeflateFast = struct {
     // t can be negative to indicate the match is starting in self.prev.
     // We assume that src[s-4 .. s] and src[t-4 .. t] already match.
     fn matchLen(self: *Self, s: i32, t: i32, src: []u8) i32 {
-        var s1 = @intCast(u32, s) + max_match_length - 4;
+        var s1 = @as(u32, @intCast(s)) + max_match_length - 4;
         if (s1 > src.len) {
-            s1 = @intCast(u32, src.len);
+            s1 = @as(u32, @intCast(src.len));
         }
 
         // If we are inside the current block
         if (t >= 0) {
-            var b = src[@intCast(usize, t)..];
-            var a = src[@intCast(usize, s)..@intCast(usize, s1)];
+            var b = src[@as(usize, @intCast(t))..];
+            var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))];
             b = b[0..a.len];
             // Extend the match to be as long as possible.
             for (a, 0..) |_, i| {
                 if (a[i] != b[i]) {
-                    return @intCast(i32, i);
+                    return @as(i32, @intCast(i));
                 }
             }
-            return @intCast(i32, a.len);
+            return @as(i32, @intCast(a.len));
         }
 
         // We found a match in the previous block.
-        var tp = @intCast(i32, self.prev_len) + t;
+        var tp = @as(i32, @intCast(self.prev_len)) + t;
         if (tp < 0) {
             return 0;
         }
 
         // Extend the match to be as long as possible.
-        var a = src[@intCast(usize, s)..@intCast(usize, s1)];
-        var b = self.prev[@intCast(usize, tp)..@intCast(usize, self.prev_len)];
+        var a = src[@as(usize, @intCast(s))..@as(usize, @intCast(s1))];
+        var b = self.prev[@as(usize, @intCast(tp))..@as(usize, @intCast(self.prev_len))];
         if (b.len > a.len) {
             b = b[0..a.len];
         }
         a = a[0..b.len];
         for (b, 0..) |_, i| {
             if (a[i] != b[i]) {
-                return @intCast(i32, i);
+                return @as(i32, @intCast(i));
             }
         }
 
         // If we reached our limit, we matched everything we are
         // allowed to in the previous block and we return.
-        var n = @intCast(i32, b.len);
-        if (@intCast(u32, s + n) == s1) {
+        var n = @as(i32, @intCast(b.len));
+        if (@as(u32, @intCast(s + n)) == s1) {
             return n;
         }
 
         // Continue looking for more matches in the current block.
-        a = src[@intCast(usize, s + n)..@intCast(usize, s1)];
+        a = src[@as(usize, @intCast(s + n))..@as(usize, @intCast(s1))];
         b = src[0..a.len];
         for (a, 0..) |_, i| {
             if (a[i] != b[i]) {
-                return @intCast(i32, i) + n;
+                return @as(i32, @intCast(i)) + n;
             }
         }
-        return @intCast(i32, a.len) + n;
+        return @as(i32, @intCast(a.len)) + n;
     }
 
     // Reset resets the encoding history.
@@ -574,7 +574,7 @@ test "best speed match 2/2" {
 
         var e = DeflateFast{
             .prev = previous,
-            .prev_len = @intCast(u32, previous.len),
+            .prev_len = @as(u32, @intCast(previous.len)),
             .table = undefined,
             .allocator = undefined,
             .cur = 0,
@@ -617,7 +617,7 @@ test "best speed shift offsets" {
     try expect(want_first_tokens > want_second_tokens);
 
     // Forward the current indicator to before wraparound.
-    enc.cur = buffer_reset - @intCast(i32, test_data.len);
+    enc.cur = buffer_reset - @as(i32, @intCast(test_data.len));
 
     // Part 1 before wrap, should match clean state.
     tokens_count = 0;
lib/std/compress/deflate/deflate_fast_test.zig
@@ -19,7 +19,7 @@ test "best speed" {
     defer testing.allocator.free(abcabc);
 
     for (abcabc, 0..) |_, i| {
-        abcabc[i] = @intCast(u8, i % 128);
+        abcabc[i] = @as(u8, @intCast(i % 128));
     }
 
     var tc_01 = [_]u32{ 65536, 0 };
@@ -119,16 +119,16 @@ test "best speed max match offset" {
                 //	zeros1 is between 0 and 30 zeros.
                 // The difference between the two abc's will be offset, which
                 // is max_match_offset plus or minus a small adjustment.
-                var src_len: usize = @intCast(usize, offset + @as(i32, abc.len) + @intCast(i32, extra));
+                var src_len: usize = @as(usize, @intCast(offset + @as(i32, abc.len) + @as(i32, @intCast(extra))));
                 var src = try testing.allocator.alloc(u8, src_len);
                 defer testing.allocator.free(src);
 
                 @memcpy(src[0..abc.len], abc);
                 if (!do_match_before) {
-                    const src_offset: usize = @intCast(usize, offset - @as(i32, xyz.len));
+                    const src_offset: usize = @as(usize, @intCast(offset - @as(i32, xyz.len)));
                     @memcpy(src[src_offset..][0..xyz.len], xyz);
                 }
-                const src_offset: usize = @intCast(usize, offset);
+                const src_offset: usize = @as(usize, @intCast(offset));
                 @memcpy(src[src_offset..][0..abc.len], abc);
 
                 var compressed = ArrayList(u8).init(testing.allocator);
lib/std/compress/deflate/dict_decoder.zig
@@ -49,7 +49,7 @@ pub const DictDecoder = struct {
         if (dict != null) {
             const src = dict.?[dict.?.len -| self.hist.len..];
             @memcpy(self.hist[0..src.len], src);
-            self.wr_pos = @intCast(u32, dict.?.len);
+            self.wr_pos = @as(u32, @intCast(dict.?.len));
         }
 
         if (self.wr_pos == self.hist.len) {
@@ -66,7 +66,7 @@ pub const DictDecoder = struct {
     // Reports the total amount of historical data in the dictionary.
     pub fn histSize(self: *Self) u32 {
         if (self.full) {
-            return @intCast(u32, self.hist.len);
+            return @as(u32, @intCast(self.hist.len));
         }
         return self.wr_pos;
     }
@@ -78,7 +78,7 @@ pub const DictDecoder = struct {
 
     // Reports the available amount of output buffer space.
     pub fn availWrite(self: *Self) u32 {
-        return @intCast(u32, self.hist.len - self.wr_pos);
+        return @as(u32, @intCast(self.hist.len - self.wr_pos));
     }
 
     // Returns a slice of the available buffer to write data to.
@@ -110,10 +110,10 @@ pub const DictDecoder = struct {
     fn copy(dst: []u8, src: []const u8) u32 {
         if (src.len > dst.len) {
             mem.copyForwards(u8, dst, src[0..dst.len]);
-            return @intCast(u32, dst.len);
+            return @as(u32, @intCast(dst.len));
         }
         mem.copyForwards(u8, dst[0..src.len], src);
-        return @intCast(u32, src.len);
+        return @as(u32, @intCast(src.len));
     }
 
     // Copies a string at a given (dist, length) to the output.
@@ -125,10 +125,10 @@ pub const DictDecoder = struct {
         assert(0 < dist and dist <= self.histSize());
         var dst_base = self.wr_pos;
         var dst_pos = dst_base;
-        var src_pos: i32 = @intCast(i32, dst_pos) - @intCast(i32, dist);
+        var src_pos: i32 = @as(i32, @intCast(dst_pos)) - @as(i32, @intCast(dist));
         var end_pos = dst_pos + length;
         if (end_pos > self.hist.len) {
-            end_pos = @intCast(u32, self.hist.len);
+            end_pos = @as(u32, @intCast(self.hist.len));
         }
 
         // Copy non-overlapping section after destination position.
@@ -139,8 +139,8 @@ pub const DictDecoder = struct {
         // Thus, a backwards copy is performed here; that is, the exact bytes in
         // the source prior to the copy are placed in the destination.
         if (src_pos < 0) {
-            src_pos += @intCast(i32, self.hist.len);
-            dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..]);
+            src_pos += @as(i32, @intCast(self.hist.len));
+            dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..]);
             src_pos = 0;
         }
 
@@ -160,7 +160,7 @@ pub const DictDecoder = struct {
         //    dst_pos = end_pos;
         //
         while (dst_pos < end_pos) {
-            dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@intCast(usize, src_pos)..dst_pos]);
+            dst_pos += copy(self.hist[dst_pos..end_pos], self.hist[@as(usize, @intCast(src_pos))..dst_pos]);
         }
 
         self.wr_pos = dst_pos;
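
A behavioral note on the history-copy hunks just above: `@as(usize, @intCast(src_pos))` is still a checked narrowing, so a negative `src_pos` would panic in safe build modes; that is why the code adds `self.hist.len` to normalize `src_pos` before it is ever used as an index. A sketch of the invariant (hypothetical name):

    const std = @import("std");

    fn toIndex(src_pos: i32) usize {
        // Callers must normalize first: @intCast panics on negative input in safe modes.
        std.debug.assert(src_pos >= 0);
        return @as(usize, @intCast(src_pos));
    }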
lib/std/compress/deflate/huffman_bit_writer.zig
@@ -107,7 +107,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
             }
             var n = self.nbytes;
             while (self.nbits != 0) {
-                self.bytes[n] = @truncate(u8, self.bits);
+                self.bytes[n] = @as(u8, @truncate(self.bits));
                 self.bits >>= 8;
                 if (self.nbits > 8) { // Avoid underflow
                     self.nbits -= 8;
@@ -132,7 +132,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
             if (self.err) {
                 return;
             }
-            self.bits |= @intCast(u64, b) << @intCast(u6, self.nbits);
+            self.bits |= @as(u64, @intCast(b)) << @as(u6, @intCast(self.nbits));
             self.nbits += nb;
             if (self.nbits >= 48) {
                 var bits = self.bits;
@@ -140,12 +140,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 self.nbits -= 48;
                 var n = self.nbytes;
                 var bytes = self.bytes[n..][0..6];
-                bytes[0] = @truncate(u8, bits);
-                bytes[1] = @truncate(u8, bits >> 8);
-                bytes[2] = @truncate(u8, bits >> 16);
-                bytes[3] = @truncate(u8, bits >> 24);
-                bytes[4] = @truncate(u8, bits >> 32);
-                bytes[5] = @truncate(u8, bits >> 40);
+                bytes[0] = @as(u8, @truncate(bits));
+                bytes[1] = @as(u8, @truncate(bits >> 8));
+                bytes[2] = @as(u8, @truncate(bits >> 16));
+                bytes[3] = @as(u8, @truncate(bits >> 24));
+                bytes[4] = @as(u8, @truncate(bits >> 32));
+                bytes[5] = @as(u8, @truncate(bits >> 40));
                 n += 6;
                 if (n >= buffer_flush_size) {
                     try self.write(self.bytes[0..n]);
@@ -165,7 +165,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 return;
             }
             while (self.nbits != 0) {
-                self.bytes[n] = @truncate(u8, self.bits);
+                self.bytes[n] = @as(u8, @truncate(self.bits));
                 self.bits >>= 8;
                 self.nbits -= 8;
                 n += 1;
@@ -209,12 +209,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
             // Copy the concatenated code sizes to codegen. Put a marker at the end.
             var cgnl = codegen[0..num_literals];
             for (cgnl, 0..) |_, i| {
-                cgnl[i] = @intCast(u8, lit_enc.codes[i].len);
+                cgnl[i] = @as(u8, @intCast(lit_enc.codes[i].len));
             }
 
             cgnl = codegen[num_literals .. num_literals + num_offsets];
             for (cgnl, 0..) |_, i| {
-                cgnl[i] = @intCast(u8, off_enc.codes[i].len);
+                cgnl[i] = @as(u8, @intCast(off_enc.codes[i].len));
             }
             codegen[num_literals + num_offsets] = bad_code;
 
@@ -243,7 +243,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                         }
                         codegen[out_index] = 16;
                         out_index += 1;
-                        codegen[out_index] = @intCast(u8, n - 3);
+                        codegen[out_index] = @as(u8, @intCast(n - 3));
                         out_index += 1;
                         self.codegen_freq[16] += 1;
                         count -= n;
@@ -256,7 +256,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                         }
                         codegen[out_index] = 18;
                         out_index += 1;
-                        codegen[out_index] = @intCast(u8, n - 11);
+                        codegen[out_index] = @as(u8, @intCast(n - 11));
                         out_index += 1;
                         self.codegen_freq[18] += 1;
                         count -= n;
@@ -265,7 +265,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                         // 3 <= count <= 10
                         codegen[out_index] = 17;
                         out_index += 1;
-                        codegen[out_index] = @intCast(u8, count - 3);
+                        codegen[out_index] = @as(u8, @intCast(count - 3));
                         out_index += 1;
                         self.codegen_freq[17] += 1;
                         count = 0;
@@ -307,8 +307,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 extra_bits;
 
             return DynamicSize{
-                .size = @intCast(u32, size),
-                .num_codegens = @intCast(u32, num_codegens),
+                .size = @as(u32, @intCast(size)),
+                .num_codegens = @as(u32, @intCast(num_codegens)),
             };
         }
 
@@ -328,7 +328,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 return .{ .size = 0, .storable = false };
             }
             if (in.?.len <= deflate_const.max_store_block_size) {
-                return .{ .size = @intCast(u32, (in.?.len + 5) * 8), .storable = true };
+                return .{ .size = @as(u32, @intCast((in.?.len + 5) * 8)), .storable = true };
             }
             return .{ .size = 0, .storable = false };
         }
@@ -337,20 +337,20 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
             if (self.err) {
                 return;
             }
-            self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits);
-            self.nbits += @intCast(u32, c.len);
+            self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits));
+            self.nbits += @as(u32, @intCast(c.len));
             if (self.nbits >= 48) {
                 var bits = self.bits;
                 self.bits >>= 48;
                 self.nbits -= 48;
                 var n = self.nbytes;
                 var bytes = self.bytes[n..][0..6];
-                bytes[0] = @truncate(u8, bits);
-                bytes[1] = @truncate(u8, bits >> 8);
-                bytes[2] = @truncate(u8, bits >> 16);
-                bytes[3] = @truncate(u8, bits >> 24);
-                bytes[4] = @truncate(u8, bits >> 32);
-                bytes[5] = @truncate(u8, bits >> 40);
+                bytes[0] = @as(u8, @truncate(bits));
+                bytes[1] = @as(u8, @truncate(bits >> 8));
+                bytes[2] = @as(u8, @truncate(bits >> 16));
+                bytes[3] = @as(u8, @truncate(bits >> 24));
+                bytes[4] = @as(u8, @truncate(bits >> 32));
+                bytes[5] = @as(u8, @truncate(bits >> 40));
                 n += 6;
                 if (n >= buffer_flush_size) {
                     try self.write(self.bytes[0..n]);
@@ -381,36 +381,36 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 first_bits = 5;
             }
             try self.writeBits(first_bits, 3);
-            try self.writeBits(@intCast(u32, num_literals - 257), 5);
-            try self.writeBits(@intCast(u32, num_offsets - 1), 5);
-            try self.writeBits(@intCast(u32, num_codegens - 4), 4);
+            try self.writeBits(@as(u32, @intCast(num_literals - 257)), 5);
+            try self.writeBits(@as(u32, @intCast(num_offsets - 1)), 5);
+            try self.writeBits(@as(u32, @intCast(num_codegens - 4)), 4);
 
             var i: u32 = 0;
             while (i < num_codegens) : (i += 1) {
-                var value = @intCast(u32, self.codegen_encoding.codes[codegen_order[i]].len);
-                try self.writeBits(@intCast(u32, value), 3);
+                var value = @as(u32, @intCast(self.codegen_encoding.codes[codegen_order[i]].len));
+                try self.writeBits(@as(u32, @intCast(value)), 3);
             }
 
             i = 0;
             while (true) {
-                var code_word: u32 = @intCast(u32, self.codegen[i]);
+                var code_word: u32 = @as(u32, @intCast(self.codegen[i]));
                 i += 1;
                 if (code_word == bad_code) {
                     break;
                 }
-                try self.writeCode(self.codegen_encoding.codes[@intCast(u32, code_word)]);
+                try self.writeCode(self.codegen_encoding.codes[@as(u32, @intCast(code_word))]);
 
                 switch (code_word) {
                     16 => {
-                        try self.writeBits(@intCast(u32, self.codegen[i]), 2);
+                        try self.writeBits(@as(u32, @intCast(self.codegen[i])), 2);
                         i += 1;
                     },
                     17 => {
-                        try self.writeBits(@intCast(u32, self.codegen[i]), 3);
+                        try self.writeBits(@as(u32, @intCast(self.codegen[i])), 3);
                         i += 1;
                     },
                     18 => {
-                        try self.writeBits(@intCast(u32, self.codegen[i]), 7);
+                        try self.writeBits(@as(u32, @intCast(self.codegen[i])), 7);
                         i += 1;
                     },
                     else => {},
@@ -428,8 +428,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
             }
             try self.writeBits(flag, 3);
             try self.flush();
-            try self.writeBits(@intCast(u32, length), 16);
-            try self.writeBits(@intCast(u32, ~@intCast(u16, length)), 16);
+            try self.writeBits(@as(u32, @intCast(length)), 16);
+            try self.writeBits(@as(u32, @intCast(~@as(u16, @intCast(length)))), 16);
         }
 
         fn writeFixedHeader(self: *Self, is_eof: bool) Error!void {
@@ -476,14 +476,14 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 var length_code: u32 = length_codes_start + 8;
                 while (length_code < num_literals) : (length_code += 1) {
                     // First eight length codes have extra size = 0.
-                    extra_bits += @intCast(u32, self.literal_freq[length_code]) *
-                        @intCast(u32, length_extra_bits[length_code - length_codes_start]);
+                    extra_bits += @as(u32, @intCast(self.literal_freq[length_code])) *
+                        @as(u32, @intCast(length_extra_bits[length_code - length_codes_start]));
                 }
                 var offset_code: u32 = 4;
                 while (offset_code < num_offsets) : (offset_code += 1) {
                     // First four offset codes have extra size = 0.
-                    extra_bits += @intCast(u32, self.offset_freq[offset_code]) *
-                        @intCast(u32, offset_extra_bits[offset_code]);
+                    extra_bits += @as(u32, @intCast(self.offset_freq[offset_code])) *
+                        @as(u32, @intCast(offset_extra_bits[offset_code]));
                 }
             }
 
@@ -621,12 +621,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
             self.literal_freq[token.literal(deflate_const.end_block_marker)] += 1;
 
             // get the number of literals
-            num_literals = @intCast(u32, self.literal_freq.len);
+            num_literals = @as(u32, @intCast(self.literal_freq.len));
             while (self.literal_freq[num_literals - 1] == 0) {
                 num_literals -= 1;
             }
             // get the number of offsets
-            num_offsets = @intCast(u32, self.offset_freq.len);
+            num_offsets = @as(u32, @intCast(self.offset_freq.len));
             while (num_offsets > 0 and self.offset_freq[num_offsets - 1] == 0) {
                 num_offsets -= 1;
             }
@@ -664,18 +664,18 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 var length = token.length(t);
                 var length_code = token.lengthCode(length);
                 try self.writeCode(le_codes[length_code + length_codes_start]);
-                var extra_length_bits = @intCast(u32, length_extra_bits[length_code]);
+                var extra_length_bits = @as(u32, @intCast(length_extra_bits[length_code]));
                 if (extra_length_bits > 0) {
-                    var extra_length = @intCast(u32, length - length_base[length_code]);
+                    var extra_length = @as(u32, @intCast(length - length_base[length_code]));
                     try self.writeBits(extra_length, extra_length_bits);
                 }
                 // Write the offset
                 var offset = token.offset(t);
                 var offset_code = token.offsetCode(offset);
                 try self.writeCode(oe_codes[offset_code]);
-                var extra_offset_bits = @intCast(u32, offset_extra_bits[offset_code]);
+                var extra_offset_bits = @as(u32, @intCast(offset_extra_bits[offset_code]));
                 if (extra_offset_bits > 0) {
-                    var extra_offset = @intCast(u32, offset - offset_base[offset_code]);
+                    var extra_offset = @as(u32, @intCast(offset - offset_base[offset_code]));
                     try self.writeBits(extra_offset, extra_offset_bits);
                 }
             }
@@ -742,8 +742,8 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
             for (input) |t| {
                 // Bitwriting inlined, ~30% speedup
                 var c = encoding[t];
-                self.bits |= @intCast(u64, c.code) << @intCast(u6, self.nbits);
-                self.nbits += @intCast(u32, c.len);
+                self.bits |= @as(u64, @intCast(c.code)) << @as(u6, @intCast(self.nbits));
+                self.nbits += @as(u32, @intCast(c.len));
                 if (self.nbits < 48) {
                     continue;
                 }
@@ -752,12 +752,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
                 self.bits >>= 48;
                 self.nbits -= 48;
                 var bytes = self.bytes[n..][0..6];
-                bytes[0] = @truncate(u8, bits);
-                bytes[1] = @truncate(u8, bits >> 8);
-                bytes[2] = @truncate(u8, bits >> 16);
-                bytes[3] = @truncate(u8, bits >> 24);
-                bytes[4] = @truncate(u8, bits >> 32);
-                bytes[5] = @truncate(u8, bits >> 40);
+                bytes[0] = @as(u8, @truncate(bits));
+                bytes[1] = @as(u8, @truncate(bits >> 8));
+                bytes[2] = @as(u8, @truncate(bits >> 16));
+                bytes[3] = @as(u8, @truncate(bits >> 24));
+                bytes[4] = @as(u8, @truncate(bits >> 32));
+                bytes[5] = @as(u8, @truncate(bits >> 40));
                 n += 6;
                 if (n < buffer_flush_size) {
                     continue;
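
The six-byte flush repeated throughout this file is the most common shape of the new `@truncate`: the result type now comes from `@as` (or the destination) and the high bits are discarded. The unrolled stores above are equivalent to this loop, shown as a sketch (function name is illustrative):

    fn storeLow48(bits: u64, out: *[6]u8) void {
        var i: u6 = 0;
        while (i < 6) : (i += 1) {
            // Keep only the low byte of the shifted bit buffer.
            out[i] = @as(u8, @truncate(bits >> (i * 8)));
        }
    }
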
lib/std/compress/deflate/huffman_code.zig
@@ -73,7 +73,7 @@ pub const HuffmanEncoder = struct {
         // Set list to be the set of all non-zero literals and their frequencies
         for (freq, 0..) |f, i| {
             if (f != 0) {
-                list[count] = LiteralNode{ .literal = @intCast(u16, i), .freq = f };
+                list[count] = LiteralNode{ .literal = @as(u16, @intCast(i)), .freq = f };
                 count += 1;
             } else {
                 list[count] = LiteralNode{ .literal = 0x00, .freq = 0 };
@@ -88,7 +88,7 @@ pub const HuffmanEncoder = struct {
             // two or fewer literals, everything has bit length 1.
             for (list, 0..) |node, i| {
                 // "list" is in order of increasing literal value.
-                self.codes[node.literal].set(@intCast(u16, i), 1);
+                self.codes[node.literal].set(@as(u16, @intCast(i)), 1);
             }
             return;
         }
@@ -105,7 +105,7 @@ pub const HuffmanEncoder = struct {
         var total: u32 = 0;
         for (freq, 0..) |f, i| {
             if (f != 0) {
-                total += @intCast(u32, f) * @intCast(u32, self.codes[i].len);
+                total += @as(u32, @intCast(f)) * @as(u32, @intCast(self.codes[i].len));
             }
         }
         return total;
@@ -167,7 +167,7 @@ pub const HuffmanEncoder = struct {
         }
 
         // We need a total of 2*n - 2 items at top level and have already generated 2.
-        levels[max_bits].needed = 2 * @intCast(u32, n) - 4;
+        levels[max_bits].needed = 2 * @as(u32, @intCast(n)) - 4;
 
         {
             var level = max_bits;
@@ -267,19 +267,19 @@ pub const HuffmanEncoder = struct {
             // are encoded using "bits" bits, and get the values
             // code, code + 1, ....  The code values are
             // assigned in literal order (not frequency order).
-            var chunk = list[list.len - @intCast(u32, bits) ..];
+            var chunk = list[list.len - @as(u32, @intCast(bits)) ..];
 
             self.lns = chunk;
             mem.sort(LiteralNode, self.lns, {}, byLiteral);
 
             for (chunk) |node| {
                 self.codes[node.literal] = HuffCode{
-                    .code = bu.bitReverse(u16, code, @intCast(u5, n)),
-                    .len = @intCast(u16, n),
+                    .code = bu.bitReverse(u16, code, @as(u5, @intCast(n))),
+                    .len = @as(u16, @intCast(n)),
                 };
                 code += 1;
             }
-            list = list[0 .. list.len - @intCast(u32, bits)];
+            list = list[0 .. list.len - @as(u32, @intCast(bits))];
         }
     }
 };
@@ -332,7 +332,7 @@ pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder {
                 size = 8;
             },
         }
-        codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @intCast(u5, size)), .len = size };
+        codes[ch] = HuffCode{ .code = bu.bitReverse(u16, bits, @as(u5, @intCast(size))), .len = size };
     }
     return h;
 }
@@ -341,7 +341,7 @@ pub fn generateFixedOffsetEncoding(allocator: Allocator) !HuffmanEncoder {
     var h = try newHuffmanEncoder(allocator, 30);
     var codes = h.codes;
     for (codes, 0..) |_, ch| {
-        codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @intCast(u16, ch), 5), .len = 5 };
+        codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @as(u16, @intCast(ch)), 5), .len = 5 };
     }
     return h;
 }
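
The `bitReverse(u16, code, @as(u5, @intCast(n)))` calls above show the shift-amount case: bit counts must use the small integer type the operation expects, and the new spelling names that type at the use site. A self-contained sketch of the same idea (hypothetical helper):

    fn lowBits(x: u32, n: u6) u32 {
        if (n >= 32) return x;
        const shift = @as(u5, @intCast(n)); // u32 shifts take a u5 amount; n < 32 here
        return x & ((@as(u32, 1) << shift) - 1);
    }
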
lib/std/compress/deflate/token.zig
@@ -70,16 +70,16 @@ pub fn matchToken(xlength: u32, xoffset: u32) Token {
 
 // Returns the literal of a literal token
 pub fn literal(t: Token) u32 {
-    return @intCast(u32, t - literal_type);
+    return @as(u32, @intCast(t - literal_type));
 }
 
 // Returns the extra offset of a match token
 pub fn offset(t: Token) u32 {
-    return @intCast(u32, t) & offset_mask;
+    return @as(u32, @intCast(t)) & offset_mask;
 }
 
 pub fn length(t: Token) u32 {
-    return @intCast(u32, (t - match_type) >> length_shift);
+    return @as(u32, @intCast((t - match_type) >> length_shift));
 }
 
 pub fn lengthCode(len: u32) u32 {
@@ -88,10 +88,10 @@ pub fn lengthCode(len: u32) u32 {
 
 // Returns the offset code corresponding to a specific offset
 pub fn offsetCode(off: u32) u32 {
-    if (off < @intCast(u32, offset_codes.len)) {
+    if (off < @as(u32, @intCast(offset_codes.len))) {
         return offset_codes[off];
     }
-    if (off >> 7 < @intCast(u32, offset_codes.len)) {
+    if (off >> 7 < @as(u32, @intCast(offset_codes.len))) {
         return offset_codes[off >> 7] + 14;
     }
     return offset_codes[off >> 14] + 28;
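
Note that only possibly-narrowing conversions such as the `usize -> u32` ones above need `@intCast`; a pure widening still coerces through `@as` alone. A sketch, with an illustrative mask value rather than the real token layout:

    const offset_mask: u64 = (1 << 15) - 1; // illustrative, not token.zig's constant

    fn extractOffset(t: u64) u32 {
        // The masked value always fits in u32, so the checked @intCast cannot trap.
        return @as(u32, @intCast(t & offset_mask));
    }
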
lib/std/compress/lzma/decode.zig
@@ -52,11 +52,11 @@ pub const Params = struct {
             return error.CorruptInput;
         }
 
-        const lc = @intCast(u4, props % 9);
+        const lc = @as(u4, @intCast(props % 9));
         props /= 9;
-        const lp = @intCast(u3, props % 5);
+        const lp = @as(u3, @intCast(props % 5));
         props /= 5;
-        const pb = @intCast(u3, props);
+        const pb = @as(u3, @intCast(props));
 
         const dict_size_provided = try reader.readIntLittle(u32);
         const dict_size = @max(0x1000, dict_size_provided);
@@ -342,7 +342,7 @@ pub const DecoderState = struct {
             result = (result << 1) ^ @intFromBool(try decoder.decodeBit(reader, &probs[result], update));
         }
 
-        return @truncate(u8, result - 0x100);
+        return @as(u8, @truncate(result - 0x100));
     }
 
     fn decodeDistance(
@@ -358,7 +358,7 @@ pub const DecoderState = struct {
         if (pos_slot < 4)
             return pos_slot;
 
-        const num_direct_bits = @intCast(u5, (pos_slot >> 1) - 1);
+        const num_direct_bits = @as(u5, @intCast((pos_slot >> 1) - 1));
         var result = (2 ^ (pos_slot & 1)) << num_direct_bits;
 
         if (pos_slot < 14) {
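
The property-byte split above is a range-proven use of the new `@intCast`: each remainder is bounded by its divisor, so the narrow target types are always large enough. In isolation, assuming the caller has already taken the `error.CorruptInput` path for `props >= 9 * 5 * 5`:

    const Props = struct { lc: u4, lp: u3, pb: u3 };

    fn splitProps(byte: u8) Props {
        var props = byte;
        const lc = @as(u4, @intCast(props % 9)); // 0..8 fits in u4
        props /= 9;
        const lp = @as(u3, @intCast(props % 5)); // 0..4 fits in u3
        props /= 5;
        const pb = @as(u3, @intCast(props)); // 0..4 after the range check
        return .{ .lc = lc, .lp = lp, .pb = pb };
    }
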
lib/std/compress/lzma2/decode.zig
@@ -119,11 +119,11 @@ pub const Decoder = struct {
                     return error.CorruptInput;
                 }
 
-                const lc = @intCast(u4, props % 9);
+                const lc = @as(u4, @intCast(props % 9));
                 props /= 9;
-                const lp = @intCast(u3, props % 5);
+                const lp = @as(u3, @intCast(props % 5));
                 props /= 5;
-                const pb = @intCast(u3, props);
+                const pb = @as(u3, @intCast(props));
 
                 if (lc + lp > 4) {
                     return error.CorruptInput;
lib/std/compress/xz/block.zig
@@ -108,7 +108,7 @@ pub fn Decoder(comptime ReaderType: type) type {
                     has_unpacked_size: bool,
                 };
 
-                const flags = @bitCast(Flags, try header_reader.readByte());
+                const flags = @as(Flags, @bitCast(try header_reader.readByte()));
                 const filter_count = @as(u3, flags.last_filter_index) + 1;
                 if (filter_count > 1)
                     return error.Unsupported;
@@ -124,9 +124,9 @@ pub fn Decoder(comptime ReaderType: type) type {
                     _,
                 };
 
-                const filter_id = @enumFromInt(
+                const filter_id = @as(
                     FilterId,
-                    try std.leb.readULEB128(u64, header_reader),
+                    @enumFromInt(try std.leb.readULEB128(u64, header_reader)),
                 );
 
                 if (@intFromEnum(filter_id) >= 0x4000_0000_0000_0000)
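
`@enumFromInt` also lost its type parameter, which is why the multi-line call above restructures rather than merely renames. Because `FilterId` is non-exhaustive (the trailing `_`), any u64 value is representable. A sketch with an abbreviated, illustrative enum body:

    const FilterId = enum(u64) { lzma2 = 0x21, _ };

    fn readFilterId(raw: u64) FilterId {
        return @as(FilterId, @enumFromInt(raw));
    }
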
lib/std/compress/zstandard/decode/block.zig
@@ -894,7 +894,7 @@ pub fn decodeBlockReader(
 /// Decode the header of a block.
 pub fn decodeBlockHeader(src: *const [3]u8) frame.Zstandard.Block.Header {
     const last_block = src[0] & 1 == 1;
-    const block_type = @enumFromInt(frame.Zstandard.Block.Type, (src[0] & 0b110) >> 1);
+    const block_type = @as(frame.Zstandard.Block.Type, @enumFromInt((src[0] & 0b110) >> 1));
     const block_size = ((src[0] & 0b11111000) >> 3) + (@as(u21, src[1]) << 5) + (@as(u21, src[2]) << 13);
     return .{
         .last_block = last_block,
@@ -1008,7 +1008,7 @@ pub fn decodeLiteralsSection(
                 try huffman.decodeHuffmanTree(counting_reader.reader(), buffer)
             else
                 null;
-            const huffman_tree_size = @intCast(usize, counting_reader.bytes_read);
+            const huffman_tree_size = @as(usize, @intCast(counting_reader.bytes_read));
             const total_streams_size = std.math.sub(usize, header.compressed_size.?, huffman_tree_size) catch
                 return error.MalformedLiteralsSection;
 
@@ -1058,8 +1058,8 @@ fn decodeStreams(size_format: u2, stream_data: []const u8) !LiteralsSection.Stre
 ///   - `error.EndOfStream` if there are not enough bytes in `source`
 pub fn decodeLiteralsHeader(source: anytype) !LiteralsSection.Header {
     const byte0 = try source.readByte();
-    const block_type = @enumFromInt(LiteralsSection.BlockType, byte0 & 0b11);
-    const size_format = @intCast(u2, (byte0 & 0b1100) >> 2);
+    const block_type = @as(LiteralsSection.BlockType, @enumFromInt(byte0 & 0b11));
+    const size_format = @as(u2, @intCast((byte0 & 0b1100) >> 2));
     var regenerated_size: u20 = undefined;
     var compressed_size: ?u18 = null;
     switch (block_type) {
@@ -1132,9 +1132,9 @@ pub fn decodeSequencesHeader(
 
     const compression_modes = try source.readByte();
 
-    const matches_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00001100) >> 2);
-    const offsets_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b00110000) >> 4);
-    const literal_mode = @enumFromInt(SequencesSection.Header.Mode, (compression_modes & 0b11000000) >> 6);
+    const matches_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00001100) >> 2));
+    const offsets_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b00110000) >> 4));
+    const literal_mode = @as(SequencesSection.Header.Mode, @enumFromInt((compression_modes & 0b11000000) >> 6));
     if (compression_modes & 0b11 != 0) return error.ReservedBitSet;
 
     return SequencesSection.Header{
lib/std/compress/zstandard/decode/fse.zig
@@ -69,7 +69,7 @@ pub fn decodeFseTable(
 }
 
 fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
-    const total_probability = @intCast(u16, entries.len);
+    const total_probability = @as(u16, @intCast(entries.len));
     const accuracy_log = std.math.log2_int(u16, total_probability);
     assert(total_probability <= 1 << 9);
 
@@ -77,7 +77,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
     for (values, 0..) |value, i| {
         if (value == 0) {
             entries[entries.len - 1 - less_than_one_count] = Table.Fse{
-                .symbol = @intCast(u8, i),
+                .symbol = @as(u8, @intCast(i)),
                 .baseline = 0,
                 .bits = accuracy_log,
             };
@@ -99,7 +99,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
         const share_size_log = std.math.log2_int(u16, share_size);
 
         for (0..probability) |i| {
-            temp_states[i] = @intCast(u16, position);
+            temp_states[i] = @as(u16, @intCast(position));
             position += (entries.len >> 1) + (entries.len >> 3) + 3;
             position &= entries.len - 1;
             while (position >= entries.len - less_than_one_count) {
@@ -110,13 +110,13 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
         std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16));
         for (0..probability) |i| {
             entries[temp_states[i]] = if (i < double_state_count) Table.Fse{
-                .symbol = @intCast(u8, symbol),
+                .symbol = @as(u8, @intCast(symbol)),
                 .bits = share_size_log + 1,
-                .baseline = single_state_count * share_size + @intCast(u16, i) * 2 * share_size,
+                .baseline = single_state_count * share_size + @as(u16, @intCast(i)) * 2 * share_size,
             } else Table.Fse{
-                .symbol = @intCast(u8, symbol),
+                .symbol = @as(u8, @intCast(symbol)),
                 .bits = share_size_log,
-                .baseline = (@intCast(u16, i) - double_state_count) * share_size,
+                .baseline = (@as(u16, @intCast(i)) - double_state_count) * share_size,
             };
         }
     }
lib/std/compress/zstandard/decode/huffman.zig
@@ -109,8 +109,8 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights
     const weights_byte_count = (encoded_symbol_count + 1) / 2;
     for (0..weights_byte_count) |i| {
         const byte = try source.readByte();
-        weights[2 * i] = @intCast(u4, byte >> 4);
-        weights[2 * i + 1] = @intCast(u4, byte & 0xF);
+        weights[2 * i] = @as(u4, @intCast(byte >> 4));
+        weights[2 * i + 1] = @as(u4, @intCast(byte & 0xF));
     }
     return encoded_symbol_count + 1;
 }
@@ -118,7 +118,7 @@ fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights
 fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.PrefixedSymbol, weights: [256]u4) usize {
     for (0..weight_sorted_prefixed_symbols.len) |i| {
         weight_sorted_prefixed_symbols[i] = .{
-            .symbol = @intCast(u8, i),
+            .symbol = @as(u8, @intCast(i)),
             .weight = undefined,
             .prefix = undefined,
         };
@@ -167,7 +167,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm
         weight_power_sum_big += (@as(u16, 1) << value) >> 1;
     }
     if (weight_power_sum_big >= 1 << 11) return error.MalformedHuffmanTree;
-    const weight_power_sum = @intCast(u16, weight_power_sum_big);
+    const weight_power_sum = @as(u16, @intCast(weight_power_sum_big));
 
     // advance to next power of two (even if weight_power_sum is a power of 2)
     // TODO: is it valid to have weight_power_sum == 0?
@@ -179,7 +179,7 @@ fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffm
     const prefixed_symbol_count = assignSymbols(weight_sorted_prefixed_symbols[0..symbol_count], weights.*);
     const tree = LiteralsSection.HuffmanTree{
         .max_bit_count = max_number_of_bits,
-        .symbol_count_minus_one = @intCast(u8, prefixed_symbol_count - 1),
+        .symbol_count_minus_one = @as(u8, @intCast(prefixed_symbol_count - 1)),
         .nodes = weight_sorted_prefixed_symbols,
     };
     return tree;
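
The nibble split above uses `@intCast` rather than `@truncate` even though both would compile: `@intCast` safety-checks that `byte >> 4` really is 0..15, while `@truncate` would silently drop bits if the expression were ever wrong. As a standalone sketch:

    fn nibbles(byte: u8) [2]u4 {
        return .{
            @as(u4, @intCast(byte >> 4)), // high nibble, provably 0..15
            @as(u4, @intCast(byte & 0xF)), // low nibble
        };
    }
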
lib/std/compress/zstandard/decompress.zig
@@ -260,7 +260,7 @@ pub fn decodeFrameArrayList(
 /// Returns the frame checksum corresponding to the data fed into `hasher`
 pub fn computeChecksum(hasher: *std.hash.XxHash64) u32 {
     const hash = hasher.final();
-    return @intCast(u32, hash & 0xFFFFFFFF);
+    return @as(u32, @intCast(hash & 0xFFFFFFFF));
 }
 
 const FrameError = error{
@@ -398,7 +398,7 @@ pub const FrameContext = struct {
         const window_size = if (window_size_raw > window_size_max)
             return error.WindowTooLarge
         else
-            @intCast(usize, window_size_raw);
+            @as(usize, @intCast(window_size_raw));
 
         const should_compute_checksum =
             frame_header.descriptor.content_checksum_flag and verify_checksum;
@@ -585,7 +585,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 {
         const exponent = (descriptor & 0b11111000) >> 3;
         const mantissa = descriptor & 0b00000111;
         const window_log = 10 + exponent;
-        const window_base = @as(u64, 1) << @intCast(u6, window_log);
+        const window_base = @as(u64, 1) << @as(u6, @intCast(window_log));
         const window_add = (window_base / 8) * mantissa;
         return window_base + window_add;
     } else return header.content_size;
@@ -599,7 +599,7 @@ pub fn frameWindowSize(header: ZstandardHeader) ?u64 {
 pub fn decodeZstandardHeader(
     source: anytype,
 ) (@TypeOf(source).Error || error{ EndOfStream, ReservedBitSet })!ZstandardHeader {
-    const descriptor = @bitCast(ZstandardHeader.Descriptor, try source.readByte());
+    const descriptor = @as(ZstandardHeader.Descriptor, @bitCast(try source.readByte()));
 
     if (descriptor.reserved) return error.ReservedBitSet;
 
lib/std/compress/gzip.zig
@@ -89,7 +89,7 @@ pub fn Decompress(comptime ReaderType: type) type {
 
             if (FLG & FHCRC != 0) {
                 const hash = try source.readIntLittle(u16);
-                if (hash != @truncate(u16, hasher.hasher.final()))
+                if (hash != @as(u16, @truncate(hasher.hasher.final())))
                     return error.WrongChecksum;
             }
 
lib/std/compress/xz.zig
@@ -18,7 +18,7 @@ fn readStreamFlags(reader: anytype, check: *Check) !void {
     if (reserved1 != 0)
         return error.CorruptInput;
 
-    check.* = @enumFromInt(Check, try bit_reader.readBitsNoEof(u4, 4));
+    check.* = @as(Check, @enumFromInt(try bit_reader.readBitsNoEof(u4, 4)));
 
     const reserved2 = try bit_reader.readBitsNoEof(u4, 4);
     if (reserved2 != 0)
lib/std/compress/zlib.zig
@@ -41,7 +41,7 @@ pub fn DecompressStream(comptime ReaderType: type) type {
             // verify the header checksum
             if (header_u16 % 31 != 0)
                 return error.BadHeader;
-            const header = @bitCast(ZLibHeader, header_u16);
+            const header = @as(ZLibHeader, @bitCast(header_u16));
 
             // The CM field must be 8 to indicate the use of DEFLATE
             if (header.compression_method != ZLibHeader.DEFLATE)
@@ -130,9 +130,9 @@ pub fn CompressStream(comptime WriterType: type) type {
                 .preset_dict = 0,
                 .checksum = 0,
             };
-            header.checksum = @truncate(u5, 31 - @bitCast(u16, header) % 31);
+            header.checksum = @as(u5, @truncate(31 - @as(u16, @bitCast(header)) % 31));
 
-            try dest.writeIntBig(u16, @bitCast(u16, header));
+            try dest.writeIntBig(u16, @as(u16, @bitCast(header)));
 
             const compression_level: deflate.Compression = switch (options.level) {
                 .no_compression => .no_compression,
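
The zlib hunks show both directions of the packed-struct `@bitCast`: integer to fields when parsing, fields back to integer when writing, with the bit sizes required to match exactly. A sketch with an illustrative 16-bit layout (field names and widths approximate the DEFLATE header, not a copy of ZLibHeader):

    const Header = packed struct(u16) {
        method: u4,
        info: u4,
        checksum: u5,
        preset_dict: u1,
        level: u2,
    };

    fn roundTrip(raw: u16) u16 {
        const h = @as(Header, @bitCast(raw)); // reinterpret 16 bits as fields
        return @as(u16, @bitCast(h)); // and back; no bits change
    }
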
lib/std/crypto/25519/curve25519.zig
@@ -54,7 +54,7 @@ pub const Curve25519 = struct {
         var swap: u8 = 0;
         var pos: usize = bits - 1;
         while (true) : (pos -= 1) {
-            const bit = (s[pos >> 3] >> @truncate(u3, pos)) & 1;
+            const bit = (s[pos >> 3] >> @as(u3, @truncate(pos))) & 1;
             swap ^= bit;
             Fe.cSwap2(&x2, &x3, &z2, &z3, swap);
             swap = bit;
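
Here `@truncate` serves as a cheap modulo: `@as(u3, @truncate(pos))` is `pos % 8`, exactly the shift amount a u8 allows. The bit-extraction step in isolation:

    fn bitAt(s: []const u8, pos: usize) u1 {
        // Byte index pos / 8, then shift by pos % 8 and keep the low bit.
        return @as(u1, @truncate(s[pos >> 3] >> @as(u3, @truncate(pos))));
    }
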
lib/std/crypto/25519/edwards25519.zig
@@ -162,8 +162,8 @@ pub const Edwards25519 = struct {
         const reduced = if ((s[s.len - 1] & 0x80) == 0) s else scalar.reduce(s);
         var e: [2 * 32]i8 = undefined;
         for (reduced, 0..) |x, i| {
-            e[i * 2 + 0] = @as(i8, @truncate(u4, x));
-            e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+            e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+            e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
         }
         // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
         var carry: i8 = 0;
@@ -190,9 +190,9 @@ pub const Edwards25519 = struct {
         while (true) : (pos -= 1) {
             const slot = e[pos];
             if (slot > 0) {
-                q = q.add(pc[@intCast(usize, slot)]);
+                q = q.add(pc[@as(usize, @intCast(slot))]);
             } else if (slot < 0) {
-                q = q.sub(pc[@intCast(usize, -slot)]);
+                q = q.sub(pc[@as(usize, @intCast(-slot))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
@@ -206,7 +206,7 @@ pub const Edwards25519 = struct {
         var q = Edwards25519.identityElement;
         var pos: usize = 252;
         while (true) : (pos -= 4) {
-            const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+            const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
             if (vartime) {
                 if (slot != 0) {
                     q = q.add(pc[slot]);
@@ -283,15 +283,15 @@ pub const Edwards25519 = struct {
         while (true) : (pos -= 1) {
             const slot1 = e1[pos];
             if (slot1 > 0) {
-                q = q.add(pc1[@intCast(usize, slot1)]);
+                q = q.add(pc1[@as(usize, @intCast(slot1))]);
             } else if (slot1 < 0) {
-                q = q.sub(pc1[@intCast(usize, -slot1)]);
+                q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
             }
             const slot2 = e2[pos];
             if (slot2 > 0) {
-                q = q.add(pc2[@intCast(usize, slot2)]);
+                q = q.add(pc2[@as(usize, @intCast(slot2))]);
             } else if (slot2 < 0) {
-                q = q.sub(pc2[@intCast(usize, -slot2)]);
+                q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
@@ -326,9 +326,9 @@ pub const Edwards25519 = struct {
             for (es, 0..) |e, i| {
                 const slot = e[pos];
                 if (slot > 0) {
-                    q = q.add(pcs[i][@intCast(usize, slot)]);
+                    q = q.add(pcs[i][@as(usize, @intCast(slot))]);
                 } else if (slot < 0) {
-                    q = q.sub(pcs[i][@intCast(usize, -slot)]);
+                    q = q.sub(pcs[i][@as(usize, @intCast(-slot))]);
                 }
             }
             if (pos == 0) break;
@@ -427,7 +427,7 @@ pub const Edwards25519 = struct {
         }
         const empty_block = [_]u8{0} ** H.block_length;
         var t = [3]u8{ 0, n * h_l, 0 };
-        var xctx_len_u8 = [1]u8{@intCast(u8, xctx.len)};
+        var xctx_len_u8 = [1]u8{@as(u8, @intCast(xctx.len))};
         var st = H.init(.{});
         st.update(empty_block[0..]);
         st.update(s);
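
The radix-16 recoding above stacks two casts: `@truncate` to u4 first, then a widening `@as` into i8, so each nibble arrives as a small non-negative signed digit ready for the carry pass. The split step alone:

    fn nibbleDigits(x: u8) [2]i8 {
        return .{
            @as(i8, @as(u4, @truncate(x))), // low nibble, 0..15
            @as(i8, @as(u4, @truncate(x >> 4))), // high nibble, 0..15
        };
    }
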
lib/std/crypto/25519/field.zig
@@ -254,11 +254,11 @@ pub const Fe = struct {
         var rs: [5]u64 = undefined;
         comptime var i = 0;
         inline while (i < 4) : (i += 1) {
-            rs[i] = @truncate(u64, r[i]) & MASK51;
-            r[i + 1] += @intCast(u64, r[i] >> 51);
+            rs[i] = @as(u64, @truncate(r[i])) & MASK51;
+            r[i + 1] += @as(u64, @intCast(r[i] >> 51));
         }
-        rs[4] = @truncate(u64, r[4]) & MASK51;
-        var carry = @intCast(u64, r[4] >> 51);
+        rs[4] = @as(u64, @truncate(r[4])) & MASK51;
+        var carry = @as(u64, @intCast(r[4] >> 51));
         rs[0] += 19 * carry;
         carry = rs[0] >> 51;
         rs[0] &= MASK51;
@@ -278,8 +278,8 @@ pub const Fe = struct {
         var r: [5]u128 = undefined;
         comptime var i = 0;
         inline while (i < 5) : (i += 1) {
-            ax[i] = @intCast(u128, a.limbs[i]);
-            bx[i] = @intCast(u128, b.limbs[i]);
+            ax[i] = @as(u128, @intCast(a.limbs[i]));
+            bx[i] = @as(u128, @intCast(b.limbs[i]));
         }
         i = 1;
         inline while (i < 5) : (i += 1) {
@@ -299,7 +299,7 @@ pub const Fe = struct {
         var r: [5]u128 = undefined;
         comptime var i = 0;
         inline while (i < 5) : (i += 1) {
-            ax[i] = @intCast(u128, a.limbs[i]);
+            ax[i] = @as(u128, @intCast(a.limbs[i]));
         }
         const a0_2 = 2 * ax[0];
         const a1_2 = 2 * ax[1];
@@ -334,15 +334,15 @@ pub const Fe = struct {
 
     /// Multiply a field element with a small (32-bit) integer
     pub inline fn mul32(a: Fe, comptime n: u32) Fe {
-        const sn = @intCast(u128, n);
+        const sn = @as(u128, @intCast(n));
         var fe: Fe = undefined;
         var x: u128 = 0;
         comptime var i = 0;
         inline while (i < 5) : (i += 1) {
             x = a.limbs[i] * sn + (x >> 51);
-            fe.limbs[i] = @truncate(u64, x) & MASK51;
+            fe.limbs[i] = @as(u64, @truncate(x)) & MASK51;
         }
-        fe.limbs[0] += @intCast(u64, x >> 51) * 19;
+        fe.limbs[0] += @as(u64, @intCast(x >> 51)) * 19;
 
         return fe;
     }
@@ -402,7 +402,7 @@ pub const Fe = struct {
         const t2 = t.sqn(30).mul(t);
         const t3 = t2.sqn(60).mul(t2);
         const t4 = t3.sqn(120).mul(t3).sqn(10).mul(u).sqn(3).mul(_11).sq();
-        return @bitCast(bool, @truncate(u1, ~(t4.toBytes()[1] & 1)));
+        return @as(bool, @bitCast(@as(u1, @truncate(~(t4.toBytes()[1] & 1)))));
     }
 
     fn uncheckedSqrt(x2: Fe) Fe {
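
The limb reduction above mixes both narrowing builtins deliberately: `@truncate` keeps the low 51 bits of a u128 accumulator, while `@intCast` brings the carry down to u64 and safety-checks the invariant that the accumulator stayed below 2^115, which the field arithmetic guarantees. One carry step in isolation:

    fn carryStep(r: u128) struct { limb: u64, carry: u64 } {
        const MASK51: u64 = (1 << 51) - 1;
        return .{
            .limb = @as(u64, @truncate(r)) & MASK51,
            .carry = @as(u64, @intCast(r >> 51)), // checked: requires r < 1 << 115
        };
    }
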
lib/std/crypto/25519/scalar.zig
@@ -27,8 +27,8 @@ pub fn rejectNonCanonical(s: CompressedScalar) NonCanonicalError!void {
     while (true) : (i -= 1) {
         const xs = @as(u16, s[i]);
         const xfield_order_s = @as(u16, field_order_s[i]);
-        c |= @intCast(u8, ((xs -% xfield_order_s) >> 8) & n);
-        n &= @intCast(u8, ((xs ^ xfield_order_s) -% 1) >> 8);
+        c |= @as(u8, @intCast(((xs -% xfield_order_s) >> 8) & n));
+        n &= @as(u8, @intCast(((xs ^ xfield_order_s) -% 1) >> 8));
         if (i == 0) break;
     }
     if (c == 0) {
@@ -89,7 +89,7 @@ pub fn neg(s: CompressedScalar) CompressedScalar {
     var i: usize = 0;
     while (i < 64) : (i += 1) {
         carry = @as(u32, fs[i]) -% sx[i] -% @as(u32, carry);
-        sx[i] = @truncate(u8, carry);
+        sx[i] = @as(u8, @truncate(carry));
         carry = (carry >> 8) & 1;
     }
     return reduce64(sx);
@@ -129,7 +129,7 @@ pub const Scalar = struct {
         while (i < 4) : (i += 1) {
             mem.writeIntLittle(u64, bytes[i * 7 ..][0..8], expanded.limbs[i]);
         }
-        mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @intCast(u32, expanded.limbs[i]));
+        mem.writeIntLittle(u32, bytes[i * 7 ..][0..4], @as(u32, @intCast(expanded.limbs[i])));
         return bytes;
     }
 
@@ -234,42 +234,42 @@ pub const Scalar = struct {
         const z80 = xy440;
 
         const carry0 = z00 >> 56;
-        const t10 = @truncate(u64, z00) & 0xffffffffffffff;
+        const t10 = @as(u64, @truncate(z00)) & 0xffffffffffffff;
         const c00 = carry0;
         const t00 = t10;
         const carry1 = (z10 + c00) >> 56;
-        const t11 = @truncate(u64, (z10 + c00)) & 0xffffffffffffff;
+        const t11 = @as(u64, @truncate((z10 + c00))) & 0xffffffffffffff;
         const c10 = carry1;
         const t12 = t11;
         const carry2 = (z20 + c10) >> 56;
-        const t13 = @truncate(u64, (z20 + c10)) & 0xffffffffffffff;
+        const t13 = @as(u64, @truncate((z20 + c10))) & 0xffffffffffffff;
         const c20 = carry2;
         const t20 = t13;
         const carry3 = (z30 + c20) >> 56;
-        const t14 = @truncate(u64, (z30 + c20)) & 0xffffffffffffff;
+        const t14 = @as(u64, @truncate((z30 + c20))) & 0xffffffffffffff;
         const c30 = carry3;
         const t30 = t14;
         const carry4 = (z40 + c30) >> 56;
-        const t15 = @truncate(u64, (z40 + c30)) & 0xffffffffffffff;
+        const t15 = @as(u64, @truncate((z40 + c30))) & 0xffffffffffffff;
         const c40 = carry4;
         const t40 = t15;
         const carry5 = (z50 + c40) >> 56;
-        const t16 = @truncate(u64, (z50 + c40)) & 0xffffffffffffff;
+        const t16 = @as(u64, @truncate((z50 + c40))) & 0xffffffffffffff;
         const c50 = carry5;
         const t50 = t16;
         const carry6 = (z60 + c50) >> 56;
-        const t17 = @truncate(u64, (z60 + c50)) & 0xffffffffffffff;
+        const t17 = @as(u64, @truncate((z60 + c50))) & 0xffffffffffffff;
         const c60 = carry6;
         const t60 = t17;
         const carry7 = (z70 + c60) >> 56;
-        const t18 = @truncate(u64, (z70 + c60)) & 0xffffffffffffff;
+        const t18 = @as(u64, @truncate((z70 + c60))) & 0xffffffffffffff;
         const c70 = carry7;
         const t70 = t18;
         const carry8 = (z80 + c70) >> 56;
-        const t19 = @truncate(u64, (z80 + c70)) & 0xffffffffffffff;
+        const t19 = @as(u64, @truncate((z80 + c70))) & 0xffffffffffffff;
         const c80 = carry8;
         const t80 = t19;
-        const t90 = (@truncate(u64, c80));
+        const t90 = (@as(u64, @truncate(c80)));
         const r0 = t00;
         const r1 = t12;
         const r2 = t20;
@@ -356,26 +356,26 @@ pub const Scalar = struct {
         const carry12 = (z32 + c21) >> 56;
         const c31 = carry12;
         const carry13 = (z42 + c31) >> 56;
-        const t24 = @truncate(u64, z42 + c31) & 0xffffffffffffff;
+        const t24 = @as(u64, @truncate(z42 + c31)) & 0xffffffffffffff;
         const c41 = carry13;
         const t41 = t24;
         const carry14 = (z5 + c41) >> 56;
-        const t25 = @truncate(u64, z5 + c41) & 0xffffffffffffff;
+        const t25 = @as(u64, @truncate(z5 + c41)) & 0xffffffffffffff;
         const c5 = carry14;
         const t5 = t25;
         const carry15 = (z6 + c5) >> 56;
-        const t26 = @truncate(u64, z6 + c5) & 0xffffffffffffff;
+        const t26 = @as(u64, @truncate(z6 + c5)) & 0xffffffffffffff;
         const c6 = carry15;
         const t6 = t26;
         const carry16 = (z7 + c6) >> 56;
-        const t27 = @truncate(u64, z7 + c6) & 0xffffffffffffff;
+        const t27 = @as(u64, @truncate(z7 + c6)) & 0xffffffffffffff;
         const c7 = carry16;
         const t7 = t27;
         const carry17 = (z8 + c7) >> 56;
-        const t28 = @truncate(u64, z8 + c7) & 0xffffffffffffff;
+        const t28 = @as(u64, @truncate(z8 + c7)) & 0xffffffffffffff;
         const c8 = carry17;
         const t8 = t28;
-        const t9 = @truncate(u64, c8);
+        const t9 = @as(u64, @truncate(c8));
 
         const qmu4_ = t41;
         const qmu5_ = t5;
@@ -425,22 +425,22 @@ pub const Scalar = struct {
         const xy31 = @as(u128, qdiv3) * @as(u128, m1);
         const xy40 = @as(u128, qdiv4) * @as(u128, m0);
         const carry18 = xy00 >> 56;
-        const t29 = @truncate(u64, xy00) & 0xffffffffffffff;
+        const t29 = @as(u64, @truncate(xy00)) & 0xffffffffffffff;
         const c0 = carry18;
         const t01 = t29;
         const carry19 = (xy01 + xy10 + c0) >> 56;
-        const t31 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff;
+        const t31 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff;
         const c12 = carry19;
         const t110 = t31;
         const carry20 = (xy02 + xy11 + xy20 + c12) >> 56;
-        const t32 = @truncate(u64, xy02 + xy11 + xy20 + c12) & 0xffffffffffffff;
+        const t32 = @as(u64, @truncate(xy02 + xy11 + xy20 + c12)) & 0xffffffffffffff;
         const c22 = carry20;
         const t210 = t32;
         const carry = (xy03 + xy12 + xy21 + xy30 + c22) >> 56;
-        const t33 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c22) & 0xffffffffffffff;
+        const t33 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c22)) & 0xffffffffffffff;
         const c32 = carry;
         const t34 = t33;
-        const t42 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c32) & 0xffffffffff;
+        const t42 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c32)) & 0xffffffffff;
 
         const qmul0 = t01;
         const qmul1 = t110;
@@ -498,7 +498,7 @@ pub const Scalar = struct {
         const t = ((b << 56) + s4) -% (y41 + b3);
         const b4 = b;
         const t4 = t;
-        const mask = (b4 -% @intCast(u64, ((1))));
+        const mask = (b4 -% @as(u64, @intCast(((1)))));
         const z04 = s0 ^ (mask & (s0 ^ t0));
         const z14 = s1 ^ (mask & (s1 ^ t1));
         const z24 = s2 ^ (mask & (s2 ^ t2));
@@ -691,26 +691,26 @@ const ScalarDouble = struct {
         const carry3 = (z31 + c20) >> 56;
         const c30 = carry3;
         const carry4 = (z41 + c30) >> 56;
-        const t103 = @as(u64, @truncate(u64, z41 + c30)) & 0xffffffffffffff;
+        const t103 = @as(u64, @as(u64, @truncate(z41 + c30))) & 0xffffffffffffff;
         const c40 = carry4;
         const t410 = t103;
         const carry5 = (z5 + c40) >> 56;
-        const t104 = @as(u64, @truncate(u64, z5 + c40)) & 0xffffffffffffff;
+        const t104 = @as(u64, @as(u64, @truncate(z5 + c40))) & 0xffffffffffffff;
         const c5 = carry5;
         const t51 = t104;
         const carry6 = (z6 + c5) >> 56;
-        const t105 = @as(u64, @truncate(u64, z6 + c5)) & 0xffffffffffffff;
+        const t105 = @as(u64, @as(u64, @truncate(z6 + c5))) & 0xffffffffffffff;
         const c6 = carry6;
         const t61 = t105;
         const carry7 = (z7 + c6) >> 56;
-        const t106 = @as(u64, @truncate(u64, z7 + c6)) & 0xffffffffffffff;
+        const t106 = @as(u64, @as(u64, @truncate(z7 + c6))) & 0xffffffffffffff;
         const c7 = carry7;
         const t71 = t106;
         const carry8 = (z8 + c7) >> 56;
-        const t107 = @as(u64, @truncate(u64, z8 + c7)) & 0xffffffffffffff;
+        const t107 = @as(u64, @as(u64, @truncate(z8 + c7))) & 0xffffffffffffff;
         const c8 = carry8;
         const t81 = t107;
-        const t91 = @as(u64, @truncate(u64, c8));
+        const t91 = @as(u64, @as(u64, @truncate(c8)));
 
         const qmu4_ = t410;
         const qmu5_ = t51;
@@ -760,22 +760,22 @@ const ScalarDouble = struct {
         const xy31 = @as(u128, qdiv3) * @as(u128, m1);
         const xy40 = @as(u128, qdiv4) * @as(u128, m0);
         const carry9 = xy00 >> 56;
-        const t108 = @truncate(u64, xy00) & 0xffffffffffffff;
+        const t108 = @as(u64, @truncate(xy00)) & 0xffffffffffffff;
         const c0 = carry9;
         const t010 = t108;
         const carry10 = (xy01 + xy10 + c0) >> 56;
-        const t109 = @truncate(u64, xy01 + xy10 + c0) & 0xffffffffffffff;
+        const t109 = @as(u64, @truncate(xy01 + xy10 + c0)) & 0xffffffffffffff;
         const c11 = carry10;
         const t110 = t109;
         const carry11 = (xy02 + xy11 + xy20 + c11) >> 56;
-        const t1010 = @truncate(u64, xy02 + xy11 + xy20 + c11) & 0xffffffffffffff;
+        const t1010 = @as(u64, @truncate(xy02 + xy11 + xy20 + c11)) & 0xffffffffffffff;
         const c21 = carry11;
         const t210 = t1010;
         const carry = (xy03 + xy12 + xy21 + xy30 + c21) >> 56;
-        const t1011 = @truncate(u64, xy03 + xy12 + xy21 + xy30 + c21) & 0xffffffffffffff;
+        const t1011 = @as(u64, @truncate(xy03 + xy12 + xy21 + xy30 + c21)) & 0xffffffffffffff;
         const c31 = carry;
         const t310 = t1011;
-        const t411 = @truncate(u64, xy04 + xy13 + xy22 + xy31 + xy40 + c31) & 0xffffffffff;
+        const t411 = @as(u64, @truncate(xy04 + xy13 + xy22 + xy31 + xy40 + c31)) & 0xffffffffff;
 
         const qmul0 = t010;
         const qmul1 = t110;
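
Several rewrites in this file, such as `@as(u64, @truncate(u64, z41 + c30))` becoming `@as(u64, @as(u64, @truncate(z41 + c30)))`, are the mechanical image of an already-wrapped cast and now carry a redundant outer `@as`: the inner cast alone produces a u64. In context the simpler equivalent would be, for example:

    const t103 = @as(u64, @truncate(z41 + c30)) & 0xffffffffffffff;
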
lib/std/crypto/aes/soft.zig
@@ -51,13 +51,13 @@ pub const Block = struct {
         const s3 = block.repr[3];
 
         var x: [4]u32 = undefined;
-        x = table_lookup(&table_encrypt, @truncate(u8, s0), @truncate(u8, s1 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 24));
+        x = table_lookup(&table_encrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 24)));
         var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
-        x = table_lookup(&table_encrypt, @truncate(u8, s1), @truncate(u8, s2 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 24));
+        x = table_lookup(&table_encrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 24)));
         var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
-        x = table_lookup(&table_encrypt, @truncate(u8, s2), @truncate(u8, s3 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 24));
+        x = table_lookup(&table_encrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 24)));
         var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
-        x = table_lookup(&table_encrypt, @truncate(u8, s3), @truncate(u8, s0 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 24));
+        x = table_lookup(&table_encrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 24)));
         var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
 
         t0 ^= round_key.repr[0];
@@ -77,31 +77,31 @@ pub const Block = struct {
 
         var x: [4]u32 = undefined;
         x = .{
-            table_encrypt[0][@truncate(u8, s0)],
-            table_encrypt[1][@truncate(u8, s1 >> 8)],
-            table_encrypt[2][@truncate(u8, s2 >> 16)],
-            table_encrypt[3][@truncate(u8, s3 >> 24)],
+            table_encrypt[0][@as(u8, @truncate(s0))],
+            table_encrypt[1][@as(u8, @truncate(s1 >> 8))],
+            table_encrypt[2][@as(u8, @truncate(s2 >> 16))],
+            table_encrypt[3][@as(u8, @truncate(s3 >> 24))],
         };
         var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
         x = .{
-            table_encrypt[0][@truncate(u8, s1)],
-            table_encrypt[1][@truncate(u8, s2 >> 8)],
-            table_encrypt[2][@truncate(u8, s3 >> 16)],
-            table_encrypt[3][@truncate(u8, s0 >> 24)],
+            table_encrypt[0][@as(u8, @truncate(s1))],
+            table_encrypt[1][@as(u8, @truncate(s2 >> 8))],
+            table_encrypt[2][@as(u8, @truncate(s3 >> 16))],
+            table_encrypt[3][@as(u8, @truncate(s0 >> 24))],
         };
         var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
         x = .{
-            table_encrypt[0][@truncate(u8, s2)],
-            table_encrypt[1][@truncate(u8, s3 >> 8)],
-            table_encrypt[2][@truncate(u8, s0 >> 16)],
-            table_encrypt[3][@truncate(u8, s1 >> 24)],
+            table_encrypt[0][@as(u8, @truncate(s2))],
+            table_encrypt[1][@as(u8, @truncate(s3 >> 8))],
+            table_encrypt[2][@as(u8, @truncate(s0 >> 16))],
+            table_encrypt[3][@as(u8, @truncate(s1 >> 24))],
         };
         var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
         x = .{
-            table_encrypt[0][@truncate(u8, s3)],
-            table_encrypt[1][@truncate(u8, s0 >> 8)],
-            table_encrypt[2][@truncate(u8, s1 >> 16)],
-            table_encrypt[3][@truncate(u8, s2 >> 24)],
+            table_encrypt[0][@as(u8, @truncate(s3))],
+            table_encrypt[1][@as(u8, @truncate(s0 >> 8))],
+            table_encrypt[2][@as(u8, @truncate(s1 >> 16))],
+            table_encrypt[3][@as(u8, @truncate(s2 >> 24))],
         };
         var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
 
@@ -122,13 +122,13 @@ pub const Block = struct {
 
         // Last round uses s-box directly and XORs to produce output.
         var x: [4]u8 = undefined;
-        x = sbox_lookup(&sbox_encrypt, @truncate(u8, s3 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s0));
+        x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0)));
         var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
-        x = sbox_lookup(&sbox_encrypt, @truncate(u8, s0 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s1));
+        x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1)));
         var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
-        x = sbox_lookup(&sbox_encrypt, @truncate(u8, s1 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s2));
+        x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2)));
         var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
-        x = sbox_lookup(&sbox_encrypt, @truncate(u8, s2 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s3));
+        x = sbox_lookup(&sbox_encrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3)));
         var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
 
         t0 ^= round_key.repr[0];
@@ -147,13 +147,13 @@ pub const Block = struct {
         const s3 = block.repr[3];
 
         var x: [4]u32 = undefined;
-        x = table_lookup(&table_decrypt, @truncate(u8, s0), @truncate(u8, s3 >> 8), @truncate(u8, s2 >> 16), @truncate(u8, s1 >> 24));
+        x = table_lookup(&table_decrypt, @as(u8, @truncate(s0)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s1 >> 24)));
         var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
-        x = table_lookup(&table_decrypt, @truncate(u8, s1), @truncate(u8, s0 >> 8), @truncate(u8, s3 >> 16), @truncate(u8, s2 >> 24));
+        x = table_lookup(&table_decrypt, @as(u8, @truncate(s1)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s2 >> 24)));
         var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
-        x = table_lookup(&table_decrypt, @truncate(u8, s2), @truncate(u8, s1 >> 8), @truncate(u8, s0 >> 16), @truncate(u8, s3 >> 24));
+        x = table_lookup(&table_decrypt, @as(u8, @truncate(s2)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s3 >> 24)));
         var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
-        x = table_lookup(&table_decrypt, @truncate(u8, s3), @truncate(u8, s2 >> 8), @truncate(u8, s1 >> 16), @truncate(u8, s0 >> 24));
+        x = table_lookup(&table_decrypt, @as(u8, @truncate(s3)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s0 >> 24)));
         var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
 
         t0 ^= round_key.repr[0];
@@ -173,31 +173,31 @@ pub const Block = struct {
 
         var x: [4]u32 = undefined;
         x = .{
-            table_decrypt[0][@truncate(u8, s0)],
-            table_decrypt[1][@truncate(u8, s3 >> 8)],
-            table_decrypt[2][@truncate(u8, s2 >> 16)],
-            table_decrypt[3][@truncate(u8, s1 >> 24)],
+            table_decrypt[0][@as(u8, @truncate(s0))],
+            table_decrypt[1][@as(u8, @truncate(s3 >> 8))],
+            table_decrypt[2][@as(u8, @truncate(s2 >> 16))],
+            table_decrypt[3][@as(u8, @truncate(s1 >> 24))],
         };
         var t0 = x[0] ^ x[1] ^ x[2] ^ x[3];
         x = .{
-            table_decrypt[0][@truncate(u8, s1)],
-            table_decrypt[1][@truncate(u8, s0 >> 8)],
-            table_decrypt[2][@truncate(u8, s3 >> 16)],
-            table_decrypt[3][@truncate(u8, s2 >> 24)],
+            table_decrypt[0][@as(u8, @truncate(s1))],
+            table_decrypt[1][@as(u8, @truncate(s0 >> 8))],
+            table_decrypt[2][@as(u8, @truncate(s3 >> 16))],
+            table_decrypt[3][@as(u8, @truncate(s2 >> 24))],
         };
         var t1 = x[0] ^ x[1] ^ x[2] ^ x[3];
         x = .{
-            table_decrypt[0][@truncate(u8, s2)],
-            table_decrypt[1][@truncate(u8, s1 >> 8)],
-            table_decrypt[2][@truncate(u8, s0 >> 16)],
-            table_decrypt[3][@truncate(u8, s3 >> 24)],
+            table_decrypt[0][@as(u8, @truncate(s2))],
+            table_decrypt[1][@as(u8, @truncate(s1 >> 8))],
+            table_decrypt[2][@as(u8, @truncate(s0 >> 16))],
+            table_decrypt[3][@as(u8, @truncate(s3 >> 24))],
         };
         var t2 = x[0] ^ x[1] ^ x[2] ^ x[3];
         x = .{
-            table_decrypt[0][@truncate(u8, s3)],
-            table_decrypt[1][@truncate(u8, s2 >> 8)],
-            table_decrypt[2][@truncate(u8, s1 >> 16)],
-            table_decrypt[3][@truncate(u8, s0 >> 24)],
+            table_decrypt[0][@as(u8, @truncate(s3))],
+            table_decrypt[1][@as(u8, @truncate(s2 >> 8))],
+            table_decrypt[2][@as(u8, @truncate(s1 >> 16))],
+            table_decrypt[3][@as(u8, @truncate(s0 >> 24))],
         };
         var t3 = x[0] ^ x[1] ^ x[2] ^ x[3];
 
@@ -218,13 +218,13 @@ pub const Block = struct {
 
         // Last round uses s-box directly and XORs to produce output.
         var x: [4]u8 = undefined;
-        x = sbox_lookup(&sbox_decrypt, @truncate(u8, s1 >> 24), @truncate(u8, s2 >> 16), @truncate(u8, s3 >> 8), @truncate(u8, s0));
+        x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s1 >> 24)), @as(u8, @truncate(s2 >> 16)), @as(u8, @truncate(s3 >> 8)), @as(u8, @truncate(s0)));
         var t0 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
-        x = sbox_lookup(&sbox_decrypt, @truncate(u8, s2 >> 24), @truncate(u8, s3 >> 16), @truncate(u8, s0 >> 8), @truncate(u8, s1));
+        x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s2 >> 24)), @as(u8, @truncate(s3 >> 16)), @as(u8, @truncate(s0 >> 8)), @as(u8, @truncate(s1)));
         var t1 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
-        x = sbox_lookup(&sbox_decrypt, @truncate(u8, s3 >> 24), @truncate(u8, s0 >> 16), @truncate(u8, s1 >> 8), @truncate(u8, s2));
+        x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s3 >> 24)), @as(u8, @truncate(s0 >> 16)), @as(u8, @truncate(s1 >> 8)), @as(u8, @truncate(s2)));
         var t2 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
-        x = sbox_lookup(&sbox_decrypt, @truncate(u8, s0 >> 24), @truncate(u8, s1 >> 16), @truncate(u8, s2 >> 8), @truncate(u8, s3));
+        x = sbox_lookup(&sbox_decrypt, @as(u8, @truncate(s0 >> 24)), @as(u8, @truncate(s1 >> 16)), @as(u8, @truncate(s2 >> 8)), @as(u8, @truncate(s3)));
         var t3 = @as(u32, x[0]) << 24 | @as(u32, x[1]) << 16 | @as(u32, x[2]) << 8 | @as(u32, x[3]);
 
         t0 ^= round_key.repr[0];
@@ -348,7 +348,7 @@ fn KeySchedule(comptime Aes: type) type {
             const subw = struct {
                 // Apply sbox_encrypt to each byte in w.
                 fn func(w: u32) u32 {
-                    const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, w), @truncate(u8, w >> 8), @truncate(u8, w >> 16), @truncate(u8, w >> 24));
+                    const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(w)), @as(u8, @truncate(w >> 8)), @as(u8, @truncate(w >> 16)), @as(u8, @truncate(w >> 24)));
                     return @as(u32, x[3]) << 24 | @as(u32, x[2]) << 16 | @as(u32, x[1]) << 8 | @as(u32, x[0]);
                 }
             }.func;
@@ -386,7 +386,7 @@ fn KeySchedule(comptime Aes: type) type {
                 inline while (j < 4) : (j += 1) {
                     var rk = round_keys[(ei + j) / 4].repr[(ei + j) % 4];
                     if (i > 0 and i + 4 < total_words) {
-                        const x = sbox_lookup(&sbox_key_schedule, @truncate(u8, rk >> 24), @truncate(u8, rk >> 16), @truncate(u8, rk >> 8), @truncate(u8, rk));
+                        const x = sbox_lookup(&sbox_key_schedule, @as(u8, @truncate(rk >> 24)), @as(u8, @truncate(rk >> 16)), @as(u8, @truncate(rk >> 8)), @as(u8, @truncate(rk)));
                         const y = table_lookup(&table_decrypt, x[3], x[2], x[1], x[0]);
                         rk = y[0] ^ y[1] ^ y[2] ^ y[3];
                     }
@@ -664,7 +664,7 @@ fn mul(a: u8, b: u8) u8 {
         }
     }
 
-    return @truncate(u8, s);
+    return @as(u8, @truncate(s));
 }
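
The body of `mul` is elided by the hunk; only the final narrowing back to u8 is shown. For orientation, a minimal sketch of a GF(2^8) multiply reduced modulo the AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11b), written in the new cast syntax; the step-by-step reduction here is an assumption for illustration, not necessarily the stdlib's exact strategy:

fn gmulSketch(a: u8, b: u8) u8 {
    var s: u32 = 0; // wider accumulator, truncated at the end as above
    var aa: u32 = a;
    var bb: u32 = b;
    while (bb != 0) : (bb >>= 1) {
        if (@as(u1, @truncate(bb)) != 0) s ^= aa;
        aa <<= 1;
        if (aa & 0x100 != 0) aa ^= 0x11b; // reduce when bit 8 appears
    }
    return @as(u8, @truncate(s));
}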
 
 const cache_line_bytes = 64;
lib/std/crypto/Certificate/Bundle/macos.zig
@@ -21,7 +21,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
     const reader = stream.reader();
 
     const db_header = try reader.readStructBig(ApplDbHeader);
-    assert(mem.eql(u8, "kych", &@bitCast([4]u8, db_header.signature)));
+    assert(mem.eql(u8, "kych", &@as([4]u8, @bitCast(db_header.signature))));
 
     try stream.seekTo(db_header.schema_offset);
 
@@ -42,7 +42,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
 
         const table_header = try reader.readStructBig(TableHeader);
 
-        if (@enumFromInt(std.os.darwin.cssm.DB_RECORDTYPE, table_header.table_id) != .X509_CERTIFICATE) {
+        if (@as(std.os.darwin.cssm.DB_RECORDTYPE, @enumFromInt(table_header.table_id)) != .X509_CERTIFICATE) {
             continue;
         }
 
@@ -61,7 +61,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
 
             try cb.bytes.ensureUnusedCapacity(gpa, cert_header.cert_size);
 
-            const cert_start = @intCast(u32, cb.bytes.items.len);
+            const cert_start = @as(u32, @intCast(cb.bytes.items.len));
             const dest_buf = cb.bytes.allocatedSlice()[cert_start..];
             cb.bytes.items.len += try reader.readAtLeast(dest_buf, cert_header.cert_size);
 
lib/std/crypto/Certificate/Bundle.zig
@@ -131,7 +131,7 @@ pub fn rescanWindows(cb: *Bundle, gpa: Allocator) RescanWindowsError!void {
 
     var ctx = w.crypt32.CertEnumCertificatesInStore(store, null);
     while (ctx) |context| : (ctx = w.crypt32.CertEnumCertificatesInStore(store, ctx)) {
-        const decoded_start = @intCast(u32, cb.bytes.items.len);
+        const decoded_start = @as(u32, @intCast(cb.bytes.items.len));
         const encoded_cert = context.pbCertEncoded[0..context.cbCertEncoded];
         try cb.bytes.appendSlice(gpa, encoded_cert);
         try cb.parseCert(gpa, decoded_start, now_sec);
@@ -213,7 +213,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
     const needed_capacity = std.math.cast(u32, decoded_size_upper_bound + size) orelse
         return error.CertificateAuthorityBundleTooBig;
     try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity);
-    const end_reserved = @intCast(u32, cb.bytes.items.len + decoded_size_upper_bound);
+    const end_reserved = @as(u32, @intCast(cb.bytes.items.len + decoded_size_upper_bound));
     const buffer = cb.bytes.allocatedSlice()[end_reserved..];
     const end_index = try file.readAll(buffer);
     const encoded_bytes = buffer[0..end_index];
@@ -230,7 +230,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
             return error.MissingEndCertificateMarker;
         start_index = cert_end + end_marker.len;
         const encoded_cert = mem.trim(u8, encoded_bytes[cert_start..cert_end], " \t\r\n");
-        const decoded_start = @intCast(u32, cb.bytes.items.len);
+        const decoded_start = @as(u32, @intCast(cb.bytes.items.len));
         const dest_buf = cb.bytes.allocatedSlice()[decoded_start..];
         cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert);
         try cb.parseCert(gpa, decoded_start, now_sec);
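
A note on the buffer discipline in `addCertsFromFile`: the decoded output and the encoded PEM input share one allocation, with the input parked past the reserved decode region so `base64.decode` never overwrites bytes it has yet to read. Schematically:

//   cb.bytes.allocatedSlice():
//   [ decoded certs so far | reserved for decoded output | encoded PEM input ]
//     0 .. items.len         items.len .. end_reserved     end_reserved ..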
lib/std/crypto/pcurves/p256/p256_64.zig
@@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
     @setRuntimeSafety(mode == .Debug);
 
     const x = @as(u128, arg1) * @as(u128, arg2);
-    out1.* = @truncate(u64, x);
-    out2.* = @truncate(u64, x >> 64);
+    out1.* = @as(u64, @truncate(x));
+    out2.* = @as(u64, @truncate(x >> 64));
 }
 
 /// The function cmovznzU64 is a single-word conditional move.
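
The diff shows only the doc comment for `cmovznzU64`. A sketch of such a branch-free conditional move under the new syntax, assuming the fiat-crypto convention that the output is `arg2` when the selector is zero and `arg3` otherwise:

inline fn cmovznzU64Sketch(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
    // Expand the selector bit into an all-zeros or all-ones mask, then blend.
    const mask = @as(u64, 0) -% @as(u64, arg1);
    out1.* = (mask & arg3) | (~mask & arg2);
}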
@@ -1355,62 +1355,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
     const x2 = (arg1[2]);
     const x3 = (arg1[1]);
     const x4 = (arg1[0]);
-    const x5 = @truncate(u8, (x4 & @as(u64, 0xff)));
+    const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff))));
     const x6 = (x4 >> 8);
-    const x7 = @truncate(u8, (x6 & @as(u64, 0xff)));
+    const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff))));
     const x8 = (x6 >> 8);
-    const x9 = @truncate(u8, (x8 & @as(u64, 0xff)));
+    const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff))));
     const x10 = (x8 >> 8);
-    const x11 = @truncate(u8, (x10 & @as(u64, 0xff)));
+    const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff))));
     const x12 = (x10 >> 8);
-    const x13 = @truncate(u8, (x12 & @as(u64, 0xff)));
+    const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff))));
     const x14 = (x12 >> 8);
-    const x15 = @truncate(u8, (x14 & @as(u64, 0xff)));
+    const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff))));
     const x16 = (x14 >> 8);
-    const x17 = @truncate(u8, (x16 & @as(u64, 0xff)));
-    const x18 = @truncate(u8, (x16 >> 8));
-    const x19 = @truncate(u8, (x3 & @as(u64, 0xff)));
+    const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff))));
+    const x18 = @as(u8, @truncate((x16 >> 8)));
+    const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff))));
     const x20 = (x3 >> 8);
-    const x21 = @truncate(u8, (x20 & @as(u64, 0xff)));
+    const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff))));
     const x22 = (x20 >> 8);
-    const x23 = @truncate(u8, (x22 & @as(u64, 0xff)));
+    const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff))));
     const x24 = (x22 >> 8);
-    const x25 = @truncate(u8, (x24 & @as(u64, 0xff)));
+    const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff))));
     const x26 = (x24 >> 8);
-    const x27 = @truncate(u8, (x26 & @as(u64, 0xff)));
+    const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff))));
     const x28 = (x26 >> 8);
-    const x29 = @truncate(u8, (x28 & @as(u64, 0xff)));
+    const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff))));
     const x30 = (x28 >> 8);
-    const x31 = @truncate(u8, (x30 & @as(u64, 0xff)));
-    const x32 = @truncate(u8, (x30 >> 8));
-    const x33 = @truncate(u8, (x2 & @as(u64, 0xff)));
+    const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff))));
+    const x32 = @as(u8, @truncate((x30 >> 8)));
+    const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff))));
     const x34 = (x2 >> 8);
-    const x35 = @truncate(u8, (x34 & @as(u64, 0xff)));
+    const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff))));
     const x36 = (x34 >> 8);
-    const x37 = @truncate(u8, (x36 & @as(u64, 0xff)));
+    const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff))));
     const x38 = (x36 >> 8);
-    const x39 = @truncate(u8, (x38 & @as(u64, 0xff)));
+    const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff))));
     const x40 = (x38 >> 8);
-    const x41 = @truncate(u8, (x40 & @as(u64, 0xff)));
+    const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff))));
     const x42 = (x40 >> 8);
-    const x43 = @truncate(u8, (x42 & @as(u64, 0xff)));
+    const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff))));
     const x44 = (x42 >> 8);
-    const x45 = @truncate(u8, (x44 & @as(u64, 0xff)));
-    const x46 = @truncate(u8, (x44 >> 8));
-    const x47 = @truncate(u8, (x1 & @as(u64, 0xff)));
+    const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff))));
+    const x46 = @as(u8, @truncate((x44 >> 8)));
+    const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff))));
     const x48 = (x1 >> 8);
-    const x49 = @truncate(u8, (x48 & @as(u64, 0xff)));
+    const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff))));
     const x50 = (x48 >> 8);
-    const x51 = @truncate(u8, (x50 & @as(u64, 0xff)));
+    const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff))));
     const x52 = (x50 >> 8);
-    const x53 = @truncate(u8, (x52 & @as(u64, 0xff)));
+    const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff))));
     const x54 = (x52 >> 8);
-    const x55 = @truncate(u8, (x54 & @as(u64, 0xff)));
+    const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff))));
     const x56 = (x54 >> 8);
-    const x57 = @truncate(u8, (x56 & @as(u64, 0xff)));
+    const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff))));
     const x58 = (x56 >> 8);
-    const x59 = @truncate(u8, (x58 & @as(u64, 0xff)));
-    const x60 = @truncate(u8, (x58 >> 8));
+    const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff))));
+    const x60 = @as(u8, @truncate((x58 >> 8)));
     out1[0] = x5;
     out1[1] = x7;
     out1[2] = x9;
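
The unrolled truncate-and-shift chain above serializes the four u64 limbs little-endian, one byte at a time. A compact sketch of the same effect (a convenience illustration, not the fiat-generated form):

const std = @import("std");

fn toBytesSketch(out1: *[32]u8, arg1: [4]u64) void {
    // Lowest limb first; each limb contributes eight little-endian bytes.
    for (arg1, 0..) |limb, i| {
        std.mem.writeIntLittle(u64, out1[i * 8 ..][0..8], limb);
    }
}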
@@ -1593,7 +1593,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     var x1: u64 = undefined;
     var x2: u1 = undefined;
     addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1));
-    const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1))));
+    const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1)))));
     var x4: u64 = undefined;
     var x5: u1 = undefined;
     addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1));
@@ -1707,7 +1707,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     cmovznzU64(&x72, x3, (arg5[2]), x66);
     var x73: u64 = undefined;
     cmovznzU64(&x73, x3, (arg5[3]), x68);
-    const x74 = @truncate(u1, (x22 & @as(u64, 0x1)));
+    const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1))));
     var x75: u64 = undefined;
     cmovznzU64(&x75, x74, @as(u64, 0x0), x7);
     var x76: u64 = undefined;
lib/std/crypto/pcurves/p256/p256_scalar_64.zig
@@ -119,8 +119,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
     @setRuntimeSafety(mode == .Debug);
 
     const x = @as(u128, arg1) * @as(u128, arg2);
-    out1.* = @truncate(u64, x);
-    out2.* = @truncate(u64, x >> 64);
+    out1.* = @as(u64, @truncate(x));
+    out2.* = @as(u64, @truncate(x >> 64));
 }
 
 /// The function cmovznzU64 is a single-word conditional move.
@@ -1559,62 +1559,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
     const x2 = (arg1[2]);
     const x3 = (arg1[1]);
     const x4 = (arg1[0]);
-    const x5 = @truncate(u8, (x4 & @as(u64, 0xff)));
+    const x5 = @as(u8, @truncate((x4 & @as(u64, 0xff))));
     const x6 = (x4 >> 8);
-    const x7 = @truncate(u8, (x6 & @as(u64, 0xff)));
+    const x7 = @as(u8, @truncate((x6 & @as(u64, 0xff))));
     const x8 = (x6 >> 8);
-    const x9 = @truncate(u8, (x8 & @as(u64, 0xff)));
+    const x9 = @as(u8, @truncate((x8 & @as(u64, 0xff))));
     const x10 = (x8 >> 8);
-    const x11 = @truncate(u8, (x10 & @as(u64, 0xff)));
+    const x11 = @as(u8, @truncate((x10 & @as(u64, 0xff))));
     const x12 = (x10 >> 8);
-    const x13 = @truncate(u8, (x12 & @as(u64, 0xff)));
+    const x13 = @as(u8, @truncate((x12 & @as(u64, 0xff))));
     const x14 = (x12 >> 8);
-    const x15 = @truncate(u8, (x14 & @as(u64, 0xff)));
+    const x15 = @as(u8, @truncate((x14 & @as(u64, 0xff))));
     const x16 = (x14 >> 8);
-    const x17 = @truncate(u8, (x16 & @as(u64, 0xff)));
-    const x18 = @truncate(u8, (x16 >> 8));
-    const x19 = @truncate(u8, (x3 & @as(u64, 0xff)));
+    const x17 = @as(u8, @truncate((x16 & @as(u64, 0xff))));
+    const x18 = @as(u8, @truncate((x16 >> 8)));
+    const x19 = @as(u8, @truncate((x3 & @as(u64, 0xff))));
     const x20 = (x3 >> 8);
-    const x21 = @truncate(u8, (x20 & @as(u64, 0xff)));
+    const x21 = @as(u8, @truncate((x20 & @as(u64, 0xff))));
     const x22 = (x20 >> 8);
-    const x23 = @truncate(u8, (x22 & @as(u64, 0xff)));
+    const x23 = @as(u8, @truncate((x22 & @as(u64, 0xff))));
     const x24 = (x22 >> 8);
-    const x25 = @truncate(u8, (x24 & @as(u64, 0xff)));
+    const x25 = @as(u8, @truncate((x24 & @as(u64, 0xff))));
     const x26 = (x24 >> 8);
-    const x27 = @truncate(u8, (x26 & @as(u64, 0xff)));
+    const x27 = @as(u8, @truncate((x26 & @as(u64, 0xff))));
     const x28 = (x26 >> 8);
-    const x29 = @truncate(u8, (x28 & @as(u64, 0xff)));
+    const x29 = @as(u8, @truncate((x28 & @as(u64, 0xff))));
     const x30 = (x28 >> 8);
-    const x31 = @truncate(u8, (x30 & @as(u64, 0xff)));
-    const x32 = @truncate(u8, (x30 >> 8));
-    const x33 = @truncate(u8, (x2 & @as(u64, 0xff)));
+    const x31 = @as(u8, @truncate((x30 & @as(u64, 0xff))));
+    const x32 = @as(u8, @truncate((x30 >> 8)));
+    const x33 = @as(u8, @truncate((x2 & @as(u64, 0xff))));
     const x34 = (x2 >> 8);
-    const x35 = @truncate(u8, (x34 & @as(u64, 0xff)));
+    const x35 = @as(u8, @truncate((x34 & @as(u64, 0xff))));
     const x36 = (x34 >> 8);
-    const x37 = @truncate(u8, (x36 & @as(u64, 0xff)));
+    const x37 = @as(u8, @truncate((x36 & @as(u64, 0xff))));
     const x38 = (x36 >> 8);
-    const x39 = @truncate(u8, (x38 & @as(u64, 0xff)));
+    const x39 = @as(u8, @truncate((x38 & @as(u64, 0xff))));
     const x40 = (x38 >> 8);
-    const x41 = @truncate(u8, (x40 & @as(u64, 0xff)));
+    const x41 = @as(u8, @truncate((x40 & @as(u64, 0xff))));
     const x42 = (x40 >> 8);
-    const x43 = @truncate(u8, (x42 & @as(u64, 0xff)));
+    const x43 = @as(u8, @truncate((x42 & @as(u64, 0xff))));
     const x44 = (x42 >> 8);
-    const x45 = @truncate(u8, (x44 & @as(u64, 0xff)));
-    const x46 = @truncate(u8, (x44 >> 8));
-    const x47 = @truncate(u8, (x1 & @as(u64, 0xff)));
+    const x45 = @as(u8, @truncate((x44 & @as(u64, 0xff))));
+    const x46 = @as(u8, @truncate((x44 >> 8)));
+    const x47 = @as(u8, @truncate((x1 & @as(u64, 0xff))));
     const x48 = (x1 >> 8);
-    const x49 = @truncate(u8, (x48 & @as(u64, 0xff)));
+    const x49 = @as(u8, @truncate((x48 & @as(u64, 0xff))));
     const x50 = (x48 >> 8);
-    const x51 = @truncate(u8, (x50 & @as(u64, 0xff)));
+    const x51 = @as(u8, @truncate((x50 & @as(u64, 0xff))));
     const x52 = (x50 >> 8);
-    const x53 = @truncate(u8, (x52 & @as(u64, 0xff)));
+    const x53 = @as(u8, @truncate((x52 & @as(u64, 0xff))));
     const x54 = (x52 >> 8);
-    const x55 = @truncate(u8, (x54 & @as(u64, 0xff)));
+    const x55 = @as(u8, @truncate((x54 & @as(u64, 0xff))));
     const x56 = (x54 >> 8);
-    const x57 = @truncate(u8, (x56 & @as(u64, 0xff)));
+    const x57 = @as(u8, @truncate((x56 & @as(u64, 0xff))));
     const x58 = (x56 >> 8);
-    const x59 = @truncate(u8, (x58 & @as(u64, 0xff)));
-    const x60 = @truncate(u8, (x58 >> 8));
+    const x59 = @as(u8, @truncate((x58 & @as(u64, 0xff))));
+    const x60 = @as(u8, @truncate((x58 >> 8)));
     out1[0] = x5;
     out1[1] = x7;
     out1[2] = x9;
@@ -1797,7 +1797,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     var x1: u64 = undefined;
     var x2: u1 = undefined;
     addcarryxU64(&x1, &x2, 0x0, (~arg1), @as(u64, 0x1));
-    const x3 = @truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & @as(u64, 0x1)));
+    const x3 = @as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & @as(u64, 0x1))));
     var x4: u64 = undefined;
     var x5: u1 = undefined;
     addcarryxU64(&x4, &x5, 0x0, (~arg1), @as(u64, 0x1));
@@ -1911,7 +1911,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     cmovznzU64(&x72, x3, (arg5[2]), x66);
     var x73: u64 = undefined;
     cmovznzU64(&x73, x3, (arg5[3]), x68);
-    const x74 = @truncate(u1, (x22 & @as(u64, 0x1)));
+    const x74 = @as(u1, @truncate((x22 & @as(u64, 0x1))));
     var x75: u64 = undefined;
     cmovznzU64(&x75, x74, @as(u64, 0x0), x7);
     var x76: u64 = undefined;
lib/std/crypto/pcurves/p384/p384_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
     @setRuntimeSafety(mode == .Debug);
 
     const x = @as(u128, arg1) * @as(u128, arg2);
-    out1.* = @truncate(u64, x);
-    out2.* = @truncate(u64, x >> 64);
+    out1.* = @as(u64, @truncate(x));
+    out2.* = @as(u64, @truncate(x >> 64));
 }
 
 /// The function cmovznzU64 is a single-word conditional move.
@@ -2928,90 +2928,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void {
     const x4 = (arg1[2]);
     const x5 = (arg1[1]);
     const x6 = (arg1[0]);
-    const x7 = @truncate(u8, (x6 & 0xff));
+    const x7 = @as(u8, @truncate((x6 & 0xff)));
     const x8 = (x6 >> 8);
-    const x9 = @truncate(u8, (x8 & 0xff));
+    const x9 = @as(u8, @truncate((x8 & 0xff)));
     const x10 = (x8 >> 8);
-    const x11 = @truncate(u8, (x10 & 0xff));
+    const x11 = @as(u8, @truncate((x10 & 0xff)));
     const x12 = (x10 >> 8);
-    const x13 = @truncate(u8, (x12 & 0xff));
+    const x13 = @as(u8, @truncate((x12 & 0xff)));
     const x14 = (x12 >> 8);
-    const x15 = @truncate(u8, (x14 & 0xff));
+    const x15 = @as(u8, @truncate((x14 & 0xff)));
     const x16 = (x14 >> 8);
-    const x17 = @truncate(u8, (x16 & 0xff));
+    const x17 = @as(u8, @truncate((x16 & 0xff)));
     const x18 = (x16 >> 8);
-    const x19 = @truncate(u8, (x18 & 0xff));
-    const x20 = @truncate(u8, (x18 >> 8));
-    const x21 = @truncate(u8, (x5 & 0xff));
+    const x19 = @as(u8, @truncate((x18 & 0xff)));
+    const x20 = @as(u8, @truncate((x18 >> 8)));
+    const x21 = @as(u8, @truncate((x5 & 0xff)));
     const x22 = (x5 >> 8);
-    const x23 = @truncate(u8, (x22 & 0xff));
+    const x23 = @as(u8, @truncate((x22 & 0xff)));
     const x24 = (x22 >> 8);
-    const x25 = @truncate(u8, (x24 & 0xff));
+    const x25 = @as(u8, @truncate((x24 & 0xff)));
     const x26 = (x24 >> 8);
-    const x27 = @truncate(u8, (x26 & 0xff));
+    const x27 = @as(u8, @truncate((x26 & 0xff)));
     const x28 = (x26 >> 8);
-    const x29 = @truncate(u8, (x28 & 0xff));
+    const x29 = @as(u8, @truncate((x28 & 0xff)));
     const x30 = (x28 >> 8);
-    const x31 = @truncate(u8, (x30 & 0xff));
+    const x31 = @as(u8, @truncate((x30 & 0xff)));
     const x32 = (x30 >> 8);
-    const x33 = @truncate(u8, (x32 & 0xff));
-    const x34 = @truncate(u8, (x32 >> 8));
-    const x35 = @truncate(u8, (x4 & 0xff));
+    const x33 = @as(u8, @truncate((x32 & 0xff)));
+    const x34 = @as(u8, @truncate((x32 >> 8)));
+    const x35 = @as(u8, @truncate((x4 & 0xff)));
     const x36 = (x4 >> 8);
-    const x37 = @truncate(u8, (x36 & 0xff));
+    const x37 = @as(u8, @truncate((x36 & 0xff)));
     const x38 = (x36 >> 8);
-    const x39 = @truncate(u8, (x38 & 0xff));
+    const x39 = @as(u8, @truncate((x38 & 0xff)));
     const x40 = (x38 >> 8);
-    const x41 = @truncate(u8, (x40 & 0xff));
+    const x41 = @as(u8, @truncate((x40 & 0xff)));
     const x42 = (x40 >> 8);
-    const x43 = @truncate(u8, (x42 & 0xff));
+    const x43 = @as(u8, @truncate((x42 & 0xff)));
     const x44 = (x42 >> 8);
-    const x45 = @truncate(u8, (x44 & 0xff));
+    const x45 = @as(u8, @truncate((x44 & 0xff)));
     const x46 = (x44 >> 8);
-    const x47 = @truncate(u8, (x46 & 0xff));
-    const x48 = @truncate(u8, (x46 >> 8));
-    const x49 = @truncate(u8, (x3 & 0xff));
+    const x47 = @as(u8, @truncate((x46 & 0xff)));
+    const x48 = @as(u8, @truncate((x46 >> 8)));
+    const x49 = @as(u8, @truncate((x3 & 0xff)));
     const x50 = (x3 >> 8);
-    const x51 = @truncate(u8, (x50 & 0xff));
+    const x51 = @as(u8, @truncate((x50 & 0xff)));
     const x52 = (x50 >> 8);
-    const x53 = @truncate(u8, (x52 & 0xff));
+    const x53 = @as(u8, @truncate((x52 & 0xff)));
     const x54 = (x52 >> 8);
-    const x55 = @truncate(u8, (x54 & 0xff));
+    const x55 = @as(u8, @truncate((x54 & 0xff)));
     const x56 = (x54 >> 8);
-    const x57 = @truncate(u8, (x56 & 0xff));
+    const x57 = @as(u8, @truncate((x56 & 0xff)));
     const x58 = (x56 >> 8);
-    const x59 = @truncate(u8, (x58 & 0xff));
+    const x59 = @as(u8, @truncate((x58 & 0xff)));
     const x60 = (x58 >> 8);
-    const x61 = @truncate(u8, (x60 & 0xff));
-    const x62 = @truncate(u8, (x60 >> 8));
-    const x63 = @truncate(u8, (x2 & 0xff));
+    const x61 = @as(u8, @truncate((x60 & 0xff)));
+    const x62 = @as(u8, @truncate((x60 >> 8)));
+    const x63 = @as(u8, @truncate((x2 & 0xff)));
     const x64 = (x2 >> 8);
-    const x65 = @truncate(u8, (x64 & 0xff));
+    const x65 = @as(u8, @truncate((x64 & 0xff)));
     const x66 = (x64 >> 8);
-    const x67 = @truncate(u8, (x66 & 0xff));
+    const x67 = @as(u8, @truncate((x66 & 0xff)));
     const x68 = (x66 >> 8);
-    const x69 = @truncate(u8, (x68 & 0xff));
+    const x69 = @as(u8, @truncate((x68 & 0xff)));
     const x70 = (x68 >> 8);
-    const x71 = @truncate(u8, (x70 & 0xff));
+    const x71 = @as(u8, @truncate((x70 & 0xff)));
     const x72 = (x70 >> 8);
-    const x73 = @truncate(u8, (x72 & 0xff));
+    const x73 = @as(u8, @truncate((x72 & 0xff)));
     const x74 = (x72 >> 8);
-    const x75 = @truncate(u8, (x74 & 0xff));
-    const x76 = @truncate(u8, (x74 >> 8));
-    const x77 = @truncate(u8, (x1 & 0xff));
+    const x75 = @as(u8, @truncate((x74 & 0xff)));
+    const x76 = @as(u8, @truncate((x74 >> 8)));
+    const x77 = @as(u8, @truncate((x1 & 0xff)));
     const x78 = (x1 >> 8);
-    const x79 = @truncate(u8, (x78 & 0xff));
+    const x79 = @as(u8, @truncate((x78 & 0xff)));
     const x80 = (x78 >> 8);
-    const x81 = @truncate(u8, (x80 & 0xff));
+    const x81 = @as(u8, @truncate((x80 & 0xff)));
     const x82 = (x80 >> 8);
-    const x83 = @truncate(u8, (x82 & 0xff));
+    const x83 = @as(u8, @truncate((x82 & 0xff)));
     const x84 = (x82 >> 8);
-    const x85 = @truncate(u8, (x84 & 0xff));
+    const x85 = @as(u8, @truncate((x84 & 0xff)));
     const x86 = (x84 >> 8);
-    const x87 = @truncate(u8, (x86 & 0xff));
+    const x87 = @as(u8, @truncate((x86 & 0xff)));
     const x88 = (x86 >> 8);
-    const x89 = @truncate(u8, (x88 & 0xff));
-    const x90 = @truncate(u8, (x88 >> 8));
+    const x89 = @as(u8, @truncate((x88 & 0xff)));
+    const x90 = @as(u8, @truncate((x88 >> 8)));
     out1[0] = x7;
     out1[1] = x9;
     out1[2] = x11;
@@ -3246,7 +3246,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
     var x1: u64 = undefined;
     var x2: u1 = undefined;
     addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
-    const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+    const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
     var x4: u64 = undefined;
     var x5: u1 = undefined;
     addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -3408,7 +3408,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
     cmovznzU64(&x102, x3, (arg5[4]), x94);
     var x103: u64 = undefined;
     cmovznzU64(&x103, x3, (arg5[5]), x96);
-    const x104 = @truncate(u1, (x28 & 0x1));
+    const x104 = @as(u1, @truncate((x28 & 0x1)));
     var x105: u64 = undefined;
     cmovznzU64(&x105, x104, 0x0, x7);
     var x106: u64 = undefined;
lib/std/crypto/pcurves/p384/p384_scalar_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
     @setRuntimeSafety(mode == .Debug);
 
     const x = @as(u128, arg1) * @as(u128, arg2);
-    out1.* = @truncate(u64, x);
-    out2.* = @truncate(u64, x >> 64);
+    out1.* = @as(u64, @truncate(x));
+    out2.* = @as(u64, @truncate(x >> 64));
 }
 
 /// The function cmovznzU64 is a single-word conditional move.
@@ -2982,90 +2982,90 @@ pub fn toBytes(out1: *[48]u8, arg1: [6]u64) void {
     const x4 = (arg1[2]);
     const x5 = (arg1[1]);
     const x6 = (arg1[0]);
-    const x7 = @truncate(u8, (x6 & 0xff));
+    const x7 = @as(u8, @truncate((x6 & 0xff)));
     const x8 = (x6 >> 8);
-    const x9 = @truncate(u8, (x8 & 0xff));
+    const x9 = @as(u8, @truncate((x8 & 0xff)));
     const x10 = (x8 >> 8);
-    const x11 = @truncate(u8, (x10 & 0xff));
+    const x11 = @as(u8, @truncate((x10 & 0xff)));
     const x12 = (x10 >> 8);
-    const x13 = @truncate(u8, (x12 & 0xff));
+    const x13 = @as(u8, @truncate((x12 & 0xff)));
     const x14 = (x12 >> 8);
-    const x15 = @truncate(u8, (x14 & 0xff));
+    const x15 = @as(u8, @truncate((x14 & 0xff)));
     const x16 = (x14 >> 8);
-    const x17 = @truncate(u8, (x16 & 0xff));
+    const x17 = @as(u8, @truncate((x16 & 0xff)));
     const x18 = (x16 >> 8);
-    const x19 = @truncate(u8, (x18 & 0xff));
-    const x20 = @truncate(u8, (x18 >> 8));
-    const x21 = @truncate(u8, (x5 & 0xff));
+    const x19 = @as(u8, @truncate((x18 & 0xff)));
+    const x20 = @as(u8, @truncate((x18 >> 8)));
+    const x21 = @as(u8, @truncate((x5 & 0xff)));
     const x22 = (x5 >> 8);
-    const x23 = @truncate(u8, (x22 & 0xff));
+    const x23 = @as(u8, @truncate((x22 & 0xff)));
     const x24 = (x22 >> 8);
-    const x25 = @truncate(u8, (x24 & 0xff));
+    const x25 = @as(u8, @truncate((x24 & 0xff)));
     const x26 = (x24 >> 8);
-    const x27 = @truncate(u8, (x26 & 0xff));
+    const x27 = @as(u8, @truncate((x26 & 0xff)));
     const x28 = (x26 >> 8);
-    const x29 = @truncate(u8, (x28 & 0xff));
+    const x29 = @as(u8, @truncate((x28 & 0xff)));
     const x30 = (x28 >> 8);
-    const x31 = @truncate(u8, (x30 & 0xff));
+    const x31 = @as(u8, @truncate((x30 & 0xff)));
     const x32 = (x30 >> 8);
-    const x33 = @truncate(u8, (x32 & 0xff));
-    const x34 = @truncate(u8, (x32 >> 8));
-    const x35 = @truncate(u8, (x4 & 0xff));
+    const x33 = @as(u8, @truncate((x32 & 0xff)));
+    const x34 = @as(u8, @truncate((x32 >> 8)));
+    const x35 = @as(u8, @truncate((x4 & 0xff)));
     const x36 = (x4 >> 8);
-    const x37 = @truncate(u8, (x36 & 0xff));
+    const x37 = @as(u8, @truncate((x36 & 0xff)));
     const x38 = (x36 >> 8);
-    const x39 = @truncate(u8, (x38 & 0xff));
+    const x39 = @as(u8, @truncate((x38 & 0xff)));
     const x40 = (x38 >> 8);
-    const x41 = @truncate(u8, (x40 & 0xff));
+    const x41 = @as(u8, @truncate((x40 & 0xff)));
     const x42 = (x40 >> 8);
-    const x43 = @truncate(u8, (x42 & 0xff));
+    const x43 = @as(u8, @truncate((x42 & 0xff)));
     const x44 = (x42 >> 8);
-    const x45 = @truncate(u8, (x44 & 0xff));
+    const x45 = @as(u8, @truncate((x44 & 0xff)));
     const x46 = (x44 >> 8);
-    const x47 = @truncate(u8, (x46 & 0xff));
-    const x48 = @truncate(u8, (x46 >> 8));
-    const x49 = @truncate(u8, (x3 & 0xff));
+    const x47 = @as(u8, @truncate((x46 & 0xff)));
+    const x48 = @as(u8, @truncate((x46 >> 8)));
+    const x49 = @as(u8, @truncate((x3 & 0xff)));
     const x50 = (x3 >> 8);
-    const x51 = @truncate(u8, (x50 & 0xff));
+    const x51 = @as(u8, @truncate((x50 & 0xff)));
     const x52 = (x50 >> 8);
-    const x53 = @truncate(u8, (x52 & 0xff));
+    const x53 = @as(u8, @truncate((x52 & 0xff)));
     const x54 = (x52 >> 8);
-    const x55 = @truncate(u8, (x54 & 0xff));
+    const x55 = @as(u8, @truncate((x54 & 0xff)));
     const x56 = (x54 >> 8);
-    const x57 = @truncate(u8, (x56 & 0xff));
+    const x57 = @as(u8, @truncate((x56 & 0xff)));
     const x58 = (x56 >> 8);
-    const x59 = @truncate(u8, (x58 & 0xff));
+    const x59 = @as(u8, @truncate((x58 & 0xff)));
     const x60 = (x58 >> 8);
-    const x61 = @truncate(u8, (x60 & 0xff));
-    const x62 = @truncate(u8, (x60 >> 8));
-    const x63 = @truncate(u8, (x2 & 0xff));
+    const x61 = @as(u8, @truncate((x60 & 0xff)));
+    const x62 = @as(u8, @truncate((x60 >> 8)));
+    const x63 = @as(u8, @truncate((x2 & 0xff)));
     const x64 = (x2 >> 8);
-    const x65 = @truncate(u8, (x64 & 0xff));
+    const x65 = @as(u8, @truncate((x64 & 0xff)));
     const x66 = (x64 >> 8);
-    const x67 = @truncate(u8, (x66 & 0xff));
+    const x67 = @as(u8, @truncate((x66 & 0xff)));
     const x68 = (x66 >> 8);
-    const x69 = @truncate(u8, (x68 & 0xff));
+    const x69 = @as(u8, @truncate((x68 & 0xff)));
     const x70 = (x68 >> 8);
-    const x71 = @truncate(u8, (x70 & 0xff));
+    const x71 = @as(u8, @truncate((x70 & 0xff)));
     const x72 = (x70 >> 8);
-    const x73 = @truncate(u8, (x72 & 0xff));
+    const x73 = @as(u8, @truncate((x72 & 0xff)));
     const x74 = (x72 >> 8);
-    const x75 = @truncate(u8, (x74 & 0xff));
-    const x76 = @truncate(u8, (x74 >> 8));
-    const x77 = @truncate(u8, (x1 & 0xff));
+    const x75 = @as(u8, @truncate((x74 & 0xff)));
+    const x76 = @as(u8, @truncate((x74 >> 8)));
+    const x77 = @as(u8, @truncate((x1 & 0xff)));
     const x78 = (x1 >> 8);
-    const x79 = @truncate(u8, (x78 & 0xff));
+    const x79 = @as(u8, @truncate((x78 & 0xff)));
     const x80 = (x78 >> 8);
-    const x81 = @truncate(u8, (x80 & 0xff));
+    const x81 = @as(u8, @truncate((x80 & 0xff)));
     const x82 = (x80 >> 8);
-    const x83 = @truncate(u8, (x82 & 0xff));
+    const x83 = @as(u8, @truncate((x82 & 0xff)));
     const x84 = (x82 >> 8);
-    const x85 = @truncate(u8, (x84 & 0xff));
+    const x85 = @as(u8, @truncate((x84 & 0xff)));
     const x86 = (x84 >> 8);
-    const x87 = @truncate(u8, (x86 & 0xff));
+    const x87 = @as(u8, @truncate((x86 & 0xff)));
     const x88 = (x86 >> 8);
-    const x89 = @truncate(u8, (x88 & 0xff));
-    const x90 = @truncate(u8, (x88 >> 8));
+    const x89 = @as(u8, @truncate((x88 & 0xff)));
+    const x90 = @as(u8, @truncate((x88 >> 8)));
     out1[0] = x7;
     out1[1] = x9;
     out1[2] = x11;
@@ -3300,7 +3300,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
     var x1: u64 = undefined;
     var x2: u1 = undefined;
     addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
-    const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+    const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
     var x4: u64 = undefined;
     var x5: u1 = undefined;
     addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -3462,7 +3462,7 @@ pub fn divstep(out1: *u64, out2: *[7]u64, out3: *[7]u64, out4: *[6]u64, out5: *[
     cmovznzU64(&x102, x3, (arg5[4]), x94);
     var x103: u64 = undefined;
     cmovznzU64(&x103, x3, (arg5[5]), x96);
-    const x104 = @truncate(u1, (x28 & 0x1));
+    const x104 = @as(u1, @truncate((x28 & 0x1)));
     var x105: u64 = undefined;
     cmovznzU64(&x105, x104, 0x0, x7);
     var x106: u64 = undefined;
lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
     @setRuntimeSafety(mode == .Debug);
 
     const x = @as(u128, arg1) * @as(u128, arg2);
-    out1.* = @truncate(u64, x);
-    out2.* = @truncate(u64, x >> 64);
+    out1.* = @as(u64, @truncate(x));
+    out2.* = @as(u64, @truncate(x >> 64));
 }
 
 /// The function cmovznzU64 is a single-word conditional move.
@@ -1488,62 +1488,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
     const x2 = (arg1[2]);
     const x3 = (arg1[1]);
     const x4 = (arg1[0]);
-    const x5 = @truncate(u8, (x4 & 0xff));
+    const x5 = @as(u8, @truncate((x4 & 0xff)));
     const x6 = (x4 >> 8);
-    const x7 = @truncate(u8, (x6 & 0xff));
+    const x7 = @as(u8, @truncate((x6 & 0xff)));
     const x8 = (x6 >> 8);
-    const x9 = @truncate(u8, (x8 & 0xff));
+    const x9 = @as(u8, @truncate((x8 & 0xff)));
     const x10 = (x8 >> 8);
-    const x11 = @truncate(u8, (x10 & 0xff));
+    const x11 = @as(u8, @truncate((x10 & 0xff)));
     const x12 = (x10 >> 8);
-    const x13 = @truncate(u8, (x12 & 0xff));
+    const x13 = @as(u8, @truncate((x12 & 0xff)));
     const x14 = (x12 >> 8);
-    const x15 = @truncate(u8, (x14 & 0xff));
+    const x15 = @as(u8, @truncate((x14 & 0xff)));
     const x16 = (x14 >> 8);
-    const x17 = @truncate(u8, (x16 & 0xff));
-    const x18 = @truncate(u8, (x16 >> 8));
-    const x19 = @truncate(u8, (x3 & 0xff));
+    const x17 = @as(u8, @truncate((x16 & 0xff)));
+    const x18 = @as(u8, @truncate((x16 >> 8)));
+    const x19 = @as(u8, @truncate((x3 & 0xff)));
     const x20 = (x3 >> 8);
-    const x21 = @truncate(u8, (x20 & 0xff));
+    const x21 = @as(u8, @truncate((x20 & 0xff)));
     const x22 = (x20 >> 8);
-    const x23 = @truncate(u8, (x22 & 0xff));
+    const x23 = @as(u8, @truncate((x22 & 0xff)));
     const x24 = (x22 >> 8);
-    const x25 = @truncate(u8, (x24 & 0xff));
+    const x25 = @as(u8, @truncate((x24 & 0xff)));
     const x26 = (x24 >> 8);
-    const x27 = @truncate(u8, (x26 & 0xff));
+    const x27 = @as(u8, @truncate((x26 & 0xff)));
     const x28 = (x26 >> 8);
-    const x29 = @truncate(u8, (x28 & 0xff));
+    const x29 = @as(u8, @truncate((x28 & 0xff)));
     const x30 = (x28 >> 8);
-    const x31 = @truncate(u8, (x30 & 0xff));
-    const x32 = @truncate(u8, (x30 >> 8));
-    const x33 = @truncate(u8, (x2 & 0xff));
+    const x31 = @as(u8, @truncate((x30 & 0xff)));
+    const x32 = @as(u8, @truncate((x30 >> 8)));
+    const x33 = @as(u8, @truncate((x2 & 0xff)));
     const x34 = (x2 >> 8);
-    const x35 = @truncate(u8, (x34 & 0xff));
+    const x35 = @as(u8, @truncate((x34 & 0xff)));
     const x36 = (x34 >> 8);
-    const x37 = @truncate(u8, (x36 & 0xff));
+    const x37 = @as(u8, @truncate((x36 & 0xff)));
     const x38 = (x36 >> 8);
-    const x39 = @truncate(u8, (x38 & 0xff));
+    const x39 = @as(u8, @truncate((x38 & 0xff)));
     const x40 = (x38 >> 8);
-    const x41 = @truncate(u8, (x40 & 0xff));
+    const x41 = @as(u8, @truncate((x40 & 0xff)));
     const x42 = (x40 >> 8);
-    const x43 = @truncate(u8, (x42 & 0xff));
+    const x43 = @as(u8, @truncate((x42 & 0xff)));
     const x44 = (x42 >> 8);
-    const x45 = @truncate(u8, (x44 & 0xff));
-    const x46 = @truncate(u8, (x44 >> 8));
-    const x47 = @truncate(u8, (x1 & 0xff));
+    const x45 = @as(u8, @truncate((x44 & 0xff)));
+    const x46 = @as(u8, @truncate((x44 >> 8)));
+    const x47 = @as(u8, @truncate((x1 & 0xff)));
     const x48 = (x1 >> 8);
-    const x49 = @truncate(u8, (x48 & 0xff));
+    const x49 = @as(u8, @truncate((x48 & 0xff)));
     const x50 = (x48 >> 8);
-    const x51 = @truncate(u8, (x50 & 0xff));
+    const x51 = @as(u8, @truncate((x50 & 0xff)));
     const x52 = (x50 >> 8);
-    const x53 = @truncate(u8, (x52 & 0xff));
+    const x53 = @as(u8, @truncate((x52 & 0xff)));
     const x54 = (x52 >> 8);
-    const x55 = @truncate(u8, (x54 & 0xff));
+    const x55 = @as(u8, @truncate((x54 & 0xff)));
     const x56 = (x54 >> 8);
-    const x57 = @truncate(u8, (x56 & 0xff));
+    const x57 = @as(u8, @truncate((x56 & 0xff)));
     const x58 = (x56 >> 8);
-    const x59 = @truncate(u8, (x58 & 0xff));
-    const x60 = @truncate(u8, (x58 >> 8));
+    const x59 = @as(u8, @truncate((x58 & 0xff)));
+    const x60 = @as(u8, @truncate((x58 >> 8)));
     out1[0] = x5;
     out1[1] = x7;
     out1[2] = x9;
@@ -1726,7 +1726,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     var x1: u64 = undefined;
     var x2: u1 = undefined;
     addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
-    const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+    const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
     var x4: u64 = undefined;
     var x5: u1 = undefined;
     addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -1840,7 +1840,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     cmovznzU64(&x72, x3, (arg5[2]), x66);
     var x73: u64 = undefined;
     cmovznzU64(&x73, x3, (arg5[3]), x68);
-    const x74 = @truncate(u1, (x22 & 0x1));
+    const x74 = @as(u1, @truncate((x22 & 0x1)));
     var x75: u64 = undefined;
     cmovznzU64(&x75, x74, 0x0, x7);
     var x76: u64 = undefined;
lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig
@@ -88,8 +88,8 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
     @setRuntimeSafety(mode == .Debug);
 
     const x = @as(u128, arg1) * @as(u128, arg2);
-    out1.* = @truncate(u64, x);
-    out2.* = @truncate(u64, x >> 64);
+    out1.* = @as(u64, @truncate(x));
+    out2.* = @as(u64, @truncate(x >> 64));
 }
 
 /// The function cmovznzU64 is a single-word conditional move.
@@ -1548,62 +1548,62 @@ pub fn toBytes(out1: *[32]u8, arg1: [4]u64) void {
     const x2 = (arg1[2]);
     const x3 = (arg1[1]);
     const x4 = (arg1[0]);
-    const x5 = @truncate(u8, (x4 & 0xff));
+    const x5 = @as(u8, @truncate((x4 & 0xff)));
     const x6 = (x4 >> 8);
-    const x7 = @truncate(u8, (x6 & 0xff));
+    const x7 = @as(u8, @truncate((x6 & 0xff)));
     const x8 = (x6 >> 8);
-    const x9 = @truncate(u8, (x8 & 0xff));
+    const x9 = @as(u8, @truncate((x8 & 0xff)));
     const x10 = (x8 >> 8);
-    const x11 = @truncate(u8, (x10 & 0xff));
+    const x11 = @as(u8, @truncate((x10 & 0xff)));
     const x12 = (x10 >> 8);
-    const x13 = @truncate(u8, (x12 & 0xff));
+    const x13 = @as(u8, @truncate((x12 & 0xff)));
     const x14 = (x12 >> 8);
-    const x15 = @truncate(u8, (x14 & 0xff));
+    const x15 = @as(u8, @truncate((x14 & 0xff)));
     const x16 = (x14 >> 8);
-    const x17 = @truncate(u8, (x16 & 0xff));
-    const x18 = @truncate(u8, (x16 >> 8));
-    const x19 = @truncate(u8, (x3 & 0xff));
+    const x17 = @as(u8, @truncate((x16 & 0xff)));
+    const x18 = @as(u8, @truncate((x16 >> 8)));
+    const x19 = @as(u8, @truncate((x3 & 0xff)));
     const x20 = (x3 >> 8);
-    const x21 = @truncate(u8, (x20 & 0xff));
+    const x21 = @as(u8, @truncate((x20 & 0xff)));
     const x22 = (x20 >> 8);
-    const x23 = @truncate(u8, (x22 & 0xff));
+    const x23 = @as(u8, @truncate((x22 & 0xff)));
     const x24 = (x22 >> 8);
-    const x25 = @truncate(u8, (x24 & 0xff));
+    const x25 = @as(u8, @truncate((x24 & 0xff)));
     const x26 = (x24 >> 8);
-    const x27 = @truncate(u8, (x26 & 0xff));
+    const x27 = @as(u8, @truncate((x26 & 0xff)));
     const x28 = (x26 >> 8);
-    const x29 = @truncate(u8, (x28 & 0xff));
+    const x29 = @as(u8, @truncate((x28 & 0xff)));
     const x30 = (x28 >> 8);
-    const x31 = @truncate(u8, (x30 & 0xff));
-    const x32 = @truncate(u8, (x30 >> 8));
-    const x33 = @truncate(u8, (x2 & 0xff));
+    const x31 = @as(u8, @truncate((x30 & 0xff)));
+    const x32 = @as(u8, @truncate((x30 >> 8)));
+    const x33 = @as(u8, @truncate((x2 & 0xff)));
     const x34 = (x2 >> 8);
-    const x35 = @truncate(u8, (x34 & 0xff));
+    const x35 = @as(u8, @truncate((x34 & 0xff)));
     const x36 = (x34 >> 8);
-    const x37 = @truncate(u8, (x36 & 0xff));
+    const x37 = @as(u8, @truncate((x36 & 0xff)));
     const x38 = (x36 >> 8);
-    const x39 = @truncate(u8, (x38 & 0xff));
+    const x39 = @as(u8, @truncate((x38 & 0xff)));
     const x40 = (x38 >> 8);
-    const x41 = @truncate(u8, (x40 & 0xff));
+    const x41 = @as(u8, @truncate((x40 & 0xff)));
     const x42 = (x40 >> 8);
-    const x43 = @truncate(u8, (x42 & 0xff));
+    const x43 = @as(u8, @truncate((x42 & 0xff)));
     const x44 = (x42 >> 8);
-    const x45 = @truncate(u8, (x44 & 0xff));
-    const x46 = @truncate(u8, (x44 >> 8));
-    const x47 = @truncate(u8, (x1 & 0xff));
+    const x45 = @as(u8, @truncate((x44 & 0xff)));
+    const x46 = @as(u8, @truncate((x44 >> 8)));
+    const x47 = @as(u8, @truncate((x1 & 0xff)));
     const x48 = (x1 >> 8);
-    const x49 = @truncate(u8, (x48 & 0xff));
+    const x49 = @as(u8, @truncate((x48 & 0xff)));
     const x50 = (x48 >> 8);
-    const x51 = @truncate(u8, (x50 & 0xff));
+    const x51 = @as(u8, @truncate((x50 & 0xff)));
     const x52 = (x50 >> 8);
-    const x53 = @truncate(u8, (x52 & 0xff));
+    const x53 = @as(u8, @truncate((x52 & 0xff)));
     const x54 = (x52 >> 8);
-    const x55 = @truncate(u8, (x54 & 0xff));
+    const x55 = @as(u8, @truncate((x54 & 0xff)));
     const x56 = (x54 >> 8);
-    const x57 = @truncate(u8, (x56 & 0xff));
+    const x57 = @as(u8, @truncate((x56 & 0xff)));
     const x58 = (x56 >> 8);
-    const x59 = @truncate(u8, (x58 & 0xff));
-    const x60 = @truncate(u8, (x58 >> 8));
+    const x59 = @as(u8, @truncate((x58 & 0xff)));
+    const x60 = @as(u8, @truncate((x58 >> 8)));
     out1[0] = x5;
     out1[1] = x7;
     out1[2] = x9;
@@ -1786,7 +1786,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     var x1: u64 = undefined;
     var x2: u1 = undefined;
     addcarryxU64(&x1, &x2, 0x0, (~arg1), 0x1);
-    const x3 = (@truncate(u1, (x1 >> 63)) & @truncate(u1, ((arg3[0]) & 0x1)));
+    const x3 = (@as(u1, @truncate((x1 >> 63))) & @as(u1, @truncate(((arg3[0]) & 0x1))));
     var x4: u64 = undefined;
     var x5: u1 = undefined;
     addcarryxU64(&x4, &x5, 0x0, (~arg1), 0x1);
@@ -1900,7 +1900,7 @@ pub fn divstep(out1: *u64, out2: *[5]u64, out3: *[5]u64, out4: *[4]u64, out5: *[
     cmovznzU64(&x72, x3, (arg5[2]), x66);
     var x73: u64 = undefined;
     cmovznzU64(&x73, x3, (arg5[3]), x68);
-    const x74 = @truncate(u1, (x22 & 0x1));
+    const x74 = @as(u1, @truncate((x22 & 0x1)));
     var x75: u64 = undefined;
     cmovznzU64(&x75, x74, 0x0, x7);
     var x76: u64 = undefined;
lib/std/crypto/pcurves/common.zig
@@ -120,7 +120,7 @@ pub fn Field(comptime params: FieldParams) type {
         /// Return true if the element is odd.
         pub fn isOdd(fe: Fe) bool {
             const s = fe.toBytes(.Little);
-            return @truncate(u1, s[0]) != 0;
+            return @as(u1, @truncate(s[0])) != 0;
         }
 
         /// Conditionally replace a field element with `a` if `c` is positive.
@@ -179,7 +179,7 @@ pub fn Field(comptime params: FieldParams) type {
             var x: T = n;
             var t = a;
             while (true) {
-                if (@truncate(u1, x) != 0) fe = fe.mul(t);
+                if (@as(u1, @truncate(x)) != 0) fe = fe.mul(t);
                 x >>= 1;
                 if (x == 0) break;
                 t = t.sq();
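
The loop above is standard right-to-left square-and-multiply. A self-contained integer sketch of the same control flow, with wrapping u64 multiplication standing in for the field operations:

fn powSketch(base: u64, exponent: u64) u64 {
    var result: u64 = 1;
    var t = base;
    var x = exponent;
    while (true) {
        // Fold in the current power of the base when the low bit is set.
        if (@as(u1, @truncate(x)) != 0) result *%= t;
        x >>= 1;
        if (x == 0) break;
        t *%= t; // square once per remaining bit, as in the loop above
    }
    return result;
}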
@@ -233,7 +233,7 @@ pub fn Field(comptime params: FieldParams) type {
             }
             var v_opp: Limbs = undefined;
             fiat.opp(&v_opp, v);
-            fiat.selectznz(&v, @truncate(u1, f[f.len - 1] >> (@bitSizeOf(Word) - 1)), v, v_opp);
+            fiat.selectznz(&v, @as(u1, @truncate(f[f.len - 1] >> (@bitSizeOf(Word) - 1))), v, v_opp);
 
             const precomp = blk: {
                 var precomp: Limbs = undefined;
lib/std/crypto/pcurves/p256.zig
@@ -318,7 +318,7 @@ pub const P256 = struct {
         var t = P256.identityElement;
         comptime var i: u8 = 1;
         inline while (i < pc.len) : (i += 1) {
-            t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8));
+            t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8)));
         }
         return t;
     }
@@ -326,8 +326,8 @@ pub const P256 = struct {
     fn slide(s: [32]u8) [2 * 32 + 1]i8 {
         var e: [2 * 32 + 1]i8 = undefined;
         for (s, 0..) |x, i| {
-            e[i * 2 + 0] = @as(i8, @truncate(u4, x));
-            e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+            e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+            e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
         }
         // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
         var carry: i8 = 0;
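
The rest of `slide` is elided by the hunk; presumably it recenters these nibbles into signed digits and carries into the extra trailing slot. A sketch of the standard recoding, offered as an assumption about the elided code rather than a copy of it:

fn recenterSketch(e: []i8) void {
    var carry: i8 = 0;
    for (e[0 .. e.len - 1]) |*digit| {
        digit.* += carry;
        carry = (digit.* + 8) >> 4; // 1 when the digit exceeds 7
        digit.* -= carry * 16; // final digit lands in [-8, 7]
    }
    e[e.len - 1] = carry;
}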
@@ -351,9 +351,9 @@ pub const P256 = struct {
         while (true) : (pos -= 1) {
             const slot = e[pos];
             if (slot > 0) {
-                q = q.add(pc[@intCast(usize, slot)]);
+                q = q.add(pc[@as(usize, @intCast(slot))]);
             } else if (slot < 0) {
-                q = q.sub(pc[@intCast(usize, -slot)]);
+                q = q.sub(pc[@as(usize, @intCast(-slot))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
@@ -366,7 +366,7 @@ pub const P256 = struct {
         var q = P256.identityElement;
         var pos: usize = 252;
         while (true) : (pos -= 4) {
-            const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+            const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
             if (vartime) {
                 if (slot != 0) {
                     q = q.add(pc[slot]);
@@ -445,15 +445,15 @@ pub const P256 = struct {
         while (true) : (pos -= 1) {
             const slot1 = e1[pos];
             if (slot1 > 0) {
-                q = q.add(pc1[@intCast(usize, slot1)]);
+                q = q.add(pc1[@as(usize, @intCast(slot1))]);
             } else if (slot1 < 0) {
-                q = q.sub(pc1[@intCast(usize, -slot1)]);
+                q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
             }
             const slot2 = e2[pos];
             if (slot2 > 0) {
-                q = q.add(pc2[@intCast(usize, slot2)]);
+                q = q.add(pc2[@as(usize, @intCast(slot2))]);
             } else if (slot2 < 0) {
-                q = q.sub(pc2[@intCast(usize, -slot2)]);
+                q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
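
The mask expression in `pcSelect` is worth isolating: `(@as(usize, b ^ i) -% 1) >> 8` wraps to all-ones exactly when b == i, so the truncated bit drives `cMov` without a data-dependent branch:

fn ctEq8Sketch(b: u8, i: u8) u1 {
    // b ^ i is zero only on a match; subtracting 1 then wraps to all-ones,
    // setting bit 8, which stays clear for the non-matching values 0..254.
    return @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8));
}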
lib/std/crypto/pcurves/p384.zig
@@ -318,7 +318,7 @@ pub const P384 = struct {
         var t = P384.identityElement;
         comptime var i: u8 = 1;
         inline while (i < pc.len) : (i += 1) {
-            t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8));
+            t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8)));
         }
         return t;
     }
@@ -326,8 +326,8 @@ pub const P384 = struct {
     fn slide(s: [48]u8) [2 * 48 + 1]i8 {
         var e: [2 * 48 + 1]i8 = undefined;
         for (s, 0..) |x, i| {
-            e[i * 2 + 0] = @as(i8, @truncate(u4, x));
-            e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+            e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+            e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
         }
         // Now, e[0..95] is between 0 and 15, e[95] is between 0 and 7
         var carry: i8 = 0;
@@ -351,9 +351,9 @@ pub const P384 = struct {
         while (true) : (pos -= 1) {
             const slot = e[pos];
             if (slot > 0) {
-                q = q.add(pc[@intCast(usize, slot)]);
+                q = q.add(pc[@as(usize, @intCast(slot))]);
             } else if (slot < 0) {
-                q = q.sub(pc[@intCast(usize, -slot)]);
+                q = q.sub(pc[@as(usize, @intCast(-slot))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
@@ -366,7 +366,7 @@ pub const P384 = struct {
         var q = P384.identityElement;
         var pos: usize = 380;
         while (true) : (pos -= 4) {
-            const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+            const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
             if (vartime) {
                 if (slot != 0) {
                     q = q.add(pc[slot]);
@@ -445,15 +445,15 @@ pub const P384 = struct {
         while (true) : (pos -= 1) {
             const slot1 = e1[pos];
             if (slot1 > 0) {
-                q = q.add(pc1[@intCast(usize, slot1)]);
+                q = q.add(pc1[@as(usize, @intCast(slot1))]);
             } else if (slot1 < 0) {
-                q = q.sub(pc1[@intCast(usize, -slot1)]);
+                q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
             }
             const slot2 = e2[pos];
             if (slot2 > 0) {
-                q = q.add(pc2[@intCast(usize, slot2)]);
+                q = q.add(pc2[@as(usize, @intCast(slot2))]);
             } else if (slot2 < 0) {
-                q = q.sub(pc2[@intCast(usize, -slot2)]);
+                q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
lib/std/crypto/pcurves/secp256k1.zig
@@ -67,8 +67,8 @@ pub const Secp256k1 = struct {
             const t1 = math.mulWide(u256, k, 21949224512762693861512883645436906316123769664773102907882521278123970637873);
             const t2 = math.mulWide(u256, k, 103246583619904461035481197785446227098457807945486720222659797044629401272177);
 
-            const c1 = @truncate(u128, t1 >> 384) + @truncate(u1, t1 >> 383);
-            const c2 = @truncate(u128, t2 >> 384) + @truncate(u1, t2 >> 383);
+            const c1 = @as(u128, @truncate(t1 >> 384)) + @as(u1, @truncate(t1 >> 383));
+            const c2 = @as(u128, @truncate(t2 >> 384)) + @as(u1, @truncate(t2 >> 383));
 
             var buf: [32]u8 = undefined;
 
@@ -346,7 +346,7 @@ pub const Secp256k1 = struct {
         var t = Secp256k1.identityElement;
         comptime var i: u8 = 1;
         inline while (i < pc.len) : (i += 1) {
-            t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8));
+            t.cMov(pc[i], @as(u1, @truncate((@as(usize, b ^ i) -% 1) >> 8)));
         }
         return t;
     }
@@ -354,8 +354,8 @@ pub const Secp256k1 = struct {
     fn slide(s: [32]u8) [2 * 32 + 1]i8 {
         var e: [2 * 32 + 1]i8 = undefined;
         for (s, 0..) |x, i| {
-            e[i * 2 + 0] = @as(i8, @truncate(u4, x));
-            e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
+            e[i * 2 + 0] = @as(i8, @as(u4, @truncate(x)));
+            e[i * 2 + 1] = @as(i8, @as(u4, @truncate(x >> 4)));
         }
         // Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
         var carry: i8 = 0;
@@ -379,9 +379,9 @@ pub const Secp256k1 = struct {
         while (true) : (pos -= 1) {
             const slot = e[pos];
             if (slot > 0) {
-                q = q.add(pc[@intCast(usize, slot)]);
+                q = q.add(pc[@as(usize, @intCast(slot))]);
             } else if (slot < 0) {
-                q = q.sub(pc[@intCast(usize, -slot)]);
+                q = q.sub(pc[@as(usize, @intCast(-slot))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
@@ -394,7 +394,7 @@ pub const Secp256k1 = struct {
         var q = Secp256k1.identityElement;
         var pos: usize = 252;
         while (true) : (pos -= 4) {
-            const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
+            const slot = @as(u4, @truncate((s[pos >> 3] >> @as(u3, @truncate(pos)))));
             if (vartime) {
                 if (slot != 0) {
                     q = q.add(pc[slot]);
@@ -482,15 +482,15 @@ pub const Secp256k1 = struct {
         while (true) : (pos -= 1) {
             const slot1 = e1[pos];
             if (slot1 > 0) {
-                q = q.add(pc1[@intCast(usize, slot1)]);
+                q = q.add(pc1[@as(usize, @intCast(slot1))]);
             } else if (slot1 < 0) {
-                q = q.sub(pc1[@intCast(usize, -slot1)]);
+                q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
             }
             const slot2 = e2[pos];
             if (slot2 > 0) {
-                q = q.add(pc2[@intCast(usize, slot2)]);
+                q = q.add(pc2[@as(usize, @intCast(slot2))]);
             } else if (slot2 < 0) {
-                q = q.sub(pc2[@intCast(usize, -slot2)]);
+                q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
@@ -523,15 +523,15 @@ pub const Secp256k1 = struct {
         while (true) : (pos -= 1) {
             const slot1 = e1[pos];
             if (slot1 > 0) {
-                q = q.add(pc1[@intCast(usize, slot1)]);
+                q = q.add(pc1[@as(usize, @intCast(slot1))]);
             } else if (slot1 < 0) {
-                q = q.sub(pc1[@intCast(usize, -slot1)]);
+                q = q.sub(pc1[@as(usize, @intCast(-slot1))]);
             }
             const slot2 = e2[pos];
             if (slot2 > 0) {
-                q = q.add(pc2[@intCast(usize, slot2)]);
+                q = q.add(pc2[@as(usize, @intCast(slot2))]);
             } else if (slot2 < 0) {
-                q = q.sub(pc2[@intCast(usize, -slot2)]);
+                q = q.sub(pc2[@as(usize, @intCast(-slot2))]);
             }
             if (pos == 0) break;
             q = q.dbl().dbl().dbl().dbl();
lib/std/crypto/tls/Client.zig
@@ -140,7 +140,7 @@ pub fn InitError(comptime Stream: type) type {
 ///
 /// `host` is only borrowed during this function call.
 pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) InitError(@TypeOf(stream))!Client {
-    const host_len = @intCast(u16, host.len);
+    const host_len = @as(u16, @intCast(host.len));
 
     var random_buffer: [128]u8 = undefined;
     crypto.random.bytes(&random_buffer);
@@ -194,7 +194,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
         int2(host_len);
 
     const extensions_header =
-        int2(@intCast(u16, extensions_payload.len + host_len)) ++
+        int2(@as(u16, @intCast(extensions_payload.len + host_len))) ++
         extensions_payload;
 
     const legacy_compression_methods = 0x0100;
@@ -209,13 +209,13 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
 
     const out_handshake =
         [_]u8{@intFromEnum(tls.HandshakeType.client_hello)} ++
-        int3(@intCast(u24, client_hello.len + host_len)) ++
+        int3(@as(u24, @intCast(client_hello.len + host_len))) ++
         client_hello;
 
     const plaintext_header = [_]u8{
         @intFromEnum(tls.ContentType.handshake),
         0x03, 0x01, // legacy_record_version
-    } ++ int2(@intCast(u16, out_handshake.len + host_len)) ++ out_handshake;
+    } ++ int2(@as(u16, @intCast(out_handshake.len + host_len))) ++ out_handshake;
 
     {
         var iovecs = [_]std.os.iovec_const{
@@ -457,7 +457,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
                         const auth_tag = record_decoder.array(P.AEAD.tag_length).*;
                         const V = @Vector(P.AEAD.nonce_length, u8);
                         const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
-                        const operand: V = pad ++ @bitCast([8]u8, big(read_seq));
+                        const operand: V = pad ++ @as([8]u8, @bitCast(big(read_seq)));
                         read_seq += 1;
                         const nonce = @as(V, p.server_handshake_iv) ^ operand;
                         P.AEAD.decrypt(cleartext, ciphertext, auth_tag, record_header, nonce, p.server_handshake_key) catch
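
The operand/XOR pair above is the TLS 1.3 per-record nonce construction (RFC 8446, section 5.3): left-pad the big-endian 64-bit record sequence number to the IV length and XOR it into the static IV. A minimal sketch for a 12-byte AEAD nonce, using std.mem.nativeToBig in place of this file's `big` helper:

const std = @import("std");

fn recordNonceSketch(iv: [12]u8, seq: u64) [12]u8 {
    const V = @Vector(12, u8);
    const pad = [1]u8{0} ** 4; // nonce_length - 8 zero bytes
    const operand: V = pad ++ @as([8]u8, @bitCast(std.mem.nativeToBig(u64, seq)));
    const nonce: [12]u8 = @as(V, iv) ^ operand;
    return nonce;
}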
@@ -466,7 +466,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
                     },
                 };
 
-                const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]);
+                const inner_ct = @as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1]));
                 if (inner_ct != .handshake) return error.TlsUnexpectedMessage;
 
                 var ctd = tls.Decoder.fromTheirSlice(cleartext[0 .. cleartext.len - 1]);
@@ -520,7 +520,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
 
                                 const subject_cert: Certificate = .{
                                     .buffer = certd.buf,
-                                    .index = @intCast(u32, certd.idx),
+                                    .index = @as(u32, @intCast(certd.idx)),
                                 };
                                 const subject = try subject_cert.parse();
                                 if (cert_index == 0) {
@@ -534,7 +534,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
                                     if (pub_key.len > main_cert_pub_key_buf.len)
                                         return error.CertificatePublicKeyInvalid;
                                     @memcpy(main_cert_pub_key_buf[0..pub_key.len], pub_key);
-                                    main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len);
+                                    main_cert_pub_key_len = @as(@TypeOf(main_cert_pub_key_len), @intCast(pub_key.len));
                                 } else {
                                     try prev_cert.verify(subject, now_sec);
                                 }
@@ -679,7 +679,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
                                 .write_seq = 0,
                                 .partial_cleartext_idx = 0,
                                 .partial_ciphertext_idx = 0,
-                                .partial_ciphertext_end = @intCast(u15, leftover.len),
+                                .partial_ciphertext_end = @as(u15, @intCast(leftover.len)),
                                 .received_close_notify = false,
                                 .application_cipher = app_cipher,
                                 .partially_read_buffer = undefined,
@@ -797,11 +797,11 @@ fn prepareCiphertextRecord(
             const overhead_len = tls.record_header_len + P.AEAD.tag_length + 1;
             const close_notify_alert_reserved = tls.close_notify_alert.len + overhead_len;
             while (true) {
-                const encrypted_content_len = @intCast(u16, @min(
+                const encrypted_content_len = @as(u16, @intCast(@min(
                     @min(bytes.len - bytes_i, max_ciphertext_len - 1),
                     ciphertext_buf.len - close_notify_alert_reserved -
                         overhead_len - ciphertext_end,
-                ));
+                )));
                 if (encrypted_content_len == 0) return .{
                     .iovec_end = iovec_end,
                     .ciphertext_end = ciphertext_end,
@@ -826,7 +826,7 @@ fn prepareCiphertextRecord(
                 const auth_tag = ciphertext_buf[ciphertext_end..][0..P.AEAD.tag_length];
                 ciphertext_end += auth_tag.len;
                 const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
-                const operand: V = pad ++ @bitCast([8]u8, big(c.write_seq));
+                const operand: V = pad ++ @as([8]u8, @bitCast(big(c.write_seq)));
                 c.write_seq += 1; // TODO send key_update on overflow
                 const nonce = @as(V, p.client_iv) ^ operand;
                 P.AEAD.encrypt(ciphertext, auth_tag, cleartext, ad, nonce, p.client_key);
@@ -920,7 +920,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
     // Give away the buffered cleartext we have, if any.
     const partial_cleartext = c.partially_read_buffer[c.partial_cleartext_idx..c.partial_ciphertext_idx];
     if (partial_cleartext.len > 0) {
-        const amt = @intCast(u15, vp.put(partial_cleartext));
+        const amt = @as(u15, @intCast(vp.put(partial_cleartext)));
         c.partial_cleartext_idx += amt;
 
         if (c.partial_cleartext_idx == c.partial_ciphertext_idx and
@@ -1037,7 +1037,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
             in = 0;
             continue;
         }
-        const ct = @enumFromInt(tls.ContentType, frag[in]);
+        const ct = @as(tls.ContentType, @enumFromInt(frag[in]));
         in += 1;
         const legacy_version = mem.readIntBig(u16, frag[in..][0..2]);
         in += 2;
@@ -1070,8 +1070,8 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
         switch (ct) {
             .alert => {
                 if (in + 2 > frag.len) return error.TlsDecodeError;
-                const level = @enumFromInt(tls.AlertLevel, frag[in]);
-                const desc = @enumFromInt(tls.AlertDescription, frag[in + 1]);
+                const level = @as(tls.AlertLevel, @enumFromInt(frag[in]));
+                const desc = @as(tls.AlertDescription, @enumFromInt(frag[in + 1]));
                 _ = level;
 
                 try desc.toError();
@@ -1089,7 +1089,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
                         in += ciphertext_len;
                         const auth_tag = frag[in..][0..P.AEAD.tag_length].*;
                         const pad = [1]u8{0} ** (P.AEAD.nonce_length - 8);
-                        const operand: V = pad ++ @bitCast([8]u8, big(c.read_seq));
+                        const operand: V = pad ++ @as([8]u8, @bitCast(big(c.read_seq)));
                         const nonce: [P.AEAD.nonce_length]u8 = @as(V, p.server_iv) ^ operand;
                         const out_buf = vp.peek();
                         const cleartext_buf = if (ciphertext.len <= out_buf.len)
@@ -1105,11 +1105,11 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
 
                 c.read_seq = try std.math.add(u64, c.read_seq, 1);
 
-                const inner_ct = @enumFromInt(tls.ContentType, cleartext[cleartext.len - 1]);
+                const inner_ct = @as(tls.ContentType, @enumFromInt(cleartext[cleartext.len - 1]));
                 switch (inner_ct) {
                     .alert => {
-                        const level = @enumFromInt(tls.AlertLevel, cleartext[0]);
-                        const desc = @enumFromInt(tls.AlertDescription, cleartext[1]);
+                        const level = @as(tls.AlertLevel, @enumFromInt(cleartext[0]));
+                        const desc = @as(tls.AlertDescription, @enumFromInt(cleartext[1]));
                         if (desc == .close_notify) {
                             c.received_close_notify = true;
                             c.partial_ciphertext_end = c.partial_ciphertext_idx;
@@ -1124,7 +1124,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
                     .handshake => {
                         var ct_i: usize = 0;
                         while (true) {
-                            const handshake_type = @enumFromInt(tls.HandshakeType, cleartext[ct_i]);
+                            const handshake_type = @as(tls.HandshakeType, @enumFromInt(cleartext[ct_i]));
                             ct_i += 1;
                             const handshake_len = mem.readIntBig(u24, cleartext[ct_i..][0..3]);
                             ct_i += 3;
@@ -1148,7 +1148,7 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
                                     }
                                     c.read_seq = 0;
 
-                                    switch (@enumFromInt(tls.KeyUpdateRequest, handshake[0])) {
+                                    switch (@as(tls.KeyUpdateRequest, @enumFromInt(handshake[0]))) {
                                         .update_requested => {
                                             switch (c.application_cipher) {
                                                 inline else => |*p| {
@@ -1186,13 +1186,13 @@ pub fn readvAdvanced(c: *Client, stream: anytype, iovecs: []const std.os.iovec)
                                     c.partially_read_buffer[c.partial_ciphertext_idx..][0..msg.len],
                                     msg,
                                 );
-                                c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), c.partial_ciphertext_idx + msg.len);
+                                c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(c.partial_ciphertext_idx + msg.len));
                             } else {
                                 const amt = vp.put(msg);
                                 if (amt < msg.len) {
                                     const rest = msg[amt..];
                                     c.partial_cleartext_idx = 0;
-                                    c.partial_ciphertext_idx = @intCast(@TypeOf(c.partial_ciphertext_idx), rest.len);
+                                    c.partial_ciphertext_idx = @as(@TypeOf(c.partial_ciphertext_idx), @intCast(rest.len));
                                     @memcpy(c.partially_read_buffer[0..rest.len], rest);
                                 }
                             }
@@ -1220,12 +1220,12 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize {
     const saved_buf = frag[in..];
     if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
         // There is cleartext at the beginning already which we need to preserve.
-        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + saved_buf.len);
+        c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + saved_buf.len));
         @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx..][0..saved_buf.len], saved_buf);
     } else {
         c.partial_cleartext_idx = 0;
         c.partial_ciphertext_idx = 0;
-        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), saved_buf.len);
+        c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(saved_buf.len));
         @memcpy(c.partially_read_buffer[0..saved_buf.len], saved_buf);
     }
     return out;
@@ -1235,14 +1235,14 @@ fn finishRead(c: *Client, frag: []const u8, in: usize, out: usize) usize {
 fn finishRead2(c: *Client, first: []const u8, frag1: []const u8, out: usize) usize {
     if (c.partial_ciphertext_idx > c.partial_cleartext_idx) {
         // There is cleartext at the beginning already which we need to preserve.
-        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), c.partial_ciphertext_idx + first.len + frag1.len);
+        c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(c.partial_ciphertext_idx + first.len + frag1.len));
         // TODO: eliminate this call to copyForwards
         std.mem.copyForwards(u8, c.partially_read_buffer[c.partial_ciphertext_idx..][0..first.len], first);
         @memcpy(c.partially_read_buffer[c.partial_ciphertext_idx + first.len ..][0..frag1.len], frag1);
     } else {
         c.partial_cleartext_idx = 0;
         c.partial_ciphertext_idx = 0;
-        c.partial_ciphertext_end = @intCast(@TypeOf(c.partial_ciphertext_end), first.len + frag1.len);
+        c.partial_ciphertext_end = @as(@TypeOf(c.partial_ciphertext_end), @intCast(first.len + frag1.len));
         // TODO: eliminate this call to copyForwards
         std.mem.copyForwards(u8, c.partially_read_buffer[0..first.len], first);
         @memcpy(c.partially_read_buffer[first.len..][0..frag1.len], frag1);
lib/std/crypto/aegis.zig
@@ -625,7 +625,7 @@ test "Aegis MAC" {
     const key = [_]u8{0x00} ** Aegis128LMac.key_length;
     var msg: [64]u8 = undefined;
     for (&msg, 0..) |*m, i| {
-        m.* = @truncate(u8, i);
+        m.* = @as(u8, @truncate(i));
     }
     const st_init = Aegis128LMac.init(&key);
     var st = st_init;
lib/std/crypto/aes_ocb.zig
@@ -86,18 +86,18 @@ fn AesOcb(comptime Aes: anytype) type {
 
         fn getOffset(aes_enc_ctx: EncryptCtx, npub: [nonce_length]u8) Block {
             var nx = [_]u8{0} ** 16;
-            nx[0] = @intCast(u8, @truncate(u7, tag_length * 8) << 1);
+            nx[0] = @as(u8, @intCast(@as(u7, @truncate(tag_length * 8)) << 1));
             nx[16 - nonce_length - 1] = 1;
             nx[nx.len - nonce_length ..].* = npub;
 
-            const bottom = @truncate(u6, nx[15]);
+            const bottom = @as(u6, @truncate(nx[15]));
             nx[15] &= 0xc0;
             var ktop_: Block = undefined;
             aes_enc_ctx.encrypt(&ktop_, &nx);
             const ktop = mem.readIntBig(u128, &ktop_);
-            var stretch = (@as(u192, ktop) << 64) | @as(u192, @truncate(u64, ktop >> 64) ^ @truncate(u64, ktop >> 56));
+            var stretch = (@as(u192, ktop) << 64) | @as(u192, @as(u64, @truncate(ktop >> 64)) ^ @as(u64, @truncate(ktop >> 56)));
             var offset: Block = undefined;
-            mem.writeIntBig(u128, &offset, @truncate(u128, stretch >> (64 - @as(u7, bottom))));
+            mem.writeIntBig(u128, &offset, @as(u128, @truncate(stretch >> (64 - @as(u7, bottom)))));
             return offset;
         }
 
lib/std/crypto/argon2.zig
@@ -95,7 +95,7 @@ pub const Params = struct {
     pub fn fromLimits(ops_limit: u32, mem_limit: usize) Self {
         const m = mem_limit / 1024;
         std.debug.assert(m <= max_int);
-        return .{ .t = ops_limit, .m = @intCast(u32, m), .p = 1 };
+        return .{ .t = ops_limit, .m = @as(u32, @intCast(m)), .p = 1 };
     }
 };
 
@@ -111,26 +111,26 @@ fn initHash(
     var tmp: [4]u8 = undefined;
     var b2 = Blake2b512.init(.{});
     mem.writeIntLittle(u32, parameters[0..4], params.p);
-    mem.writeIntLittle(u32, parameters[4..8], @intCast(u32, dk_len));
+    mem.writeIntLittle(u32, parameters[4..8], @as(u32, @intCast(dk_len)));
     mem.writeIntLittle(u32, parameters[8..12], params.m);
     mem.writeIntLittle(u32, parameters[12..16], params.t);
     mem.writeIntLittle(u32, parameters[16..20], version);
     mem.writeIntLittle(u32, parameters[20..24], @intFromEnum(mode));
     b2.update(&parameters);
-    mem.writeIntLittle(u32, &tmp, @intCast(u32, password.len));
+    mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(password.len)));
     b2.update(&tmp);
     b2.update(password);
-    mem.writeIntLittle(u32, &tmp, @intCast(u32, salt.len));
+    mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(salt.len)));
     b2.update(&tmp);
     b2.update(salt);
     const secret = params.secret orelse "";
     std.debug.assert(secret.len <= max_int);
-    mem.writeIntLittle(u32, &tmp, @intCast(u32, secret.len));
+    mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(secret.len)));
     b2.update(&tmp);
     b2.update(secret);
     const ad = params.ad orelse "";
     std.debug.assert(ad.len <= max_int);
-    mem.writeIntLittle(u32, &tmp, @intCast(u32, ad.len));
+    mem.writeIntLittle(u32, &tmp, @as(u32, @intCast(ad.len)));
     b2.update(&tmp);
     b2.update(ad);
     b2.final(h0[0..Blake2b512.digest_length]);
@@ -140,7 +140,7 @@ fn initHash(
 fn blake2bLong(out: []u8, in: []const u8) void {
     const H = Blake2b512;
     var outlen_bytes: [4]u8 = undefined;
-    mem.writeIntLittle(u32, &outlen_bytes, @intCast(u32, out.len));
+    mem.writeIntLittle(u32, &outlen_bytes, @as(u32, @intCast(out.len)));
 
     var out_buf: [H.digest_length]u8 = undefined;
 
@@ -391,7 +391,7 @@ fn Rp(a: usize, b: usize, c: usize, d: usize) QuarterRound {
 }
 
 fn fBlaMka(x: u64, y: u64) u64 {
-    const xy = @as(u64, @truncate(u32, x)) * @as(u64, @truncate(u32, y));
+    const xy = @as(u64, @as(u32, @truncate(x))) * @as(u64, @as(u32, @truncate(y)));
     return x +% y +% 2 *% xy;
 }
 
@@ -448,7 +448,7 @@ fn indexAlpha(
     lane: u24,
     index: u32,
 ) u32 {
-    var ref_lane = @intCast(u32, rand >> 32) % threads;
+    var ref_lane = @as(u32, @intCast(rand >> 32)) % threads;
     if (n == 0 and slice == 0) {
         ref_lane = lane;
     }
@@ -467,10 +467,10 @@ fn indexAlpha(
     if (index == 0 or lane == ref_lane) {
         m -= 1;
     }
-    var p = @as(u64, @truncate(u32, rand));
+    var p = @as(u64, @as(u32, @truncate(rand)));
     p = (p * p) >> 32;
     p = (p * m) >> 32;
-    return ref_lane * lanes + @intCast(u32, ((s + m - (p + 1)) % lanes));
+    return ref_lane * lanes + @as(u32, @intCast(((s + m - (p + 1)) % lanes)));
 }
 
 /// Derives a key from the password, salt, and argon2 parameters.
lib/std/crypto/ascon.zig
@@ -95,8 +95,8 @@ pub fn State(comptime endian: builtin.Endian) type {
         /// XOR a byte into the state at a given offset.
         pub fn addByte(self: *Self, byte: u8, offset: usize) void {
             const z = switch (endian) {
-                .Big => 64 - 8 - 8 * @truncate(u6, offset % 8),
-                .Little => 8 * @truncate(u6, offset % 8),
+                .Big => 64 - 8 - 8 * @as(u6, @truncate(offset % 8)),
+                .Little => 8 * @as(u6, @truncate(offset % 8)),
             };
             self.st[offset / 8] ^= @as(u64, byte) << z;
         }
lib/std/crypto/bcrypt.zig
@@ -376,10 +376,10 @@ pub const State = struct {
     const Halves = struct { l: u32, r: u32 };
 
     fn halfRound(state: *const State, i: u32, j: u32, n: usize) u32 {
-        var r = state.sboxes[0][@truncate(u8, j >> 24)];
-        r +%= state.sboxes[1][@truncate(u8, j >> 16)];
-        r ^= state.sboxes[2][@truncate(u8, j >> 8)];
-        r +%= state.sboxes[3][@truncate(u8, j)];
+        var r = state.sboxes[0][@as(u8, @truncate(j >> 24))];
+        r +%= state.sboxes[1][@as(u8, @truncate(j >> 16))];
+        r ^= state.sboxes[2][@as(u8, @truncate(j >> 8))];
+        r +%= state.sboxes[3][@as(u8, @truncate(j))];
         return i ^ r ^ state.subkeys[n];
     }
 
lib/std/crypto/benchmark.zig
@@ -54,8 +54,8 @@ pub fn benchmarkHash(comptime Hash: anytype, comptime bytes: comptime_int) !u64
 
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, bytes / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(bytes / elapsed_s));
 
     return throughput;
 }
@@ -95,8 +95,8 @@ pub fn benchmarkMac(comptime Mac: anytype, comptime bytes: comptime_int) !u64 {
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, bytes / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(bytes / elapsed_s));
 
     return throughput;
 }
@@ -125,8 +125,8 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: anytype, comptime exchange_c
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, exchange_count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(exchange_count / elapsed_s));
 
     return throughput;
 }
@@ -148,8 +148,8 @@ pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, signatures_count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s));
 
     return throughput;
 }
@@ -172,8 +172,8 @@ pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime sign
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, signatures_count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(signatures_count / elapsed_s));
 
     return throughput;
 }
@@ -201,8 +201,8 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = batch.len * @intFromFloat(u64, signatures_count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = batch.len * @as(u64, @intFromFloat(signatures_count / elapsed_s));
 
     return throughput;
 }
@@ -227,8 +227,8 @@ pub fn benchmarkKem(comptime Kem: anytype, comptime kems_count: comptime_int) !u
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, kems_count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s));
 
     return throughput;
 }
@@ -249,8 +249,8 @@ pub fn benchmarkKemDecaps(comptime Kem: anytype, comptime kems_count: comptime_i
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, kems_count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s));
 
     return throughput;
 }
@@ -267,8 +267,8 @@ pub fn benchmarkKemKeyGen(comptime Kem: anytype, comptime kems_count: comptime_i
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, kems_count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(kems_count / elapsed_s));
 
     return throughput;
 }
@@ -309,8 +309,8 @@ pub fn benchmarkAead(comptime Aead: anytype, comptime bytes: comptime_int) !u64
     mem.doNotOptimizeAway(&in);
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, 2 * bytes / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(2 * bytes / elapsed_s));
 
     return throughput;
 }
@@ -338,8 +338,8 @@ pub fn benchmarkAes(comptime Aes: anytype, comptime count: comptime_int) !u64 {
     mem.doNotOptimizeAway(&in);
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(count / elapsed_s));
 
     return throughput;
 }
@@ -367,8 +367,8 @@ pub fn benchmarkAes8(comptime Aes: anytype, comptime count: comptime_int) !u64 {
     mem.doNotOptimizeAway(&in);
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, 8 * count / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(8 * count / elapsed_s));
 
     return throughput;
 }
@@ -406,7 +406,7 @@ fn benchmarkPwhash(
     const password = "testpass" ** 2;
     const opts = .{
         .allocator = allocator,
-        .params = @ptrCast(*const ty.Params, @alignCast(std.meta.alignment(ty.Params), params)).*,
+        .params = @as(*const ty.Params, @ptrCast(@alignCast(params))).*,
         .encoding = .phc,
     };
     var buf: [256]u8 = undefined;
@@ -422,7 +422,7 @@ fn benchmarkPwhash(
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
     const throughput = elapsed_s / count;
 
     return throughput;
lib/std/crypto/blake2.zig
@@ -80,7 +80,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
 
             const key_len = if (options.key) |key| key.len else 0;
             // default parameters
-            d.h[0] ^= 0x01010000 ^ @truncate(u32, key_len << 8) ^ @intCast(u32, options.expected_out_bits >> 3);
+            d.h[0] ^= 0x01010000 ^ @as(u32, @truncate(key_len << 8)) ^ @as(u32, @intCast(options.expected_out_bits >> 3));
             d.t = 0;
             d.buf_len = 0;
 
@@ -127,7 +127,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
             // Copy any remainder for next pass.
             const b_slice = b[off..];
             @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
-            d.buf_len += @intCast(u8, b_slice.len);
+            d.buf_len += @as(u8, @intCast(b_slice.len));
         }
 
         pub fn final(d: *Self, out: *[digest_length]u8) void {
@@ -135,7 +135,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
             d.t += d.buf_len;
             d.round(d.buf[0..], true);
             for (&d.h) |*x| x.* = mem.nativeToLittle(u32, x.*);
-            out.* = @ptrCast(*[digest_length]u8, &d.h).*;
+            out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*;
         }
 
         fn round(d: *Self, b: *const [64]u8, last: bool) void {
@@ -152,8 +152,8 @@ pub fn Blake2s(comptime out_bits: usize) type {
                 v[k + 8] = iv[k];
             }
 
-            v[12] ^= @truncate(u32, d.t);
-            v[13] ^= @intCast(u32, d.t >> 32);
+            v[12] ^= @as(u32, @truncate(d.t));
+            v[13] ^= @as(u32, @intCast(d.t >> 32));
             if (last) v[14] = ~v[14];
 
             const rounds = comptime [_]RoundParam{
@@ -563,7 +563,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
             // Copy any remainder for next pass.
             const b_slice = b[off..];
             @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
-            d.buf_len += @intCast(u8, b_slice.len);
+            d.buf_len += @as(u8, @intCast(b_slice.len));
         }
 
         pub fn final(d: *Self, out: *[digest_length]u8) void {
@@ -571,7 +571,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
             d.t += d.buf_len;
             d.round(d.buf[0..], true);
             for (&d.h) |*x| x.* = mem.nativeToLittle(u64, x.*);
-            out.* = @ptrCast(*[digest_length]u8, &d.h).*;
+            out.* = @as(*[digest_length]u8, @ptrCast(&d.h)).*;
         }
 
         fn round(d: *Self, b: *const [128]u8, last: bool) void {
@@ -588,8 +588,8 @@ pub fn Blake2b(comptime out_bits: usize) type {
                 v[k + 8] = iv[k];
             }
 
-            v[12] ^= @truncate(u64, d.t);
-            v[13] ^= @intCast(u64, d.t >> 64);
+            v[12] ^= @as(u64, @truncate(d.t));
+            v[13] ^= @as(u64, @intCast(d.t >> 64));
             if (last) v[14] = ~v[14];
 
             const rounds = comptime [_]RoundParam{
lib/std/crypto/blake3.zig
@@ -89,7 +89,7 @@ const CompressVectorized = struct {
         counter: u64,
         flags: u8,
     ) [16]u32 {
-        const md = Lane{ @truncate(u32, counter), @truncate(u32, counter >> 32), block_len, @as(u32, flags) };
+        const md = Lane{ @as(u32, @truncate(counter)), @as(u32, @truncate(counter >> 32)), block_len, @as(u32, flags) };
         var rows = Rows{ chaining_value[0..4].*, chaining_value[4..8].*, IV[0..4].*, md };
 
         var m = Rows{ block_words[0..4].*, block_words[4..8].*, block_words[8..12].*, block_words[12..16].* };
@@ -134,7 +134,7 @@ const CompressVectorized = struct {
         rows[2] ^= @Vector(4, u32){ chaining_value[0], chaining_value[1], chaining_value[2], chaining_value[3] };
         rows[3] ^= @Vector(4, u32){ chaining_value[4], chaining_value[5], chaining_value[6], chaining_value[7] };
 
-        return @bitCast([16]u32, rows);
+        return @as([16]u32, @bitCast(rows));
     }
 };
 
@@ -184,8 +184,8 @@ const CompressGeneric = struct {
             IV[1],
             IV[2],
             IV[3],
-            @truncate(u32, counter),
-            @truncate(u32, counter >> 32),
+            @as(u32, @truncate(counter)),
+            @as(u32, @truncate(counter >> 32)),
             block_len,
             flags,
         };
@@ -206,7 +206,7 @@ else
     CompressGeneric.compress;
 
 fn first8Words(words: [16]u32) [8]u32 {
-    return @ptrCast(*const [8]u32, &words).*;
+    return @as(*const [8]u32, @ptrCast(&words)).*;
 }
 
 fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 {
@@ -285,7 +285,7 @@ const ChunkState = struct {
         const want = BLOCK_LEN - self.block_len;
         const take = @min(want, input.len);
         @memcpy(self.block[self.block_len..][0..take], input[0..take]);
-        self.block_len += @truncate(u8, take);
+        self.block_len += @as(u8, @truncate(take));
         return input[take..];
     }
 
@@ -658,7 +658,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
 
     // Setup input pattern
     var input_pattern: [251]u8 = undefined;
-    for (&input_pattern, 0..) |*e, i| e.* = @truncate(u8, i);
+    for (&input_pattern, 0..) |*e, i| e.* = @as(u8, @truncate(i));
 
     // Write repeating input pattern to hasher
     var input_counter = input_len;
lib/std/crypto/Certificate.zig
@@ -312,7 +312,7 @@ pub const Parsed = struct {
         while (name_i < general_names.slice.end) {
             const general_name = try der.Element.parse(subject_alt_name, name_i);
             name_i = general_name.slice.end;
-            switch (@enumFromInt(GeneralNameTag, @intFromEnum(general_name.identifier.tag))) {
+            switch (@as(GeneralNameTag, @enumFromInt(@intFromEnum(general_name.identifier.tag)))) {
                 .dNSName => {
                     const dns_name = subject_alt_name[general_name.slice.start..general_name.slice.end];
                     if (checkHostName(host_name, dns_name)) return;
@@ -379,7 +379,7 @@ pub fn parse(cert: Certificate) ParseError!Parsed {
     const tbs_certificate = try der.Element.parse(cert_bytes, certificate.slice.start);
     const version_elem = try der.Element.parse(cert_bytes, tbs_certificate.slice.start);
     const version = try parseVersion(cert_bytes, version_elem);
-    const serial_number = if (@bitCast(u8, version_elem.identifier) == 0xa0)
+    const serial_number = if (@as(u8, @bitCast(version_elem.identifier)) == 0xa0)
         try der.Element.parse(cert_bytes, version_elem.slice.end)
     else
         version_elem;
@@ -597,8 +597,8 @@ const Date = struct {
             var month: u4 = 1;
             while (month < date.month) : (month += 1) {
                 const days: u64 = std.time.epoch.getDaysInMonth(
-                    @enumFromInt(std.time.epoch.YearLeapKind, @intFromBool(is_leap)),
-                    @enumFromInt(std.time.epoch.Month, month),
+                    @as(std.time.epoch.YearLeapKind, @enumFromInt(@intFromBool(is_leap))),
+                    @as(std.time.epoch.Month, @enumFromInt(month)),
                 );
                 sec += days * std.time.epoch.secs_per_day;
             }
@@ -685,7 +685,7 @@ fn parseEnum(comptime E: type, bytes: []const u8, element: der.Element) ParseEnu
 pub const ParseVersionError = error{ UnsupportedCertificateVersion, CertificateFieldHasInvalidLength };
 
 pub fn parseVersion(bytes: []const u8, version_elem: der.Element) ParseVersionError!Version {
-    if (@bitCast(u8, version_elem.identifier) != 0xa0)
+    if (@as(u8, @bitCast(version_elem.identifier)) != 0xa0)
         return .v1;
 
     if (version_elem.slice.end - version_elem.slice.start != 3)
@@ -864,7 +864,7 @@ pub const der = struct {
 
         pub fn parse(bytes: []const u8, index: u32) ParseElementError!Element {
             var i = index;
-            const identifier = @bitCast(Identifier, bytes[i]);
+            const identifier = @as(Identifier, @bitCast(bytes[i]));
             i += 1;
             const size_byte = bytes[i];
             i += 1;
@@ -878,7 +878,7 @@ pub const der = struct {
                 };
             }
 
-            const len_size = @truncate(u7, size_byte);
+            const len_size = @as(u7, @truncate(size_byte));
             if (len_size > @sizeOf(u32)) {
                 return error.CertificateFieldHasInvalidLength;
             }
@@ -1042,10 +1042,10 @@ pub const rsa = struct {
             var hashed: [Hash.digest_length]u8 = undefined;
 
             while (idx < len) {
-                c[0] = @intCast(u8, (counter >> 24) & 0xFF);
-                c[1] = @intCast(u8, (counter >> 16) & 0xFF);
-                c[2] = @intCast(u8, (counter >> 8) & 0xFF);
-                c[3] = @intCast(u8, counter & 0xFF);
+                c[0] = @as(u8, @intCast((counter >> 24) & 0xFF));
+                c[1] = @as(u8, @intCast((counter >> 16) & 0xFF));
+                c[2] = @as(u8, @intCast((counter >> 8) & 0xFF));
+                c[3] = @as(u8, @intCast(counter & 0xFF));
 
                 std.mem.copyForwards(u8, hash[seed.len..], &c);
                 Hash.hash(&hash, &hashed, .{});
lib/std/crypto/chacha20.zig
@@ -587,8 +587,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type {
 
             const k = keyToWords(key);
             var c: [4]u32 = undefined;
-            c[0] = @truncate(u32, counter);
-            c[1] = @truncate(u32, counter >> 32);
+            c[0] = @as(u32, @truncate(counter));
+            c[1] = @as(u32, @truncate(counter >> 32));
             c[2] = mem.readIntLittle(u32, nonce[0..4]);
             c[3] = mem.readIntLittle(u32, nonce[4..8]);
             ChaChaImpl(rounds_nb).chacha20Xor(out, in, k, c, true);
@@ -600,8 +600,8 @@ fn ChaChaWith64BitNonce(comptime rounds_nb: usize) type {
 
             const k = keyToWords(key);
             var c: [4]u32 = undefined;
-            c[0] = @truncate(u32, counter);
-            c[1] = @truncate(u32, counter >> 32);
+            c[0] = @as(u32, @truncate(counter));
+            c[1] = @as(u32, @truncate(counter >> 32));
             c[2] = mem.readIntLittle(u32, nonce[0..4]);
             c[3] = mem.readIntLittle(u32, nonce[4..8]);
             ChaChaImpl(rounds_nb).chacha20Stream(out, k, c, true);
lib/std/crypto/ecdsa.zig
@@ -122,9 +122,9 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
             pub fn toDer(self: Signature, buf: *[der_encoded_max_length]u8) []u8 {
                 var fb = io.fixedBufferStream(buf);
                 const w = fb.writer();
-                const r_len = @intCast(u8, self.r.len + (self.r[0] >> 7));
-                const s_len = @intCast(u8, self.s.len + (self.s[0] >> 7));
-                const seq_len = @intCast(u8, 2 + r_len + 2 + s_len);
+                const r_len = @as(u8, @intCast(self.r.len + (self.r[0] >> 7)));
+                const s_len = @as(u8, @intCast(self.s.len + (self.s[0] >> 7)));
+                const seq_len = @as(u8, @intCast(2 + r_len + 2 + s_len));
                 w.writeAll(&[_]u8{ 0x30, seq_len }) catch unreachable;
                 w.writeAll(&[_]u8{ 0x02, r_len }) catch unreachable;
                 if (self.r[0] >> 7 != 0) {
lib/std/crypto/ff.zig
@@ -100,7 +100,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
             var x = x_;
             var out = Self.zero;
             for (0..out.limbs.capacity()) |i| {
-                const t = if (@bitSizeOf(T) > t_bits) @truncate(TLimb, x) else x;
+                const t = if (@bitSizeOf(T) > t_bits) @as(TLimb, @truncate(x)) else x;
                 out.limbs.set(i, t);
                 x = math.shr(T, x, t_bits);
             }
@@ -143,9 +143,9 @@ pub fn Uint(comptime max_bits: comptime_int) type {
                 var remaining_bits = t_bits;
                 var limb = self.limbs.get(i);
                 while (remaining_bits >= 8) {
-                    bytes[out_i] |= math.shl(u8, @truncate(u8, limb), shift);
+                    bytes[out_i] |= math.shl(u8, @as(u8, @truncate(limb)), shift);
                     const consumed = 8 - shift;
-                    limb >>= @truncate(u4, consumed);
+                    limb >>= @as(u4, @truncate(consumed));
                     remaining_bits -= consumed;
                     shift = 0;
                     switch (endian) {
@@ -169,7 +169,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
                         },
                     }
                 }
-                bytes[out_i] |= @truncate(u8, limb);
+                bytes[out_i] |= @as(u8, @truncate(limb));
                 shift = remaining_bits;
             }
         }
@@ -190,7 +190,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
                 shift += 8;
                 if (shift >= t_bits) {
                     shift -= t_bits;
-                    out.limbs.set(out_i, @truncate(TLimb, out.limbs.get(out_i)));
+                    out.limbs.set(out_i, @as(TLimb, @truncate(out.limbs.get(out_i))));
                     const overflow = math.shr(Limb, bi, 8 - shift);
                     out_i += 1;
                     if (out_i >= out.limbs.len) {
@@ -242,7 +242,7 @@ pub fn Uint(comptime max_bits: comptime_int) type {
 
         /// Returns `true` if the integer is odd.
         pub fn isOdd(x: Self) bool {
-            return @bitCast(bool, @truncate(u1, x.limbs.get(0)));
+            return @as(bool, @bitCast(@as(u1, @truncate(x.limbs.get(0)))));
         }
 
         /// Adds `y` to `x`, and returns `true` if the operation overflowed.
@@ -273,8 +273,8 @@ pub fn Uint(comptime max_bits: comptime_int) type {
             var carry: u1 = 0;
             for (0..x.limbs_count()) |i| {
                 const res = x_limbs[i] + y_limbs[i] + carry;
-                x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]);
-                carry = @truncate(u1, res >> t_bits);
+                x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]);
+                carry = @as(u1, @truncate(res >> t_bits));
             }
             return carry;
         }
@@ -288,8 +288,8 @@ pub fn Uint(comptime max_bits: comptime_int) type {
             var borrow: u1 = 0;
             for (0..x.limbs_count()) |i| {
                 const res = x_limbs[i] -% y_limbs[i] -% borrow;
-                x_limbs[i] = ct.select(on, @truncate(TLimb, res), x_limbs[i]);
-                borrow = @truncate(u1, res >> t_bits);
+                x_limbs[i] = ct.select(on, @as(TLimb, @truncate(res)), x_limbs[i]);
+                borrow = @as(u1, @truncate(res >> t_bits));
             }
             return borrow;
         }
@@ -432,7 +432,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
             inline for (0..comptime math.log2_int(usize, t_bits)) |_| {
                 y = y *% (2 -% lo *% y);
             }
-            const m0inv = (@as(Limb, 1) << t_bits) - (@truncate(TLimb, y));
+            const m0inv = (@as(Limb, 1) << t_bits) - (@as(TLimb, @truncate(y)));
 
             const zero = Fe{ .v = FeUint.zero };
 
@@ -508,18 +508,18 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
             var need_sub = false;
             var i: usize = t_bits - 1;
             while (true) : (i -= 1) {
-                var carry = @truncate(u1, math.shr(Limb, y, i));
+                var carry = @as(u1, @truncate(math.shr(Limb, y, i)));
                 var borrow: u1 = 0;
                 for (0..self.limbs_count()) |j| {
                     const l = ct.select(need_sub, d_limbs[j], x_limbs[j]);
                     var res = (l << 1) + carry;
-                    x_limbs[j] = @truncate(TLimb, res);
-                    carry = @truncate(u1, res >> t_bits);
+                    x_limbs[j] = @as(TLimb, @truncate(res));
+                    carry = @as(u1, @truncate(res >> t_bits));
 
                     res = x_limbs[j] -% m_limbs[j] -% borrow;
-                    d_limbs[j] = @truncate(TLimb, res);
+                    d_limbs[j] = @as(TLimb, @truncate(res));
 
-                    borrow = @truncate(u1, res >> t_bits);
+                    borrow = @as(u1, @truncate(res >> t_bits));
                 }
                 need_sub = ct.eql(carry, borrow);
                 if (i == 0) break;
@@ -531,7 +531,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
         pub fn add(self: Self, x: Fe, y: Fe) Fe {
             var out = x;
             const overflow = out.v.addWithOverflow(y.v);
-            const underflow = @bitCast(u1, ct.limbsCmpLt(out.v, self.v));
+            const underflow = @as(u1, @bitCast(ct.limbsCmpLt(out.v, self.v)));
             const need_sub = ct.eql(overflow, underflow);
             _ = out.v.conditionalSubWithOverflow(need_sub, self.v);
             return out;
@@ -540,7 +540,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
         /// Subtracts two field elements (mod m).
         pub fn sub(self: Self, x: Fe, y: Fe) Fe {
             var out = x;
-            const underflow = @bitCast(bool, out.v.subWithOverflow(y.v));
+            const underflow = @as(bool, @bitCast(out.v.subWithOverflow(y.v)));
             _ = out.v.conditionalAddWithOverflow(underflow, self.v);
             return out;
         }
@@ -601,7 +601,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
 
                 var wide = ct.mulWide(a_limbs[i], b_limbs[0]);
                 var z_lo = @addWithOverflow(d_limbs[0], wide.lo);
-                const f = @truncate(TLimb, z_lo[0] *% self.m0inv);
+                const f = @as(TLimb, @truncate(z_lo[0] *% self.m0inv));
                 var z_hi = wide.hi +% z_lo[1];
                 wide = ct.mulWide(f, m_limbs[0]);
                 z_lo = @addWithOverflow(z_lo[0], wide.lo);
@@ -620,13 +620,13 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
                     z_lo = @addWithOverflow(z_lo[0], carry);
                     z_hi +%= z_lo[1];
                     if (j > 0) {
-                        d_limbs[j - 1] = @truncate(TLimb, z_lo[0]);
+                        d_limbs[j - 1] = @as(TLimb, @truncate(z_lo[0]));
                     }
                     carry = (z_hi << 1) | (z_lo[0] >> t_bits);
                 }
                 const z = overflow + carry;
-                d_limbs[self.limbs_count() - 1] = @truncate(TLimb, z);
-                overflow = @truncate(u1, z >> t_bits);
+                d_limbs[self.limbs_count() - 1] = @as(TLimb, @truncate(z));
+                overflow = @as(u1, @truncate(z >> t_bits));
             }
             return overflow;
         }
@@ -735,7 +735,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type {
                         t0 = pc[k - 1];
                     } else {
                         for (pc, 0..) |t, i| {
-                            t0.v.cmov(ct.eql(k, @truncate(u8, i + 1)), t.v);
+                            t0.v.cmov(ct.eql(k, @as(u8, @truncate(i + 1))), t.v);
                         }
                     }
                     const t1 = self.montgomeryMul(out, t0);
@@ -771,7 +771,7 @@ const ct_protected = struct {
     fn eql(x: anytype, y: @TypeOf(x)) bool {
         const c1 = @subWithOverflow(x, y)[1];
         const c2 = @subWithOverflow(y, x)[1];
-        return @bitCast(bool, 1 - (c1 | c2));
+        return @as(bool, @bitCast(1 - (c1 | c2)));
     }
 
     // Compares two big integers in constant time, returning true if x < y.
@@ -782,28 +782,28 @@ const ct_protected = struct {
 
         var c: u1 = 0;
         for (0..x.limbs_count()) |i| {
-            c = @truncate(u1, (x_limbs[i] -% y_limbs[i] -% c) >> t_bits);
+            c = @as(u1, @truncate((x_limbs[i] -% y_limbs[i] -% c) >> t_bits));
         }
-        return @bitCast(bool, c);
+        return @as(bool, @bitCast(c));
     }
 
     // Compares two big integers in constant time, returning true if x >= y.
     fn limbsCmpGeq(x: anytype, y: @TypeOf(x)) bool {
-        return @bitCast(bool, 1 - @intFromBool(ct.limbsCmpLt(x, y)));
+        return @as(bool, @bitCast(1 - @intFromBool(ct.limbsCmpLt(x, y))));
     }
 
     // Multiplies two limbs and returns the result as a wide limb.
     fn mulWide(x: Limb, y: Limb) WideLimb {
         const half_bits = @typeInfo(Limb).Int.bits / 2;
         const Half = meta.Int(.unsigned, half_bits);
-        const x0 = @truncate(Half, x);
-        const x1 = @truncate(Half, x >> half_bits);
-        const y0 = @truncate(Half, y);
-        const y1 = @truncate(Half, y >> half_bits);
+        const x0 = @as(Half, @truncate(x));
+        const x1 = @as(Half, @truncate(x >> half_bits));
+        const y0 = @as(Half, @truncate(y));
+        const y1 = @as(Half, @truncate(y >> half_bits));
         const w0 = math.mulWide(Half, x0, y0);
         const t = math.mulWide(Half, x1, y0) + (w0 >> half_bits);
-        var w1: Limb = @truncate(Half, t);
-        const w2 = @truncate(Half, t >> half_bits);
+        var w1: Limb = @as(Half, @truncate(t));
+        const w2 = @as(Half, @truncate(t >> half_bits));
         w1 += math.mulWide(Half, x0, y1);
         const hi = math.mulWide(Half, x1, y1) + w2 + (w1 >> half_bits);
         const lo = x *% y;
@@ -847,8 +847,8 @@ const ct_unprotected = struct {
     fn mulWide(x: Limb, y: Limb) WideLimb {
         const wide = math.mulWide(Limb, x, y);
         return .{
-            .hi = @truncate(Limb, wide >> @typeInfo(Limb).Int.bits),
-            .lo = @truncate(Limb, wide),
+            .hi = @as(Limb, @truncate(wide >> @typeInfo(Limb).Int.bits)),
+            .lo = @as(Limb, @truncate(wide)),
         };
     }
 };
lib/std/crypto/ghash_polyval.zig
@@ -96,28 +96,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
                     const product = asm (
                         \\ vpclmulqdq $0x11, %[x], %[y], %[out]
                         : [out] "=x" (-> @Vector(2, u64)),
-                        : [x] "x" (@bitCast(@Vector(2, u64), x)),
-                          [y] "x" (@bitCast(@Vector(2, u64), y)),
+                        : [x] "x" (@as(@Vector(2, u64), @bitCast(x))),
+                          [y] "x" (@as(@Vector(2, u64), @bitCast(y))),
                     );
-                    return @bitCast(u128, product);
+                    return @as(u128, @bitCast(product));
                 },
                 .lo => {
                     const product = asm (
                         \\ vpclmulqdq $0x00, %[x], %[y], %[out]
                         : [out] "=x" (-> @Vector(2, u64)),
-                        : [x] "x" (@bitCast(@Vector(2, u64), x)),
-                          [y] "x" (@bitCast(@Vector(2, u64), y)),
+                        : [x] "x" (@as(@Vector(2, u64), @bitCast(x))),
+                          [y] "x" (@as(@Vector(2, u64), @bitCast(y))),
                     );
-                    return @bitCast(u128, product);
+                    return @as(u128, @bitCast(product));
                 },
                 .hi_lo => {
                     const product = asm (
                         \\ vpclmulqdq $0x10, %[x], %[y], %[out]
                         : [out] "=x" (-> @Vector(2, u64)),
-                        : [x] "x" (@bitCast(@Vector(2, u64), x)),
-                          [y] "x" (@bitCast(@Vector(2, u64), y)),
+                        : [x] "x" (@as(@Vector(2, u64), @bitCast(x))),
+                          [y] "x" (@as(@Vector(2, u64), @bitCast(y))),
                     );
-                    return @bitCast(u128, product);
+                    return @as(u128, @bitCast(product));
                 },
             }
         }
@@ -129,28 +129,28 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
                     const product = asm (
                         \\ pmull2 %[out].1q, %[x].2d, %[y].2d
                         : [out] "=w" (-> @Vector(2, u64)),
-                        : [x] "w" (@bitCast(@Vector(2, u64), x)),
-                          [y] "w" (@bitCast(@Vector(2, u64), y)),
+                        : [x] "w" (@as(@Vector(2, u64), @bitCast(x))),
+                          [y] "w" (@as(@Vector(2, u64), @bitCast(y))),
                     );
-                    return @bitCast(u128, product);
+                    return @as(u128, @bitCast(product));
                 },
                 .lo => {
                     const product = asm (
                         \\ pmull %[out].1q, %[x].1d, %[y].1d
                         : [out] "=w" (-> @Vector(2, u64)),
-                        : [x] "w" (@bitCast(@Vector(2, u64), x)),
-                          [y] "w" (@bitCast(@Vector(2, u64), y)),
+                        : [x] "w" (@as(@Vector(2, u64), @bitCast(x))),
+                          [y] "w" (@as(@Vector(2, u64), @bitCast(y))),
                     );
-                    return @bitCast(u128, product);
+                    return @as(u128, @bitCast(product));
                 },
                 .hi_lo => {
                     const product = asm (
                         \\ pmull %[out].1q, %[x].1d, %[y].1d
                         : [out] "=w" (-> @Vector(2, u64)),
-                        : [x] "w" (@bitCast(@Vector(2, u64), x >> 64)),
-                          [y] "w" (@bitCast(@Vector(2, u64), y)),
+                        : [x] "w" (@as(@Vector(2, u64), @bitCast(x >> 64))),
+                          [y] "w" (@as(@Vector(2, u64), @bitCast(y))),
                     );
-                    return @bitCast(u128, product);
+                    return @as(u128, @bitCast(product));
                 },
             }
         }
@@ -167,8 +167,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
 
         // Software carryless multiplication of two 64-bit integers using native 128-bit registers.
         fn clmulSoft128(x_: u128, y_: u128, comptime half: Selector) u128 {
-            const x = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_);
-            const y = @truncate(u64, if (half == .hi) y_ >> 64 else y_);
+            const x = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_));
+            const y = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_));
 
             const x0 = x & 0x1111111111111110;
             const x1 = x & 0x2222222222222220;
@@ -216,12 +216,12 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
 
         // Software carryless multiplication of two 128-bit integers using 64-bit registers.
         fn clmulSoft128_64(x_: u128, y_: u128, comptime half: Selector) u128 {
-            const a = @truncate(u64, if (half == .hi or half == .hi_lo) x_ >> 64 else x_);
-            const b = @truncate(u64, if (half == .hi) y_ >> 64 else y_);
-            const a0 = @truncate(u32, a);
-            const a1 = @truncate(u32, a >> 32);
-            const b0 = @truncate(u32, b);
-            const b1 = @truncate(u32, b >> 32);
+            const a = @as(u64, @truncate(if (half == .hi or half == .hi_lo) x_ >> 64 else x_));
+            const b = @as(u64, @truncate(if (half == .hi) y_ >> 64 else y_));
+            const a0 = @as(u32, @truncate(a));
+            const a1 = @as(u32, @truncate(a >> 32));
+            const b0 = @as(u32, @truncate(b));
+            const b1 = @as(u32, @truncate(b >> 32));
             const lo = clmulSoft32(a0, b0);
             const hi = clmulSoft32(a1, b1);
             const mid = clmulSoft32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi;
@@ -256,8 +256,8 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
         // Multiply two 128-bit integers in GF(2^128).
         inline fn clmul128(x: u128, y: u128) I256 {
             if (mul_algorithm == .karatsuba) {
-                const x_hi = @truncate(u64, x >> 64);
-                const y_hi = @truncate(u64, y >> 64);
+                const x_hi = @as(u64, @truncate(x >> 64));
+                const y_hi = @as(u64, @truncate(y >> 64));
                 const r_lo = clmul(x, y, .lo);
                 const r_hi = clmul(x, y, .hi);
                 const r_mid = clmul(x ^ x_hi, y ^ y_hi, .lo) ^ r_lo ^ r_hi;
@@ -407,7 +407,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
             st.pad();
             mem.writeInt(u128, out[0..16], st.acc, endian);
 
-            utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Self)]);
+            utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Self)]);
         }
 
         /// Compute the GHASH of a message.
@@ -442,7 +442,7 @@ test "ghash2" {
     var key: [16]u8 = undefined;
     var i: usize = 0;
     while (i < key.len) : (i += 1) {
-        key[i] = @intCast(u8, i * 15 + 1);
+        key[i] = @as(u8, @intCast(i * 15 + 1));
     }
     const tvs = [_]struct { len: usize, hash: [:0]const u8 }{
         .{ .len = 5263, .hash = "b9395f37c131cd403a327ccf82ec016a" },
@@ -461,7 +461,7 @@ test "ghash2" {
         var m: [tv.len]u8 = undefined;
         i = 0;
         while (i < m.len) : (i += 1) {
-            m[i] = @truncate(u8, i % 254 + 1);
+            m[i] = @as(u8, @truncate(i % 254 + 1));
         }
         var st = Ghash.init(&key);
         st.update(&m);
lib/std/crypto/isap.zig
@@ -67,7 +67,7 @@ pub const IsapA128A = struct {
         var i: usize = 0;
         while (i < y.len * 8 - 1) : (i += 1) {
             const cur_byte_pos = i / 8;
-            const cur_bit_pos = @truncate(u3, 7 - (i % 8));
+            const cur_bit_pos = @as(u3, @truncate(7 - (i % 8)));
             const cur_bit = ((y[cur_byte_pos] >> cur_bit_pos) & 1) << 7;
             isap.st.addByte(cur_bit, 0);
             isap.st.permuteR(1);
lib/std/crypto/keccak_p.zig
@@ -33,7 +33,7 @@ pub fn KeccakF(comptime f: u11) type {
                 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008,
             };
             var rc: [max_rounds]T = undefined;
-            for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @truncate(T, c);
+            for (&rc, RC64[0..max_rounds]) |*t, c| t.* = @as(T, @truncate(c));
             break :rc rc;
         };
 
@@ -75,7 +75,7 @@ pub fn KeccakF(comptime f: u11) type {
 
         /// XOR a byte into the state at a given offset.
         pub fn addByte(self: *Self, byte: u8, offset: usize) void {
-            const z = @sizeOf(T) * @truncate(math.Log2Int(T), offset % @sizeOf(T));
+            const z = @sizeOf(T) * @as(math.Log2Int(T), @truncate(offset % @sizeOf(T)));
             self.st[offset / @sizeOf(T)] ^= @as(T, byte) << z;
         }
 
lib/std/crypto/kyber_d00.zig
@@ -579,7 +579,7 @@ test "invNTTReductions bounds" {
             if (j < 0) {
                 break;
             }
-            xs[@intCast(usize, j)] = 1;
+            xs[@as(usize, @intCast(j))] = 1;
         }
     }
 }
@@ -615,7 +615,7 @@ fn invertMod(a: anytype, p: @TypeOf(a)) @TypeOf(a) {
 
 // Reduce mod q for testing.
 fn modQ32(x: i32) i16 {
-    var y = @intCast(i16, @rem(x, @as(i32, Q)));
+    var y = @as(i16, @intCast(@rem(x, @as(i32, Q))));
     if (y < 0) {
         y += Q;
     }
@@ -638,7 +638,7 @@ fn montReduce(x: i32) i16 {
     // Note that x q' might be as big as 2³² and could overflow the int32
     // multiplication in the last line.  However for any int32s a and b,
     // we have int32(int64(a)*int64(b)) = int32(a*b) and so the result is ok.
-    const m = @truncate(i16, @truncate(i32, x *% qInv));
+    const m = @as(i16, @truncate(@as(i32, @truncate(x *% qInv))));
 
     // Note that x - m q is divisible by R; indeed modulo R we have
     //
@@ -652,7 +652,7 @@ fn montReduce(x: i32) i16 {
     // and as both 2ยนโต q โ‰ค m q, x < 2ยนโต q, we have
     // 2ยนโถ q โ‰ค x - m q < 2ยนโถ and so q โ‰ค (x - m q) / R < q as desired.
     const yR = x - @as(i32, m) * @as(i32, Q);
-    return @bitCast(i16, @truncate(u16, @bitCast(u32, yR) >> 16));
+    return @as(i16, @bitCast(@as(u16, @truncate(@as(u32, @bitCast(yR)) >> 16))));
 }
 
 test "Test montReduce" {
@@ -676,7 +676,7 @@ fn feToMont(x: i16) i16 {
 test "Test feToMont" {
     var x: i32 = -(1 << 15);
     while (x < 1 << 15) : (x += 1) {
-        const y = feToMont(@intCast(i16, x));
+        const y = feToMont(@as(i16, @intCast(x)));
         try testing.expectEqual(modQ32(@as(i32, y)), modQ32(x * r_mod_q));
     }
 }
@@ -703,14 +703,14 @@ fn feBarrettReduce(x: i16) i16 {
     // To actually compute this, note that
     //
     //  ⌊x 20156/2²⁶⌋ = (20159 x) >> 26.
-    return x -% @intCast(i16, (@as(i32, x) * 20159) >> 26) *% Q;
+    return x -% @as(i16, @intCast((@as(i32, x) * 20159) >> 26)) *% Q;
 }
 
 test "Test Barrett reduction" {
     var x: i32 = -(1 << 15);
     while (x < 1 << 15) : (x += 1) {
-        var y1 = feBarrettReduce(@intCast(i16, x));
-        const y2 = @mod(@intCast(i16, x), Q);
+        var y1 = feBarrettReduce(@as(i16, @intCast(x)));
+        const y2 = @mod(@as(i16, @intCast(x)), Q);
         if (x < 0 and @rem(-x, Q) == 0) {
             y1 -= Q;
         }
@@ -729,9 +729,9 @@ fn csubq(x: i16) i16 {
 test "Test csubq" {
     var x: i32 = -29439;
     while (x < 1 << 15) : (x += 1) {
-        const y1 = csubq(@intCast(i16, x));
-        var y2 = @intCast(i16, x);
-        if (@intCast(i16, x) >= Q) {
+        const y1 = csubq(@as(i16, @intCast(x)));
+        var y2 = @as(i16, @intCast(x));
+        if (@as(i16, @intCast(x)) >= Q) {
             y2 -= Q;
         }
         try testing.expectEqual(y1, y2);
@@ -762,7 +762,7 @@ fn computeZetas() [128]i16 {
     @setEvalBranchQuota(10000);
     var ret: [128]i16 = undefined;
     for (&ret, 0..) |*r, i| {
-        const t = @intCast(i16, mpow(@as(i32, zeta), @bitReverse(@intCast(u7, i)), Q));
+        const t = @as(i16, @intCast(mpow(@as(i32, zeta), @bitReverse(@as(u7, @intCast(i))), Q)));
         r.* = csubq(feBarrettReduce(feToMont(t)));
     }
     return ret;
@@ -945,7 +945,7 @@ const Poly = struct {
                 if (i < 0) {
                     break;
                 }
-                p.cs[@intCast(usize, i)] = feBarrettReduce(p.cs[@intCast(usize, i)]);
+                p.cs[@as(usize, @intCast(i))] = feBarrettReduce(p.cs[@as(usize, @intCast(i))]);
             }
         }
 
@@ -1020,8 +1020,8 @@ const Poly = struct {
                 //                  = ⌊(2ᵈ/q)x+½⌋ mod⁺ 2ᵈ
                 //                  = ⌊((x << d) + q/2) / q⌋ mod⁺ 2ᵈ
                 //                  = DIV((x << d) + q/2, q) & ((1<<d) - 1)
-                const t = @intCast(u32, p.cs[in_off + i]) << d;
-                in[i] = @intCast(u16, @divFloor(t + q_over_2, Q) & two_d_min_1);
+                const t = @as(u32, @intCast(p.cs[in_off + i])) << d;
+                in[i] = @as(u16, @intCast(@divFloor(t + q_over_2, Q) & two_d_min_1));
             }
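
A spot-check of the DIV form above against the real-valued definition, for
d = 4 (f64 is assumed exact at these magnitudes, which holds here):

    const std = @import("std");

    test "compress matches ⌊(2ᵈ/q)x + ½⌋ mod⁺ 2ᵈ at d = 4" {
        const Q = 3329;
        const d = 4;
        var x: u32 = 0;
        while (x < Q) : (x += 1) {
            const via_int = (((x << d) + Q / 2) / Q) & ((1 << d) - 1);
            const rounded = @round(@as(f64, @floatFromInt(x)) * (1 << d) / Q);
            const via_float = @as(u32, @intFromFloat(rounded)) % (1 << d);
            try std.testing.expectEqual(via_float, via_int);
        }
    }
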
 
             // Now we pack the d-bit integers from `in' into out as bytes.
@@ -1032,7 +1032,7 @@ const Poly = struct {
                 comptime var todo: usize = 8;
                 inline while (todo > 0) {
                     const out_shift = comptime 8 - todo;
-                    out[out_off + j] |= @truncate(u8, (in[i] >> in_shift) << out_shift);
+                    out[out_off + j] |= @as(u8, @truncate((in[i] >> in_shift) << out_shift));
 
                     const done = comptime @min(@min(d, todo), d - in_shift);
                     todo -= done;
@@ -1094,7 +1094,7 @@ const Poly = struct {
                 //                    = ⌊(qx + 2ᵈ⁻¹)/2ᵈ⌋
                 //                    = (qx + (1<<(d-1))) >> d
                 const qx = @as(u32, out) * @as(u32, Q);
-                ret.cs[out_off + i] = @intCast(i16, (qx + (1 << (d - 1))) >> d);
+                ret.cs[out_off + i] = @as(i16, @intCast((qx + (1 << (d - 1))) >> d));
             }
 
             in_off += in_batch_size;
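
A matching spot-check of the shift-add form above against round(q·y/2ᵈ), at
d = 10 (division by 2ᵈ is exact in f64):

    const std = @import("std");

    test "decompress matches ⌊(q·y + 2ᵈ⁻¹)/2ᵈ⌋ at d = 10" {
        const Q = 3329;
        const d = 10;
        var y: u32 = 0;
        while (y < (1 << d)) : (y += 1) {
            const qx = y * Q;
            const via_int = (qx + (1 << (d - 1))) >> d;
            const rounded = @round(@as(f64, @floatFromInt(qx)) / (1 << d));
            try std.testing.expectEqual(@as(u32, @intFromFloat(rounded)), via_int);
        }
    }
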
@@ -1209,8 +1209,8 @@ const Poly = struct {
             // Extract each a and b separately and set coefficient in polynomial.
             inline for (0..batch_count) |j| {
                 const mask2 = comptime (1 << eta) - 1;
-                const a = @intCast(i16, (d >> (comptime (2 * j * eta))) & mask2);
-                const b = @intCast(i16, (d >> (comptime ((2 * j + 1) * eta))) & mask2);
+                const a = @as(i16, @intCast((d >> (comptime (2 * j * eta))) & mask2));
+                const b = @as(i16, @intCast((d >> (comptime ((2 * j + 1) * eta))) & mask2));
                 ret.cs[batch_count * i + j] = a - b;
             }
         }
@@ -1246,7 +1246,7 @@ const Poly = struct {
 
                 inline for (ts) |t| {
                     if (t < Q) {
-                        ret.cs[i] = @intCast(i16, t);
+                        ret.cs[i] = @as(i16, @intCast(t));
                         i += 1;
 
                         if (i == N) {
@@ -1266,11 +1266,11 @@ const Poly = struct {
     fn toBytes(p: Poly) [bytes_length]u8 {
         var ret: [bytes_length]u8 = undefined;
         for (0..comptime N / 2) |i| {
-            const t0 = @intCast(u16, p.cs[2 * i]);
-            const t1 = @intCast(u16, p.cs[2 * i + 1]);
-            ret[3 * i] = @truncate(u8, t0);
-            ret[3 * i + 1] = @truncate(u8, (t0 >> 8) | (t1 << 4));
-            ret[3 * i + 2] = @truncate(u8, t1 >> 4);
+            const t0 = @as(u16, @intCast(p.cs[2 * i]));
+            const t1 = @as(u16, @intCast(p.cs[2 * i + 1]));
+            ret[3 * i] = @as(u8, @truncate(t0));
+            ret[3 * i + 1] = @as(u8, @truncate((t0 >> 8) | (t1 << 4)));
+            ret[3 * i + 2] = @as(u8, @truncate(t1 >> 4));
         }
         return ret;
     }
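
Each three-byte group above carries two 12-bit coefficients; a worked sketch
with hypothetical values, including the unpacking direction:

    const std = @import("std");

    test "two 12-bit values pack into three bytes and back" {
        const t0: u16 = 0xABC;
        const t1: u16 = 0xDEF;
        const b0 = @as(u8, @truncate(t0));
        const b1 = @as(u8, @truncate((t0 >> 8) | (t1 << 4)));
        const b2 = @as(u8, @truncate(t1 >> 4));
        try std.testing.expectEqual(t0, @as(u16, b0) | (@as(u16, b1 & 0xF) << 8));
        try std.testing.expectEqual(t1, (@as(u16, b1) >> 4) | (@as(u16, b2) << 4));
    }
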
@@ -1356,7 +1356,7 @@ fn Vec(comptime K: u8) type {
         fn noise(comptime eta: u8, nonce: u8, seed: *const [32]u8) Self {
             var ret: Self = undefined;
             for (0..K) |i| {
-                ret.ps[i] = Poly.noise(eta, nonce + @intCast(u8, i), seed);
+                ret.ps[i] = Poly.noise(eta, nonce + @as(u8, @intCast(i)), seed);
             }
             return ret;
         }
@@ -1534,7 +1534,7 @@ test "Compression" {
 test "noise" {
     var seed: [32]u8 = undefined;
     for (&seed, 0..) |*s, i| {
-        s.* = @intCast(u8, i);
+        s.* = @as(u8, @intCast(i));
     }
     try testing.expectEqual(Poly.noise(3, 37, &seed).cs, .{
         0,  0,  1,  -1, 0,  2,  0,  -1, -1, 3,  0,  1,  -2, -2, 0,  1,  -2,
@@ -1580,7 +1580,7 @@ test "noise" {
 test "uniform sampling" {
     var seed: [32]u8 = undefined;
     for (&seed, 0..) |*s, i| {
-        s.* = @intCast(u8, i);
+        s.* = @as(u8, @intCast(i));
     }
     try testing.expectEqual(Poly.uniform(seed, 1, 0).cs, .{
         797,  993,  161,  6,    2608, 2385, 2096, 2661, 1676, 247,  2440,
@@ -1623,17 +1623,17 @@ test "Test inner PKE" {
     var seed: [32]u8 = undefined;
     var pt: [32]u8 = undefined;
     for (&seed, &pt, 0..) |*s, *p, i| {
-        s.* = @intCast(u8, i);
-        p.* = @intCast(u8, i + 32);
+        s.* = @as(u8, @intCast(i));
+        p.* = @as(u8, @intCast(i + 32));
     }
     inline for (modes) |mode| {
         for (0..100) |i| {
             var pk: mode.InnerPk = undefined;
             var sk: mode.InnerSk = undefined;
-            seed[0] = @intCast(u8, i);
+            seed[0] = @as(u8, @intCast(i));
             mode.innerKeyFromSeed(seed, &pk, &sk);
             for (0..10) |j| {
-                seed[1] = @intCast(u8, j);
+                seed[1] = @as(u8, @intCast(j));
                 try testing.expectEqual(sk.decrypt(&pk.encrypt(&pt, &seed)), pt);
             }
         }
@@ -1643,18 +1643,18 @@ test "Test inner PKE" {
 test "Test happy flow" {
     var seed: [64]u8 = undefined;
     for (&seed, 0..) |*s, i| {
-        s.* = @intCast(u8, i);
+        s.* = @as(u8, @intCast(i));
     }
     inline for (modes) |mode| {
         for (0..100) |i| {
-            seed[0] = @intCast(u8, i);
+            seed[0] = @as(u8, @intCast(i));
             const kp = try mode.KeyPair.create(seed);
             const sk = try mode.SecretKey.fromBytes(&kp.secret_key.toBytes());
             try testing.expectEqual(sk, kp.secret_key);
             const pk = try mode.PublicKey.fromBytes(&kp.public_key.toBytes());
             try testing.expectEqual(pk, kp.public_key);
             for (0..10) |j| {
-                seed[1] = @intCast(u8, j);
+                seed[1] = @as(u8, @intCast(j));
                 const e = pk.encaps(seed[0..32].*);
                 try testing.expectEqual(e.shared_secret, try sk.decaps(&e.ciphertext));
             }
@@ -1675,7 +1675,7 @@ test "NIST KAT test" {
         const mode = modeHash[0];
         var seed: [48]u8 = undefined;
         for (&seed, 0..) |*s, i| {
-            s.* = @intCast(u8, i);
+            s.* = @as(u8, @intCast(i));
         }
         var f = sha2.Sha256.init(.{});
         const fw = f.writer();
lib/std/crypto/md5.zig
@@ -80,7 +80,7 @@ pub const Md5 = struct {
         // Copy any remainder for next pass.
         const b_slice = b[off..];
         @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
-        d.buf_len += @intCast(u8, b_slice.len);
+        d.buf_len += @as(u8, @intCast(b_slice.len));
 
         // Md5 uses the bottom 64-bits for length padding
         d.total_len +%= b.len;
@@ -103,9 +103,9 @@ pub const Md5 = struct {
         // Append message length.
         var i: usize = 1;
         var len = d.total_len >> 5;
-        d.buf[56] = @intCast(u8, d.total_len & 0x1f) << 3;
+        d.buf[56] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
         while (i < 8) : (i += 1) {
-            d.buf[56 + i] = @intCast(u8, len & 0xff);
+            d.buf[56 + i] = @as(u8, @intCast(len & 0xff));
             len >>= 8;
         }
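
A sketch of the padding step above: the final eight bytes receive the message
length in bits as a little-endian u64, built the same way as the loop above
(the byte count is hypothetical):

    const std = @import("std");

    test "the length tail encodes the bit count little-endian" {
        const total_len: u64 = 100; // bytes hashed
        var tail = [_]u8{0} ** 8;
        tail[0] = @as(u8, @intCast(total_len & 0x1f)) << 3;
        var len = total_len >> 5;
        var i: usize = 1;
        while (i < 8) : (i += 1) {
            tail[i] = @as(u8, @intCast(len & 0xff));
            len >>= 8;
        }
        try std.testing.expectEqual(total_len * 8, std.mem.readIntLittle(u64, &tail));
    }
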
 
lib/std/crypto/pbkdf2.zig
@@ -74,7 +74,7 @@ pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, com
     //      block
     //
 
-    const blocks_count = @intCast(u32, std.math.divCeil(usize, dk_len, h_len) catch unreachable);
+    const blocks_count = @as(u32, @intCast(std.math.divCeil(usize, dk_len, h_len) catch unreachable));
     var r = dk_len % h_len;
     if (r == 0) {
         r = h_len;
lib/std/crypto/phc_encoding.zig
@@ -193,7 +193,7 @@ pub fn serialize(params: anytype, str: []u8) Error![]const u8 {
 pub fn calcSize(params: anytype) usize {
     var buf = io.countingWriter(io.null_writer);
     serializeTo(params, buf.writer()) catch unreachable;
-    return @intCast(usize, buf.bytes_written);
+    return @as(usize, @intCast(buf.bytes_written));
 }
 
 fn serializeTo(params: anytype, out: anytype) !void {
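
calcSize sizes the encoding by serializing into a counting writer that discards
its bytes; a minimal sketch of the same trick (the string is hypothetical):

    const std = @import("std");

    test "countingWriter measures output without storing it" {
        var counter = std.io.countingWriter(std.io.null_writer);
        try counter.writer().writeAll("$scrypt$ln=15,r=8,p=1");
        try std.testing.expectEqual(@as(u64, 21), counter.bytes_written);
    }
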
lib/std/crypto/poly1305.zig
@@ -76,12 +76,12 @@ pub const Poly1305 = struct {
             const m1 = h1r0 +% h0r1;
             const m2 = h2r0 +% h1r1;
 
-            const t0 = @truncate(u64, m0);
-            v = @addWithOverflow(@truncate(u64, m1), @truncate(u64, m0 >> 64));
+            const t0 = @as(u64, @truncate(m0));
+            v = @addWithOverflow(@as(u64, @truncate(m1)), @as(u64, @truncate(m0 >> 64)));
             const t1 = v[0];
-            v = add(@truncate(u64, m2), @truncate(u64, m1 >> 64), v[1]);
+            v = add(@as(u64, @truncate(m2)), @as(u64, @truncate(m1 >> 64)), v[1]);
             const t2 = v[0];
-            v = add(@truncate(u64, m3), @truncate(u64, m2 >> 64), v[1]);
+            v = add(@as(u64, @truncate(m3)), @as(u64, @truncate(m2 >> 64)), v[1]);
             const t3 = v[0];
 
             // Partial reduction
@@ -98,9 +98,9 @@ pub const Poly1305 = struct {
             h1 = v[0];
             h2 +%= v[1];
             const cc = (cclo | (@as(u128, cchi) << 64)) >> 2;
-            v = @addWithOverflow(h0, @truncate(u64, cc));
+            v = @addWithOverflow(h0, @as(u64, @truncate(cc)));
             h0 = v[0];
-            v = add(h1, @truncate(u64, cc >> 64), v[1]);
+            v = add(h1, @as(u64, @truncate(cc >> 64)), v[1]);
             h1 = v[0];
             h2 +%= v[1];
         }
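
The v[0]/v[1] pairs above come from @addWithOverflow, which returns a tuple of
the wrapped sum and a u1 carry; a minimal sketch:

    const std = @import("std");

    test "@addWithOverflow returns .{ wrapped sum, carry }" {
        const v = @addWithOverflow(@as(u64, std.math.maxInt(u64)), @as(u64, 1));
        try std.testing.expectEqual(@as(u64, 0), v[0]);
        try std.testing.expectEqual(@as(u1, 1), v[1]);
    }
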
@@ -185,7 +185,7 @@ pub const Poly1305 = struct {
         mem.writeIntLittle(u64, out[0..8], st.h[0]);
         mem.writeIntLittle(u64, out[8..16], st.h[1]);
 
-        utils.secureZero(u8, @ptrCast([*]u8, st)[0..@sizeOf(Poly1305)]);
+        utils.secureZero(u8, @as([*]u8, @ptrCast(st))[0..@sizeOf(Poly1305)]);
     }
 
     pub fn create(out: *[mac_length]u8, msg: []const u8, key: *const [key_length]u8) void {
lib/std/crypto/salsa20.zig
@@ -337,8 +337,8 @@ pub fn Salsa(comptime rounds: comptime_int) type {
             var d: [4]u32 = undefined;
             d[0] = mem.readIntLittle(u32, nonce[0..4]);
             d[1] = mem.readIntLittle(u32, nonce[4..8]);
-            d[2] = @truncate(u32, counter);
-            d[3] = @truncate(u32, counter >> 32);
+            d[2] = @as(u32, @truncate(counter));
+            d[3] = @as(u32, @truncate(counter >> 32));
             SalsaImpl(rounds).salsaXor(out, in, keyToWords(key), d);
         }
     };
lib/std/crypto/scrypt.zig
@@ -73,11 +73,11 @@ fn salsaXor(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16)
 }
 
 fn blockMix(tmp: *align(16) [16]u32, in: []align(16) const u32, out: []align(16) u32, r: u30) void {
-    blockCopy(tmp, @alignCast(16, in[(2 * r - 1) * 16 ..]), 1);
+    blockCopy(tmp, @alignCast(in[(2 * r - 1) * 16 ..]), 1);
     var i: usize = 0;
     while (i < 2 * r) : (i += 2) {
-        salsaXor(tmp, @alignCast(16, in[i * 16 ..]), @alignCast(16, out[i * 8 ..]));
-        salsaXor(tmp, @alignCast(16, in[i * 16 + 16 ..]), @alignCast(16, out[i * 8 + r * 16 ..]));
+        salsaXor(tmp, @alignCast(in[i * 16 ..]), @alignCast(out[i * 8 ..]));
+        salsaXor(tmp, @alignCast(in[i * 16 + 16 ..]), @alignCast(out[i * 8 + r * 16 ..]));
     }
 }
 
@@ -87,8 +87,8 @@ fn integerify(b: []align(16) const u32, r: u30) u64 {
 }
 
 fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16) u32) void {
-    var x = @alignCast(16, xy[0 .. 32 * r]);
-    var y = @alignCast(16, xy[32 * r ..]);
+    var x: []align(16) u32 = @alignCast(xy[0 .. 32 * r]);
+    var y: []align(16) u32 = @alignCast(xy[32 * r ..]);
 
     for (x, 0..) |*v1, j| {
         v1.* = mem.readIntSliceLittle(u32, b[4 * j ..]);
@@ -97,21 +97,21 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
     var tmp: [16]u32 align(16) = undefined;
     var i: usize = 0;
     while (i < n) : (i += 2) {
-        blockCopy(@alignCast(16, v[i * (32 * r) ..]), x, 2 * r);
+        blockCopy(@alignCast(v[i * (32 * r) ..]), x, 2 * r);
         blockMix(&tmp, x, y, r);
 
-        blockCopy(@alignCast(16, v[(i + 1) * (32 * r) ..]), y, 2 * r);
+        blockCopy(@alignCast(v[(i + 1) * (32 * r) ..]), y, 2 * r);
         blockMix(&tmp, y, x, r);
     }
 
     i = 0;
     while (i < n) : (i += 2) {
-        var j = @intCast(usize, integerify(x, r) & (n - 1));
-        blockXor(x, @alignCast(16, v[j * (32 * r) ..]), 2 * r);
+        var j = @as(usize, @intCast(integerify(x, r) & (n - 1)));
+        blockXor(x, @alignCast(v[j * (32 * r) ..]), 2 * r);
         blockMix(&tmp, x, y, r);
 
-        j = @intCast(usize, integerify(y, r) & (n - 1));
-        blockXor(y, @alignCast(16, v[j * (32 * r) ..]), 2 * r);
+        j = @as(usize, @intCast(integerify(y, r) & (n - 1)));
+        blockXor(y, @alignCast(v[j * (32 * r) ..]), 2 * r);
         blockMix(&tmp, y, x, r);
     }
 
@@ -147,12 +147,12 @@ pub const Params = struct {
         const r: u30 = 8;
         if (ops < mem_limit / 32) {
             const max_n = ops / (r * 4);
-            return Self{ .r = r, .p = 1, .ln = @intCast(u6, math.log2(max_n)) };
+            return Self{ .r = r, .p = 1, .ln = @as(u6, @intCast(math.log2(max_n))) };
         } else {
-            const max_n = mem_limit / (@intCast(usize, r) * 128);
-            const ln = @intCast(u6, math.log2(max_n));
+            const max_n = mem_limit / (@as(usize, @intCast(r)) * 128);
+            const ln = @as(u6, @intCast(math.log2(max_n)));
             const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln));
-            return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln };
+            return Self{ .r = r, .p = @as(u30, @intCast(max_rp / @as(u64, r))), .ln = ln };
         }
     }
 };
@@ -185,7 +185,7 @@ pub fn kdf(
 
     const n64 = @as(u64, 1) << params.ln;
     if (n64 > max_size) return KdfError.WeakParameters;
-    const n = @intCast(usize, n64);
+    const n = @as(usize, @intCast(n64));
     if (@as(u64, params.r) * @as(u64, params.p) >= 1 << 30 or
         params.r > max_int / 128 / @as(u64, params.p) or
         params.r > max_int / 256 or
@@ -201,7 +201,7 @@ pub fn kdf(
     try pwhash.pbkdf2(dk, password, salt, 1, HmacSha256);
     var i: u32 = 0;
     while (i < params.p) : (i += 1) {
-        smix(@alignCast(16, dk[i * 128 * params.r ..]), params.r, n, v, xy);
+        smix(@alignCast(dk[i * 128 * params.r ..]), params.r, n, v, xy);
     }
     try pwhash.pbkdf2(derived_key, password, dk, 1, HmacSha256);
 }
@@ -309,7 +309,7 @@ const crypt_format = struct {
     pub fn calcSize(params: anytype) usize {
         var buf = io.countingWriter(io.null_writer);
         serializeTo(params, buf.writer()) catch unreachable;
-        return @intCast(usize, buf.bytes_written);
+        return @as(usize, @intCast(buf.bytes_written));
     }
 
     fn serializeTo(params: anytype, out: anytype) !void {
@@ -343,7 +343,7 @@ const crypt_format = struct {
             fn intEncode(dst: []u8, src: anytype) void {
                 var n = src;
                 for (dst) |*x| {
-                    x.* = map64[@truncate(u6, n)];
+                    x.* = map64[@as(u6, @truncate(n))];
                     n = math.shr(@TypeOf(src), n, 6);
                 }
             }
@@ -352,7 +352,7 @@ const crypt_format = struct {
                 var v: T = 0;
                 for (src, 0..) |x, i| {
                     const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding;
-                    v |= @intCast(T, vi) << @intCast(math.Log2Int(T), i * 6);
+                    v |= @as(T, @intCast(vi)) << @as(math.Log2Int(T), @intCast(i * 6));
                 }
                 return v;
             }
@@ -366,10 +366,10 @@ const crypt_format = struct {
                 const leftover = src[i * 4 ..];
                 var v: u24 = 0;
                 for (leftover, 0..) |_, j| {
-                    v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @intCast(u5, j * 6);
+                    v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @as(u5, @intCast(j * 6));
                 }
                 for (dst[i * 3 ..], 0..) |*x, j| {
-                    x.* = @truncate(u8, v >> @intCast(u5, j * 8));
+                    x.* = @as(u8, @truncate(v >> @as(u5, @intCast(j * 8))));
                 }
             }
 
@@ -382,7 +382,7 @@ const crypt_format = struct {
                 const leftover = src[i * 3 ..];
                 var v: u24 = 0;
                 for (leftover, 0..) |x, j| {
-                    v |= @as(u24, x) << @intCast(u5, j * 8);
+                    v |= @as(u24, x) << @as(u5, @intCast(j * 8));
                 }
                 intEncode(dst[i * 4 ..], v);
             }
lib/std/crypto/sha1.zig
@@ -75,7 +75,7 @@ pub const Sha1 = struct {
 
         // Copy any remainder for next pass.
         @memcpy(d.buf[d.buf_len..][0 .. b.len - off], b[off..]);
-        d.buf_len += @intCast(u8, b[off..].len);
+        d.buf_len += @as(u8, @intCast(b[off..].len));
 
         d.total_len += b.len;
     }
@@ -97,9 +97,9 @@ pub const Sha1 = struct {
         // Append message length.
         var i: usize = 1;
         var len = d.total_len >> 5;
-        d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3;
+        d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
         while (i < 8) : (i += 1) {
-            d.buf[63 - i] = @intCast(u8, len & 0xff);
+            d.buf[63 - i] = @as(u8, @intCast(len & 0xff));
             len >>= 8;
         }
 
lib/std/crypto/sha2.zig
@@ -132,7 +132,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
             // Copy any remainder for next pass.
             const b_slice = b[off..];
             @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
-            d.buf_len += @intCast(u8, b[off..].len);
+            d.buf_len += @as(u8, @intCast(b[off..].len));
 
             d.total_len += b.len;
         }
@@ -159,9 +159,9 @@ fn Sha2x32(comptime params: Sha2Params32) type {
             // Append message length.
             var i: usize = 1;
             var len = d.total_len >> 5;
-            d.buf[63] = @intCast(u8, d.total_len & 0x1f) << 3;
+            d.buf[63] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
             while (i < 8) : (i += 1) {
-                d.buf[63 - i] = @intCast(u8, len & 0xff);
+                d.buf[63 - i] = @as(u8, @intCast(len & 0xff));
                 len >>= 8;
             }
 
@@ -194,7 +194,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
 
         fn round(d: *Self, b: *const [64]u8) void {
             var s: [64]u32 align(16) = undefined;
-            for (@ptrCast(*align(1) const [16]u32, b), 0..) |*elem, i| {
+            for (@as(*align(1) const [16]u32, @ptrCast(b)), 0..) |*elem, i| {
                 s[i] = mem.readIntBig(u32, mem.asBytes(elem));
             }
 
@@ -203,7 +203,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
                     .aarch64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.aarch64.featureSetHas(builtin.cpu.features, .sha2)) {
                         var x: v4u32 = d.s[0..4].*;
                         var y: v4u32 = d.s[4..8].*;
-                        const s_v = @ptrCast(*[16]v4u32, &s);
+                        const s_v = @as(*[16]v4u32, @ptrCast(&s));
 
                         comptime var k: u8 = 0;
                         inline while (k < 16) : (k += 1) {
@@ -241,7 +241,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
                     .x86_64 => if (builtin.zig_backend != .stage2_c and comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sha)) {
                         var x: v4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] };
                         var y: v4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] };
-                        const s_v = @ptrCast(*[16]v4u32, &s);
+                        const s_v = @as(*[16]v4u32, @ptrCast(&s));
 
                         comptime var k: u8 = 0;
                         inline while (k < 16) : (k += 1) {
@@ -273,7 +273,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
                                 : [x] "=x" (-> v4u32),
                                 : [_] "0" (x),
                                   [y] "x" (y),
-                                  [_] "{xmm0}" (@bitCast(v4u32, @bitCast(u128, w) >> 64)),
+                                  [_] "{xmm0}" (@as(v4u32, @bitCast(@as(u128, @bitCast(w)) >> 64))),
                             );
                         }
 
@@ -624,7 +624,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
             // Copy any remainder for next pass.
             const b_slice = b[off..];
             @memcpy(d.buf[d.buf_len..][0..b_slice.len], b_slice);
-            d.buf_len += @intCast(u8, b[off..].len);
+            d.buf_len += @as(u8, @intCast(b[off..].len));
 
             d.total_len += b.len;
         }
@@ -651,9 +651,9 @@ fn Sha2x64(comptime params: Sha2Params64) type {
             // Append message length.
             var i: usize = 1;
             var len = d.total_len >> 5;
-            d.buf[127] = @intCast(u8, d.total_len & 0x1f) << 3;
+            d.buf[127] = @as(u8, @intCast(d.total_len & 0x1f)) << 3;
             while (i < 16) : (i += 1) {
-                d.buf[127 - i] = @intCast(u8, len & 0xff);
+                d.buf[127 - i] = @as(u8, @intCast(len & 0xff));
                 len >>= 8;
             }
 
lib/std/crypto/siphash.zig
@@ -83,13 +83,13 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
                 @call(.always_inline, round, .{ self, blob });
             }
 
-            self.msg_len +%= @truncate(u8, b.len);
+            self.msg_len +%= @as(u8, @truncate(b.len));
         }
 
         fn final(self: *Self, b: []const u8) T {
             std.debug.assert(b.len < 8);
 
-            self.msg_len +%= @truncate(u8, b.len);
+            self.msg_len +%= @as(u8, @truncate(b.len));
 
             var buf = [_]u8{0} ** 8;
             @memcpy(buf[0..b.len], b);
@@ -202,7 +202,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
 
             const b_slice = b[off + aligned_len ..];
             @memcpy(self.buf[self.buf_len..][0..b_slice.len], b_slice);
-            self.buf_len += @intCast(u8, b_slice.len);
+            self.buf_len += @as(u8, @intCast(b_slice.len));
         }
 
         pub fn peek(self: Self) [mac_length]u8 {
@@ -329,7 +329,7 @@ test "siphash64-2-4 sanity" {
 
     var buffer: [64]u8 = undefined;
     for (vectors, 0..) |vector, i| {
-        buffer[i] = @intCast(u8, i);
+        buffer[i] = @as(u8, @intCast(i));
 
         var out: [siphash.mac_length]u8 = undefined;
         siphash.create(&out, buffer[0..i], test_key);
@@ -409,7 +409,7 @@ test "siphash128-2-4 sanity" {
 
     var buffer: [64]u8 = undefined;
     for (vectors, 0..) |vector, i| {
-        buffer[i] = @intCast(u8, i);
+        buffer[i] = @as(u8, @intCast(i));
 
         var out: [siphash.mac_length]u8 = undefined;
         siphash.create(&out, buffer[0..i], test_key[0..]);
@@ -420,7 +420,7 @@ test "siphash128-2-4 sanity" {
 test "iterative non-divisible update" {
     var buf: [1024]u8 = undefined;
     for (&buf, 0..) |*e, i| {
-        e.* = @truncate(u8, i);
+        e.* = @as(u8, @truncate(i));
     }
 
     const key = "0x128dad08f12307";
lib/std/crypto/tlcsprng.zig
@@ -102,7 +102,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
             wipe_mem = mem.asBytes(&S.buf);
         }
     }
-    const ctx = @ptrCast(*Context, wipe_mem.ptr);
+    const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
 
     switch (ctx.init_state) {
         .uninitialized => {
@@ -158,7 +158,7 @@ fn childAtForkHandler() callconv(.C) void {
 }
 
 fn fillWithCsprng(buffer: []u8) void {
-    const ctx = @ptrCast(*Context, wipe_mem.ptr);
+    const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
     return ctx.rng.fill(buffer);
 }
 
@@ -174,7 +174,7 @@ fn initAndFill(buffer: []u8) void {
     // the `std.options.cryptoRandomSeed` function is provided.
     std.options.cryptoRandomSeed(&seed);
 
-    const ctx = @ptrCast(*Context, wipe_mem.ptr);
+    const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
     ctx.rng = Rng.init(seed);
     std.crypto.utils.secureZero(u8, &seed);
 
lib/std/crypto/tls.zig
@@ -371,12 +371,12 @@ pub fn hkdfExpandLabel(
     const tls13 = "tls13 ";
     var buf: [2 + 1 + tls13.len + max_label_len + 1 + max_context_len]u8 = undefined;
     mem.writeIntBig(u16, buf[0..2], len);
-    buf[2] = @intCast(u8, tls13.len + label.len);
+    buf[2] = @as(u8, @intCast(tls13.len + label.len));
     buf[3..][0..tls13.len].* = tls13.*;
     var i: usize = 3 + tls13.len;
     @memcpy(buf[i..][0..label.len], label);
     i += label.len;
-    buf[i] = @intCast(u8, context.len);
+    buf[i] = @as(u8, @intCast(context.len));
     i += 1;
     @memcpy(buf[i..][0..context.len], context);
     i += context.len;
@@ -411,24 +411,24 @@ pub inline fn enum_array(comptime E: type, comptime tags: []const E) [2 + @sizeO
     assert(@sizeOf(E) == 2);
     var result: [tags.len * 2]u8 = undefined;
     for (tags, 0..) |elem, i| {
-        result[i * 2] = @truncate(u8, @intFromEnum(elem) >> 8);
-        result[i * 2 + 1] = @truncate(u8, @intFromEnum(elem));
+        result[i * 2] = @as(u8, @truncate(@intFromEnum(elem) >> 8));
+        result[i * 2 + 1] = @as(u8, @truncate(@intFromEnum(elem)));
     }
     return array(2, result);
 }
 
 pub inline fn int2(x: u16) [2]u8 {
     return .{
-        @truncate(u8, x >> 8),
-        @truncate(u8, x),
+        @as(u8, @truncate(x >> 8)),
+        @as(u8, @truncate(x)),
     };
 }
 
 pub inline fn int3(x: u24) [3]u8 {
     return .{
-        @truncate(u8, x >> 16),
-        @truncate(u8, x >> 8),
-        @truncate(u8, x),
+        @as(u8, @truncate(x >> 16)),
+        @as(u8, @truncate(x >> 8)),
+        @as(u8, @truncate(x)),
     };
 }
 
@@ -513,7 +513,7 @@ pub const Decoder = struct {
             .Enum => |info| {
                 const int = d.decode(info.tag_type);
                 if (info.is_exhaustive) @compileError("exhaustive enum cannot be used");
-                return @enumFromInt(T, int);
+                return @as(T, @enumFromInt(int));
             },
             else => @compileError("unsupported type: " ++ @typeName(T)),
         }
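
The is_exhaustive check above exists because @enumFromInt into a non-exhaustive
enum accepts any value of the tag type; a minimal sketch (hypothetical enum):

    const std = @import("std");

    test "non-exhaustive enums accept unknown tag values" {
        const CipherSuite = enum(u16) { aes_128_gcm_sha256 = 0x1301, _ };
        const cs = @as(CipherSuite, @enumFromInt(0x9999));
        try std.testing.expectEqual(@as(u16, 0x9999), @intFromEnum(cs));
    }
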
lib/std/crypto/utils.zig
@@ -24,7 +24,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool {
             const s = @typeInfo(C).Int.bits;
             const Cu = std.meta.Int(.unsigned, s);
             const Cext = std.meta.Int(.unsigned, s + 1);
-            return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s));
+            return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s))));
         },
         .Vector => |info| {
             const C = info.child;
@@ -35,7 +35,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool {
             const s = @typeInfo(C).Int.bits;
             const Cu = std.meta.Int(.unsigned, s);
             const Cext = std.meta.Int(.unsigned, s + 1);
-            return @bitCast(bool, @truncate(u1, (@as(Cext, @bitCast(Cu, acc)) -% 1) >> s));
+            return @as(bool, @bitCast(@as(u1, @truncate((@as(Cext, @as(Cu, @bitCast(acc))) -% 1) >> s))));
         },
         else => {
             @compileError("Only arrays and vectors can be compared");
@@ -60,14 +60,14 @@ pub fn timingSafeCompare(comptime T: type, a: []const T, b: []const T, endian: E
             i -= 1;
             const x1 = a[i];
             const x2 = b[i];
-            gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq;
-            eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
+            gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq;
+            eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits));
         }
     } else {
         for (a, 0..) |x1, i| {
             const x2 = b[i];
-            gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq;
-            eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
+            gt |= @as(T, @truncate((@as(Cext, x2) -% @as(Cext, x1)) >> bits)) & eq;
+            eq &= @as(T, @truncate((@as(Cext, (x2 ^ x1)) -% 1) >> bits));
         }
     }
     if (gt != 0) {
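
The eq/gt updates rely on a widened subtraction: for an s-bit value x held in
s+1 bits, bit s of x -% 1 is set exactly when x == 0. A sketch at s = 8:

    const std = @import("std");

    test "(x -% 1) >> 8 in u9 flags x == 0" {
        var x: u9 = 0;
        while (x < 256) : (x += 1) {
            const flag = @as(u1, @truncate((x -% 1) >> 8));
            try std.testing.expectEqual(x == 0, flag == 1);
        }
    }
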
@@ -102,7 +102,7 @@ pub fn timingSafeAdd(comptime T: type, a: []const T, b: []const T, result: []T,
             carry = ov1[1] | ov2[1];
         }
     }
-    return @bitCast(bool, carry);
+    return @as(bool, @bitCast(carry));
 }
 
 /// Subtract two integers serialized as arrays of the same size, in constant time.
@@ -129,7 +129,7 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
             borrow = ov1[1] | ov2[1];
         }
     }
-    return @bitCast(bool, borrow);
+    return @as(bool, @bitCast(borrow));
 }
 
 /// Sets a slice to zeroes.
lib/std/event/lock.zig
@@ -55,7 +55,7 @@ pub const Lock = struct {
         const head = switch (self.head) {
             UNLOCKED => unreachable,
             LOCKED => null,
-            else => @ptrFromInt(*Waiter, self.head),
+            else => @as(*Waiter, @ptrFromInt(self.head)),
         };
 
         if (head) |h| {
@@ -102,7 +102,7 @@ pub const Lock = struct {
                         break :blk null;
                     },
                     else => {
-                        const waiter = @ptrFromInt(*Waiter, self.lock.head);
+                        const waiter = @as(*Waiter, @ptrFromInt(self.lock.head));
                         self.lock.head = if (waiter.next == null) LOCKED else @intFromPtr(waiter.next);
                         if (waiter.next) |next|
                             next.tail = waiter.tail;
@@ -130,7 +130,7 @@ test "std.event.Lock" {
     var lock = Lock{};
     testLock(&lock);
 
-    const expected_result = [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
+    const expected_result = [1]i32{3 * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len;
     try testing.expectEqualSlices(i32, &expected_result, &shared_test_data);
 }
 fn testLock(lock: *Lock) void {
lib/std/event/loop.zig
@@ -556,7 +556,7 @@ pub const Loop = struct {
                 self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.IN);
             },
             .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
-                self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT);
+                self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT);
             },
             else => @compileError("Unsupported OS"),
         }
@@ -568,7 +568,7 @@ pub const Loop = struct {
                 self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT);
             },
             .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
-                self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
+                self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
             },
             else => @compileError("Unsupported OS"),
         }
@@ -580,8 +580,8 @@ pub const Loop = struct {
                 self.linuxWaitFd(fd, os.linux.EPOLL.ET | os.linux.EPOLL.ONESHOT | os.linux.EPOLL.OUT | os.linux.EPOLL.IN);
             },
             .macos, .ios, .tvos, .watchos, .freebsd, .netbsd, .dragonfly, .openbsd => {
-                self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_READ, os.system.EV_ONESHOT);
-                self.bsdWaitKev(@intCast(usize, fd), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
+                self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_READ, os.system.EV_ONESHOT);
+                self.bsdWaitKev(@as(usize, @intCast(fd)), os.system.EVFILT_WRITE, os.system.EV_ONESHOT);
             },
             else => @compileError("Unsupported OS"),
         }
@@ -1415,7 +1415,7 @@ pub const Loop = struct {
                     var events: [1]os.linux.epoll_event = undefined;
                     const count = os.epoll_wait(self.os_data.epollfd, events[0..], -1);
                     for (events[0..count]) |ev| {
-                        const resume_node = @ptrFromInt(*ResumeNode, ev.data.ptr);
+                        const resume_node = @as(*ResumeNode, @ptrFromInt(ev.data.ptr));
                         const handle = resume_node.handle;
                         const resume_node_id = resume_node.id;
                         switch (resume_node_id) {
@@ -1439,7 +1439,7 @@ pub const Loop = struct {
                     const empty_kevs = &[0]os.Kevent{};
                     const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
                     for (eventlist[0..count]) |ev| {
-                        const resume_node = @ptrFromInt(*ResumeNode, ev.udata);
+                        const resume_node = @as(*ResumeNode, @ptrFromInt(ev.udata));
                         const handle = resume_node.handle;
                         const resume_node_id = resume_node.id;
                         switch (resume_node_id) {
lib/std/event/rwlock.zig
@@ -223,7 +223,7 @@ test "std.event.RwLock" {
 
     _ = testLock(std.heap.page_allocator, &lock);
 
-    const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
+    const expected_result = [1]i32{shared_it_count * @as(i32, @intCast(shared_test_data.len))} ** shared_test_data.len;
     try testing.expectEqualSlices(i32, expected_result, shared_test_data);
 }
 fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
@@ -244,12 +244,12 @@ fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
     }
 
     for (write_nodes) |*write_node| {
-        const casted = @ptrCast(*const @Frame(writeRunner), write_node.data);
+        const casted = @as(*const @Frame(writeRunner), @ptrCast(write_node.data));
         await casted;
         allocator.destroy(casted);
     }
     for (read_nodes) |*read_node| {
-        const casted = @ptrCast(*const @Frame(readRunner), read_node.data);
+        const casted = @as(*const @Frame(readRunner), @ptrCast(read_node.data));
         await casted;
         allocator.destroy(casted);
     }
@@ -287,6 +287,6 @@ fn readRunner(lock: *RwLock) callconv(.Async) void {
         defer handle.release();
 
         try testing.expect(shared_test_index == 0);
-        try testing.expect(shared_test_data[i] == @intCast(i32, shared_count));
+        try testing.expect(shared_test_data[i] == @as(i32, @intCast(shared_count)));
     }
 }
lib/std/fmt/parse_float/common.zig
@@ -32,7 +32,7 @@ pub fn BiasedFp(comptime T: type) type {
 
         pub fn toFloat(self: Self, comptime FloatT: type, negative: bool) FloatT {
             var word = self.f;
-            word |= @intCast(MantissaT, self.e) << std.math.floatMantissaBits(FloatT);
+            word |= @as(MantissaT, @intCast(self.e)) << std.math.floatMantissaBits(FloatT);
             var f = floatFromUnsigned(FloatT, MantissaT, word);
             if (negative) f = -f;
             return f;
@@ -42,10 +42,10 @@ pub fn BiasedFp(comptime T: type) type {
 
 pub fn floatFromUnsigned(comptime T: type, comptime MantissaT: type, v: MantissaT) T {
     return switch (T) {
-        f16 => @bitCast(f16, @truncate(u16, v)),
-        f32 => @bitCast(f32, @truncate(u32, v)),
-        f64 => @bitCast(f64, @truncate(u64, v)),
-        f128 => @bitCast(f128, v),
+        f16 => @as(f16, @bitCast(@as(u16, @truncate(v)))),
+        f32 => @as(f32, @bitCast(@as(u32, @truncate(v)))),
+        f64 => @as(f64, @bitCast(@as(u64, @truncate(v)))),
+        f128 => @as(f128, @bitCast(v)),
         else => unreachable,
     };
 }
lib/std/fmt/parse_float/convert_eisel_lemire.zig
@@ -36,7 +36,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
     }
 
     // Normalize our significant digits, so the most-significant bit is set.
-    const lz = @clz(@bitCast(u64, w));
+    const lz = @clz(@as(u64, @bitCast(w)));
     w = math.shl(u64, w, lz);
 
     const r = computeProductApprox(q, w, float_info.mantissa_explicit_bits + 3);
@@ -62,9 +62,9 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
         }
     }
 
-    const upper_bit = @intCast(i32, r.hi >> 63);
-    var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3);
-    var power2 = power(@intCast(i32, q)) + upper_bit - @intCast(i32, lz) - float_info.minimum_exponent;
+    const upper_bit = @as(i32, @intCast(r.hi >> 63));
+    var mantissa = math.shr(u64, r.hi, upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3);
+    var power2 = power(@as(i32, @intCast(q))) + upper_bit - @as(i32, @intCast(lz)) - float_info.minimum_exponent;
     if (power2 <= 0) {
         if (-power2 + 1 >= 64) {
             // Have more than 64 bits below the minimum exponent, must be 0.
@@ -93,7 +93,7 @@ pub fn convertEiselLemire(comptime T: type, q: i64, w_: u64) ?BiasedFp(f64) {
         q >= float_info.min_exponent_round_to_even and
         q <= float_info.max_exponent_round_to_even and
         mantissa & 3 == 1 and
-        math.shl(u64, mantissa, (upper_bit + 64 - @intCast(i32, float_info.mantissa_explicit_bits) - 3)) == r.hi)
+        math.shl(u64, mantissa, (upper_bit + 64 - @as(i32, @intCast(float_info.mantissa_explicit_bits)) - 3)) == r.hi)
     {
         // Zero the lowest bit, so we don't round up.
         mantissa &= ~@as(u64, 1);
@@ -139,8 +139,8 @@ const U128 = struct {
     pub fn mul(a: u64, b: u64) U128 {
         const x = @as(u128, a) * b;
         return .{
-            .hi = @truncate(u64, x >> 64),
-            .lo = @truncate(u64, x),
+            .hi = @as(u64, @truncate(x >> 64)),
+            .lo = @as(u64, @truncate(x)),
         };
     }
 };
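
A spot-check of the hi/lo split mul performs (hypothetical operands):

    const std = @import("std");

    test "hi/lo split of a 64×64 product" {
        const x = @as(u128, std.math.maxInt(u64)) * 2; // 2⁶⁵ - 2
        try std.testing.expectEqual(@as(u64, 1), @as(u64, @truncate(x >> 64)));
        try std.testing.expectEqual(@as(u64, std.math.maxInt(u64) - 1), @as(u64, @truncate(x)));
    }
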
@@ -161,7 +161,7 @@ fn computeProductApprox(q: i64, w: u64, comptime precision: usize) U128 {
     // 5^q < 2^64, then the multiplication always provides an exact value.
     // That means whenever we need to round ties to even, we always have
     // an exact value.
-    const index = @intCast(usize, q - @intCast(i64, eisel_lemire_smallest_power_of_five));
+    const index = @as(usize, @intCast(q - @as(i64, @intCast(eisel_lemire_smallest_power_of_five))));
     const pow5 = eisel_lemire_table_powers_of_five_128[index];
 
     // Only need one multiplication as long as there is 1 zero but
lib/std/fmt/parse_float/convert_fast.zig
@@ -108,19 +108,19 @@ pub fn convertFast(comptime T: type, n: Number(T)) ?T {
     var value: T = 0;
     if (n.exponent <= info.max_exponent_fast_path) {
         // normal fast path
-        value = @floatFromInt(T, n.mantissa);
+        value = @as(T, @floatFromInt(n.mantissa));
         value = if (n.exponent < 0)
-            value / fastPow10(T, @intCast(usize, -n.exponent))
+            value / fastPow10(T, @as(usize, @intCast(-n.exponent)))
         else
-            value * fastPow10(T, @intCast(usize, n.exponent));
+            value * fastPow10(T, @as(usize, @intCast(n.exponent)));
     } else {
         // disguised fast path
         const shift = n.exponent - info.max_exponent_fast_path;
-        const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @intCast(usize, shift))) catch return null;
+        const mantissa = math.mul(MantissaT, n.mantissa, fastIntPow10(MantissaT, @as(usize, @intCast(shift)))) catch return null;
         if (mantissa > info.max_mantissa_fast_path) {
             return null;
         }
-        value = @floatFromInt(T, mantissa) * fastPow10(T, info.max_exponent_fast_path);
+        value = @as(T, @floatFromInt(mantissa)) * fastPow10(T, info.max_exponent_fast_path);
     }
 
     if (n.negative) {
lib/std/fmt/parse_float/convert_hex.zig
@@ -81,7 +81,7 @@ pub fn convertHex(comptime T: type, n_: Number(T)) T {
     }
 
     var bits = n.mantissa & ((1 << mantissa_bits) - 1);
-    bits |= @intCast(MantissaT, (n.exponent - exp_bias) & ((1 << exp_bits) - 1)) << mantissa_bits;
+    bits |= @as(MantissaT, @intCast((n.exponent - exp_bias) & ((1 << exp_bits) - 1))) << mantissa_bits;
     if (n.negative) {
         bits |= 1 << (mantissa_bits + exp_bits);
     }
lib/std/fmt/parse_float/convert_slow.zig
@@ -48,13 +48,13 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) {
     var exp2: i32 = 0;
     // Shift right toward (1/2 .. 1]
     while (d.decimal_point > 0) {
-        const n = @intCast(usize, d.decimal_point);
+        const n = @as(usize, @intCast(d.decimal_point));
         const shift = getShift(n);
         d.rightShift(shift);
         if (d.decimal_point < -Decimal(T).decimal_point_range) {
             return BiasedFp(T).zero();
         }
-        exp2 += @intCast(i32, shift);
+        exp2 += @as(i32, @intCast(shift));
     }
     //  Shift left toward (1/2 .. 1]
     while (d.decimal_point <= 0) {
@@ -66,7 +66,7 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) {
                     else => 1,
                 };
             } else {
-                const n = @intCast(usize, -d.decimal_point);
+                const n = @as(usize, @intCast(-d.decimal_point));
                 break :blk getShift(n);
             }
         };
@@ -74,17 +74,17 @@ pub fn convertSlow(comptime T: type, s: []const u8) BiasedFp(T) {
         if (d.decimal_point > Decimal(T).decimal_point_range) {
             return BiasedFp(T).inf(T);
         }
-        exp2 -= @intCast(i32, shift);
+        exp2 -= @as(i32, @intCast(shift));
     }
     // We are now in the range [1/2 .. 1] but the binary format uses [1 .. 2]
     exp2 -= 1;
     while (min_exponent + 1 > exp2) {
-        var n = @intCast(usize, (min_exponent + 1) - exp2);
+        var n = @as(usize, @intCast((min_exponent + 1) - exp2));
         if (n > max_shift) {
             n = max_shift;
         }
         d.rightShift(n);
-        exp2 += @intCast(i32, n);
+        exp2 += @as(i32, @intCast(n));
     }
     if (exp2 - min_exponent >= infinite_power) {
         return BiasedFp(T).inf(T);
lib/std/fmt/parse_float/decimal.zig
@@ -114,7 +114,7 @@ pub fn Decimal(comptime T: type) type {
                 return math.maxInt(MantissaT);
             }
 
-            const dp = @intCast(usize, self.decimal_point);
+            const dp = @as(usize, @intCast(self.decimal_point));
             var n: MantissaT = 0;
 
             var i: usize = 0;
@@ -155,7 +155,7 @@ pub fn Decimal(comptime T: type) type {
                 const quotient = n / 10;
                 const remainder = n - (10 * quotient);
                 if (write_index < max_digits) {
-                    self.digits[write_index] = @intCast(u8, remainder);
+                    self.digits[write_index] = @as(u8, @intCast(remainder));
                 } else if (remainder > 0) {
                     self.truncated = true;
                 }
@@ -167,7 +167,7 @@ pub fn Decimal(comptime T: type) type {
                 const quotient = n / 10;
                 const remainder = n - (10 * quotient);
                 if (write_index < max_digits) {
-                    self.digits[write_index] = @intCast(u8, remainder);
+                    self.digits[write_index] = @as(u8, @intCast(remainder));
                 } else if (remainder > 0) {
                     self.truncated = true;
                 }
@@ -178,7 +178,7 @@ pub fn Decimal(comptime T: type) type {
             if (self.num_digits > max_digits) {
                 self.num_digits = max_digits;
             }
-            self.decimal_point += @intCast(i32, num_new_digits);
+            self.decimal_point += @as(i32, @intCast(num_new_digits));
             self.trim();
         }
 
@@ -202,7 +202,7 @@ pub fn Decimal(comptime T: type) type {
                 }
             }
 
-            self.decimal_point -= @intCast(i32, read_index) - 1;
+            self.decimal_point -= @as(i32, @intCast(read_index)) - 1;
             if (self.decimal_point < -decimal_point_range) {
                 self.num_digits = 0;
                 self.decimal_point = 0;
@@ -212,14 +212,14 @@ pub fn Decimal(comptime T: type) type {
 
             const mask = math.shl(MantissaT, 1, shift) - 1;
             while (read_index < self.num_digits) {
-                const new_digit = @intCast(u8, math.shr(MantissaT, n, shift));
+                const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift)));
                 n = (10 * (n & mask)) + self.digits[read_index];
                 read_index += 1;
                 self.digits[write_index] = new_digit;
                 write_index += 1;
             }
             while (n > 0) {
-                const new_digit = @intCast(u8, math.shr(MantissaT, n, shift));
+                const new_digit = @as(u8, @intCast(math.shr(MantissaT, n, shift)));
                 n = 10 * (n & mask);
                 if (write_index < max_digits) {
                     self.digits[write_index] = new_digit;
@@ -268,7 +268,7 @@ pub fn Decimal(comptime T: type) type {
                 while (stream.scanDigit(10)) |digit| {
                     d.tryAddDigit(digit);
                 }
-                d.decimal_point = @intCast(i32, marker) - @intCast(i32, stream.offsetTrue());
+                d.decimal_point = @as(i32, @intCast(marker)) - @as(i32, @intCast(stream.offsetTrue()));
             }
             if (d.num_digits != 0) {
                 // Ignore trailing zeros if any
@@ -284,9 +284,9 @@ pub fn Decimal(comptime T: type) type {
                     i -= 1;
                     if (i == 0) break;
                 }
-                d.decimal_point += @intCast(i32, n_trailing_zeros);
+                d.decimal_point += @as(i32, @intCast(n_trailing_zeros));
                 d.num_digits -= n_trailing_zeros;
-                d.decimal_point += @intCast(i32, d.num_digits);
+                d.decimal_point += @as(i32, @intCast(d.num_digits));
                 if (d.num_digits > max_digits) {
                     d.truncated = true;
                     d.num_digits = max_digits;
lib/std/fmt/parse_float/parse.zig
@@ -21,7 +21,7 @@ fn parse8Digits(v_: u64) u64 {
     v = (v * 10) + (v >> 8); // will not overflow, fits in 63 bits
     const v1 = (v & mask) *% mul1;
     const v2 = ((v >> 16) & mask) *% mul2;
-    return @as(u64, @truncate(u32, (v1 +% v2) >> 32));
+    return @as(u64, @as(u32, @truncate((v1 +% v2) >> 32)));
 }
 
 /// Parse digits until a non-digit character is found.
@@ -106,7 +106,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
     var mantissa: MantissaT = 0;
     tryParseDigits(MantissaT, stream, &mantissa, info.base);
     var int_end = stream.offsetTrue();
-    var n_digits = @intCast(isize, stream.offsetTrue());
+    var n_digits = @as(isize, @intCast(stream.offsetTrue()));
     // the base being 16 implies a 0x prefix, which shouldn't be included in the digit count
     if (info.base == 16) n_digits -= 2;
 
@@ -117,8 +117,8 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
         const marker = stream.offsetTrue();
         tryParseDigits(MantissaT, stream, &mantissa, info.base);
         const n_after_dot = stream.offsetTrue() - marker;
-        exponent = -@intCast(i64, n_after_dot);
-        n_digits += @intCast(isize, n_after_dot);
+        exponent = -@as(i64, @intCast(n_after_dot));
+        n_digits += @as(isize, @intCast(n_after_dot));
     }
 
     // adjust required shift to offset mantissa for base-16 (2^4)
@@ -163,7 +163,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
         // '0' = '.' + 2
         const next = stream.firstUnchecked();
         if (next != '_') {
-            n_digits -= @intCast(isize, next -| ('0' - 1));
+            n_digits -= @as(isize, @intCast(next -| ('0' - 1)));
         } else {
             stream.underscore_count += 1;
         }
@@ -179,7 +179,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
         exponent = blk: {
             if (mantissa >= min_n_digit_int(MantissaT, info.max_mantissa_digits)) {
                 // big int
-                break :blk @intCast(i64, int_end) - @intCast(i64, stream.offsetTrue());
+                break :blk @as(i64, @intCast(int_end)) - @as(i64, @intCast(stream.offsetTrue()));
             } else {
                 // the next byte must be present and be '.'
                 // We know this is true because we had more than 19
@@ -190,7 +190,7 @@ fn parsePartialNumberBase(comptime T: type, stream: *FloatStream, negative: bool
                 stream.advance(1);
                 var marker = stream.offsetTrue();
                 tryParseNDigits(MantissaT, stream, &mantissa, info.base, info.max_mantissa_digits);
-                break :blk @intCast(i64, marker) - @intCast(i64, stream.offsetTrue());
+                break :blk @as(i64, @intCast(marker)) - @as(i64, @intCast(stream.offsetTrue()));
             }
         };
         // add back the explicit part
lib/std/fmt/errol.zig
@@ -29,11 +29,11 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro
     switch (mode) {
         RoundMode.Decimal => {
             if (float_decimal.exp >= 0) {
-                round_digit = precision + @intCast(usize, float_decimal.exp);
+                round_digit = precision + @as(usize, @intCast(float_decimal.exp));
             } else {
                 // if a small negative exp, then adjust we need to offset by the number
                 // of leading zeros that will occur.
-                const min_exp_required = @intCast(usize, -float_decimal.exp);
+                const min_exp_required = @as(usize, @intCast(-float_decimal.exp));
                 if (precision > min_exp_required) {
                     round_digit = precision - min_exp_required;
                 }
@@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro
                 float_decimal.exp += 1;
 
                 // Re-size the buffer to use the reserved leading byte.
-                const one_before = @ptrFromInt([*]u8, @intFromPtr(&float_decimal.digits[0]) - 1);
+                const one_before = @as([*]u8, @ptrFromInt(@intFromPtr(&float_decimal.digits[0]) - 1));
                 float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
                 float_decimal.digits[0] = '1';
                 return;
@@ -80,7 +80,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro
 
 /// Corrected Errol3 double to ASCII conversion.
 pub fn errol3(value: f64, buffer: []u8) FloatDecimal {
-    const bits = @bitCast(u64, value);
+    const bits = @as(u64, @bitCast(value));
     const i = tableLowerBound(bits);
     if (i < enum3.len and enum3[i] == bits) {
         const data = enum3_data[i];
@@ -113,16 +113,16 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal {
     // normalize the midpoint
 
     const e = math.frexp(val).exponent;
-    var exp = @intFromFloat(i16, @floor(307 + @floatFromInt(f64, e) * 0.30103));
+    var exp = @as(i16, @intFromFloat(@floor(307 + @as(f64, @floatFromInt(e)) * 0.30103)));
     if (exp < 20) {
         exp = 20;
-    } else if (@intCast(usize, exp) >= lookup_table.len) {
-        exp = @intCast(i16, lookup_table.len - 1);
+    } else if (@as(usize, @intCast(exp)) >= lookup_table.len) {
+        exp = @as(i16, @intCast(lookup_table.len - 1));
     }
 
-    var mid = lookup_table[@intCast(usize, exp)];
+    var mid = lookup_table[@as(usize, @intCast(exp))];
     mid = hpProd(mid, val);
-    const lten = lookup_table[@intCast(usize, exp)].val;
+    const lten = lookup_table[@as(usize, @intCast(exp))].val;
 
     exp -= 307;
 
@@ -171,25 +171,25 @@ fn errolSlow(val: f64, buffer: []u8) FloatDecimal {
     var buf_index: usize = 0;
     const bound = buffer.len - 1;
     while (buf_index < bound) {
-        var hdig = @intFromFloat(u8, @floor(high.val));
-        if ((high.val == @floatFromInt(f64, hdig)) and (high.off < 0)) hdig -= 1;
+        var hdig = @as(u8, @intFromFloat(@floor(high.val)));
+        if ((high.val == @as(f64, @floatFromInt(hdig))) and (high.off < 0)) hdig -= 1;
 
-        var ldig = @intFromFloat(u8, @floor(low.val));
-        if ((low.val == @floatFromInt(f64, ldig)) and (low.off < 0)) ldig -= 1;
+        var ldig = @as(u8, @intFromFloat(@floor(low.val)));
+        if ((low.val == @as(f64, @floatFromInt(ldig))) and (low.off < 0)) ldig -= 1;
 
         if (ldig != hdig) break;
 
         buffer[buf_index] = hdig + '0';
         buf_index += 1;
-        high.val -= @floatFromInt(f64, hdig);
-        low.val -= @floatFromInt(f64, ldig);
+        high.val -= @as(f64, @floatFromInt(hdig));
+        low.val -= @as(f64, @floatFromInt(ldig));
         hpMul10(&high);
         hpMul10(&low);
     }
 
     const tmp = (high.val + low.val) / 2.0;
-    var mdig = @intFromFloat(u8, @floor(tmp + 0.5));
-    if ((@floatFromInt(f64, mdig) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1;
+    var mdig = @as(u8, @intFromFloat(@floor(tmp + 0.5)));
+    if ((@as(f64, @floatFromInt(mdig)) - tmp) == 0.5 and (mdig & 0x1) != 0) mdig -= 1;
 
     buffer[buf_index] = mdig + '0';
     buf_index += 1;
@@ -248,9 +248,9 @@ fn split(val: f64, hi: *f64, lo: *f64) void {
 }
 
 fn gethi(in: f64) f64 {
-    const bits = @bitCast(u64, in);
+    const bits = @as(u64, @bitCast(in));
     const new_bits = bits & 0xFFFFFFFFF8000000;
-    return @bitCast(f64, new_bits);
+    return @as(f64, @bitCast(new_bits));
 }
 
 /// Normalize the number by factoring in the error.
@@ -303,21 +303,21 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
 
     assert((val > 9.007199254740992e15) and val < (3.40282366920938e38));
 
-    var mid = @intFromFloat(u128, val);
+    var mid = @as(u128, @intFromFloat(val));
     var low: u128 = mid - fpeint((fpnext(val) - val) / 2.0);
     var high: u128 = mid + fpeint((val - fpprev(val)) / 2.0);
 
-    if (@bitCast(u64, val) & 0x1 != 0) {
+    if (@as(u64, @bitCast(val)) & 0x1 != 0) {
         high -= 1;
     } else {
         low -= 1;
     }
 
-    var l64 = @intCast(u64, low % pow19);
-    const lf = @intCast(u64, (low / pow19) % pow19);
+    var l64 = @as(u64, @intCast(low % pow19));
+    const lf = @as(u64, @intCast((low / pow19) % pow19));
 
-    var h64 = @intCast(u64, high % pow19);
-    const hf = @intCast(u64, (high / pow19) % pow19);
+    var h64 = @as(u64, @intCast(high % pow19));
+    const hf = @as(u64, @intCast((high / pow19) % pow19));
 
     if (lf != hf) {
         l64 = lf;
@@ -333,7 +333,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
             x *= 10;
         }
     }
-    const m64 = @truncate(u64, @divTrunc(mid, x));
+    const m64 = @as(u64, @truncate(@divTrunc(mid, x)));
 
     if (lf != hf) mi += 19;
 
@@ -349,7 +349,7 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
 
     return FloatDecimal{
         .digits = buffer[0..buf_index],
-        .exp = @intCast(i32, buf_index) + mi,
+        .exp = @as(i32, @intCast(buf_index)) + mi,
     };
 }
 
@@ -360,33 +360,33 @@ fn errolInt(val: f64, buffer: []u8) FloatDecimal {
 fn errolFixed(val: f64, buffer: []u8) FloatDecimal {
     assert((val >= 16.0) and (val < 9.007199254740992e15));
 
-    const u = @intFromFloat(u64, val);
-    const n = @floatFromInt(f64, u);
+    const u = @as(u64, @intFromFloat(val));
+    const n = @as(f64, @floatFromInt(u));
 
     var mid = val - n;
     var lo = ((fpprev(val) - n) + mid) / 2.0;
     var hi = ((fpnext(val) - n) + mid) / 2.0;
 
     var buf_index = u64toa(u, buffer);
-    var exp = @intCast(i32, buf_index);
+    var exp = @as(i32, @intCast(buf_index));
     var j = buf_index;
     buffer[j] = 0;
 
     if (mid != 0.0) {
         while (mid != 0.0) {
             lo *= 10.0;
-            const ldig = @intFromFloat(i32, lo);
-            lo -= @floatFromInt(f64, ldig);
+            const ldig = @as(i32, @intFromFloat(lo));
+            lo -= @as(f64, @floatFromInt(ldig));
 
             mid *= 10.0;
-            const mdig = @intFromFloat(i32, mid);
-            mid -= @floatFromInt(f64, mdig);
+            const mdig = @as(i32, @intFromFloat(mid));
+            mid -= @as(f64, @floatFromInt(mdig));
 
             hi *= 10.0;
-            const hdig = @intFromFloat(i32, hi);
-            hi -= @floatFromInt(f64, hdig);
+            const hdig = @as(i32, @intFromFloat(hi));
+            hi -= @as(f64, @floatFromInt(hdig));
 
-            buffer[j] = @intCast(u8, mdig + '0');
+            buffer[j] = @as(u8, @intCast(mdig + '0'));
             j += 1;
 
             if (hdig != ldig or j > 50) break;
@@ -413,11 +413,11 @@ fn errolFixed(val: f64, buffer: []u8) FloatDecimal {
 }
 
 fn fpnext(val: f64) f64 {
-    return @bitCast(f64, @bitCast(u64, val) +% 1);
+    return @as(f64, @bitCast(@as(u64, @bitCast(val)) +% 1));
 }
 
 fn fpprev(val: f64) f64 {
-    return @bitCast(f64, @bitCast(u64, val) -% 1);
+    return @as(f64, @bitCast(@as(u64, @bitCast(val)) -% 1));
 }
 
 pub const c_digits_lut = [_]u8{
@@ -453,7 +453,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
     var buf_index: usize = 0;
 
     if (value < kTen8) {
-        const v = @intCast(u32, value);
+        const v = @as(u32, @intCast(value));
         if (v < 10000) {
             const d1: u32 = (v / 100) << 1;
             const d2: u32 = (v % 100) << 1;
@@ -508,8 +508,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
             buf_index += 1;
         }
     } else if (value < kTen16) {
-        const v0: u32 = @intCast(u32, value / kTen8);
-        const v1: u32 = @intCast(u32, value % kTen8);
+        const v0: u32 = @as(u32, @intCast(value / kTen8));
+        const v1: u32 = @as(u32, @intCast(value % kTen8));
 
         const b0: u32 = v0 / 10000;
         const c0: u32 = v0 % 10000;
@@ -579,11 +579,11 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
         buffer[buf_index] = c_digits_lut[d8 + 1];
         buf_index += 1;
     } else {
-        const a = @intCast(u32, value / kTen16); // 1 to 1844
+        const a = @as(u32, @intCast(value / kTen16)); // 1 to 1844
         value %= kTen16;
 
         if (a < 10) {
-            buffer[buf_index] = '0' + @intCast(u8, a);
+            buffer[buf_index] = '0' + @as(u8, @intCast(a));
             buf_index += 1;
         } else if (a < 100) {
             const i: u32 = a << 1;
@@ -592,7 +592,7 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
             buffer[buf_index] = c_digits_lut[i + 1];
             buf_index += 1;
         } else if (a < 1000) {
-            buffer[buf_index] = '0' + @intCast(u8, a / 100);
+            buffer[buf_index] = '0' + @as(u8, @intCast(a / 100));
             buf_index += 1;
 
             const i: u32 = (a % 100) << 1;
@@ -613,8 +613,8 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
             buf_index += 1;
         }
 
-        const v0 = @intCast(u32, value / kTen8);
-        const v1 = @intCast(u32, value % kTen8);
+        const v0 = @as(u32, @intCast(value / kTen8));
+        const v1 = @as(u32, @intCast(value % kTen8));
 
         const b0: u32 = v0 / 10000;
         const c0: u32 = v0 % 10000;
@@ -672,10 +672,10 @@ fn u64toa(value_param: u64, buffer: []u8) usize {
 }
 
 fn fpeint(from: f64) u128 {
-    const bits = @bitCast(u64, from);
+    const bits = @as(u64, @bitCast(from));
     assert((bits & ((1 << 52) - 1)) == 0);
 
-    return @as(u128, 1) << @truncate(u7, (bits >> 52) -% 1023);
+    return @as(u128, 1) << @as(u7, @truncate((bits >> 52) -% 1023));
 }
 
 /// Given two different integers with the same length in terms of the number
lib/std/fmt/parse_float.zig
@@ -78,7 +78,7 @@ test "fmt.parseFloat nan and inf" {
     inline for ([_]type{ f16, f32, f64, f128 }) |T| {
         const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
 
-        try expectEqual(@bitCast(Z, try parseFloat(T, "nAn")), @bitCast(Z, std.math.nan(T)));
+        try expectEqual(@as(Z, @bitCast(try parseFloat(T, "nAn"))), @as(Z, @bitCast(std.math.nan(T))));
         try expectEqual(try parseFloat(T, "inF"), std.math.inf(T));
         try expectEqual(try parseFloat(T, "-INF"), -std.math.inf(T));
     }
lib/std/fs/file.zig
@@ -368,7 +368,7 @@ pub const File = struct {
 
             return Stat{
                 .inode = st.ino,
-                .size = @bitCast(u64, st.size),
+                .size = @as(u64, @bitCast(st.size)),
                 .mode = st.mode,
                 .kind = kind,
                 .atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
@@ -398,7 +398,7 @@ pub const File = struct {
             }
             return Stat{
                 .inode = info.InternalInformation.IndexNumber,
-                .size = @bitCast(u64, info.StandardInformation.EndOfFile),
+                .size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)),
                 .mode = 0,
                 .kind = if (info.StandardInformation.Directory == 0) .file else .directory,
                 .atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
@@ -650,7 +650,7 @@ pub const File = struct {
 
         /// Returns the size of the file
         pub fn size(self: Self) u64 {
-            return @intCast(u64, self.stat.size);
+            return @as(u64, @intCast(self.stat.size));
         }
 
         /// Returns a `Permissions` struct, representing the permissions on the file
@@ -855,7 +855,7 @@ pub const File = struct {
                         if (info.BasicInformation.FileAttributes & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) {
                             var reparse_buf: [windows.MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 = undefined;
                             try windows.DeviceIoControl(self.handle, windows.FSCTL_GET_REPARSE_POINT, null, reparse_buf[0..]);
-                            const reparse_struct = @ptrCast(*const windows.REPARSE_DATA_BUFFER, @alignCast(@alignOf(windows.REPARSE_DATA_BUFFER), &reparse_buf[0]));
+                            const reparse_struct: *const windows.REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0]));
                             break :reparse_blk reparse_struct.ReparseTag;
                         }
                         break :reparse_blk 0;
@@ -864,7 +864,7 @@ pub const File = struct {
                     break :blk MetadataWindows{
                         .attributes = info.BasicInformation.FileAttributes,
                         .reparse_tag = reparse_tag,
-                        ._size = @bitCast(u64, info.StandardInformation.EndOfFile),
+                        ._size = @as(u64, @bitCast(info.StandardInformation.EndOfFile)),
                         .access_time = windows.fromSysTime(info.BasicInformation.LastAccessTime),
                         .modified_time = windows.fromSysTime(info.BasicInformation.LastWriteTime),
                         .creation_time = windows.fromSysTime(info.BasicInformation.CreationTime),
@@ -881,16 +881,16 @@ pub const File = struct {
                         .NOSYS => {
                             const st = try os.fstat(self.handle);
 
-                            stx.mode = @intCast(u16, st.mode);
+                            stx.mode = @as(u16, @intCast(st.mode));
 
                             // Hacky conversion from timespec to statx_timestamp
                             stx.atime = std.mem.zeroes(os.linux.statx_timestamp);
                             stx.atime.tv_sec = st.atim.tv_sec;
-                            stx.atime.tv_nsec = @intCast(u32, st.atim.tv_nsec); // Guaranteed to succeed (tv_nsec is always below 10^9)
+                            stx.atime.tv_nsec = @as(u32, @intCast(st.atim.tv_nsec)); // Guaranteed to succeed (tv_nsec is always below 10^9)
 
                             stx.mtime = std.mem.zeroes(os.linux.statx_timestamp);
                             stx.mtime.tv_sec = st.mtim.tv_sec;
-                            stx.mtime.tv_nsec = @intCast(u32, st.mtim.tv_nsec);
+                            stx.mtime.tv_nsec = @as(u32, @intCast(st.mtim.tv_nsec));
 
                             stx.mask = os.linux.STATX_BASIC_STATS | os.linux.STATX_MTIME;
                         },
@@ -1414,7 +1414,7 @@ pub const File = struct {
                 amt = try os.sendfile(out_fd, in_fd, offset + off, count - off, zero_iovec, trailers, flags);
                 off += amt;
             }
-            amt = @intCast(usize, off - count);
+            amt = @as(usize, @intCast(off - count));
         }
         var i: usize = 0;
         while (i < trailers.len) {
lib/std/fs/get_app_data_dir.zig
@@ -23,7 +23,7 @@ pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDi
                 &dir_path_ptr,
             )) {
                 os.windows.S_OK => {
-                    defer os.windows.ole32.CoTaskMemFree(@ptrCast(*anyopaque, dir_path_ptr));
+                    defer os.windows.ole32.CoTaskMemFree(@as(*anyopaque, @ptrCast(dir_path_ptr)));
                     const global_dir = unicode.utf16leToUtf8Alloc(allocator, mem.sliceTo(dir_path_ptr, 0)) catch |err| switch (err) {
                         error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
                         error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
lib/std/fs/wasi.zig
@@ -17,7 +17,7 @@ pub const Preopens = struct {
     pub fn find(p: Preopens, name: []const u8) ?os.fd_t {
         for (p.names, 0..) |elem_name, i| {
             if (mem.eql(u8, elem_name, name)) {
-                return @intCast(os.fd_t, i);
+                return @as(os.fd_t, @intCast(i));
             }
         }
         return null;
@@ -34,7 +34,7 @@ pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens {
     names.appendAssumeCapacity("stdout"); // 1
     names.appendAssumeCapacity("stderr"); // 2
     while (true) {
-        const fd = @intCast(wasi.fd_t, names.items.len);
+        const fd = @as(wasi.fd_t, @intCast(names.items.len));
         var prestat: prestat_t = undefined;
         switch (wasi.fd_prestat_get(fd, &prestat)) {
             .SUCCESS => {},
lib/std/fs/watch.zig
@@ -279,7 +279,7 @@ pub fn Watch(comptime V: type) type {
 
             while (!put.cancelled) {
                 kev.* = os.Kevent{
-                    .ident = @intCast(usize, fd),
+                    .ident = @as(usize, @intCast(fd)),
                     .filter = os.EVFILT_VNODE,
                     .flags = os.EV_ADD | os.EV_ENABLE | os.EV_CLEAR | os.EV_ONESHOT |
                         os.NOTE_WRITE | os.NOTE_DELETE | os.NOTE_REVOKE,
@@ -487,14 +487,14 @@ pub fn Watch(comptime V: type) type {
                     var ptr: [*]u8 = &event_buf;
                     const end_ptr = ptr + bytes_transferred;
                     while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) {
-                        const ev = @ptrCast(*const windows.FILE_NOTIFY_INFORMATION, ptr);
+                        const ev = @as(*const windows.FILE_NOTIFY_INFORMATION, @ptrCast(ptr));
                         const emit = switch (ev.Action) {
                             windows.FILE_ACTION_REMOVED => WatchEventId.Delete,
                             windows.FILE_ACTION_MODIFIED => .CloseWrite,
                             else => null,
                         };
                         if (emit) |id| {
-                            const basename_ptr = @ptrCast([*]u16, ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION));
+                            const basename_ptr = @as([*]u16, @ptrCast(ptr + @sizeOf(windows.FILE_NOTIFY_INFORMATION)));
                             const basename_utf16le = basename_ptr[0 .. ev.FileNameLength / 2];
                             var basename_data: [std.fs.MAX_PATH_BYTES]u8 = undefined;
                             const basename = basename_data[0 .. std.unicode.utf16leToUtf8(&basename_data, basename_utf16le) catch unreachable];
@@ -510,7 +510,7 @@ pub fn Watch(comptime V: type) type {
                         }
 
                         if (ev.NextEntryOffset == 0) break;
-                        ptr = @alignCast(@alignOf(windows.FILE_NOTIFY_INFORMATION), ptr + ev.NextEntryOffset);
+                        ptr = @alignCast(ptr + ev.NextEntryOffset);
                     }
                 }
             }
@@ -586,10 +586,10 @@ pub fn Watch(comptime V: type) type {
                 var ptr: [*]u8 = &event_buf;
                 const end_ptr = ptr + bytes_read;
                 while (@intFromPtr(ptr) < @intFromPtr(end_ptr)) {
-                    const ev = @ptrCast(*const os.linux.inotify_event, ptr);
+                    const ev = @as(*const os.linux.inotify_event, @ptrCast(ptr));
                     if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
                         const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
-                        const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr));
+                        const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr)));
 
                         const dir = &self.os_data.wd_table.get(ev.wd).?;
                         if (dir.file_table.getEntry(basename)) |file_value| {
@@ -615,7 +615,7 @@ pub fn Watch(comptime V: type) type {
                     } else if (ev.mask & os.linux.IN_DELETE == os.linux.IN_DELETE) {
                         // File or directory was removed or deleted
                         const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
-                        const basename = std.mem.span(@ptrCast([*:0]u8, basename_ptr));
+                        const basename = std.mem.span(@as([*:0]u8, @ptrCast(basename_ptr)));
 
                         const dir = &self.os_data.wd_table.get(ev.wd).?;
                         if (dir.file_table.getEntry(basename)) |file_value| {
@@ -628,7 +628,7 @@ pub fn Watch(comptime V: type) type {
                         }
                     }
 
-                    ptr = @alignCast(@alignOf(os.linux.inotify_event), ptr + @sizeOf(os.linux.inotify_event) + ev.len);
+                    ptr = @alignCast(ptr + @sizeOf(os.linux.inotify_event) + ev.len);
                 }
             }
         }
lib/std/hash/adler.zig
@@ -118,7 +118,7 @@ test "adler32 very long with variation" {
 
         var i: usize = 0;
         while (i < result.len) : (i += 1) {
-            result[i] = @truncate(u8, i);
+            result[i] = @as(u8, @truncate(i));
         }
 
         break :blk result;
lib/std/hash/auto_hash.zig
@@ -92,10 +92,10 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
         // Help the optimizer see that hashing an int is easy by inlining!
         // TODO Check if the situation is better after #561 is resolved.
         .Int => |int| switch (int.signedness) {
-            .signed => hash(hasher, @bitCast(@Type(.{ .Int = .{
+            .signed => hash(hasher, @as(@Type(.{ .Int = .{
                 .bits = int.bits,
                 .signedness = .unsigned,
-            } }), key), strat),
+            } }), @bitCast(key)), strat),
             .unsigned => {
                 if (comptime meta.trait.hasUniqueRepresentation(Key)) {
                     @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) });
lib/std/hash/benchmark.zig
@@ -122,13 +122,13 @@ pub fn benchmarkHash(comptime H: anytype, bytes: usize, allocator: std.mem.Alloc
     for (0..blocks_count) |i| {
         h.update(blocks[i * alignment ..][0..block_size]);
     }
-    const final = if (H.has_crypto_api) @truncate(u64, h.finalInt()) else h.final();
+    const final = if (H.has_crypto_api) @as(u64, @truncate(h.finalInt())) else h.final();
     std.mem.doNotOptimizeAway(final);
 
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
 
     return Result{
         .hash = final,
@@ -152,7 +152,7 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize
         const final = blk: {
             if (H.init_u8s) |init| {
                 if (H.has_crypto_api) {
-                    break :blk @truncate(u64, H.ty.toInt(small_key, init[0..H.ty.key_length]));
+                    break :blk @as(u64, @truncate(H.ty.toInt(small_key, init[0..H.ty.key_length])));
                 } else {
                     break :blk H.ty.hash(init, small_key);
                 }
@@ -166,8 +166,8 @@ pub fn benchmarkHashSmallKeys(comptime H: anytype, key_size: usize, bytes: usize
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
 
     std.mem.doNotOptimizeAway(sum);
 
lib/std/hash/cityhash.zig
@@ -2,7 +2,7 @@ const std = @import("std");
 
 inline fn offsetPtr(ptr: [*]const u8, offset: usize) [*]const u8 {
     // ptr + offset doesn't work at comptime so we need this instead.
-    return @ptrCast([*]const u8, &ptr[offset]);
+    return @as([*]const u8, @ptrCast(&ptr[offset]));
 }
 
 fn fetch32(ptr: [*]const u8, offset: usize) u32 {
@@ -49,18 +49,18 @@ pub const CityHash32 = struct {
     }
 
     fn hash32Len0To4(str: []const u8) u32 {
-        const len: u32 = @truncate(u32, str.len);
+        const len: u32 = @as(u32, @truncate(str.len));
         var b: u32 = 0;
         var c: u32 = 9;
         for (str) |v| {
-            b = b *% c1 +% @bitCast(u32, @intCast(i32, @bitCast(i8, v)));
+            b = b *% c1 +% @as(u32, @bitCast(@as(i32, @intCast(@as(i8, @bitCast(v))))));
             c ^= b;
         }
         return fmix(mur(b, mur(len, c)));
     }
 
     fn hash32Len5To12(str: []const u8) u32 {
-        var a: u32 = @truncate(u32, str.len);
+        var a: u32 = @as(u32, @truncate(str.len));
         var b: u32 = a *% 5;
         var c: u32 = 9;
         const d: u32 = b;
@@ -73,7 +73,7 @@ pub const CityHash32 = struct {
     }
 
     fn hash32Len13To24(str: []const u8) u32 {
-        const len: u32 = @truncate(u32, str.len);
+        const len: u32 = @as(u32, @truncate(str.len));
         const a: u32 = fetch32(str.ptr, (str.len >> 1) - 4);
         const b: u32 = fetch32(str.ptr, 4);
         const c: u32 = fetch32(str.ptr, str.len - 8);
@@ -95,7 +95,7 @@ pub const CityHash32 = struct {
             }
         }
 
-        const len: u32 = @truncate(u32, str.len);
+        const len: u32 = @as(u32, @truncate(str.len));
         var h: u32 = len;
         var g: u32 = c1 *% len;
         var f: u32 = g;
@@ -220,9 +220,9 @@ pub const CityHash64 = struct {
             const a: u8 = str[0];
             const b: u8 = str[str.len >> 1];
             const c: u8 = str[str.len - 1];
-            const y: u32 = @intCast(u32, a) +% (@intCast(u32, b) << 8);
-            const z: u32 = @truncate(u32, str.len) +% (@intCast(u32, c) << 2);
-            return shiftmix(@intCast(u64, y) *% k2 ^ @intCast(u64, z) *% k0) *% k2;
+            const y: u32 = @as(u32, @intCast(a)) +% (@as(u32, @intCast(b)) << 8);
+            const z: u32 = @as(u32, @truncate(str.len)) +% (@as(u32, @intCast(c)) << 2);
+            return shiftmix(@as(u64, @intCast(y)) *% k2 ^ @as(u64, @intCast(z)) *% k0) *% k2;
         }
         return k2;
     }
@@ -309,7 +309,7 @@ pub const CityHash64 = struct {
         var w: WeakPair = weakHashLen32WithSeeds(offsetPtr(str.ptr, str.len - 32), y +% k1, x);
 
         x = x *% k1 +% fetch64(str.ptr, 0);
-        len = (len - 1) & ~@intCast(u64, 63);
+        len = (len - 1) & ~@as(u64, @intCast(63));
 
         var ptr: [*]const u8 = str.ptr;
         while (true) {
@@ -353,19 +353,19 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 {
 
     var i: u32 = 0;
     while (i < 256) : (i += 1) {
-        key[i] = @intCast(u8, i);
+        key[i] = @as(u8, @intCast(i));
 
         var h: HashResult = hash_fn(key[0..i], 256 - i);
 
         // comptime can't really do reinterpret casting yet,
         // so we need to write the bytes manually.
         for (hashes_bytes[i * @sizeOf(HashResult) ..][0..@sizeOf(HashResult)]) |*byte| {
-            byte.* = @truncate(u8, h);
+            byte.* = @as(u8, @truncate(h));
             h = h >> 8;
         }
     }
 
-    return @truncate(u32, hash_fn(&hashes_bytes, 0));
+    return @as(u32, @truncate(hash_fn(&hashes_bytes, 0)));
 }
 
 fn CityHash32hashIgnoreSeed(str: []const u8, seed: u32) u32 {
lib/std/hash/crc.zig
@@ -65,7 +65,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
         }
 
         inline fn tableEntry(index: I) I {
-            return lookup_table[@intCast(u8, index & 0xFF)];
+            return lookup_table[@as(u8, @intCast(index & 0xFF))];
         }
 
         pub fn update(self: *Self, bytes: []const u8) void {
@@ -95,7 +95,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
             if (!algorithm.reflect_output) {
                 c >>= @bitSizeOf(I) - @bitSizeOf(W);
             }
-            return @intCast(W, c ^ algorithm.xor_output);
+            return @as(W, @intCast(c ^ algorithm.xor_output));
         }
 
         pub fn hash(bytes: []const u8) W {
@@ -125,7 +125,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
             var tables: [8][256]u32 = undefined;
 
             for (&tables[0], 0..) |*e, i| {
-                var crc = @intCast(u32, i);
+                var crc = @as(u32, @intCast(i));
                 var j: usize = 0;
                 while (j < 8) : (j += 1) {
                     if (crc & 1 == 1) {
@@ -142,7 +142,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
                 var crc = tables[0][i];
                 var j: usize = 1;
                 while (j < 8) : (j += 1) {
-                    const index = @truncate(u8, crc);
+                    const index = @as(u8, @truncate(crc));
                     crc = tables[0][index] ^ (crc >> 8);
                     tables[j][i] = crc;
                 }
@@ -170,14 +170,14 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
                     lookup_tables[1][p[6]] ^
                     lookup_tables[2][p[5]] ^
                     lookup_tables[3][p[4]] ^
-                    lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
-                    lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
-                    lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
-                    lookup_tables[7][@truncate(u8, self.crc >> 0)];
+                    lookup_tables[4][@as(u8, @truncate(self.crc >> 24))] ^
+                    lookup_tables[5][@as(u8, @truncate(self.crc >> 16))] ^
+                    lookup_tables[6][@as(u8, @truncate(self.crc >> 8))] ^
+                    lookup_tables[7][@as(u8, @truncate(self.crc >> 0))];
             }
 
             while (i < input.len) : (i += 1) {
-                const index = @truncate(u8, self.crc) ^ input[i];
+                const index = @as(u8, @truncate(self.crc)) ^ input[i];
                 self.crc = (self.crc >> 8) ^ lookup_tables[0][index];
             }
         }
@@ -218,7 +218,7 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type {
             var table: [16]u32 = undefined;
 
             for (&table, 0..) |*e, i| {
-                var crc = @intCast(u32, i * 16);
+                var crc = @as(u32, @intCast(i * 16));
                 var j: usize = 0;
                 while (j < 8) : (j += 1) {
                     if (crc & 1 == 1) {
@@ -241,8 +241,8 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type {
 
         pub fn update(self: *Self, input: []const u8) void {
             for (input) |b| {
-                self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4);
-                self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4);
+                self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 0)))] ^ (self.crc >> 4);
+                self.crc = lookup_table[@as(u4, @truncate(self.crc ^ (b >> 4)))] ^ (self.crc >> 4);
             }
         }
 
lib/std/hash/murmur.zig
@@ -14,9 +14,9 @@ pub const Murmur2_32 = struct {
 
     pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
         const m: u32 = 0x5bd1e995;
-        const len = @truncate(u32, str.len);
+        const len = @as(u32, @truncate(str.len));
         var h1: u32 = seed ^ len;
-        for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
+        for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| {
             var k1: u32 = v;
             if (native_endian == .Big)
                 k1 = @byteSwap(k1);
@@ -29,13 +29,13 @@ pub const Murmur2_32 = struct {
         const offset = len & 0xfffffffc;
         const rest = len & 3;
         if (rest >= 3) {
-            h1 ^= @intCast(u32, str[offset + 2]) << 16;
+            h1 ^= @as(u32, @intCast(str[offset + 2])) << 16;
         }
         if (rest >= 2) {
-            h1 ^= @intCast(u32, str[offset + 1]) << 8;
+            h1 ^= @as(u32, @intCast(str[offset + 1])) << 8;
         }
         if (rest >= 1) {
-            h1 ^= @intCast(u32, str[offset + 0]);
+            h1 ^= @as(u32, @intCast(str[offset + 0]));
             h1 *%= m;
         }
         h1 ^= h1 >> 13;
@@ -73,12 +73,12 @@ pub const Murmur2_32 = struct {
         const len: u32 = 8;
         var h1: u32 = seed ^ len;
         var k1: u32 = undefined;
-        k1 = @truncate(u32, v) *% m;
+        k1 = @as(u32, @truncate(v)) *% m;
         k1 ^= k1 >> 24;
         k1 *%= m;
         h1 *%= m;
         h1 ^= k1;
-        k1 = @truncate(u32, v >> 32) *% m;
+        k1 = @as(u32, @truncate(v >> 32)) *% m;
         k1 ^= k1 >> 24;
         k1 *%= m;
         h1 *%= m;
@@ -100,7 +100,7 @@ pub const Murmur2_64 = struct {
     pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
         const m: u64 = 0xc6a4a7935bd1e995;
         var h1: u64 = seed ^ (@as(u64, str.len) *% m);
-        for (@ptrCast([*]align(1) const u64, str.ptr)[0 .. str.len / 8]) |v| {
+        for (@as([*]align(1) const u64, @ptrCast(str.ptr))[0 .. str.len / 8]) |v| {
             var k1: u64 = v;
             if (native_endian == .Big)
                 k1 = @byteSwap(k1);
@@ -114,7 +114,7 @@ pub const Murmur2_64 = struct {
         const offset = str.len - rest;
         if (rest > 0) {
             var k1: u64 = 0;
-            @memcpy(@ptrCast([*]u8, &k1)[0..rest], str[offset..]);
+            @memcpy(@as([*]u8, @ptrCast(&k1))[0..rest], str[offset..]);
             if (native_endian == .Big)
                 k1 = @byteSwap(k1);
             h1 ^= k1;
@@ -178,9 +178,9 @@ pub const Murmur3_32 = struct {
     pub fn hashWithSeed(str: []const u8, seed: u32) u32 {
         const c1: u32 = 0xcc9e2d51;
         const c2: u32 = 0x1b873593;
-        const len = @truncate(u32, str.len);
+        const len = @as(u32, @truncate(str.len));
         var h1: u32 = seed;
-        for (@ptrCast([*]align(1) const u32, str.ptr)[0..(len >> 2)]) |v| {
+        for (@as([*]align(1) const u32, @ptrCast(str.ptr))[0..(len >> 2)]) |v| {
             var k1: u32 = v;
             if (native_endian == .Big)
                 k1 = @byteSwap(k1);
@@ -197,13 +197,13 @@ pub const Murmur3_32 = struct {
             const offset = len & 0xfffffffc;
             const rest = len & 3;
             if (rest == 3) {
-                k1 ^= @intCast(u32, str[offset + 2]) << 16;
+                k1 ^= @as(u32, @intCast(str[offset + 2])) << 16;
             }
             if (rest >= 2) {
-                k1 ^= @intCast(u32, str[offset + 1]) << 8;
+                k1 ^= @as(u32, @intCast(str[offset + 1])) << 8;
             }
             if (rest >= 1) {
-                k1 ^= @intCast(u32, str[offset + 0]);
+                k1 ^= @as(u32, @intCast(str[offset + 0]));
                 k1 *%= c1;
                 k1 = rotl32(k1, 15);
                 k1 *%= c2;
@@ -255,14 +255,14 @@ pub const Murmur3_32 = struct {
         const len: u32 = 8;
         var h1: u32 = seed;
         var k1: u32 = undefined;
-        k1 = @truncate(u32, v) *% c1;
+        k1 = @as(u32, @truncate(v)) *% c1;
         k1 = rotl32(k1, 15);
         k1 *%= c2;
         h1 ^= k1;
         h1 = rotl32(h1, 13);
         h1 *%= 5;
         h1 +%= 0xe6546b64;
-        k1 = @truncate(u32, v >> 32) *% c1;
+        k1 = @as(u32, @truncate(v >> 32)) *% c1;
         k1 = rotl32(k1, 15);
         k1 *%= c2;
         h1 ^= k1;
@@ -286,15 +286,15 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
 
     var i: u32 = 0;
     while (i < 256) : (i += 1) {
-        key[i] = @truncate(u8, i);
+        key[i] = @as(u8, @truncate(i));
 
         var h = hash_fn(key[0..i], 256 - i);
         if (native_endian == .Big)
             h = @byteSwap(h);
-        @memcpy(hashes[i * hashbytes ..][0..hashbytes], @ptrCast([*]u8, &h));
+        @memcpy(hashes[i * hashbytes ..][0..hashbytes], @as([*]u8, @ptrCast(&h)));
     }
 
-    return @truncate(u32, hash_fn(&hashes, 0));
+    return @as(u32, @truncate(hash_fn(&hashes, 0)));
 }
 
 test "murmur2_32" {
@@ -307,8 +307,8 @@ test "murmur2_32" {
         v0le = @byteSwap(v0le);
         v1le = @byteSwap(v1le);
     }
-    try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_32.hashUint32(v0));
-    try testing.expectEqual(Murmur2_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_32.hashUint64(v1));
+    try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_32.hashUint32(v0));
+    try testing.expectEqual(Murmur2_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_32.hashUint64(v1));
 }
 
 test "murmur2_64" {
@@ -321,8 +321,8 @@ test "murmur2_64" {
         v0le = @byteSwap(v0le);
         v1le = @byteSwap(v1le);
     }
-    try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur2_64.hashUint32(v0));
-    try testing.expectEqual(Murmur2_64.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur2_64.hashUint64(v1));
+    try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur2_64.hashUint32(v0));
+    try testing.expectEqual(Murmur2_64.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur2_64.hashUint64(v1));
 }
 
 test "murmur3_32" {
@@ -335,6 +335,6 @@ test "murmur3_32" {
         v0le = @byteSwap(v0le);
         v1le = @byteSwap(v1le);
     }
-    try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v0le)[0..4]), Murmur3_32.hashUint32(v0));
-    try testing.expectEqual(Murmur3_32.hash(@ptrCast([*]u8, &v1le)[0..8]), Murmur3_32.hashUint64(v1));
+    try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v0le))[0..4]), Murmur3_32.hashUint32(v0));
+    try testing.expectEqual(Murmur3_32.hash(@as([*]u8, @ptrCast(&v1le))[0..8]), Murmur3_32.hashUint64(v1));
 }
lib/std/hash/wyhash.zig
@@ -132,8 +132,8 @@ pub const Wyhash = struct {
 
     inline fn mum(a: *u64, b: *u64) void {
         const x = @as(u128, a.*) *% b.*;
-        a.* = @truncate(u64, x);
-        b.* = @truncate(u64, x >> 64);
+        a.* = @as(u64, @truncate(x));
+        b.* = @as(u64, @truncate(x >> 64));
     }
 
     inline fn mix(a_: u64, b_: u64) u64 {
@@ -252,7 +252,7 @@ test "test ensure idempotent final call" {
 test "iterative non-divisible update" {
     var buf: [8192]u8 = undefined;
     for (&buf, 0..) |*e, i| {
-        e.* = @truncate(u8, i);
+        e.* = @as(u8, @truncate(i));
     }
 
     const seed = 0x128dad08f;
lib/std/hash/xxhash.zig
@@ -212,7 +212,7 @@ pub const XxHash32 = struct {
                 rotl(u32, self.acc3, 12) +% rotl(u32, self.acc4, 18);
         }
 
-        acc = acc +% @intCast(u32, self.byte_count) +% @intCast(u32, self.buf_len);
+        acc = acc +% @as(u32, @intCast(self.byte_count)) +% @as(u32, @intCast(self.buf_len));
 
         var pos: usize = 0;
         while (pos + 4 <= self.buf_len) : (pos += 4) {
lib/std/heap/arena_allocator.zig
@@ -48,7 +48,7 @@ pub const ArenaAllocator = struct {
             // this has to occur before the free because the free frees node
             const next_it = node.next;
             const align_bits = std.math.log2_int(usize, @alignOf(BufNode));
-            const alloc_buf = @ptrCast([*]u8, node)[0..node.data];
+            const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
             self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
             it = next_it;
         }
@@ -128,7 +128,7 @@ pub const ArenaAllocator = struct {
             const next_it = node.next;
             if (next_it == null)
                 break node;
-            const alloc_buf = @ptrCast([*]u8, node)[0..node.data];
+            const alloc_buf = @as([*]u8, @ptrCast(node))[0..node.data];
             self.child_allocator.rawFree(alloc_buf, align_bits, @returnAddress());
             it = next_it;
         } else null;
@@ -140,7 +140,7 @@ pub const ArenaAllocator = struct {
             // perfect, no need to invoke the child_allocator
             if (first_node.data == total_size)
                 return true;
-            const first_alloc_buf = @ptrCast([*]u8, first_node)[0..first_node.data];
+            const first_alloc_buf = @as([*]u8, @ptrCast(first_node))[0..first_node.data];
             if (self.child_allocator.rawResize(first_alloc_buf, align_bits, total_size, @returnAddress())) {
                 // successful resize
                 first_node.data = total_size;
@@ -151,7 +151,7 @@ pub const ArenaAllocator = struct {
                     return false;
                 };
                 self.child_allocator.rawFree(first_alloc_buf, align_bits, @returnAddress());
-                const node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), new_ptr));
+                const node: *BufNode = @ptrCast(@alignCast(new_ptr));
                 node.* = .{ .data = total_size };
                 self.state.buffer_list.first = node;
             }
@@ -166,7 +166,7 @@ pub const ArenaAllocator = struct {
         const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
         const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
             return null;
-        const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), ptr));
+        const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
         buf_node.* = .{ .data = len };
         self.state.buffer_list.prepend(buf_node);
         self.state.end_index = 0;
@@ -174,16 +174,16 @@ pub const ArenaAllocator = struct {
     }
 
     fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-        const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
         _ = ra;
 
-        const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+        const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
         var cur_node = if (self.state.buffer_list.first) |first_node|
             first_node
         else
             (self.createNode(0, n + ptr_align) orelse return null);
         while (true) {
-            const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data];
+            const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data];
             const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..];
             const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index;
             const adjusted_addr = mem.alignForward(usize, addr, ptr_align);
@@ -208,12 +208,12 @@ pub const ArenaAllocator = struct {
     }
 
     fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
-        const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
         _ = log2_buf_align;
         _ = ret_addr;
 
         const cur_node = self.state.buffer_list.first orelse return false;
-        const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data];
+        const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
         if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) {
             // It's not the most recent allocation, so it cannot be expanded,
             // but it's fine if they want to make it smaller.
@@ -235,10 +235,10 @@ pub const ArenaAllocator = struct {
         _ = log2_buf_align;
         _ = ret_addr;
 
-        const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
+        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
 
         const cur_node = self.state.buffer_list.first orelse return;
-        const cur_buf = @ptrCast([*]u8, cur_node)[@sizeOf(BufNode)..cur_node.data];
+        const cur_buf = @as([*]u8, @ptrCast(cur_node))[@sizeOf(BufNode)..cur_node.data];
 
         if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) {
             self.state.end_index -= buf.len;
lib/std/heap/general_purpose_allocator.zig
@@ -250,7 +250,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             used_count: SlotIndex,
 
             fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
-                return @ptrFromInt(*u8, @intFromPtr(bucket) + @sizeOf(BucketHeader) + index);
+                return @as(*u8, @ptrFromInt(@intFromPtr(bucket) + @sizeOf(BucketHeader) + index));
             }
 
             fn stackTracePtr(
@@ -259,10 +259,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 slot_index: SlotIndex,
                 trace_kind: TraceKind,
             ) *[stack_n]usize {
-                const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class);
+                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(size_class);
                 const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
                     @intFromEnum(trace_kind) * @as(usize, one_trace_size);
-                return @ptrCast(*[stack_n]usize, @alignCast(@alignOf(usize), addr));
+                return @ptrCast(@alignCast(addr));
             }
 
             fn captureStackTrace(
@@ -338,9 +338,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 if (used_byte != 0) {
                     var bit_index: u3 = 0;
                     while (true) : (bit_index += 1) {
-                        const is_used = @truncate(u1, used_byte >> bit_index) != 0;
+                        const is_used = @as(u1, @truncate(used_byte >> bit_index)) != 0;
                         if (is_used) {
-                            const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
+                            const slot_index = @as(SlotIndex, @intCast(used_bits_byte * 8 + bit_index));
                             const stack_trace = bucketStackTrace(bucket, size_class, slot_index, .alloc);
                             const addr = bucket.page + slot_index * size_class;
                             log.err("memory address 0x{x} leaked: {}", .{
@@ -361,7 +361,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             var leaks = false;
             for (self.buckets, 0..) |optional_bucket, bucket_i| {
                 const first_bucket = optional_bucket orelse continue;
-                const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
+                const size_class = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(bucket_i));
                 const used_bits_count = usedBitsCount(size_class);
                 var bucket = first_bucket;
                 while (true) {
@@ -385,7 +385,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
         fn freeBucket(self: *Self, bucket: *BucketHeader, size_class: usize) void {
             const bucket_size = bucketSize(size_class);
-            const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
+            const bucket_slice = @as([*]align(@alignOf(BucketHeader)) u8, @ptrCast(bucket))[0..bucket_size];
             self.backing_allocator.free(bucket_slice);
         }
 
@@ -444,7 +444,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 self.small_allocations.deinit(self.backing_allocator);
             }
             self.* = undefined;
-            return @enumFromInt(Check, @intFromBool(leaks));
+            return @as(Check, @enumFromInt(@intFromBool(leaks)));
         }
 
         fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
@@ -496,7 +496,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             bucket.alloc_cursor += 1;
 
             var used_bits_byte = bucket.usedBits(slot_index / 8);
-            const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary
+            const used_bit_index: u3 = @as(u3, @intCast(slot_index % 8)); // TODO cast should be unnecessary
             used_bits_byte.* |= (@as(u8, 1) << used_bit_index);
             bucket.used_count += 1;
             bucket.captureStackTrace(trace_addr, size_class, slot_index, .alloc);
@@ -667,8 +667,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             new_size: usize,
             ret_addr: usize,
         ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
-            const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
             self.mutex.lock();
             defer self.mutex.unlock();
 
@@ -704,11 +704,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
             };
             const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
-            const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+            const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
             const used_byte_index = slot_index / 8;
-            const used_bit_index = @intCast(u3, slot_index % 8);
+            const used_bit_index = @as(u3, @intCast(slot_index % 8));
             const used_byte = bucket.usedBits(used_byte_index);
-            const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
             if (!is_used) {
                 if (config.safety) {
                     reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
@@ -739,8 +739,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                     }
                     if (log2_old_align != entry.value_ptr.log2_ptr_align) {
                         log.err("Allocation alignment {d} does not match resize alignment {d}. Allocation: {} Resize: {}", .{
-                            @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
-                            @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
+                            @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)),
+                            @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
                             bucketStackTrace(bucket, size_class, slot_index, .alloc),
                             free_stack_trace,
                         });
@@ -786,8 +786,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             log2_old_align_u8: u8,
             ret_addr: usize,
         ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
-            const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            const log2_old_align = @as(Allocator.Log2Align, @intCast(log2_old_align_u8));
             self.mutex.lock();
             defer self.mutex.unlock();
 
@@ -825,11 +825,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 return;
             };
             const byte_offset = @intFromPtr(old_mem.ptr) - @intFromPtr(bucket.page);
-            const slot_index = @intCast(SlotIndex, byte_offset / size_class);
+            const slot_index = @as(SlotIndex, @intCast(byte_offset / size_class));
             const used_byte_index = slot_index / 8;
-            const used_bit_index = @intCast(u3, slot_index % 8);
+            const used_bit_index = @as(u3, @intCast(slot_index % 8));
             const used_byte = bucket.usedBits(used_byte_index);
-            const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
+            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
             if (!is_used) {
                 if (config.safety) {
                     reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free));
@@ -861,8 +861,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                     }
                     if (log2_old_align != entry.value_ptr.log2_ptr_align) {
                         log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {} Free: {}", .{
-                            @as(usize, 1) << @intCast(math.Log2Int(usize), entry.value_ptr.log2_ptr_align),
-                            @as(usize, 1) << @intCast(math.Log2Int(usize), log2_old_align),
+                            @as(usize, 1) << @as(math.Log2Int(usize), @intCast(entry.value_ptr.log2_ptr_align)),
+                            @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_old_align)),
                             bucketStackTrace(bucket, size_class, slot_index, .alloc),
                             free_stack_trace,
                         });
@@ -896,7 +896,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                 } else {
                     // move alloc_cursor to end so we can tell size_class later
                     const slot_count = @divExact(page_size, size_class);
-                    bucket.alloc_cursor = @truncate(SlotIndex, slot_count);
+                    bucket.alloc_cursor = @as(SlotIndex, @truncate(slot_count));
                     if (self.empty_buckets) |prev_bucket| {
                         // empty_buckets is ordered newest to oldest through prev so that if
                         // config.never_unmap is false and backing_allocator reuses freed memory
@@ -936,11 +936,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         }
 
         fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             self.mutex.lock();
             defer self.mutex.unlock();
             if (!self.isAllocationAllowed(len)) return null;
-            return allocInner(self, len, @intCast(Allocator.Log2Align, log2_ptr_align), ret_addr) catch return null;
+            return allocInner(self, len, @as(Allocator.Log2Align, @intCast(log2_ptr_align)), ret_addr) catch return null;
         }
 
         fn allocInner(
@@ -949,7 +949,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             log2_ptr_align: Allocator.Log2Align,
             ret_addr: usize,
         ) Allocator.Error![*]u8 {
-            const new_aligned_size = @max(len, @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align));
+            const new_aligned_size = @max(len, @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align)));
             if (new_aligned_size > largest_bucket_object_size) {
                 try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
                 const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
@@ -1002,7 +1002,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
             const bucket_size = bucketSize(size_class);
             const bucket_bytes = try self.backing_allocator.alignedAlloc(u8, @alignOf(BucketHeader), bucket_size);
-            const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
+            const ptr = @as(*BucketHeader, @ptrCast(bucket_bytes.ptr));
             ptr.* = BucketHeader{
                 .prev = ptr,
                 .next = ptr,
lib/std/heap/log_to_writer_allocator.zig
@@ -34,7 +34,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             log2_ptr_align: u8,
             ra: usize,
         ) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             self.writer.print("alloc : {}", .{len}) catch {};
             const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
             if (result != null) {
@@ -52,7 +52,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             new_len: usize,
             ra: usize,
         ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             if (new_len <= buf.len) {
                 self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
             } else {
@@ -77,7 +77,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             log2_buf_align: u8,
             ra: usize,
         ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             self.writer.print("free  : {}\n", .{buf.len}) catch {};
             self.parent_allocator.rawFree(buf, log2_buf_align, ra);
         }
lib/std/heap/logging_allocator.zig
@@ -59,7 +59,7 @@ pub fn ScopedLoggingAllocator(
             log2_ptr_align: u8,
             ra: usize,
         ) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
             if (result != null) {
                 logHelper(
@@ -84,7 +84,7 @@ pub fn ScopedLoggingAllocator(
             new_len: usize,
             ra: usize,
         ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
                 if (new_len <= buf.len) {
                     logHelper(
@@ -118,7 +118,7 @@ pub fn ScopedLoggingAllocator(
             log2_buf_align: u8,
             ra: usize,
         ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             self.parent_allocator.rawFree(buf, log2_buf_align, ra);
             logHelper(success_log_level, "free - len: {}", .{buf.len});
         }
lib/std/heap/memory_pool.zig
@@ -70,7 +70,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
             var i: usize = 0;
             while (i < initial_size) : (i += 1) {
                 const raw_mem = try pool.allocNew();
-                const free_node = @ptrCast(NodePtr, raw_mem);
+                const free_node = @as(NodePtr, @ptrCast(raw_mem));
                 free_node.* = Node{
                     .next = pool.free_list,
                 };
@@ -106,11 +106,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
                 pool.free_list = item.next;
                 break :blk item;
             } else if (pool_options.growable)
-                @ptrCast(NodePtr, try pool.allocNew())
+                @as(NodePtr, @ptrCast(try pool.allocNew()))
             else
                 return error.OutOfMemory;
 
-            const ptr = @ptrCast(ItemPtr, node);
+            const ptr = @as(ItemPtr, @ptrCast(node));
             ptr.* = undefined;
             return ptr;
         }
@@ -120,7 +120,7 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type
         pub fn destroy(pool: *Pool, ptr: ItemPtr) void {
             ptr.* = undefined;
 
-            const node = @ptrCast(NodePtr, ptr);
+            const node = @as(NodePtr, @ptrCast(ptr));
             node.* = Node{
                 .next = pool.free_list,
             };
lib/std/heap/PageAllocator.zig
@@ -27,7 +27,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
             w.MEM_COMMIT | w.MEM_RESERVE,
             w.PAGE_READWRITE,
         ) catch return null;
-        return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, addr));
+        return @ptrCast(addr);
     }
 
     const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .Unordered);
@@ -40,7 +40,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
         0,
     ) catch return null;
     assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
-    const new_hint = @alignCast(mem.page_size, slice.ptr + aligned_len);
+    const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
     _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
     return slice.ptr;
 }
@@ -66,7 +66,7 @@ fn resize(
                 // For shrinking that is not releasing, we will only
                 // decommit the pages not needed anymore.
                 w.VirtualFree(
-                    @ptrFromInt(*anyopaque, new_addr_end),
+                    @as(*anyopaque, @ptrFromInt(new_addr_end)),
                     old_addr_end - new_addr_end,
                     w.MEM_DECOMMIT,
                 );
@@ -85,9 +85,9 @@ fn resize(
         return true;
 
     if (new_size_aligned < buf_aligned_len) {
-        const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
+        const ptr = buf_unaligned.ptr + new_size_aligned;
         // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
-        os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
+        os.munmap(@alignCast(ptr[0 .. buf_aligned_len - new_size_aligned]));
         return true;
     }
 
@@ -104,7 +104,6 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v
         os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE);
     } else {
         const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size);
-        const ptr = @alignCast(mem.page_size, slice.ptr);
-        os.munmap(ptr[0..buf_aligned_len]);
+        os.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
     }
 }
lib/std/heap/ThreadSafeAllocator.zig
@@ -15,7 +15,7 @@ pub fn allocator(self: *ThreadSafeAllocator) Allocator {
 }
 
 fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-    const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
     self.mutex.lock();
     defer self.mutex.unlock();
 
@@ -23,7 +23,7 @@ fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
 }
 
 fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
-    const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
 
     self.mutex.lock();
     defer self.mutex.unlock();
@@ -32,7 +32,7 @@ fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_ad
 }
 
 fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
-    const self = @ptrCast(*ThreadSafeAllocator, @alignCast(@alignOf(ThreadSafeAllocator), ctx));
+    const self: *ThreadSafeAllocator = @ptrCast(@alignCast(ctx));
 
     self.mutex.lock();
     defer self.mutex.unlock();
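
All three callbacks recover the typed allocator state from `*anyopaque`
with the same chained form, which replaces both old two-argument builtins
at once. A self-contained sketch of the idiom (Ctx and bump are illustrative
stand-ins for the allocator state and callback):

    const std = @import("std");

    const Ctx = struct { hits: usize = 0 };

    fn bump(opaque_ctx: *anyopaque) void {
        // @alignCast restores the alignment and @ptrCast the type; both
        // are inferred from the annotated result location.
        const self: *Ctx = @ptrCast(@alignCast(opaque_ctx));
        self.hits += 1;
    }

    test "recovering a typed pointer from *anyopaque" {
        var ctx: Ctx = .{};
        bump(&ctx);
        try std.testing.expectEqual(@as(usize, 1), ctx.hits);
    }
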
lib/std/heap/WasmAllocator.zig
@@ -47,7 +47,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
     _ = ctx;
     _ = return_address;
     // Make room for the freelist next pointer.
-    const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align);
+    const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
     const actual_len = @max(len +| @sizeOf(usize), alignment);
     const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
     const class = math.log2(slot_size) - min_class;
@@ -55,7 +55,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
         const addr = a: {
             const top_free_ptr = frees[class];
             if (top_free_ptr != 0) {
-                const node = @ptrFromInt(*usize, top_free_ptr + (slot_size - @sizeOf(usize)));
+                const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
                 frees[class] = node.*;
                 break :a top_free_ptr;
             }
@@ -74,11 +74,11 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*
                 break :a next_addr;
             }
         };
-        return @ptrFromInt([*]u8, addr);
+        return @as([*]u8, @ptrFromInt(addr));
     }
     const bigpages_needed = bigPagesNeeded(actual_len);
     const addr = allocBigPages(bigpages_needed);
-    return @ptrFromInt([*]u8, addr);
+    return @as([*]u8, @ptrFromInt(addr));
 }
 
 fn resize(
@@ -92,7 +92,7 @@ fn resize(
     _ = return_address;
     // We don't want to move anything from one size class to another, but we
     // can recover bytes in between powers of two.
-    const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align);
+    const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
     const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
     const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
     const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
@@ -117,20 +117,20 @@ fn free(
 ) void {
     _ = ctx;
     _ = return_address;
-    const buf_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_buf_align);
+    const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
     const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
     const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
     const class = math.log2(slot_size) - min_class;
     const addr = @intFromPtr(buf.ptr);
     if (class < size_class_count) {
-        const node = @ptrFromInt(*usize, addr + (slot_size - @sizeOf(usize)));
+        const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
         node.* = frees[class];
         frees[class] = addr;
     } else {
         const bigpages_needed = bigPagesNeeded(actual_len);
         const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
         const big_slot_size_bytes = pow2_pages * bigpage_size;
-        const node = @ptrFromInt(*usize, addr + (big_slot_size_bytes - @sizeOf(usize)));
+        const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
         const big_class = math.log2(pow2_pages);
         node.* = big_frees[big_class];
         big_frees[big_class] = addr;
@@ -148,14 +148,14 @@ fn allocBigPages(n: usize) usize {
 
     const top_free_ptr = big_frees[class];
     if (top_free_ptr != 0) {
-        const node = @ptrFromInt(*usize, top_free_ptr + (slot_size_bytes - @sizeOf(usize)));
+        const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
         big_frees[class] = node.*;
         return top_free_ptr;
     }
 
     const page_index = @wasmMemoryGrow(0, pow2_pages * pages_per_bigpage);
     if (page_index <= 0) return 0;
-    const addr = @intCast(u32, page_index) * wasm.page_size;
+    const addr = @as(u32, @intCast(page_index)) * wasm.page_size;
     return addr;
 }
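
`@ptrFromInt` follows the same rule, and since a `const` with an inferred
type provides no result location, the automatic rewrite wraps the cast in
`@as`. A sketch of the pattern (nodeAt is an illustrative name; `addr` is
assumed nonzero and suitably aligned, since the cast is safety-checked):

    // Sketch only: reinterpret an address as a pointer to a freelist node.
    // `addr` must be nonzero and aligned to @alignOf(usize).
    fn nodeAt(addr: usize) *usize {
        return @as(*usize, @ptrFromInt(addr));
    }
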
 
lib/std/heap/WasmPageAllocator.zig
@@ -40,7 +40,7 @@ const FreeBlock = struct {
 
     fn getBit(self: FreeBlock, idx: usize) PageStatus {
         const bit_offset = 0;
-        return @enumFromInt(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
+        return @as(PageStatus, @enumFromInt(Io.get(mem.sliceAsBytes(self.data), idx, bit_offset)));
     }
 
     fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
@@ -63,7 +63,7 @@ const FreeBlock = struct {
     fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
         @setCold(true);
         for (self.data, 0..) |segment, i| {
-            const spills_into_next = @bitCast(i128, segment) < 0;
+            const spills_into_next = @as(i128, @bitCast(segment)) < 0;
             const has_enough_bits = @popCount(segment) >= num_pages;
 
             if (!spills_into_next and !has_enough_bits) continue;
@@ -109,7 +109,7 @@ fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
     if (len > maxInt(usize) - (mem.page_size - 1)) return null;
     const page_count = nPages(len);
     const page_idx = allocPages(page_count, log2_align) catch return null;
-    return @ptrFromInt([*]u8, page_idx * mem.page_size);
+    return @as([*]u8, @ptrFromInt(page_idx * mem.page_size));
 }
 
 fn allocPages(page_count: usize, log2_align: u8) !usize {
@@ -129,7 +129,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize {
     const next_page_addr = next_page_idx * mem.page_size;
     const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
     const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
-    const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
+    const result = @wasmMemoryGrow(0, @as(u32, @intCast(drop_page_count + page_count)));
     if (result <= 0)
         return error.OutOfMemory;
     assert(result == next_page_idx);
@@ -137,7 +137,7 @@ fn allocPages(page_count: usize, log2_align: u8) !usize {
     if (drop_page_count > 0) {
         freePages(next_page_idx, aligned_page_idx);
     }
-    return @intCast(usize, aligned_page_idx);
+    return @as(usize, @intCast(aligned_page_idx));
 }
 
 fn freePages(start: usize, end: usize) void {
@@ -151,7 +151,7 @@ fn freePages(start: usize, end: usize) void {
             // TODO: would it be better if we use the first page instead?
             new_end -= 1;
 
-            extended.data = @ptrFromInt([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
+            extended.data = @as([*]u128, @ptrFromInt(new_end * mem.page_size))[0 .. mem.page_size / @sizeOf(u128)];
             // Since this is the first page being freed and we consume it, assume *nothing* is free.
             @memset(extended.data, PageStatus.none_free);
         }
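
`@enumFromInt` migrates the same way; the enum type is no longer passed as
an argument. A sketch mirroring the bitmap lookup above (this PageStatus is
a stand-in with the same shape as the allocator's):

    const PageStatus = enum(u1) { used = 0, free = 1 };

    fn statusFromBit(bit: u1) PageStatus {
        // Before: @enumFromInt(PageStatus, bit).
        return @enumFromInt(bit);
    }
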
lib/std/http/Client.zig
@@ -187,7 +187,7 @@ pub const Connection = struct {
         const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1);
         if (nread == 0) return error.EndOfStream;
         conn.read_start = 0;
-        conn.read_end = @intCast(u16, nread);
+        conn.read_end = @as(u16, @intCast(nread));
     }
 
     pub fn peek(conn: *Connection) []const u8 {
@@ -208,8 +208,8 @@ pub const Connection = struct {
 
             if (available_read > available_buffer) { // partially read buffered data
                 @memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]);
-                out_index += @intCast(u16, available_buffer);
-                conn.read_start += @intCast(u16, available_buffer);
+                out_index += @as(u16, @intCast(available_buffer));
+                conn.read_start += @as(u16, @intCast(available_buffer));
 
                 break;
             } else if (available_read > 0) { // fully read buffered data
@@ -343,7 +343,7 @@ pub const Response = struct {
             else => return error.HttpHeadersInvalid,
         };
         if (first_line[8] != ' ') return error.HttpHeadersInvalid;
-        const status = @enumFromInt(http.Status, parseInt3(first_line[9..12].*));
+        const status = @as(http.Status, @enumFromInt(parseInt3(first_line[9..12].*)));
         const reason = mem.trimLeft(u8, first_line[12..], " ");
 
         res.version = version;
@@ -415,7 +415,7 @@ pub const Response = struct {
     }
 
     inline fn int64(array: *const [8]u8) u64 {
-        return @bitCast(u64, array.*);
+        return @as(u64, @bitCast(array.*));
     }
 
     fn parseInt3(nnn: @Vector(3, u8)) u10 {
@@ -649,7 +649,7 @@ pub const Request = struct {
                 try req.connection.?.data.fill();
 
                 const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek());
-                req.connection.?.data.drop(@intCast(u16, nchecked));
+                req.connection.?.data.drop(@as(u16, @intCast(nchecked)));
 
                 if (req.response.parser.state.isContent()) break;
             }
@@ -768,7 +768,7 @@ pub const Request = struct {
                 try req.connection.?.data.fill();
 
                 const nchecked = try req.response.parser.checkCompleteHead(req.client.allocator, req.connection.?.data.peek());
-                req.connection.?.data.drop(@intCast(u16, nchecked));
+                req.connection.?.data.drop(@as(u16, @intCast(nchecked)));
             }
 
             if (has_trail) {
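
Calls like `conn.drop(@as(u16, @intCast(nchecked)))` show the conservative
side of the automatic fixup: it always wraps the cast in `@as`, even where
a function parameter already supplies a result type, so the shorter
`conn.drop(@intCast(nchecked))` should compile as well. A sketch of the
equivalence:

    const std = @import("std");

    test "the @as wrapper is redundant where a result type already exists" {
        const nread: usize = 512;
        const a: u16 = @intCast(nread); // result location supplies u16
        const b = @as(u16, @intCast(nread)); // what the automatic rewrite emits
        try std.testing.expectEqual(a, b);
    }
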
lib/std/http/protocol.zig
@@ -83,7 +83,7 @@ pub const HeadersParser = struct {
     /// first byte of content is located at `bytes[result]`.
     pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
         const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8);
-        const len = @intCast(u32, bytes.len);
+        const len = @as(u32, @intCast(bytes.len));
         var index: u32 = 0;
 
         while (true) {
@@ -182,8 +182,8 @@ pub const HeadersParser = struct {
 
                         const chunk = bytes[index..][0..vector_len];
                         const v: Vector = chunk.*;
-                        const matches_r = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\r')));
-                        const matches_n = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\n')));
+                        const matches_r = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\r'))));
+                        const matches_n = @as(BitVector, @bitCast(v == @splat(vector_len, @as(u8, '\n'))));
                         const matches_or: SizeVector = matches_r | matches_n;
 
                         const matches = @reduce(.Add, matches_or);
@@ -234,7 +234,7 @@ pub const HeadersParser = struct {
                             },
                             4...vector_len => {
                                 inline for (0..vector_len - 3) |i_usize| {
-                                    const i = @truncate(u32, i_usize);
+                                    const i = @as(u32, @truncate(i_usize));
 
                                     const b32 = int32(chunk[i..][0..4]);
                                     const b16 = intShift(u16, b32);
@@ -405,10 +405,10 @@ pub const HeadersParser = struct {
     /// If the amount returned is less than `bytes.len`, you may assume that the parser is in the `chunk_data` state
     /// and that the first byte of the chunk is at `bytes[result]`.
     pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
-        const len = @intCast(u32, bytes.len);
+        const len = @as(u32, @intCast(bytes.len));
 
         for (bytes[0..], 0..) |c, i| {
-            const index = @intCast(u32, i);
+            const index = @as(u32, @intCast(i));
             switch (r.state) {
                 .chunk_data_suffix => switch (c) {
                     '\r' => r.state = .chunk_data_suffix_r,
@@ -529,7 +529,7 @@ pub const HeadersParser = struct {
                         try conn.fill();
 
                         const nread = @min(conn.peek().len, data_avail);
-                        conn.drop(@intCast(u16, nread));
+                        conn.drop(@as(u16, @intCast(nread)));
                         r.next_chunk_length -= nread;
 
                         if (r.next_chunk_length == 0) r.done = true;
@@ -538,7 +538,7 @@ pub const HeadersParser = struct {
                     } else {
                         const out_avail = buffer.len;
 
-                        const can_read = @intCast(usize, @min(data_avail, out_avail));
+                        const can_read = @as(usize, @intCast(@min(data_avail, out_avail)));
                         const nread = try conn.read(buffer[0..can_read]);
                         r.next_chunk_length -= nread;
 
@@ -551,7 +551,7 @@ pub const HeadersParser = struct {
                     try conn.fill();
 
                     const i = r.findChunkedLen(conn.peek());
-                    conn.drop(@intCast(u16, i));
+                    conn.drop(@as(u16, @intCast(i)));
 
                     switch (r.state) {
                         .invalid => return error.HttpChunkInvalid,
@@ -579,10 +579,10 @@ pub const HeadersParser = struct {
                         try conn.fill();
 
                         const nread = @min(conn.peek().len, data_avail);
-                        conn.drop(@intCast(u16, nread));
+                        conn.drop(@as(u16, @intCast(nread)));
                         r.next_chunk_length -= nread;
                     } else if (out_avail > 0) {
-                        const can_read = @intCast(usize, @min(data_avail, out_avail));
+                        const can_read: usize = @intCast(@min(data_avail, out_avail));
                         const nread = try conn.read(buffer[out_index..][0..can_read]);
                         r.next_chunk_length -= nread;
                         out_index += nread;
@@ -601,21 +601,21 @@ pub const HeadersParser = struct {
 };
 
 inline fn int16(array: *const [2]u8) u16 {
-    return @bitCast(u16, array.*);
+    return @as(u16, @bitCast(array.*));
 }
 
 inline fn int24(array: *const [3]u8) u24 {
-    return @bitCast(u24, array.*);
+    return @as(u24, @bitCast(array.*));
 }
 
 inline fn int32(array: *const [4]u8) u32 {
-    return @bitCast(u32, array.*);
+    return @as(u32, @bitCast(array.*));
 }
 
 inline fn intShift(comptime T: type, x: anytype) T {
     switch (@import("builtin").cpu.arch.endian()) {
-        .Little => return @truncate(T, x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T))),
-        .Big => return @truncate(T, x),
+        .Little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))),
+        .Big => return @as(T, @truncate(x)),
     }
 }
 
@@ -634,7 +634,7 @@ const MockBufferedConnection = struct {
         const nread = try conn.conn.read(conn.buf[0..]);
         if (nread == 0) return error.EndOfStream;
         conn.start = 0;
-        conn.end = @truncate(u16, nread);
+        conn.end = @as(u16, @truncate(nread));
     }
 
     pub fn peek(conn: *MockBufferedConnection) []const u8 {
@@ -652,7 +652,7 @@ const MockBufferedConnection = struct {
             const left = buffer.len - out_index;
 
             if (available > 0) {
-                const can_read = @truncate(u16, @min(available, left));
+                const can_read = @as(u16, @truncate(@min(available, left)));
 
                 @memcpy(buffer[out_index..][0..can_read], conn.buf[conn.start..][0..can_read]);
                 out_index += can_read;
@@ -705,8 +705,8 @@ test "HeadersParser.findHeadersEnd" {
 
     for (0..36) |i| {
         r = HeadersParser.initDynamic(0);
-        try std.testing.expectEqual(@intCast(u32, i), r.findHeadersEnd(data[0..i]));
-        try std.testing.expectEqual(@intCast(u32, 35 - i), r.findHeadersEnd(data[i..]));
+        try std.testing.expectEqual(@as(u32, @intCast(i)), r.findHeadersEnd(data[0..i]));
+        try std.testing.expectEqual(@as(u32, @intCast(35 - i)), r.findHeadersEnd(data[i..]));
     }
 }
 
@@ -761,7 +761,7 @@ test "HeadersParser.read length" {
         try conn.fill();
 
         const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
-        conn.drop(@intCast(u16, nchecked));
+        conn.drop(@as(u16, @intCast(nchecked)));
 
         if (r.state.isContent()) break;
     }
@@ -792,7 +792,7 @@ test "HeadersParser.read chunked" {
         try conn.fill();
 
         const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
-        conn.drop(@intCast(u16, nchecked));
+        conn.drop(@as(u16, @intCast(nchecked)));
 
         if (r.state.isContent()) break;
     }
@@ -822,7 +822,7 @@ test "HeadersParser.read chunked trailer" {
         try conn.fill();
 
         const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
-        conn.drop(@intCast(u16, nchecked));
+        conn.drop(@as(u16, @intCast(nchecked)));
 
         if (r.state.isContent()) break;
     }
@@ -837,7 +837,7 @@ test "HeadersParser.read chunked trailer" {
         try conn.fill();
 
         const nchecked = try r.checkCompleteHead(std.testing.allocator, conn.peek());
-        conn.drop(@intCast(u16, nchecked));
+        conn.drop(@as(u16, @intCast(nchecked)));
 
         if (r.state.isContent()) break;
     }
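
`@truncate` gets the same treatment: the destination width comes from the
result type and only the low-order bits are kept. A minimal sketch:

    const std = @import("std");

    test "single-argument @truncate keeps the low bits" {
        const x: u32 = 0xDEADBEEF;
        const lo: u16 = @truncate(x);
        try std.testing.expectEqual(@as(u16, 0xBEEF), lo);
    }
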
lib/std/http/Server.zig
@@ -46,7 +46,7 @@ pub const Connection = struct {
         const nread = try conn.rawReadAtLeast(conn.read_buf[0..], 1);
         if (nread == 0) return error.EndOfStream;
         conn.read_start = 0;
-        conn.read_end = @intCast(u16, nread);
+        conn.read_end = @as(u16, @intCast(nread));
     }
 
     pub fn peek(conn: *Connection) []const u8 {
@@ -67,8 +67,8 @@ pub const Connection = struct {
 
             if (available_read > available_buffer) { // partially read buffered data
                 @memcpy(buffer[out_index..], conn.read_buf[conn.read_start..conn.read_end][0..available_buffer]);
-                out_index += @intCast(u16, available_buffer);
-                conn.read_start += @intCast(u16, available_buffer);
+                out_index += @as(u16, @intCast(available_buffer));
+                conn.read_start += @as(u16, @intCast(available_buffer));
 
                 break;
             } else if (available_read > 0) { // fully read buffered data
@@ -268,7 +268,7 @@ pub const Request = struct {
     }
 
     inline fn int64(array: *const [8]u8) u64 {
-        return @bitCast(u64, array.*);
+        return @as(u64, @bitCast(array.*));
     }
 
     method: http.Method,
@@ -493,7 +493,7 @@ pub const Response = struct {
             try res.connection.fill();
 
             const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek());
-            res.connection.drop(@intCast(u16, nchecked));
+            res.connection.drop(@as(u16, @intCast(nchecked)));
 
             if (res.request.parser.state.isContent()) break;
         }
@@ -560,7 +560,7 @@ pub const Response = struct {
                 try res.connection.fill();
 
                 const nchecked = try res.request.parser.checkCompleteHead(res.allocator, res.connection.peek());
-                res.connection.drop(@intCast(u16, nchecked));
+                res.connection.drop(@as(u16, @intCast(nchecked)));
             }
 
             if (has_trail) {
lib/std/io/bit_reader.zig
@@ -60,7 +60,7 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type)
             var out_buffer = @as(Buf, 0);
 
             if (self.bit_count > 0) {
-                const n = if (self.bit_count >= bits) @intCast(u3, bits) else self.bit_count;
+                const n = if (self.bit_count >= bits) @as(u3, @intCast(bits)) else self.bit_count;
                 const shift = u7_bit_count - n;
                 switch (endian) {
                     .Big => {
@@ -88,45 +88,45 @@ pub fn BitReader(comptime endian: std.builtin.Endian, comptime ReaderType: type)
             while (out_bits.* < bits) {
                 const n = bits - out_bits.*;
                 const next_byte = self.forward_reader.readByte() catch |err| switch (err) {
-                    error.EndOfStream => return @intCast(U, out_buffer),
+                    error.EndOfStream => return @as(U, @intCast(out_buffer)),
                     else => |e| return e,
                 };
 
                 switch (endian) {
                     .Big => {
                         if (n >= u8_bit_count) {
-                            out_buffer <<= @intCast(u3, u8_bit_count - 1);
+                            out_buffer <<= @as(u3, @intCast(u8_bit_count - 1));
                             out_buffer <<= 1;
                             out_buffer |= @as(Buf, next_byte);
                             out_bits.* += u8_bit_count;
                             continue;
                         }
 
-                        const shift = @intCast(u3, u8_bit_count - n);
-                        out_buffer <<= @intCast(BufShift, n);
+                        const shift = @as(u3, @intCast(u8_bit_count - n));
+                        out_buffer <<= @as(BufShift, @intCast(n));
                         out_buffer |= @as(Buf, next_byte >> shift);
                         out_bits.* += n;
-                        self.bit_buffer = @truncate(u7, next_byte << @intCast(u3, n - 1));
+                        self.bit_buffer = @as(u7, @truncate(next_byte << @as(u3, @intCast(n - 1))));
                         self.bit_count = shift;
                     },
                     .Little => {
                         if (n >= u8_bit_count) {
-                            out_buffer |= @as(Buf, next_byte) << @intCast(BufShift, out_bits.*);
+                            out_buffer |= @as(Buf, next_byte) << @as(BufShift, @intCast(out_bits.*));
                             out_bits.* += u8_bit_count;
                             continue;
                         }
 
-                        const shift = @intCast(u3, u8_bit_count - n);
+                        const shift = @as(u3, @intCast(u8_bit_count - n));
                         const value = (next_byte << shift) >> shift;
-                        out_buffer |= @as(Buf, value) << @intCast(BufShift, out_bits.*);
+                        out_buffer |= @as(Buf, value) << @as(BufShift, @intCast(out_bits.*));
                         out_bits.* += n;
-                        self.bit_buffer = @truncate(u7, next_byte >> @intCast(u3, n));
+                        self.bit_buffer = @as(u7, @truncate(next_byte >> @as(u3, @intCast(n))));
                         self.bit_count = shift;
                     },
                 }
             }
 
-            return @intCast(U, out_buffer);
+            return @as(U, @intCast(out_buffer));
         }
 
         pub fn alignToByte(self: *Self) void {
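
Most of the churn in the bit reader and writer is shift amounts, which must
still be narrowed to the operand's log2 integer type before shifting; only
the spelling of the cast changes. A sketch:

    const std = @import("std");

    test "shift amounts still need the Log2 integer type" {
        const n: usize = 3;
        const byte: u8 = 0b1000_0000;
        // A u8 shift takes a u3 amount; @intCast narrows with a safety check.
        const shifted = byte >> @as(u3, @intCast(n));
        try std.testing.expectEqual(@as(u8, 0b0001_0000), shifted);
    }
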
lib/std/io/bit_writer.zig
@@ -47,27 +47,27 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
             const Buf = std.meta.Int(.unsigned, buf_bit_count);
             const BufShift = math.Log2Int(Buf);
 
-            const buf_value = @intCast(Buf, value);
+            const buf_value = @as(Buf, @intCast(value));
 
-            const high_byte_shift = @intCast(BufShift, buf_bit_count - u8_bit_count);
+            const high_byte_shift = @as(BufShift, @intCast(buf_bit_count - u8_bit_count));
             var in_buffer = switch (endian) {
-                .Big => buf_value << @intCast(BufShift, buf_bit_count - bits),
+                .Big => buf_value << @as(BufShift, @intCast(buf_bit_count - bits)),
                 .Little => buf_value,
             };
             var in_bits = bits;
 
             if (self.bit_count > 0) {
                 const bits_remaining = u8_bit_count - self.bit_count;
-                const n = @intCast(u3, if (bits_remaining > bits) bits else bits_remaining);
+                const n = @as(u3, @intCast(if (bits_remaining > bits) bits else bits_remaining));
                 switch (endian) {
                     .Big => {
-                        const shift = @intCast(BufShift, high_byte_shift + self.bit_count);
-                        const v = @intCast(u8, in_buffer >> shift);
+                        const shift = @as(BufShift, @intCast(high_byte_shift + self.bit_count));
+                        const v = @as(u8, @intCast(in_buffer >> shift));
                         self.bit_buffer |= v;
                         in_buffer <<= n;
                     },
                     .Little => {
-                        const v = @truncate(u8, in_buffer) << @intCast(u3, self.bit_count);
+                        const v = @as(u8, @truncate(in_buffer)) << @as(u3, @intCast(self.bit_count));
                         self.bit_buffer |= v;
                         in_buffer >>= n;
                     },
@@ -87,15 +87,15 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
             while (in_bits >= u8_bit_count) {
                 switch (endian) {
                     .Big => {
-                        const v = @intCast(u8, in_buffer >> high_byte_shift);
+                        const v = @as(u8, @intCast(in_buffer >> high_byte_shift));
                         try self.forward_writer.writeByte(v);
-                        in_buffer <<= @intCast(u3, u8_bit_count - 1);
+                        in_buffer <<= @as(u3, @intCast(u8_bit_count - 1));
                         in_buffer <<= 1;
                     },
                     .Little => {
-                        const v = @truncate(u8, in_buffer);
+                        const v = @as(u8, @truncate(in_buffer));
                         try self.forward_writer.writeByte(v);
-                        in_buffer >>= @intCast(u3, u8_bit_count - 1);
+                        in_buffer >>= @as(u3, @intCast(u8_bit_count - 1));
                         in_buffer >>= 1;
                     },
                 }
@@ -103,10 +103,10 @@ pub fn BitWriter(comptime endian: std.builtin.Endian, comptime WriterType: type)
             }
 
             if (in_bits > 0) {
-                self.bit_count = @intCast(u4, in_bits);
+                self.bit_count = @as(u4, @intCast(in_bits));
                 self.bit_buffer = switch (endian) {
-                    .Big => @truncate(u8, in_buffer >> high_byte_shift),
-                    .Little => @truncate(u8, in_buffer),
+                    .Big => @as(u8, @truncate(in_buffer >> high_byte_shift)),
+                    .Little => @as(u8, @truncate(in_buffer)),
                 };
             }
         }
lib/std/io/c_writer.zig
@@ -13,7 +13,7 @@ pub fn cWriter(c_file: *std.c.FILE) CWriter {
 fn cWriterWrite(c_file: *std.c.FILE, bytes: []const u8) std.fs.File.WriteError!usize {
     const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, c_file);
     if (amt_written >= 0) return amt_written;
-    switch (@enumFromInt(os.E, std.c._errno().*)) {
+    switch (@as(os.E, @enumFromInt(std.c._errno().*))) {
         .SUCCESS => unreachable,
         .INVAL => unreachable,
         .FAULT => unreachable,
lib/std/io/reader.zig
@@ -246,7 +246,7 @@ pub fn Reader(
 
         /// Same as `readByte` except the returned byte is signed.
         pub fn readByteSigned(self: Self) (Error || error{EndOfStream})!i8 {
-            return @bitCast(i8, try self.readByte());
+            return @as(i8, @bitCast(try self.readByte()));
         }
 
         /// Reads exactly `num_bytes` bytes and returns as an array.
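
`@bitCast` reinterprets bits at the same width, with the destination type
now inferred; the `readByteSigned` change above is the one-byte case. A
minimal sketch:

    const std = @import("std");

    test "single-argument @bitCast reinterprets the bits" {
        const raw: u8 = 0xFF;
        const signed: i8 = @bitCast(raw);
        try std.testing.expectEqual(@as(i8, -1), signed);
    }
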
lib/std/json/scanner.zig
@@ -193,7 +193,7 @@ pub const TokenType = enum {
 /// to get meaningful information from this.
 pub const Diagnostics = struct {
     line_number: u64 = 1,
-    line_start_cursor: usize = @bitCast(usize, @as(isize, -1)), // Start just "before" the input buffer to get a 1-based column for line 1.
+    line_start_cursor: usize = @as(usize, @bitCast(@as(isize, -1))), // Start just "before" the input buffer to get a 1-based column for line 1.
     total_bytes_before_current_input: u64 = 0,
     cursor_pointer: *const usize = undefined,
 
@@ -1719,7 +1719,7 @@ const BitStack = struct {
 
     pub fn push(self: *@This(), b: u1) Allocator.Error!void {
         const byte_index = self.bit_len >> 3;
-        const bit_index = @intCast(u3, self.bit_len & 7);
+        const bit_index = @as(u3, @intCast(self.bit_len & 7));
 
         if (self.bytes.items.len <= byte_index) {
             try self.bytes.append(0);
@@ -1733,8 +1733,8 @@ const BitStack = struct {
 
     pub fn peek(self: *const @This()) u1 {
         const byte_index = (self.bit_len - 1) >> 3;
-        const bit_index = @intCast(u3, (self.bit_len - 1) & 7);
-        return @intCast(u1, (self.bytes.items[byte_index] >> bit_index) & 1);
+        const bit_index = @as(u3, @intCast((self.bit_len - 1) & 7));
+        return @as(u1, @intCast((self.bytes.items[byte_index] >> bit_index) & 1));
     }
 
     pub fn pop(self: *@This()) u1 {
lib/std/json/static.zig
@@ -442,7 +442,7 @@ fn internalParse(
                             }
 
                             if (ptrInfo.sentinel) |some| {
-                                const sentinel_value = @ptrCast(*align(1) const ptrInfo.child, some).*;
+                                const sentinel_value = @as(*align(1) const ptrInfo.child, @ptrCast(some)).*;
                                 return try arraylist.toOwnedSliceSentinel(sentinel_value);
                             }
 
@@ -456,7 +456,7 @@ fn internalParse(
                                 // Use our own array list so we can append the sentinel.
                                 var value_list = ArrayList(u8).init(allocator);
                                 _ = try source.allocNextIntoArrayList(&value_list, .alloc_always);
-                                return try value_list.toOwnedSliceSentinel(@ptrCast(*const u8, sentinel_ptr).*);
+                                return try value_list.toOwnedSliceSentinel(@as(*const u8, @ptrCast(sentinel_ptr)).*);
                             }
                             if (ptrInfo.is_const) {
                                 switch (try source.nextAllocMax(allocator, .alloc_if_needed, options.max_value_len.?)) {
@@ -518,8 +518,8 @@ fn internalParseFromValue(
         },
         .Float, .ComptimeFloat => {
             switch (source) {
-                .float => |f| return @floatCast(T, f),
-                .integer => |i| return @floatFromInt(T, i),
+                .float => |f| return @as(T, @floatCast(f)),
+                .integer => |i| return @as(T, @floatFromInt(i)),
                 .number_string, .string => |s| return std.fmt.parseFloat(T, s),
                 else => return error.UnexpectedToken,
             }
@@ -530,12 +530,12 @@ fn internalParseFromValue(
                     if (@round(f) != f) return error.InvalidNumber;
                     if (f > std.math.maxInt(T)) return error.Overflow;
                     if (f < std.math.minInt(T)) return error.Overflow;
-                    return @intFromFloat(T, f);
+                    return @as(T, @intFromFloat(f));
                 },
                 .integer => |i| {
                     if (i > std.math.maxInt(T)) return error.Overflow;
                     if (i < std.math.minInt(T)) return error.Overflow;
-                    return @intCast(T, i);
+                    return @as(T, @intCast(i));
                 },
                 .number_string, .string => |s| {
                     return sliceToInt(T, s);
@@ -686,7 +686,7 @@ fn internalParseFromValue(
                     switch (source) {
                         .array => |array| {
                             const r = if (ptrInfo.sentinel) |sentinel_ptr|
-                                try allocator.allocSentinel(ptrInfo.child, array.items.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*)
+                                try allocator.allocSentinel(ptrInfo.child, array.items.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*)
                             else
                                 try allocator.alloc(ptrInfo.child, array.items.len);
 
@@ -701,7 +701,7 @@ fn internalParseFromValue(
                             // Dynamic length string.
 
                             const r = if (ptrInfo.sentinel) |sentinel_ptr|
-                                try allocator.allocSentinel(ptrInfo.child, s.len, @ptrCast(*align(1) const ptrInfo.child, sentinel_ptr).*)
+                                try allocator.allocSentinel(ptrInfo.child, s.len, @as(*align(1) const ptrInfo.child, @ptrCast(sentinel_ptr)).*)
                             else
                                 try allocator.alloc(ptrInfo.child, s.len);
                             @memcpy(r[0..], s);
@@ -743,7 +743,7 @@ fn sliceToInt(comptime T: type, slice: []const u8) !T {
     const float = try std.fmt.parseFloat(f128, slice);
     if (@round(float) != float) return error.InvalidNumber;
     if (float > std.math.maxInt(T) or float < std.math.minInt(T)) return error.Overflow;
-    return @intCast(T, @intFromFloat(i128, float));
+    return @as(T, @intCast(@as(i128, @intFromFloat(float))));
 }
 
 fn sliceToEnum(comptime T: type, slice: []const u8) !T {
@@ -759,7 +759,7 @@ fn fillDefaultStructValues(comptime T: type, r: *T, fields_seen: *[@typeInfo(T).
     inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
         if (!fields_seen[i]) {
             if (field.default_value) |default_ptr| {
-                const default = @ptrCast(*align(1) const field.type, default_ptr).*;
+                const default = @as(*align(1) const field.type, @ptrCast(default_ptr)).*;
                 @field(r, field.name) = default;
             } else {
                 return error.MissingField;
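
The JSON number paths exercise the float/int conversion builtins; as with
the casts above, each keeps only its operand and takes the destination type
from `@as` or the result location. A sketch:

    const std = @import("std");

    test "float/int conversions without a type argument" {
        const i: i64 = 3;
        const f: f64 = @floatFromInt(i);
        try std.testing.expectEqual(@as(f64, 3.0), f);
        const back: i64 = @intFromFloat(f); // value must fit; safety-checked
        try std.testing.expectEqual(i, back);
    }
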
lib/std/json/stringify.zig
@@ -78,8 +78,8 @@ fn outputUnicodeEscape(
         assert(codepoint <= 0x10FFFF);
         // To escape an extended character that is not in the Basic Multilingual Plane,
         // the character is represented as a 12-character sequence, encoding the UTF-16 surrogate pair.
-        const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
-        const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
+        const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800;
+        const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00;
         try out_stream.writeAll("\\u");
         try std.fmt.formatIntValue(high, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream);
         try out_stream.writeAll("\\u");
lib/std/json/write_stream.zig
@@ -176,8 +176,8 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
                 .ComptimeInt => {
                     return self.emitNumber(@as(std.math.IntFittingRange(value, value), value));
                 },
-                .Float, .ComptimeFloat => if (@floatCast(f64, value) == value) {
-                    try self.stream.print("{}", .{@floatCast(f64, value)});
+                .Float, .ComptimeFloat => if (@as(f64, @floatCast(value)) == value) {
+                    try self.stream.print("{}", .{@as(f64, @floatCast(value))});
                     self.popState();
                     return;
                 },
@@ -294,7 +294,7 @@ test "json write stream" {
 
 fn getJsonObject(allocator: std.mem.Allocator) !Value {
     var value = Value{ .object = ObjectMap.init(allocator) };
-    try value.object.put("one", Value{ .integer = @intCast(i64, 1) });
+    try value.object.put("one", Value{ .integer = @as(i64, @intCast(1)) });
     try value.object.put("two", Value{ .float = 2.0 });
     return value;
 }
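
The write-stream change keeps the old trick of round-tripping through
`@floatCast` to check whether a value is exactly representable as f64. The
same check in isolation:

    const std = @import("std");

    test "round-trip @floatCast detects lossless f64 values" {
        const exact: f80 = 2.5;
        try std.testing.expect(@as(f64, @floatCast(exact)) == exact);
    }
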
lib/std/math/big/int.zig
@@ -30,7 +30,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
     }
 
     const w_value = std.math.absCast(scalar);
-    return @intCast(usize, @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1);
+    return @as(usize, @intCast(@divFloor(@as(Limb, @intCast(math.log2(w_value))), limb_bits) + 1));
 }
 
 pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize {
@@ -87,8 +87,8 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
 
     // r2 = b * c
     const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
-    const r2 = @truncate(Limb, bc);
-    const c2 = @truncate(Limb, bc >> limb_bits);
+    const r2 = @as(Limb, @truncate(bc));
+    const c2 = @as(Limb, @truncate(bc >> limb_bits));
 
     // ov2[0] = ov1[0] + r2
     const ov2 = @addWithOverflow(ov1[0], r2);
@@ -107,8 +107,8 @@ fn subMulLimbWithBorrow(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
 
     // r2 = b * c
     const bc = @as(DoubleLimb, std.math.mulWide(Limb, b, c));
-    const r2 = @truncate(Limb, bc);
-    const c2 = @truncate(Limb, bc >> limb_bits);
+    const r2 = @as(Limb, @truncate(bc));
+    const c2 = @as(Limb, @truncate(bc >> limb_bits));
 
     // ov2[0] = ov1[0] - r2
     const ov2 = @subWithOverflow(ov1[0], r2);
@@ -244,7 +244,7 @@ pub const Mutable = struct {
                 } else {
                     var i: usize = 0;
                     while (true) : (i += 1) {
-                        self.limbs[i] = @truncate(Limb, w_value);
+                        self.limbs[i] = @as(Limb, @truncate(w_value));
                         w_value >>= limb_bits;
 
                         if (w_value == 0) break;
@@ -340,7 +340,7 @@ pub const Mutable = struct {
         }
 
         const req_limbs = calcTwosCompLimbCount(bit_count);
-        const bit = @truncate(Log2Limb, bit_count - 1);
+        const bit = @as(Log2Limb, @truncate(bit_count - 1));
         const signmask = @as(Limb, 1) << bit; // 0b0..010..0 where 1 is the sign bit.
         const mask = (signmask << 1) -% 1; // 0b0..011..1 where the leftmost 1 is the sign bit.
 
@@ -365,7 +365,7 @@ pub const Mutable = struct {
                         r.set(0);
                     } else {
                         const new_req_limbs = calcTwosCompLimbCount(bit_count - 1);
-                        const msb = @truncate(Log2Limb, bit_count - 2);
+                        const msb = @as(Log2Limb, @truncate(bit_count - 2));
                         const new_signmask = @as(Limb, 1) << msb; // 0b0..010..0 where 1 is the sign bit.
                         const new_mask = (new_signmask << 1) -% 1; // 0b0..001..1 where the rightmost 0 is the sign bit.
 
@@ -1153,7 +1153,7 @@ pub const Mutable = struct {
         // const msb = @truncate(Log2Limb, checkbit);
         // const checkmask = (@as(Limb, 1) << msb) -% 1;
 
-        if (a.limbs[a.limbs.len - 1] >> @truncate(Log2Limb, checkbit) != 0) {
+        if (a.limbs[a.limbs.len - 1] >> @as(Log2Limb, @truncate(checkbit)) != 0) {
             // Need to saturate.
             r.setTwosCompIntLimit(if (a.positive) .max else .min, signedness, bit_count);
             return;
@@ -1554,7 +1554,7 @@ pub const Mutable = struct {
             // Optimization for small divisor. By using a half limb we can avoid requiring DoubleLimb
             // divisions in the hot code path. This may often require compiler_rt software-emulation.
             if (divisor < maxInt(HalfLimb)) {
-                lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @intCast(HalfLimb, divisor));
+                lldiv0p5(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], @as(HalfLimb, @intCast(divisor)));
             } else {
                 lldiv1(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], divisor);
             }
@@ -1671,7 +1671,7 @@ pub const Mutable = struct {
             } else {
                 const q0 = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
                 const n0 = @as(DoubleLimb, y.limbs[t]);
-                q.limbs[k] = @intCast(Limb, q0 / n0);
+                q.limbs[k] = @as(Limb, @intCast(q0 / n0));
             }
 
             // 3.2
@@ -1750,7 +1750,7 @@ pub const Mutable = struct {
             return;
         }
 
-        const bit = @truncate(Log2Limb, bit_count - 1);
+        const bit = @as(Log2Limb, @truncate(bit_count - 1));
         const signmask = @as(Limb, 1) << bit;
         const mask = (signmask << 1) -% 1;
 
@@ -1781,7 +1781,7 @@ pub const Mutable = struct {
             return;
         }
 
-        const bit = @truncate(Log2Limb, bit_count - 1);
+        const bit = @as(Log2Limb, @truncate(bit_count - 1));
         const signmask = @as(Limb, 1) << bit; // 0b0..010...0 where 1 is the sign bit.
         const mask = (signmask << 1) -% 1; // 0b0..01..1 where the leftmost 1 is the sign bit.
 
@@ -1912,7 +1912,7 @@ pub const Mutable = struct {
                 .Big => buffer.len - ((total_bits + 7) / 8),
             };
 
-            const sign_bit = @as(u8, 1) << @intCast(u3, (total_bits - 1) % 8);
+            const sign_bit = @as(u8, 1) << @as(u3, @intCast((total_bits - 1) % 8));
             positive = ((buffer[last_byte] & sign_bit) == 0);
         }
 
@@ -1942,7 +1942,7 @@ pub const Mutable = struct {
                 .signed => b: {
                     const SLimb = std.meta.Int(.signed, @bitSizeOf(Limb));
                     const limb = mem.readVarPackedInt(SLimb, buffer, bit_index + bit_offset, bit_count - bit_index, endian, .signed);
-                    break :b @bitCast(Limb, limb);
+                    break :b @as(Limb, @bitCast(limb));
                 },
             };
 
@@ -2170,7 +2170,7 @@ pub const Const = struct {
                 var r: UT = 0;
 
                 if (@sizeOf(UT) <= @sizeOf(Limb)) {
-                    r = @intCast(UT, self.limbs[0]);
+                    r = @as(UT, @intCast(self.limbs[0]));
                 } else {
                     for (self.limbs[0..self.limbs.len], 0..) |_, ri| {
                         const limb = self.limbs[self.limbs.len - ri - 1];
@@ -2180,10 +2180,10 @@ pub const Const = struct {
                 }
 
                 if (info.signedness == .unsigned) {
-                    return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
+                    return if (self.positive) @as(T, @intCast(r)) else error.NegativeIntoUnsigned;
                 } else {
                     if (self.positive) {
-                        return @intCast(T, r);
+                        return @as(T, @intCast(r));
                     } else {
                         if (math.cast(T, r)) |ok| {
                             return -ok;
@@ -2292,7 +2292,7 @@ pub const Const = struct {
             outer: for (self.limbs[0..self.limbs.len]) |limb| {
                 var shift: usize = 0;
                 while (shift < limb_bits) : (shift += base_shift) {
-                    const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1));
+                    const r = @as(u8, @intCast((limb >> @as(Log2Limb, @intCast(shift))) & @as(Limb, base - 1)));
                     const ch = std.fmt.digitToChar(r, case);
                     string[digits_len] = ch;
                     digits_len += 1;
@@ -2340,7 +2340,7 @@ pub const Const = struct {
                 var r_word = r.limbs[0];
                 var i: usize = 0;
                 while (i < digits_per_limb) : (i += 1) {
-                    const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case);
+                    const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case);
                     r_word /= base;
                     string[digits_len] = ch;
                     digits_len += 1;
@@ -2352,7 +2352,7 @@ pub const Const = struct {
 
                 var r_word = q.limbs[0];
                 while (r_word != 0) {
-                    const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), case);
+                    const ch = std.fmt.digitToChar(@as(u8, @intCast(r_word % base)), case);
                     r_word /= base;
                     string[digits_len] = ch;
                     digits_len += 1;
@@ -3680,13 +3680,13 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
             rem.* = 0;
         } else if (pdiv < b) {
             quo[i] = 0;
-            rem.* = @truncate(Limb, pdiv);
+            rem.* = @as(Limb, @truncate(pdiv));
         } else if (pdiv == b) {
             quo[i] = 1;
             rem.* = 0;
         } else {
-            quo[i] = @truncate(Limb, @divTrunc(pdiv, b));
-            rem.* = @truncate(Limb, pdiv - (quo[i] *% b));
+            quo[i] = @as(Limb, @truncate(@divTrunc(pdiv, b)));
+            rem.* = @as(Limb, @truncate(pdiv - (quo[i] *% b)));
         }
     }
 }
@@ -3719,7 +3719,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
     @setRuntimeSafety(debug_safety);
     assert(a.len >= 1);
 
-    const interior_limb_shift = @truncate(Log2Limb, shift);
+    const interior_limb_shift = @as(Log2Limb, @truncate(shift));
 
     // We only need the extra limb if the shift of the last element overflows.
     // This is useful for the implementation of `shiftLeftSat`.
@@ -3741,7 +3741,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
         r[dst_i] = carry | @call(.always_inline, math.shr, .{
             Limb,
             src_digit,
-            limb_bits - @intCast(Limb, interior_limb_shift),
+            limb_bits - @as(Limb, @intCast(interior_limb_shift)),
         });
         carry = (src_digit << interior_limb_shift);
     }
@@ -3756,7 +3756,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
     assert(r.len >= a.len - (shift / limb_bits));
 
     const limb_shift = shift / limb_bits;
-    const interior_limb_shift = @truncate(Log2Limb, shift);
+    const interior_limb_shift = @as(Log2Limb, @truncate(shift));
 
     var carry: Limb = 0;
     var i: usize = 0;
@@ -3769,7 +3769,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
         carry = @call(.always_inline, math.shl, .{
             Limb,
             src_digit,
-            limb_bits - @intCast(Limb, interior_limb_shift),
+            limb_bits - @as(Limb, @intCast(interior_limb_shift)),
         });
     }
 }
@@ -4150,7 +4150,7 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
     // Square the result if the current bit is zero, square and multiply by a if
     // it is one.
     var exp_bits = 32 - 1 - b_leading_zeros;
-    var exp = b << @intCast(u5, 1 + b_leading_zeros);
+    var exp = b << @as(u5, @intCast(1 + b_leading_zeros));
 
     var i: usize = 0;
     while (i < exp_bits) : (i += 1) {
@@ -4174,9 +4174,9 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable {
     assert(storage.len >= 2);
 
     const A_is_positive = A >= 0;
-    const Au = @intCast(DoubleLimb, if (A < 0) -A else A);
-    storage[0] = @truncate(Limb, Au);
-    storage[1] = @truncate(Limb, Au >> limb_bits);
+    const Au = @as(DoubleLimb, @intCast(if (A < 0) -A else A));
+    storage[0] = @as(Limb, @truncate(Au));
+    storage[1] = @as(Limb, @truncate(Au >> limb_bits));
     return .{
         .limbs = storage[0..2],
         .positive = A_is_positive,
lib/std/math/big/int_test.zig
@@ -2898,19 +2898,19 @@ test "big int conversion write twos complement with padding" {
 
     buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa };
     m.readTwosComplement(buffer[0..13], bit_count, .Little, .unsigned);
-    try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq);
+    try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq);
 
     buffer = &[_]u8{ 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
     m.readTwosComplement(buffer[0..13], bit_count, .Big, .unsigned);
-    try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaa_02030405_06070809_0a0b0c0d)) == .eq);
+    try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaa_02030405_06070809_0a0b0c0d))) == .eq);
 
     buffer = &[_]u8{ 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0xaa, 0xaa, 0xaa, 0xaa };
     m.readTwosComplement(buffer[0..16], bit_count, .Little, .unsigned);
-    try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq);
+    try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq);
 
     buffer = &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd };
     m.readTwosComplement(buffer[0..16], bit_count, .Big, .unsigned);
-    try testing.expect(m.toConst().orderAgainstScalar(@truncate(Limb, 0xaaaaaaaa_02030405_06070809_0a0b0c0d)) == .eq);
+    try testing.expect(m.toConst().orderAgainstScalar(@as(Limb, @truncate(0xaaaaaaaa_02030405_06070809_0a0b0c0d))) == .eq);
 
     bit_count = 12 * 8 + 2;
 
@@ -3014,20 +3014,20 @@ test "big int bit reverse" {
     try bitReverseTest(u96, 0x123456789abcdef111213141, 0x828c84888f7b3d591e6a2c48);
     try bitReverseTest(u128, 0x123456789abcdef11121314151617181, 0x818e868a828c84888f7b3d591e6a2c48);
 
-    try bitReverseTest(i8, @bitCast(i8, @as(u8, 0x92)), @bitCast(i8, @as(u8, 0x49)));
-    try bitReverseTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x2c48)));
-    try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x6a2c48)));
-    try bitReverseTest(i24, @bitCast(i24, @as(u24, 0x12345f)), @bitCast(i24, @as(u24, 0xfa2c48)));
-    try bitReverseTest(i24, @bitCast(i24, @as(u24, 0xf23456)), @bitCast(i24, @as(u24, 0x6a2c4f)));
-    try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x1e6a2c48)));
-    try bitReverseTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), @bitCast(i32, @as(u32, 0x1e6a2c4f)));
-    try bitReverseTest(i32, @bitCast(i32, @as(u32, 0x1234567f)), @bitCast(i32, @as(u32, 0xfe6a2c48)));
-    try bitReverseTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x591e6a2c48)));
-    try bitReverseTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0x3d591e6a2c48)));
-    try bitReverseTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0x7b3d591e6a2c48)));
-    try bitReverseTest(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48)));
-    try bitReverseTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48)));
-    try bitReverseTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48)));
+    try bitReverseTest(i8, @as(i8, @bitCast(@as(u8, 0x92))), @as(i8, @bitCast(@as(u8, 0x49))));
+    try bitReverseTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x2c48))));
+    try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x6a2c48))));
+    try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0x12345f))), @as(i24, @bitCast(@as(u24, 0xfa2c48))));
+    try bitReverseTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), @as(i24, @bitCast(@as(u24, 0x6a2c4f))));
+    try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c48))));
+    try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), @as(i32, @bitCast(@as(u32, 0x1e6a2c4f))));
+    try bitReverseTest(i32, @as(i32, @bitCast(@as(u32, 0x1234567f))), @as(i32, @bitCast(@as(u32, 0xfe6a2c48))));
+    try bitReverseTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x591e6a2c48))));
+    try bitReverseTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0x3d591e6a2c48))));
+    try bitReverseTest(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0x7b3d591e6a2c48))));
+    try bitReverseTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0x8f7b3d591e6a2c48))));
+    try bitReverseTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x828c84888f7b3d591e6a2c48))));
+    try bitReverseTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x818e868a828c84888f7b3d591e6a2c48))));
 }
 
 fn byteSwapTest(comptime T: type, comptime input: comptime_int, comptime expected_output: comptime_int) !void {
@@ -3063,16 +3063,16 @@ test "big int byte swap" {
     try byteSwapTest(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
 
     try byteSwapTest(i8, -50, -50);
-    try byteSwapTest(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412)));
-    try byteSwapTest(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412)));
-    try byteSwapTest(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412)));
-    try byteSwapTest(i40, @bitCast(i40, @as(u40, 0x123456789a)), @bitCast(i40, @as(u40, 0x9a78563412)));
-    try byteSwapTest(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412)));
-    try byteSwapTest(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412)));
-    try byteSwapTest(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412)));
-    try byteSwapTest(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412)));
-    try byteSwapTest(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412)));
-    try byteSwapTest(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412)));
+    try byteSwapTest(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
+    try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
+    try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
+    try byteSwapTest(i40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(i40, @bitCast(@as(u40, 0x9a78563412))));
+    try byteSwapTest(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
+    try byteSwapTest(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412))));
+    try byteSwapTest(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
+    try byteSwapTest(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412))));
+    try byteSwapTest(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412))));
+    try byteSwapTest(i128, @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))), @as(i128, @bitCast(@as(u128, 0x8171615141312111f1debc9a78563412))));
 
     try byteSwapTest(u512, 0x80, 1 << 511);
     try byteSwapTest(i512, 0x80, minInt(i512));
@@ -3080,11 +3080,11 @@ test "big int byte swap" {
     try byteSwapTest(i512, -0x100, (1 << 504) - 1);
     try byteSwapTest(i400, -0x100, (1 << 392) - 1);
     try byteSwapTest(i400, -0x2, -(1 << 392) - 1);
-    try byteSwapTest(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2);
-    try byteSwapTest(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412)));
-    try byteSwapTest(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2);
-    try byteSwapTest(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412)));
-    try byteSwapTest(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412)));
+    try byteSwapTest(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2);
+    try byteSwapTest(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412))));
+    try byteSwapTest(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
+    try byteSwapTest(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
+    try byteSwapTest(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
 }
 
 test "big.int mul multi-multi alias r with a and b" {
lib/std/math/big/rational.zig
@@ -137,7 +137,7 @@ pub const Rational = struct {
         debug.assert(@typeInfo(T) == .Float);
 
         const UnsignedInt = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
-        const f_bits = @bitCast(UnsignedInt, f);
+        const f_bits = @as(UnsignedInt, @bitCast(f));
 
         const exponent_bits = math.floatExponentBits(T);
         const exponent_bias = (1 << (exponent_bits - 1)) - 1;
@@ -146,7 +146,7 @@ pub const Rational = struct {
         const exponent_mask = (1 << exponent_bits) - 1;
         const mantissa_mask = (1 << mantissa_bits) - 1;
 
-        var exponent = @intCast(i16, (f_bits >> mantissa_bits) & exponent_mask);
+        var exponent = @as(i16, @intCast((f_bits >> mantissa_bits) & exponent_mask));
         var mantissa = f_bits & mantissa_mask;
 
         switch (exponent) {
@@ -177,9 +177,9 @@ pub const Rational = struct {
 
         try self.q.set(1);
         if (shift >= 0) {
-            try self.q.shiftLeft(&self.q, @intCast(usize, shift));
+            try self.q.shiftLeft(&self.q, @as(usize, @intCast(shift)));
         } else {
-            try self.p.shiftLeft(&self.p, @intCast(usize, -shift));
+            try self.p.shiftLeft(&self.p, @as(usize, @intCast(-shift)));
         }
 
         try self.reduce();
@@ -210,7 +210,7 @@ pub const Rational = struct {
         }
 
         // 1. left-shift a or sub so that a/b is in [1 << msize1, 1 << (msize2 + 1)]
-        var exp = @intCast(isize, self.p.bitCountTwosComp()) - @intCast(isize, self.q.bitCountTwosComp());
+        var exp = @as(isize, @intCast(self.p.bitCountTwosComp())) - @as(isize, @intCast(self.q.bitCountTwosComp()));
 
         var a2 = try self.p.clone();
         defer a2.deinit();
@@ -220,9 +220,9 @@ pub const Rational = struct {
 
         const shift = msize2 - exp;
         if (shift >= 0) {
-            try a2.shiftLeft(&a2, @intCast(usize, shift));
+            try a2.shiftLeft(&a2, @as(usize, @intCast(shift)));
         } else {
-            try b2.shiftLeft(&b2, @intCast(usize, -shift));
+            try b2.shiftLeft(&b2, @as(usize, @intCast(-shift)));
         }
 
         // 2. compute quotient and remainder
@@ -254,8 +254,8 @@ pub const Rational = struct {
         // 4. Rounding
         if (emin - msize <= exp and exp <= emin) {
             // denormal
-            const shift1 = @intCast(math.Log2Int(BitReprType), emin - (exp - 1));
-            const lost_bits = mantissa & ((@intCast(BitReprType, 1) << shift1) - 1);
+            const shift1 = @as(math.Log2Int(BitReprType), @intCast(emin - (exp - 1)));
+            const lost_bits = mantissa & ((@as(BitReprType, @intCast(1)) << shift1) - 1);
             have_rem = have_rem or lost_bits != 0;
             mantissa >>= shift1;
             exp = 2 - ebias;
@@ -276,7 +276,7 @@ pub const Rational = struct {
         }
         mantissa >>= 1;
 
-        const f = math.scalbn(@floatFromInt(T, mantissa), @intCast(i32, exp - msize1));
+        const f = math.scalbn(@as(T, @floatFromInt(mantissa)), @as(i32, @intCast(exp - msize1)));
         if (math.isInf(f)) {
             exact = false;
         }
@@ -477,7 +477,7 @@ fn extractLowBits(a: Int, comptime T: type) T {
     const t_bits = @typeInfo(T).Int.bits;
     const limb_bits = @typeInfo(Limb).Int.bits;
     if (t_bits <= limb_bits) {
-        return @truncate(T, a.limbs[0]);
+        return @as(T, @truncate(a.limbs[0]));
     } else {
         var r: T = 0;
         comptime var i: usize = 0;
lib/std/math/complex/atan.zig
@@ -32,7 +32,7 @@ fn redupif32(x: f32) f32 {
         t -= 0.5;
     }
 
-    const u = @floatFromInt(f32, @intFromFloat(i32, t));
+    const u = @as(f32, @floatFromInt(@as(i32, @intFromFloat(t))));
     return ((x - u * DP1) - u * DP2) - t * DP3;
 }
 
@@ -81,7 +81,7 @@ fn redupif64(x: f64) f64 {
         t -= 0.5;
     }
 
-    const u = @floatFromInt(f64, @intFromFloat(i64, t));
+    const u = @as(f64, @floatFromInt(@as(i64, @intFromFloat(t))));
     return ((x - u * DP1) - u * DP2) - t * DP3;
 }
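
The `redupif32`/`redupif64` round trips above show why the rewrite sometimes nests two `@as` wrappers: when one cast feeds another builtin rather than a typed location, the intermediate result type must still be pinned explicitly. A reduced sketch under the same assumption the originals make (the value of `t` fits in the integer type):

    // Truncate toward zero by bouncing through i32. The inner @as is
    // required because @intFromFloat's result feeds a builtin operand,
    // which supplies no result type; the outer @as is the mechanical
    // wrapper, even though the return type would also pin f32 here.
    fn truncTowardZero(t: f32) f32 {
        return @as(f32, @floatFromInt(@as(i32, @intFromFloat(t))));
    }
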
 
lib/std/math/complex/cosh.zig
@@ -26,10 +26,10 @@ fn cosh32(z: Complex(f32)) Complex(f32) {
     const x = z.re;
     const y = z.im;
 
-    const hx = @bitCast(u32, x);
+    const hx = @as(u32, @bitCast(x));
     const ix = hx & 0x7fffffff;
 
-    const hy = @bitCast(u32, y);
+    const hy = @as(u32, @bitCast(y));
     const iy = hy & 0x7fffffff;
 
     if (ix < 0x7f800000 and iy < 0x7f800000) {
@@ -89,14 +89,14 @@ fn cosh64(z: Complex(f64)) Complex(f64) {
     const x = z.re;
     const y = z.im;
 
-    const fx = @bitCast(u64, x);
-    const hx = @intCast(u32, fx >> 32);
-    const lx = @truncate(u32, fx);
+    const fx = @as(u64, @bitCast(x));
+    const hx = @as(u32, @intCast(fx >> 32));
+    const lx = @as(u32, @truncate(fx));
     const ix = hx & 0x7fffffff;
 
-    const fy = @bitCast(u64, y);
-    const hy = @intCast(u32, fy >> 32);
-    const ly = @truncate(u32, fy);
+    const fy = @as(u64, @bitCast(y));
+    const hy = @as(u32, @intCast(fy >> 32));
+    const ly = @as(u32, @truncate(fy));
     const iy = hy & 0x7fffffff;
 
     // nearly non-exceptional case where x, y are finite
lib/std/math/complex/exp.zig
@@ -30,13 +30,13 @@ fn exp32(z: Complex(f32)) Complex(f32) {
     const x = z.re;
     const y = z.im;
 
-    const hy = @bitCast(u32, y) & 0x7fffffff;
+    const hy = @as(u32, @bitCast(y)) & 0x7fffffff;
     // cexp(x + i0) = exp(x) + i0
     if (hy == 0) {
         return Complex(f32).init(@exp(x), y);
     }
 
-    const hx = @bitCast(u32, x);
+    const hx = @as(u32, @bitCast(x));
     // cexp(0 + iy) = cos(y) + isin(y)
     if ((hx & 0x7fffffff) == 0) {
         return Complex(f32).init(@cos(y), @sin(y));
@@ -75,18 +75,18 @@ fn exp64(z: Complex(f64)) Complex(f64) {
     const x = z.re;
     const y = z.im;
 
-    const fy = @bitCast(u64, y);
-    const hy = @intCast(u32, (fy >> 32) & 0x7fffffff);
-    const ly = @truncate(u32, fy);
+    const fy = @as(u64, @bitCast(y));
+    const hy = @as(u32, @intCast((fy >> 32) & 0x7fffffff));
+    const ly = @as(u32, @truncate(fy));
 
     // cexp(x + i0) = exp(x) + i0
     if (hy | ly == 0) {
         return Complex(f64).init(@exp(x), y);
     }
 
-    const fx = @bitCast(u64, x);
-    const hx = @intCast(u32, fx >> 32);
-    const lx = @truncate(u32, fx);
+    const fx = @as(u64, @bitCast(x));
+    const hx = @as(u32, @intCast(fx >> 32));
+    const lx = @as(u32, @truncate(fx));
 
     // cexp(0 + iy) = cos(y) + isin(y)
     if ((hx & 0x7fffffff) | lx == 0) {
lib/std/math/complex/ldexp.zig
@@ -27,10 +27,10 @@ fn frexp_exp32(x: f32, expt: *i32) f32 {
     const kln2 = 162.88958740; // k * ln2
 
     const exp_x = @exp(x - kln2);
-    const hx = @bitCast(u32, exp_x);
+    const hx = @as(u32, @bitCast(exp_x));
     // TODO zig should allow this cast implicitly because it should know the value is in range
-    expt.* = @intCast(i32, hx >> 23) - (0x7f + 127) + k;
-    return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23));
+    expt.* = @as(i32, @intCast(hx >> 23)) - (0x7f + 127) + k;
+    return @as(f32, @bitCast((hx & 0x7fffff) | ((0x7f + 127) << 23)));
 }
 
 fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) {
@@ -39,10 +39,10 @@ fn ldexp_cexp32(z: Complex(f32), expt: i32) Complex(f32) {
     const exptf = expt + ex_expt;
 
     const half_expt1 = @divTrunc(exptf, 2);
-    const scale1 = @bitCast(f32, (0x7f + half_expt1) << 23);
+    const scale1 = @as(f32, @bitCast((0x7f + half_expt1) << 23));
 
     const half_expt2 = exptf - half_expt1;
-    const scale2 = @bitCast(f32, (0x7f + half_expt2) << 23);
+    const scale2 = @as(f32, @bitCast((0x7f + half_expt2) << 23));
 
     return Complex(f32).init(
         @cos(z.im) * exp_x * scale1 * scale2,
@@ -56,14 +56,14 @@ fn frexp_exp64(x: f64, expt: *i32) f64 {
 
     const exp_x = @exp(x - kln2);
 
-    const fx = @bitCast(u64, exp_x);
-    const hx = @intCast(u32, fx >> 32);
-    const lx = @truncate(u32, fx);
+    const fx = @as(u64, @bitCast(exp_x));
+    const hx = @as(u32, @intCast(fx >> 32));
+    const lx = @as(u32, @truncate(fx));
 
-    expt.* = @intCast(i32, hx >> 20) - (0x3ff + 1023) + k;
+    expt.* = @as(i32, @intCast(hx >> 20)) - (0x3ff + 1023) + k;
 
     const high_word = (hx & 0xfffff) | ((0x3ff + 1023) << 20);
-    return @bitCast(f64, (@as(u64, high_word) << 32) | lx);
+    return @as(f64, @bitCast((@as(u64, high_word) << 32) | lx));
 }
 
 fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) {
@@ -72,10 +72,10 @@ fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) {
     const exptf = @as(i64, expt + ex_expt);
 
     const half_expt1 = @divTrunc(exptf, 2);
-    const scale1 = @bitCast(f64, (0x3ff + half_expt1) << (20 + 32));
+    const scale1 = @as(f64, @bitCast((0x3ff + half_expt1) << (20 + 32)));
 
     const half_expt2 = exptf - half_expt1;
-    const scale2 = @bitCast(f64, (0x3ff + half_expt2) << (20 + 32));
+    const scale2 = @as(f64, @bitCast((0x3ff + half_expt2) << (20 + 32)));
 
     return Complex(f64).init(
         @cos(z.im) * exp_x * scale1 * scale2,
lib/std/math/complex/sinh.zig
@@ -26,10 +26,10 @@ fn sinh32(z: Complex(f32)) Complex(f32) {
     const x = z.re;
     const y = z.im;
 
-    const hx = @bitCast(u32, x);
+    const hx = @as(u32, @bitCast(x));
     const ix = hx & 0x7fffffff;
 
-    const hy = @bitCast(u32, y);
+    const hy = @as(u32, @bitCast(y));
     const iy = hy & 0x7fffffff;
 
     if (ix < 0x7f800000 and iy < 0x7f800000) {
@@ -89,14 +89,14 @@ fn sinh64(z: Complex(f64)) Complex(f64) {
     const x = z.re;
     const y = z.im;
 
-    const fx = @bitCast(u64, x);
-    const hx = @intCast(u32, fx >> 32);
-    const lx = @truncate(u32, fx);
+    const fx = @as(u64, @bitCast(x));
+    const hx = @as(u32, @intCast(fx >> 32));
+    const lx = @as(u32, @truncate(fx));
     const ix = hx & 0x7fffffff;
 
-    const fy = @bitCast(u64, y);
-    const hy = @intCast(u32, fy >> 32);
-    const ly = @truncate(u32, fy);
+    const fy = @as(u64, @bitCast(y));
+    const hy = @as(u32, @intCast(fy >> 32));
+    const ly = @as(u32, @truncate(fy));
     const iy = hy & 0x7fffffff;
 
     if (ix < 0x7ff00000 and iy < 0x7ff00000) {
lib/std/math/complex/sqrt.zig
@@ -58,14 +58,14 @@ fn sqrt32(z: Complex(f32)) Complex(f32) {
     if (dx >= 0) {
         const t = @sqrt((dx + math.hypot(f64, dx, dy)) * 0.5);
         return Complex(f32).init(
-            @floatCast(f32, t),
-            @floatCast(f32, dy / (2.0 * t)),
+            @as(f32, @floatCast(t)),
+            @as(f32, @floatCast(dy / (2.0 * t))),
         );
     } else {
         const t = @sqrt((-dx + math.hypot(f64, dx, dy)) * 0.5);
         return Complex(f32).init(
-            @floatCast(f32, @fabs(y) / (2.0 * t)),
-            @floatCast(f32, math.copysign(t, y)),
+            @as(f32, @floatCast(@fabs(y) / (2.0 * t))),
+            @as(f32, @floatCast(math.copysign(t, y))),
         );
     }
 }
lib/std/math/complex/tanh.zig
@@ -24,7 +24,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) {
     const x = z.re;
     const y = z.im;
 
-    const hx = @bitCast(u32, x);
+    const hx = @as(u32, @bitCast(x));
     const ix = hx & 0x7fffffff;
 
     if (ix >= 0x7f800000) {
@@ -32,7 +32,7 @@ fn tanh32(z: Complex(f32)) Complex(f32) {
             const r = if (y == 0) y else x * y;
             return Complex(f32).init(x, r);
         }
-        const xx = @bitCast(f32, hx - 0x40000000);
+        const xx = @as(f32, @bitCast(hx - 0x40000000));
         const r = if (math.isInf(y)) y else @sin(y) * @cos(y);
         return Complex(f32).init(xx, math.copysign(@as(f32, 0.0), r));
     }
@@ -62,11 +62,11 @@ fn tanh64(z: Complex(f64)) Complex(f64) {
     const x = z.re;
     const y = z.im;
 
-    const fx = @bitCast(u64, x);
+    const fx = @as(u64, @bitCast(x));
     // TODO: zig should allow this conversion implicitly because it can notice that the value necessarily
     // fits in range.
-    const hx = @intCast(u32, fx >> 32);
-    const lx = @truncate(u32, fx);
+    const hx = @as(u32, @intCast(fx >> 32));
+    const lx = @as(u32, @truncate(fx));
     const ix = hx & 0x7fffffff;
 
     if (ix >= 0x7ff00000) {
@@ -75,7 +75,7 @@ fn tanh64(z: Complex(f64)) Complex(f64) {
             return Complex(f64).init(x, r);
         }
 
-        const xx = @bitCast(f64, (@as(u64, hx - 0x40000000) << 32) | lx);
+        const xx = @as(f64, @bitCast((@as(u64, hx - 0x40000000) << 32) | lx));
         const r = if (math.isInf(y)) y else @sin(y) * @cos(y);
         return Complex(f64).init(xx, math.copysign(@as(f64, 0.0), r));
     }
lib/std/math/acos.zig
@@ -36,7 +36,7 @@ fn acos32(x: f32) f32 {
     const pio2_hi = 1.5707962513e+00;
     const pio2_lo = 7.5497894159e-08;
 
-    const hx: u32 = @bitCast(u32, x);
+    const hx: u32 = @as(u32, @bitCast(x));
     const ix: u32 = hx & 0x7FFFFFFF;
 
     // |x| >= 1 or nan
@@ -72,8 +72,8 @@ fn acos32(x: f32) f32 {
     // x > 0.5
     const z = (1.0 - x) * 0.5;
     const s = @sqrt(z);
-    const jx = @bitCast(u32, s);
-    const df = @bitCast(f32, jx & 0xFFFFF000);
+    const jx = @as(u32, @bitCast(s));
+    const df = @as(f32, @bitCast(jx & 0xFFFFF000));
     const c = (z - df * df) / (s + df);
     const w = r32(z) * s + c;
     return 2 * (df + w);
@@ -100,13 +100,13 @@ fn acos64(x: f64) f64 {
     const pio2_hi: f64 = 1.57079632679489655800e+00;
     const pio2_lo: f64 = 6.12323399573676603587e-17;
 
-    const ux = @bitCast(u64, x);
-    const hx = @intCast(u32, ux >> 32);
+    const ux = @as(u64, @bitCast(x));
+    const hx = @as(u32, @intCast(ux >> 32));
     const ix = hx & 0x7FFFFFFF;
 
     // |x| >= 1 or nan
     if (ix >= 0x3FF00000) {
-        const lx = @intCast(u32, ux & 0xFFFFFFFF);
+        const lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
 
         // acos(1) = 0, acos(-1) = pi
         if ((ix - 0x3FF00000) | lx == 0) {
@@ -141,8 +141,8 @@ fn acos64(x: f64) f64 {
     // x > 0.5
     const z = (1.0 - x) * 0.5;
     const s = @sqrt(z);
-    const jx = @bitCast(u64, s);
-    const df = @bitCast(f64, jx & 0xFFFFFFFF00000000);
+    const jx = @as(u64, @bitCast(s));
+    const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000));
     const c = (z - df * df) / (s + df);
     const w = r64(z) * s + c;
     return 2 * (df + w);
lib/std/math/acosh.zig
@@ -24,7 +24,7 @@ pub fn acosh(x: anytype) @TypeOf(x) {
 
 // acosh(x) = log(x + sqrt(x * x - 1))
 fn acosh32(x: f32) f32 {
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     const i = u & 0x7FFFFFFF;
 
     // |x| < 2, invalid if x < 1 or nan
@@ -42,7 +42,7 @@ fn acosh32(x: f32) f32 {
 }
 
 fn acosh64(x: f64) f64 {
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
     const e = (u >> 52) & 0x7FF;
 
     // |x| < 2, invalid if x < 1 or nan
lib/std/math/asin.zig
@@ -36,7 +36,7 @@ fn r32(z: f32) f32 {
 fn asin32(x: f32) f32 {
     const pio2 = 1.570796326794896558e+00;
 
-    const hx: u32 = @bitCast(u32, x);
+    const hx: u32 = @as(u32, @bitCast(x));
     const ix: u32 = hx & 0x7FFFFFFF;
 
     // |x| >= 1
@@ -92,13 +92,13 @@ fn asin64(x: f64) f64 {
     const pio2_hi: f64 = 1.57079632679489655800e+00;
     const pio2_lo: f64 = 6.12323399573676603587e-17;
 
-    const ux = @bitCast(u64, x);
-    const hx = @intCast(u32, ux >> 32);
+    const ux = @as(u64, @bitCast(x));
+    const hx = @as(u32, @intCast(ux >> 32));
     const ix = hx & 0x7FFFFFFF;
 
     // |x| >= 1 or nan
     if (ix >= 0x3FF00000) {
-        const lx = @intCast(u32, ux & 0xFFFFFFFF);
+        const lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
 
         // asin(1) = +-pi/2 with inexact
         if ((ix - 0x3FF00000) | lx == 0) {
@@ -128,8 +128,8 @@ fn asin64(x: f64) f64 {
     if (ix >= 0x3FEF3333) {
         fx = pio2_hi - 2 * (s + s * r);
     } else {
-        const jx = @bitCast(u64, s);
-        const df = @bitCast(f64, jx & 0xFFFFFFFF00000000);
+        const jx = @as(u64, @bitCast(s));
+        const df = @as(f64, @bitCast(jx & 0xFFFFFFFF00000000));
         const c = (z - df * df) / (s + df);
         fx = 0.5 * pio2_hi - (2 * s * r - (pio2_lo - 2 * c) - (0.5 * pio2_hi - 2 * df));
     }
lib/std/math/asinh.zig
@@ -26,11 +26,11 @@ pub fn asinh(x: anytype) @TypeOf(x) {
 
 // asinh(x) = sign(x) * log(|x| + sqrt(x * x + 1)) ~= x - x^3/6 + o(x^5)
 fn asinh32(x: f32) f32 {
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     const i = u & 0x7FFFFFFF;
     const s = i >> 31;
 
-    var rx = @bitCast(f32, i); // |x|
+    var rx = @as(f32, @bitCast(i)); // |x|
 
     // TODO: Shouldn't need this explicit check.
     if (math.isNegativeInf(x)) {
@@ -58,11 +58,11 @@ fn asinh32(x: f32) f32 {
 }
 
 fn asinh64(x: f64) f64 {
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
     const e = (u >> 52) & 0x7FF;
     const s = e >> 63;
 
-    var rx = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x|
+    var rx = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x|
 
     if (math.isNegativeInf(x)) {
         return x;
lib/std/math/atan.zig
@@ -46,7 +46,7 @@ fn atan32(x_: f32) f32 {
     };
 
     var x = x_;
-    var ix: u32 = @bitCast(u32, x);
+    var ix: u32 = @as(u32, @bitCast(x));
     const sign = ix >> 31;
     ix &= 0x7FFFFFFF;
 
@@ -143,8 +143,8 @@ fn atan64(x_: f64) f64 {
     };
 
     var x = x_;
-    var ux = @bitCast(u64, x);
-    var ix = @intCast(u32, ux >> 32);
+    var ux = @as(u64, @bitCast(x));
+    var ix = @as(u32, @intCast(ux >> 32));
     const sign = ix >> 31;
     ix &= 0x7FFFFFFF;
 
@@ -165,7 +165,7 @@ fn atan64(x_: f64) f64 {
         // |x| < 2^(-27)
         if (ix < 0x3E400000) {
             if (ix < 0x00100000) {
-                math.doNotOptimizeAway(@floatCast(f32, x));
+                math.doNotOptimizeAway(@as(f32, @floatCast(x)));
             }
             return x;
         }
@@ -212,7 +212,7 @@ fn atan64(x_: f64) f64 {
 }
 
 test "math.atan" {
-    try expect(@bitCast(u32, atan(@as(f32, 0.2))) == @bitCast(u32, atan32(0.2)));
+    try expect(@as(u32, @bitCast(atan(@as(f32, 0.2)))) == @as(u32, @bitCast(atan32(0.2))));
     try expect(atan(@as(f64, 0.2)) == atan64(0.2));
 }
 
lib/std/math/atan2.zig
@@ -44,8 +44,8 @@ fn atan2_32(y: f32, x: f32) f32 {
         return x + y;
     }
 
-    var ix = @bitCast(u32, x);
-    var iy = @bitCast(u32, y);
+    var ix = @as(u32, @bitCast(x));
+    var iy = @as(u32, @bitCast(y));
 
     // x = 1.0
     if (ix == 0x3F800000) {
@@ -129,13 +129,13 @@ fn atan2_64(y: f64, x: f64) f64 {
         return x + y;
     }
 
-    var ux = @bitCast(u64, x);
-    var ix = @intCast(u32, ux >> 32);
-    var lx = @intCast(u32, ux & 0xFFFFFFFF);
+    var ux = @as(u64, @bitCast(x));
+    var ix = @as(u32, @intCast(ux >> 32));
+    var lx = @as(u32, @intCast(ux & 0xFFFFFFFF));
 
-    var uy = @bitCast(u64, y);
-    var iy = @intCast(u32, uy >> 32);
-    var ly = @intCast(u32, uy & 0xFFFFFFFF);
+    var uy = @as(u64, @bitCast(y));
+    var iy = @as(u32, @intCast(uy >> 32));
+    var ly = @as(u32, @intCast(uy & 0xFFFFFFFF));
 
     // x = 1.0
     if ((ix -% 0x3FF00000) | lx == 0) {
lib/std/math/atanh.zig
@@ -26,11 +26,11 @@ pub fn atanh(x: anytype) @TypeOf(x) {
 
 // atanh(x) = log((1 + x) / (1 - x)) / 2 = log1p(2x / (1 - x)) / 2 ~= x + x^3 / 3 + o(x^5)
 fn atanh_32(x: f32) f32 {
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     const i = u & 0x7FFFFFFF;
     const s = u >> 31;
 
-    var y = @bitCast(f32, i); // |x|
+    var y = @as(f32, @bitCast(i)); // |x|
 
     if (y == 1.0) {
         return math.copysign(math.inf(f32), x);
@@ -55,11 +55,11 @@ fn atanh_32(x: f32) f32 {
 }
 
 fn atanh_64(x: f64) f64 {
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
     const e = (u >> 52) & 0x7FF;
     const s = u >> 63;
 
-    var y = @bitCast(f64, u & (maxInt(u64) >> 1)); // |x|
+    var y = @as(f64, @bitCast(u & (maxInt(u64) >> 1))); // |x|
 
     if (y == 1.0) {
         return math.copysign(math.inf(f64), x);
@@ -69,7 +69,7 @@ fn atanh_64(x: f64) f64 {
         if (e < 0x3FF - 32) {
             // underflow
             if (e == 0) {
-                math.doNotOptimizeAway(@floatCast(f32, y));
+                math.doNotOptimizeAway(@as(f32, @floatCast(y)));
             }
         }
         // |x| < 0.5
lib/std/math/cbrt.zig
@@ -27,7 +27,7 @@ fn cbrt32(x: f32) f32 {
     const B1: u32 = 709958130; // (127 - 127.0 / 3 - 0.03306235651) * 2^23
     const B2: u32 = 642849266; // (127 - 127.0 / 3 - 24 / 3 - 0.03306235651) * 2^23
 
-    var u = @bitCast(u32, x);
+    var u = @as(u32, @bitCast(x));
     var hx = u & 0x7FFFFFFF;
 
     // cbrt(nan, inf) = itself
@@ -41,7 +41,7 @@ fn cbrt32(x: f32) f32 {
         if (hx == 0) {
             return x;
         }
-        u = @bitCast(u32, x * 0x1.0p24);
+        u = @as(u32, @bitCast(x * 0x1.0p24));
         hx = u & 0x7FFFFFFF;
         hx = hx / 3 + B2;
     } else {
@@ -52,7 +52,7 @@ fn cbrt32(x: f32) f32 {
     u |= hx;
 
     // first step newton to 16 bits
-    var t: f64 = @bitCast(f32, u);
+    var t: f64 = @as(f32, @bitCast(u));
     var r: f64 = t * t * t;
     t = t * (@as(f64, x) + x + r) / (x + r + r);
 
@@ -60,7 +60,7 @@ fn cbrt32(x: f32) f32 {
     r = t * t * t;
     t = t * (@as(f64, x) + x + r) / (x + r + r);
 
-    return @floatCast(f32, t);
+    return @as(f32, @floatCast(t));
 }
 
 fn cbrt64(x: f64) f64 {
@@ -74,8 +74,8 @@ fn cbrt64(x: f64) f64 {
     const P3: f64 = -0.758397934778766047437;
     const P4: f64 = 0.145996192886612446982;
 
-    var u = @bitCast(u64, x);
-    var hx = @intCast(u32, u >> 32) & 0x7FFFFFFF;
+    var u = @as(u64, @bitCast(x));
+    var hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF;
 
     // cbrt(nan, inf) = itself
     if (hx >= 0x7FF00000) {
@@ -84,8 +84,8 @@ fn cbrt64(x: f64) f64 {
 
     // cbrt to ~5bits
     if (hx < 0x00100000) {
-        u = @bitCast(u64, x * 0x1.0p54);
-        hx = @intCast(u32, u >> 32) & 0x7FFFFFFF;
+        u = @as(u64, @bitCast(x * 0x1.0p54));
+        hx = @as(u32, @intCast(u >> 32)) & 0x7FFFFFFF;
 
         // cbrt(0) is itself
         if (hx == 0) {
@@ -98,7 +98,7 @@ fn cbrt64(x: f64) f64 {
 
     u &= 1 << 63;
     u |= @as(u64, hx) << 32;
-    var t = @bitCast(f64, u);
+    var t = @as(f64, @bitCast(u));
 
     // cbrt to 23 bits
     // cbrt(x) = t * cbrt(x / t^3) ~= t * P(t^3 / x)
@@ -106,9 +106,9 @@ fn cbrt64(x: f64) f64 {
     t = t * ((P0 + r * (P1 + r * P2)) + ((r * r) * r) * (P3 + r * P4));
 
     // Round t away from 0 to 23 bits
-    u = @bitCast(u64, t);
+    u = @as(u64, @bitCast(t));
     u = (u + 0x80000000) & 0xFFFFFFFFC0000000;
-    t = @bitCast(f64, u);
+    t = @as(f64, @bitCast(u));
 
     // one step newton to 53 bits
     const s = t * t;
lib/std/math/copysign.zig
@@ -7,9 +7,9 @@ pub fn copysign(magnitude: anytype, sign: @TypeOf(magnitude)) @TypeOf(magnitude)
     const T = @TypeOf(magnitude);
     const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
     const sign_bit_mask = @as(TBits, 1) << (@bitSizeOf(T) - 1);
-    const mag = @bitCast(TBits, magnitude) & ~sign_bit_mask;
-    const sgn = @bitCast(TBits, sign) & sign_bit_mask;
-    return @bitCast(T, mag | sgn);
+    const mag = @as(TBits, @bitCast(magnitude)) & ~sign_bit_mask;
+    const sgn = @as(TBits, @bitCast(sign)) & sign_bit_mask;
+    return @as(T, @bitCast(mag | sgn));
 }
 
 test "math.copysign" {
lib/std/math/cosh.zig
@@ -29,9 +29,9 @@ pub fn cosh(x: anytype) @TypeOf(x) {
 //         = 1 + 0.5 * (exp(x) - 1) * (exp(x) - 1) / exp(x)
 //         = 1 + (x * x) / 2 + o(x^4)
 fn cosh32(x: f32) f32 {
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     const ux = u & 0x7FFFFFFF;
-    const ax = @bitCast(f32, ux);
+    const ax = @as(f32, @bitCast(ux));
 
     // |x| < log(2)
     if (ux < 0x3F317217) {
@@ -54,9 +54,9 @@ fn cosh32(x: f32) f32 {
 }
 
 fn cosh64(x: f64) f64 {
-    const u = @bitCast(u64, x);
-    const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1);
-    const ax = @bitCast(f64, u & (maxInt(u64) >> 1));
+    const u = @as(u64, @bitCast(x));
+    const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
+    const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));
 
     // TODO: Shouldn't need this explicit check.
     if (x == 0.0) {
lib/std/math/expm1.zig
@@ -38,7 +38,7 @@ fn expm1_32(x_: f32) f32 {
     const Q2: f32 = 1.5807170421e-3;
 
     var x = x_;
-    const ux = @bitCast(u32, x);
+    const ux = @as(u32, @bitCast(x));
     const hx = ux & 0x7FFFFFFF;
     const sign = hx >> 31;
 
@@ -88,8 +88,8 @@ fn expm1_32(x_: f32) f32 {
                 kf += 0.5;
             }
 
-            k = @intFromFloat(i32, kf);
-            const t = @floatFromInt(f32, k);
+            k = @as(i32, @intFromFloat(kf));
+            const t = @as(f32, @floatFromInt(k));
             hi = x - t * ln2_hi;
             lo = t * ln2_lo;
         }
@@ -133,7 +133,7 @@ fn expm1_32(x_: f32) f32 {
         }
     }
 
-    const twopk = @bitCast(f32, @intCast(u32, (0x7F +% k) << 23));
+    const twopk = @as(f32, @bitCast(@as(u32, @intCast((0x7F +% k) << 23))));
 
     if (k < 0 or k > 56) {
         var y = x - e + 1.0;
@@ -146,7 +146,7 @@ fn expm1_32(x_: f32) f32 {
         return y - 1.0;
     }
 
-    const uf = @bitCast(f32, @intCast(u32, 0x7F -% k) << 23);
+    const uf = @as(f32, @bitCast(@as(u32, @intCast(0x7F -% k)) << 23));
     if (k < 23) {
         return (x - e + (1 - uf)) * twopk;
     } else {
@@ -169,8 +169,8 @@ fn expm1_64(x_: f64) f64 {
     const Q5: f64 = -2.01099218183624371326e-07;
 
     var x = x_;
-    const ux = @bitCast(u64, x);
-    const hx = @intCast(u32, ux >> 32) & 0x7FFFFFFF;
+    const ux = @as(u64, @bitCast(x));
+    const hx = @as(u32, @intCast(ux >> 32)) & 0x7FFFFFFF;
     const sign = ux >> 63;
 
     if (math.isNegativeInf(x)) {
@@ -219,8 +219,8 @@ fn expm1_64(x_: f64) f64 {
                 kf += 0.5;
             }
 
-            k = @intFromFloat(i32, kf);
-            const t = @floatFromInt(f64, k);
+            k = @as(i32, @intFromFloat(kf));
+            const t = @as(f64, @floatFromInt(k));
             hi = x - t * ln2_hi;
             lo = t * ln2_lo;
         }
@@ -231,7 +231,7 @@ fn expm1_64(x_: f64) f64 {
     // |x| < 2^(-54)
     else if (hx < 0x3C900000) {
         if (hx < 0x00100000) {
-            math.doNotOptimizeAway(@floatCast(f32, x));
+            math.doNotOptimizeAway(@as(f32, @floatCast(x)));
         }
         return x;
     } else {
@@ -264,7 +264,7 @@ fn expm1_64(x_: f64) f64 {
         }
     }
 
-    const twopk = @bitCast(f64, @intCast(u64, 0x3FF +% k) << 52);
+    const twopk = @as(f64, @bitCast(@as(u64, @intCast(0x3FF +% k)) << 52));
 
     if (k < 0 or k > 56) {
         var y = x - e + 1.0;
@@ -277,7 +277,7 @@ fn expm1_64(x_: f64) f64 {
         return y - 1.0;
     }
 
-    const uf = @bitCast(f64, @intCast(u64, 0x3FF -% k) << 52);
+    const uf = @as(f64, @bitCast(@as(u64, @intCast(0x3FF -% k)) << 52));
     if (k < 20) {
         return (x - e + (1 - uf)) * twopk;
     } else {
lib/std/math/expo2.zig
@@ -21,7 +21,7 @@ fn expo2f(x: f32) f32 {
     const kln2 = 0x1.45C778p+7;
 
     const u = (0x7F + k / 2) << 23;
-    const scale = @bitCast(f32, u);
+    const scale = @as(f32, @bitCast(u));
     return @exp(x - kln2) * scale * scale;
 }
 
@@ -30,6 +30,6 @@ fn expo2d(x: f64) f64 {
     const kln2 = 0x1.62066151ADD8BP+10;
 
     const u = (0x3FF + k / 2) << 20;
-    const scale = @bitCast(f64, @as(u64, u) << 32);
+    const scale = @as(f64, @bitCast(@as(u64, u) << 32));
     return @exp(x - kln2) * scale * scale;
 }
lib/std/math/float.zig
@@ -11,7 +11,7 @@ inline fn mantissaOne(comptime T: type) comptime_int {
 inline fn reconstructFloat(comptime T: type, comptime exponent: comptime_int, comptime mantissa: comptime_int) T {
     const TBits = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(T) } });
     const biased_exponent = @as(TBits, exponent + floatExponentMax(T));
-    return @bitCast(T, (biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa));
+    return @as(T, @bitCast((biased_exponent << floatMantissaBits(T)) | @as(TBits, mantissa)));
 }
 
 /// Returns the number of bits in the exponent of floating point type T.
lib/std/math/frexp.zig
@@ -38,8 +38,8 @@ pub fn frexp(x: anytype) Frexp(@TypeOf(x)) {
 fn frexp32(x: f32) Frexp(f32) {
     var result: Frexp(f32) = undefined;
 
-    var y = @bitCast(u32, x);
-    const e = @intCast(i32, y >> 23) & 0xFF;
+    var y = @as(u32, @bitCast(x));
+    const e = @as(i32, @intCast(y >> 23)) & 0xFF;
 
     if (e == 0) {
         if (x != 0) {
@@ -68,15 +68,15 @@ fn frexp32(x: f32) Frexp(f32) {
     result.exponent = e - 0x7E;
     y &= 0x807FFFFF;
     y |= 0x3F000000;
-    result.significand = @bitCast(f32, y);
+    result.significand = @as(f32, @bitCast(y));
     return result;
 }
 
 fn frexp64(x: f64) Frexp(f64) {
     var result: Frexp(f64) = undefined;
 
-    var y = @bitCast(u64, x);
-    const e = @intCast(i32, y >> 52) & 0x7FF;
+    var y = @as(u64, @bitCast(x));
+    const e = @as(i32, @intCast(y >> 52)) & 0x7FF;
 
     if (e == 0) {
         if (x != 0) {
@@ -105,15 +105,15 @@ fn frexp64(x: f64) Frexp(f64) {
     result.exponent = e - 0x3FE;
     y &= 0x800FFFFFFFFFFFFF;
     y |= 0x3FE0000000000000;
-    result.significand = @bitCast(f64, y);
+    result.significand = @as(f64, @bitCast(y));
     return result;
 }
 
 fn frexp128(x: f128) Frexp(f128) {
     var result: Frexp(f128) = undefined;
 
-    var y = @bitCast(u128, x);
-    const e = @intCast(i32, y >> 112) & 0x7FFF;
+    var y = @as(u128, @bitCast(x));
+    const e = @as(i32, @intCast(y >> 112)) & 0x7FFF;
 
     if (e == 0) {
         if (x != 0) {
@@ -142,7 +142,7 @@ fn frexp128(x: f128) Frexp(f128) {
     result.exponent = e - 0x3FFE;
     y &= 0x8000FFFFFFFFFFFFFFFFFFFFFFFFFFFF;
     y |= 0x3FFE0000000000000000000000000000;
-    result.significand = @bitCast(f128, y);
+    result.significand = @as(f128, @bitCast(y));
     return result;
 }
 
lib/std/math/hypot.zig
@@ -25,8 +25,8 @@ pub fn hypot(comptime T: type, x: T, y: T) T {
 }
 
 fn hypot32(x: f32, y: f32) f32 {
-    var ux = @bitCast(u32, x);
-    var uy = @bitCast(u32, y);
+    var ux = @as(u32, @bitCast(x));
+    var uy = @as(u32, @bitCast(y));
 
     ux &= maxInt(u32) >> 1;
     uy &= maxInt(u32) >> 1;
@@ -36,8 +36,8 @@ fn hypot32(x: f32, y: f32) f32 {
         uy = tmp;
     }
 
-    var xx = @bitCast(f32, ux);
-    var yy = @bitCast(f32, uy);
+    var xx = @as(f32, @bitCast(ux));
+    var yy = @as(f32, @bitCast(uy));
     if (uy == 0xFF << 23) {
         return yy;
     }
@@ -56,7 +56,7 @@ fn hypot32(x: f32, y: f32) f32 {
         yy *= 0x1.0p-90;
     }
 
-    return z * @sqrt(@floatCast(f32, @as(f64, x) * x + @as(f64, y) * y));
+    return z * @sqrt(@as(f32, @floatCast(@as(f64, x) * x + @as(f64, y) * y)));
 }
 
 fn sq(hi: *f64, lo: *f64, x: f64) void {
@@ -69,8 +69,8 @@ fn sq(hi: *f64, lo: *f64, x: f64) void {
 }
 
 fn hypot64(x: f64, y: f64) f64 {
-    var ux = @bitCast(u64, x);
-    var uy = @bitCast(u64, y);
+    var ux = @as(u64, @bitCast(x));
+    var uy = @as(u64, @bitCast(y));
 
     ux &= maxInt(u64) >> 1;
     uy &= maxInt(u64) >> 1;
@@ -82,8 +82,8 @@ fn hypot64(x: f64, y: f64) f64 {
 
     const ex = ux >> 52;
     const ey = uy >> 52;
-    var xx = @bitCast(f64, ux);
-    var yy = @bitCast(f64, uy);
+    var xx = @as(f64, @bitCast(ux));
+    var yy = @as(f64, @bitCast(uy));
 
     // hypot(inf, nan) == inf
     if (ey == 0x7FF) {
lib/std/math/ilogb.zig
@@ -38,8 +38,8 @@ fn ilogbX(comptime T: type, x: T) i32 {
 
     const absMask = signBit - 1;
 
-    var u = @bitCast(Z, x) & absMask;
-    var e = @intCast(i32, u >> significandBits);
+    var u = @as(Z, @bitCast(x)) & absMask;
+    var e = @as(i32, @intCast(u >> significandBits));
 
     if (e == 0) {
         if (u == 0) {
@@ -49,12 +49,12 @@ fn ilogbX(comptime T: type, x: T) i32 {
 
         // offset sign bit, exponent bits, and integer bit (if present) + bias
         const offset = 1 + exponentBits + @as(comptime_int, @intFromBool(T == f80)) - exponentBias;
-        return offset - @intCast(i32, @clz(u));
+        return offset - @as(i32, @intCast(@clz(u)));
     }
 
     if (e == maxExponent) {
         math.raiseInvalid();
-        if (u > @bitCast(Z, math.inf(T))) {
+        if (u > @as(Z, @bitCast(math.inf(T)))) {
             return fp_ilogbnan; // u is a NaN
         } else return maxInt(i32);
     }
lib/std/math/isfinite.zig
@@ -7,7 +7,7 @@ pub fn isFinite(x: anytype) bool {
     const T = @TypeOf(x);
     const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
     const remove_sign = ~@as(TBits, 0) >> 1;
-    return @bitCast(TBits, x) & remove_sign < @bitCast(TBits, math.inf(T));
+    return @as(TBits, @bitCast(x)) & remove_sign < @as(TBits, @bitCast(math.inf(T)));
 }
 
 test "math.isFinite" {
lib/std/math/isinf.zig
@@ -7,7 +7,7 @@ pub inline fn isInf(x: anytype) bool {
     const T = @TypeOf(x);
     const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
     const remove_sign = ~@as(TBits, 0) >> 1;
-    return @bitCast(TBits, x) & remove_sign == @bitCast(TBits, math.inf(T));
+    return @as(TBits, @bitCast(x)) & remove_sign == @as(TBits, @bitCast(math.inf(T)));
 }
 
 /// Returns whether x is an infinity with a positive sign.
lib/std/math/isnormal.zig
@@ -15,7 +15,7 @@ pub fn isNormal(x: anytype) bool {
     // The sign bit is removed because all ones would overflow into it.
     // For f80, even though it has an explicit integer part stored,
     // the exponent effectively takes priority if mismatching.
-    const value = @bitCast(TBits, x) +% increment_exp;
+    const value = @as(TBits, @bitCast(x)) +% increment_exp;
     return value & remove_sign >= (increment_exp << 1);
 }
 
@@ -35,7 +35,7 @@ test "math.isNormal" {
         try expect(!isNormal(@as(T, math.floatTrueMin(T))));
 
         // largest subnormal
-        try expect(!isNormal(@bitCast(T, ~(~@as(TBits, 0) << math.floatFractionalBits(T)))));
+        try expect(!isNormal(@as(T, @bitCast(~(~@as(TBits, 0) << math.floatFractionalBits(T))))));
 
         // non-finite numbers
         try expect(!isNormal(-math.inf(T)));
@@ -43,6 +43,6 @@ test "math.isNormal" {
         try expect(!isNormal(math.nan(T)));
 
         // overflow edge-case (described in implementation, also see #10133)
-        try expect(!isNormal(@bitCast(T, ~@as(TBits, 0))));
+        try expect(!isNormal(@as(T, @bitCast(~@as(TBits, 0)))));
     }
 }
lib/std/math/ldexp.zig
@@ -16,53 +16,53 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) {
     const max_biased_exponent = 2 * math.floatExponentMax(T);
     const mantissa_mask = @as(TBits, (1 << mantissa_bits) - 1);
 
-    const repr = @bitCast(TBits, x);
+    const repr = @as(TBits, @bitCast(x));
     const sign_bit = repr & (1 << (exponent_bits + mantissa_bits));
 
     if (math.isNan(x) or !math.isFinite(x))
         return x;
 
-    var exponent: i32 = @intCast(i32, (repr << 1) >> (mantissa_bits + 1));
+    var exponent: i32 = @as(i32, @intCast((repr << 1) >> (mantissa_bits + 1)));
     if (exponent == 0)
         exponent += (@as(i32, exponent_bits) + @intFromBool(T == f80)) - @clz(repr << 1);
 
     if (n >= 0) {
         if (n > max_biased_exponent - exponent) {
             // Overflow. Return +/- inf
-            return @bitCast(T, @bitCast(TBits, math.inf(T)) | sign_bit);
+            return @as(T, @bitCast(@as(TBits, @bitCast(math.inf(T))) | sign_bit));
         } else if (exponent + n <= 0) {
             // Result is subnormal
-            return @bitCast(T, (repr << @intCast(Log2Int(TBits), n)) | sign_bit);
+            return @as(T, @bitCast((repr << @as(Log2Int(TBits), @intCast(n))) | sign_bit));
         } else if (exponent <= 0) {
             // Result is normal, but needs shifting
-            var result = @intCast(TBits, n + exponent) << mantissa_bits;
-            result |= (repr << @intCast(Log2Int(TBits), 1 - exponent)) & mantissa_mask;
-            return @bitCast(T, result | sign_bit);
+            var result = @as(TBits, @intCast(n + exponent)) << mantissa_bits;
+            result |= (repr << @as(Log2Int(TBits), @intCast(1 - exponent))) & mantissa_mask;
+            return @as(T, @bitCast(result | sign_bit));
         }
 
         // Result needs no shifting
-        return @bitCast(T, repr + (@intCast(TBits, n) << mantissa_bits));
+        return @as(T, @bitCast(repr + (@as(TBits, @intCast(n)) << mantissa_bits)));
     } else {
         if (n <= -exponent) {
             if (n < -(mantissa_bits + exponent))
-                return @bitCast(T, sign_bit); // Severe underflow. Return +/- 0
+                return @as(T, @bitCast(sign_bit)); // Severe underflow. Return +/- 0
 
             // Result underflowed, we need to shift and round
-            const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1));
+            const shift = @as(Log2Int(TBits), @intCast(@min(-n, -(exponent + n) + 1)));
             const exact_tie: bool = @ctz(repr) == shift - 1;
             var result = repr & mantissa_mask;
 
             if (T != f80) // Include integer bit
                 result |= @as(TBits, @intFromBool(exponent > 0)) << fractional_bits;
-            result = @intCast(TBits, (result >> (shift - 1)));
+            result = @as(TBits, @intCast((result >> (shift - 1))));
 
             // Round result, including round-to-even for exact ties
             result = ((result + 1) >> 1) & ~@as(TBits, @intFromBool(exact_tie));
-            return @bitCast(T, result | sign_bit);
+            return @as(T, @bitCast(result | sign_bit));
         }
 
         // Result is exact, and needs no shifting
-        return @bitCast(T, repr - (@intCast(TBits, -n) << mantissa_bits));
+        return @as(T, @bitCast(repr - (@as(TBits, @intCast(-n)) << mantissa_bits)));
     }
 }
 
@@ -105,8 +105,8 @@ test "math.ldexp" {
         // Multiplications might flush the denormals to zero, esp. at
         // runtime, so we manually construct the constants here instead.
         const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
-        const EightTimesTrueMin = @bitCast(T, @as(Z, 8));
-        const TwoTimesTrueMin = @bitCast(T, @as(Z, 2));
+        const EightTimesTrueMin = @as(T, @bitCast(@as(Z, 8)));
+        const TwoTimesTrueMin = @as(T, @bitCast(@as(Z, 2)));
 
         // subnormals -> subnormals
         try expect(ldexp(math.floatTrueMin(T), 3) == EightTimesTrueMin);
lib/std/math/log.zig
@@ -30,12 +30,12 @@ pub fn log(comptime T: type, base: T, x: T) T {
         // TODO implement integer log without using float math
         .Int => |IntType| switch (IntType.signedness) {
             .signed => @compileError("log not implemented for signed integers"),
-            .unsigned => return @intFromFloat(T, @floor(@log(@floatFromInt(f64, x)) / @log(float_base))),
+            .unsigned => return @as(T, @intFromFloat(@floor(@log(@as(f64, @floatFromInt(x))) / @log(float_base)))),
         },
 
         .Float => {
             switch (T) {
-                f32 => return @floatCast(f32, @log(@as(f64, x)) / @log(float_base)),
+                f32 => return @as(f32, @floatCast(@log(@as(f64, x)) / @log(float_base))),
                 f64 => return @log(x) / @log(float_base),
                 else => @compileError("log not implemented for " ++ @typeName(T)),
             }
lib/std/math/log10.zig
@@ -49,9 +49,9 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) {
     const bit_size = @typeInfo(T).Int.bits;
 
     if (bit_size <= 8) {
-        return @intCast(OutT, log10_int_u8(x));
+        return @as(OutT, @intCast(log10_int_u8(x)));
     } else if (bit_size <= 16) {
-        return @intCast(OutT, less_than_5(x));
+        return @as(OutT, @intCast(less_than_5(x)));
     }
 
     var val = x;
@@ -71,7 +71,7 @@ pub fn log10_int(x: anytype) Log2Int(@TypeOf(x)) {
         log += 5;
     }
 
-    return @intCast(OutT, log + less_than_5(@intCast(u32, val)));
+    return @as(OutT, @intCast(log + less_than_5(@as(u32, @intCast(val)))));
 }
 
 fn pow10(comptime y: comptime_int) comptime_int {
@@ -134,7 +134,7 @@ inline fn less_than_5(x: u32) u32 {
 }
 
 fn oldlog10(x: anytype) u8 {
-    return @intFromFloat(u8, @log10(@floatFromInt(f64, x)));
+    return @as(u8, @intFromFloat(@log10(@as(f64, @floatFromInt(x)))));
 }
 
 test "oldlog10 doesn't work" {
@@ -158,7 +158,7 @@ test "log10_int vs old implementation" {
     inline for (int_types) |T| {
         const last = @min(maxInt(T), 100_000);
         for (1..last) |i| {
-            const x = @intCast(T, i);
+            const x = @as(T, @intCast(i));
             try testing.expectEqual(oldlog10(x), log10_int(x));
         }
 
@@ -185,10 +185,10 @@ test "log10_int close to powers of 10" {
         try testing.expectEqual(expected_max_ilog, log10_int(max_val));
 
         for (0..(expected_max_ilog + 1)) |idx| {
-            const i = @intCast(T, idx);
+            const i = @as(T, @intCast(idx));
             const p: T = try math.powi(T, 10, i);
 
-            const b = @intCast(Log2Int(T), i);
+            const b = @as(Log2Int(T), @intCast(i));
 
             if (p >= 10) {
                 try testing.expectEqual(b - 1, log10_int(p - 9));
lib/std/math/log1p.zig
@@ -33,7 +33,7 @@ fn log1p_32(x: f32) f32 {
     const Lg3: f32 = 0x91e9ee.0p-25;
     const Lg4: f32 = 0xf89e26.0p-26;
 
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     var ix = u;
     var k: i32 = 1;
     var f: f32 = undefined;
@@ -72,9 +72,9 @@ fn log1p_32(x: f32) f32 {
 
     if (k != 0) {
         const uf = 1 + x;
-        var iu = @bitCast(u32, uf);
+        var iu = @as(u32, @bitCast(uf));
         iu += 0x3F800000 - 0x3F3504F3;
-        k = @intCast(i32, iu >> 23) - 0x7F;
+        k = @as(i32, @intCast(iu >> 23)) - 0x7F;
 
         // correction to avoid underflow in c / u
         if (k < 25) {
@@ -86,7 +86,7 @@ fn log1p_32(x: f32) f32 {
 
         // u into [sqrt(2)/2, sqrt(2)]
         iu = (iu & 0x007FFFFF) + 0x3F3504F3;
-        f = @bitCast(f32, iu) - 1;
+        f = @as(f32, @bitCast(iu)) - 1;
     }
 
     const s = f / (2.0 + f);
@@ -96,7 +96,7 @@ fn log1p_32(x: f32) f32 {
     const t2 = z * (Lg1 + w * Lg3);
     const R = t2 + t1;
     const hfsq = 0.5 * f * f;
-    const dk = @floatFromInt(f32, k);
+    const dk = @as(f32, @floatFromInt(k));
 
     return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
 }
@@ -112,8 +112,8 @@ fn log1p_64(x: f64) f64 {
     const Lg6: f64 = 1.531383769920937332e-01;
     const Lg7: f64 = 1.479819860511658591e-01;
 
-    var ix = @bitCast(u64, x);
-    var hx = @intCast(u32, ix >> 32);
+    var ix = @as(u64, @bitCast(x));
+    var hx = @as(u32, @intCast(ix >> 32));
     var k: i32 = 1;
     var c: f64 = undefined;
     var f: f64 = undefined;
@@ -150,10 +150,10 @@ fn log1p_64(x: f64) f64 {
 
     if (k != 0) {
         const uf = 1 + x;
-        const hu = @bitCast(u64, uf);
-        var iu = @intCast(u32, hu >> 32);
+        const hu = @as(u64, @bitCast(uf));
+        var iu = @as(u32, @intCast(hu >> 32));
         iu += 0x3FF00000 - 0x3FE6A09E;
-        k = @intCast(i32, iu >> 20) - 0x3FF;
+        k = @as(i32, @intCast(iu >> 20)) - 0x3FF;
 
         // correction to avoid underflow in c / u
         if (k < 54) {
@@ -166,7 +166,7 @@ fn log1p_64(x: f64) f64 {
         // u into [sqrt(2)/2, sqrt(2)]
         iu = (iu & 0x000FFFFF) + 0x3FE6A09E;
         const iq = (@as(u64, iu) << 32) | (hu & 0xFFFFFFFF);
-        f = @bitCast(f64, iq) - 1;
+        f = @as(f64, @bitCast(iq)) - 1;
     }
 
     const hfsq = 0.5 * f * f;
@@ -176,7 +176,7 @@ fn log1p_64(x: f64) f64 {
     const t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
     const t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
     const R = t2 + t1;
-    const dk = @floatFromInt(f64, k);
+    const dk = @as(f64, @floatFromInt(k));
 
     return s * (hfsq + R) + (dk * ln2_lo + c) - hfsq + f + dk * ln2_hi;
 }
lib/std/math/modf.zig
@@ -37,8 +37,8 @@ pub fn modf(x: anytype) modf_result(@TypeOf(x)) {
 fn modf32(x: f32) modf32_result {
     var result: modf32_result = undefined;
 
-    const u = @bitCast(u32, x);
-    const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
+    const u = @as(u32, @bitCast(x));
+    const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
     const us = u & 0x80000000;
 
     // TODO: Shouldn't need this.
@@ -54,26 +54,26 @@ fn modf32(x: f32) modf32_result {
         if (e == 0x80 and u << 9 != 0) { // nan
             result.fpart = x;
         } else {
-            result.fpart = @bitCast(f32, us);
+            result.fpart = @as(f32, @bitCast(us));
         }
         return result;
     }
 
     // no integral part
     if (e < 0) {
-        result.ipart = @bitCast(f32, us);
+        result.ipart = @as(f32, @bitCast(us));
         result.fpart = x;
         return result;
     }
 
-    const mask = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
+    const mask = @as(u32, 0x007FFFFF) >> @as(u5, @intCast(e));
     if (u & mask == 0) {
         result.ipart = x;
-        result.fpart = @bitCast(f32, us);
+        result.fpart = @as(f32, @bitCast(us));
         return result;
     }
 
-    const uf = @bitCast(f32, u & ~mask);
+    const uf = @as(f32, @bitCast(u & ~mask));
     result.ipart = uf;
     result.fpart = x - uf;
     return result;
@@ -82,8 +82,8 @@ fn modf32(x: f32) modf32_result {
 fn modf64(x: f64) modf64_result {
     var result: modf64_result = undefined;
 
-    const u = @bitCast(u64, x);
-    const e = @intCast(i32, (u >> 52) & 0x7FF) - 0x3FF;
+    const u = @as(u64, @bitCast(x));
+    const e = @as(i32, @intCast((u >> 52) & 0x7FF)) - 0x3FF;
     const us = u & (1 << 63);
 
     if (math.isInf(x)) {
@@ -98,26 +98,26 @@ fn modf64(x: f64) modf64_result {
         if (e == 0x400 and u << 12 != 0) { // nan
             result.fpart = x;
         } else {
-            result.fpart = @bitCast(f64, us);
+            result.fpart = @as(f64, @bitCast(us));
         }
         return result;
     }
 
     // no integral part
     if (e < 0) {
-        result.ipart = @bitCast(f64, us);
+        result.ipart = @as(f64, @bitCast(us));
         result.fpart = x;
         return result;
     }
 
-    const mask = @as(u64, maxInt(u64) >> 12) >> @intCast(u6, e);
+    const mask = @as(u64, maxInt(u64) >> 12) >> @as(u6, @intCast(e));
     if (u & mask == 0) {
         result.ipart = x;
-        result.fpart = @bitCast(f64, us);
+        result.fpart = @as(f64, @bitCast(us));
         return result;
     }
 
-    const uf = @bitCast(f64, u & ~mask);
+    const uf = @as(f64, @bitCast(u & ~mask));
     result.ipart = uf;
     result.fpart = x - uf;
     return result;
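
The `u5`/`u6` targets in the `modf` hunks above are shift-amount types: a 64-bit value only admits shift counts 0..63, so the right-hand side of `>>` must be a `u6` (`math.Log2Int(u64)`), and the migrated cast keeps that narrow target. A sketch assuming, as `modf64` guarantees, that `e` is already in range:

    const maxInt = @import("std").math.maxInt;

    fn fractionMask(e: i32) u64 {
        // Shift counts for a u64 must fit in Log2Int(u64) == u6, so the
        // @intCast target stays u6 after the rewrite.
        return @as(u64, maxInt(u64) >> 12) >> @as(u6, @intCast(e));
    }
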
lib/std/math/pow.zig
@@ -144,7 +144,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
     var xe = r2.exponent;
     var x1 = r2.significand;
 
-    var i = @intFromFloat(std.meta.Int(.signed, @typeInfo(T).Float.bits), yi);
+    var i = @as(std.meta.Int(.signed, @typeInfo(T).Float.bits), @intFromFloat(yi));
     while (i != 0) : (i >>= 1) {
         const overflow_shift = math.floatExponentBits(T) + 1;
         if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {
@@ -179,7 +179,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
 
 fn isOddInteger(x: f64) bool {
     const r = math.modf(x);
-    return r.fpart == 0.0 and @intFromFloat(i64, r.ipart) & 1 == 1;
+    return r.fpart == 0.0 and @as(i64, @intFromFloat(r.ipart)) & 1 == 1;
 }
 
 test "math.pow" {
lib/std/math/signbit.zig
@@ -6,7 +6,7 @@ const expect = std.testing.expect;
 pub fn signbit(x: anytype) bool {
     const T = @TypeOf(x);
     const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
-    return @bitCast(TBits, x) >> (@bitSizeOf(T) - 1) != 0;
+    return @as(TBits, @bitCast(x)) >> (@bitSizeOf(T) - 1) != 0;
 }
 
 test "math.signbit" {
lib/std/math/sinh.zig
@@ -29,9 +29,9 @@ pub fn sinh(x: anytype) @TypeOf(x) {
 //         = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2
 //         = x + x^3 / 6 + o(x^5)
 fn sinh32(x: f32) f32 {
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     const ux = u & 0x7FFFFFFF;
-    const ax = @bitCast(f32, ux);
+    const ax = @as(f32, @bitCast(ux));
 
     if (x == 0.0 or math.isNan(x)) {
         return x;
@@ -60,9 +60,9 @@ fn sinh32(x: f32) f32 {
 }
 
 fn sinh64(x: f64) f64 {
-    const u = @bitCast(u64, x);
-    const w = @intCast(u32, u >> 32) & (maxInt(u32) >> 1);
-    const ax = @bitCast(f64, u & (maxInt(u64) >> 1));
+    const u = @as(u64, @bitCast(x));
+    const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
+    const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));
 
     if (x == 0.0 or math.isNan(x)) {
         return x;
lib/std/math/sqrt.zig
@@ -57,7 +57,7 @@ fn sqrt_int(comptime T: type, value: T) Sqrt(T) {
             one >>= 2;
         }
 
-        return @intCast(Sqrt(T), res);
+        return @as(Sqrt(T), @intCast(res));
     }
 }
 
lib/std/math/tanh.zig
@@ -29,9 +29,9 @@ pub fn tanh(x: anytype) @TypeOf(x) {
 //         = (exp(2x) - 1) / (exp(2x) - 1 + 2)
 //         = (1 - exp(-2x)) / (exp(-2x) - 1 + 2)
 fn tanh32(x: f32) f32 {
-    const u = @bitCast(u32, x);
+    const u = @as(u32, @bitCast(x));
     const ux = u & 0x7FFFFFFF;
-    const ax = @bitCast(f32, ux);
+    const ax = @as(f32, @bitCast(ux));
     const sign = (u >> 31) != 0;
 
     var t: f32 = undefined;
@@ -66,10 +66,10 @@ fn tanh32(x: f32) f32 {
 }
 
 fn tanh64(x: f64) f64 {
-    const u = @bitCast(u64, x);
+    const u = @as(u64, @bitCast(x));
     const ux = u & 0x7FFFFFFFFFFFFFFF;
-    const w = @intCast(u32, ux >> 32);
-    const ax = @bitCast(f64, ux);
+    const w = @as(u32, @intCast(ux >> 32));
+    const ax = @as(f64, @bitCast(ux));
     const sign = (u >> 63) != 0;
 
     var t: f64 = undefined;
@@ -96,7 +96,7 @@ fn tanh64(x: f64) f64 {
     }
     // |x| is subnormal
     else {
-        math.doNotOptimizeAway(@floatCast(f32, ax));
+        math.doNotOptimizeAway(@as(f32, @floatCast(ax)));
         t = ax;
     }
 
lib/std/mem/Allocator.zig
@@ -101,7 +101,7 @@ pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr:
 /// Returns a pointer to undefined memory.
 /// Call `destroy` with the result to free the memory.
 pub fn create(self: Allocator, comptime T: type) Error!*T {
-    if (@sizeOf(T) == 0) return @ptrFromInt(*T, math.maxInt(usize));
+    if (@sizeOf(T) == 0) return @as(*T, @ptrFromInt(math.maxInt(usize)));
     const slice = try self.allocAdvancedWithRetAddr(T, null, 1, @returnAddress());
     return &slice[0];
 }
@@ -112,7 +112,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
     const info = @typeInfo(@TypeOf(ptr)).Pointer;
     const T = info.child;
     if (@sizeOf(T) == 0) return;
-    const non_const_ptr = @ptrCast([*]u8, @constCast(ptr));
+    const non_const_ptr = @as([*]u8, @ptrCast(@constCast(ptr)));
     self.rawFree(non_const_ptr[0..@sizeOf(T)], math.log2(info.alignment), @returnAddress());
 }
 
@@ -209,15 +209,15 @@ pub fn allocAdvancedWithRetAddr(
 
     if (n == 0) {
         const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a);
-        return @ptrFromInt([*]align(a) T, ptr)[0..0];
+        return @as([*]align(a) T, @ptrFromInt(ptr))[0..0];
     }
 
     const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
     const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory;
     // TODO: https://github.com/ziglang/zig/issues/4298
     @memset(byte_ptr[0..byte_count], undefined);
-    const byte_slice = byte_ptr[0..byte_count];
-    return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
+    const byte_slice: []align(a) u8 = @alignCast(byte_ptr[0..byte_count]);
+    return mem.bytesAsSlice(T, byte_slice);
 }
 
 /// Requests to modify the size of an allocation. It is guaranteed to not move
@@ -268,7 +268,7 @@ pub fn reallocAdvanced(
     if (new_n == 0) {
         self.free(old_mem);
         const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment);
-        return @ptrFromInt([*]align(Slice.alignment) T, ptr)[0..0];
+        return @as([*]align(Slice.alignment) T, @ptrFromInt(ptr))[0..0];
     }
 
     const old_byte_slice = mem.sliceAsBytes(old_mem);
@@ -276,7 +276,8 @@ pub fn reallocAdvanced(
     // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
     if (mem.isAligned(@intFromPtr(old_byte_slice.ptr), Slice.alignment)) {
         if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) {
-            return mem.bytesAsSlice(T, @alignCast(Slice.alignment, old_byte_slice.ptr[0..byte_count]));
+            const new_bytes: []align(Slice.alignment) u8 = @alignCast(old_byte_slice.ptr[0..byte_count]);
+            return mem.bytesAsSlice(T, new_bytes);
         }
     }
 
@@ -288,7 +289,8 @@ pub fn reallocAdvanced(
     @memset(old_byte_slice, undefined);
     self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);
 
-    return mem.bytesAsSlice(T, @alignCast(Slice.alignment, new_mem[0..byte_count]));
+    const new_bytes: []align(Slice.alignment) u8 = @alignCast(new_mem[0..byte_count]);
+    return mem.bytesAsSlice(T, new_bytes);
 }
 
 /// Free an array allocated with `alloc`. To free a single item,
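
Unlike the mechanical `@as(T, @bitCast(...))` wraps elsewhere, the `@alignCast` hunks above restructure the code: the new builtin takes no alignment argument and infers the target alignment from the result type, so each call site binds the cast to an explicitly annotated slice first. A reduced sketch of that pattern with a made-up alignment of 16:

    fn alignedView(bytes: []u8) []align(16) u8 {
        // Old two-argument form carried the alignment in the call:
        //     return @alignCast(16, bytes);
        // The new form takes only the operand; align(16) in the return
        // type (or an annotated local) supplies the alignment to infer.
        // Safety-checked: panics at runtime if bytes.ptr is misaligned.
        return @alignCast(bytes);
    }
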
lib/std/meta/trailer_flags.zig
@@ -72,7 +72,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
         pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: FieldValues) void {
             inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
                 if (@field(fields, field.name)) |value|
-                    self.set(p, @enumFromInt(FieldEnum, i), value);
+                    self.set(p, @as(FieldEnum, @enumFromInt(i)), value);
             }
         }
 
@@ -89,14 +89,14 @@ pub fn TrailerFlags(comptime Fields: type) type {
             if (@sizeOf(Field(field)) == 0)
                 return undefined;
             const off = self.offset(field);
-            return @ptrCast(*Field(field), @alignCast(@alignOf(Field(field)), p + off));
+            return @ptrCast(@alignCast(p + off));
         }
 
         pub fn ptrConst(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) *const Field(field) {
             if (@sizeOf(Field(field)) == 0)
                 return undefined;
             const off = self.offset(field);
-            return @ptrCast(*const Field(field), @alignCast(@alignOf(Field(field)), p + off));
+            return @ptrCast(@alignCast(p + off));
         }
 
         pub fn offset(self: Self, comptime field: FieldEnum) usize {
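
In `ptr`/`ptrConst` above, the migrated casts carry no `@as` at all: the `return` expression's type comes from the function's return type `*Field(field)`, and both `@ptrCast` and `@alignCast` infer through the chain. A standalone sketch with concrete types (`fieldPtr` is hypothetical, not from the file above):

    fn fieldPtr(p: [*]u8, off: usize) *u32 {
        // Both casts infer their result (*u32, hence align(4)) from the
        // return type, so the bare chain needs no @as wrapper.
        return @ptrCast(@alignCast(p + off));
    }
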
lib/std/meta/trait.zig
@@ -237,7 +237,7 @@ pub fn isManyItemPtr(comptime T: type) bool {
 
 test "isManyItemPtr" {
     const array = [_]u8{0} ** 10;
-    const mip = @ptrCast([*]const u8, &array[0]);
+    const mip = @as([*]const u8, @ptrCast(&array[0]));
     try testing.expect(isManyItemPtr(@TypeOf(mip)));
     try testing.expect(!isManyItemPtr(@TypeOf(array)));
     try testing.expect(!isManyItemPtr(@TypeOf(array[0..1])));
lib/std/os/linux/bpf/helpers.zig
@@ -11,147 +11,147 @@ const SkFullSock = @compileError("TODO missing os bits: SkFullSock");
 //
 // Note, these function signatures were created from documentation found in
 // '/usr/include/linux/bpf.h'
-pub const map_lookup_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, 1);
-pub const map_update_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, 2);
-pub const map_delete_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, 3);
-pub const probe_read = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 4);
-pub const ktime_get_ns = @ptrFromInt(*const fn () u64, 5);
-pub const trace_printk = @ptrFromInt(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, 6);
-pub const get_prandom_u32 = @ptrFromInt(*const fn () u32, 7);
-pub const get_smp_processor_id = @ptrFromInt(*const fn () u32, 8);
-pub const skb_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, 9);
-pub const l3_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, 10);
-pub const l4_csum_replace = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, 11);
-pub const tail_call = @ptrFromInt(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, 12);
-pub const clone_redirect = @ptrFromInt(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, 13);
-pub const get_current_pid_tgid = @ptrFromInt(*const fn () u64, 14);
-pub const get_current_uid_gid = @ptrFromInt(*const fn () u64, 15);
-pub const get_current_comm = @ptrFromInt(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, 16);
-pub const get_cgroup_classid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 17);
+pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1));
+pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2));
+pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3));
+pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4));
+pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5));
+pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6));
+pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7));
+pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8));
+pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9));
+pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10));
+pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11));
+pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12));
+pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13));
+pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14));
+pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15));
+pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16));
+pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17));
 // Note vlan_proto is big endian
-pub const skb_vlan_push = @ptrFromInt(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, 18);
-pub const skb_vlan_pop = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 19);
-pub const skb_get_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 20);
-pub const skb_set_tunnel_key = @ptrFromInt(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 21);
-pub const perf_event_read = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64) u64, 22);
-pub const redirect = @ptrFromInt(*const fn (ifindex: u32, flags: u64) c_long, 23);
-pub const get_route_realm = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 24);
-pub const perf_event_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 25);
-pub const skb_load_bytes = @ptrFromInt(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, 26);
-pub const get_stackid = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, 27);
+pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18));
+pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19));
+pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20));
+pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21));
+pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22));
+pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23));
+pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24));
+pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25));
+pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26));
+pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27));
 // from and to point to __be32
-pub const csum_diff = @ptrFromInt(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, 28);
-pub const skb_get_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 29);
-pub const skb_set_tunnel_opt = @ptrFromInt(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, 30);
+pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28));
+pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29));
+pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30));
 // proto is __be16
-pub const skb_change_proto = @ptrFromInt(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, 31);
-pub const skb_change_type = @ptrFromInt(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, 32);
-pub const skb_under_cgroup = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, 33);
-pub const get_hash_recalc = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 34);
-pub const get_current_task = @ptrFromInt(*const fn () u64, 35);
-pub const probe_write_user = @ptrFromInt(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, 36);
-pub const current_task_under_cgroup = @ptrFromInt(*const fn (map: *const kern.MapDef, index: u32) c_long, 37);
-pub const skb_change_tail = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 38);
-pub const skb_pull_data = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32) c_long, 39);
-pub const csum_update = @ptrFromInt(*const fn (skb: *kern.SkBuff, csum: u32) i64, 40);
-pub const set_hash_invalid = @ptrFromInt(*const fn (skb: *kern.SkBuff) void, 41);
-pub const get_numa_node_id = @ptrFromInt(*const fn () c_long, 42);
-pub const skb_change_head = @ptrFromInt(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 43);
-pub const xdp_adjust_head = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 44);
-pub const probe_read_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 45);
-pub const get_socket_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 46);
-pub const get_socket_uid = @ptrFromInt(*const fn (skb: *kern.SkBuff) u32, 47);
-pub const set_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, hash: u32) c_long, 48);
-pub const setsockopt = @ptrFromInt(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 49);
-pub const skb_adjust_room = @ptrFromInt(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, 50);
-pub const redirect_map = @ptrFromInt(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, 51);
-pub const sk_redirect_map = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, 52);
-pub const sock_map_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 53);
-pub const xdp_adjust_meta = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 54);
-pub const perf_event_read_value = @ptrFromInt(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, 55);
-pub const perf_prog_read_value = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, 56);
-pub const getsockopt = @ptrFromInt(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, 57);
-pub const override_return = @ptrFromInt(*const fn (regs: *PtRegs, rc: u64) c_long, 58);
-pub const sock_ops_cb_flags_set = @ptrFromInt(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, 59);
-pub const msg_redirect_map = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, 60);
-pub const msg_apply_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 61);
-pub const msg_cork_bytes = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 62);
-pub const msg_pull_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, 63);
-pub const bind = @ptrFromInt(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, 64);
-pub const xdp_adjust_tail = @ptrFromInt(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 65);
-pub const skb_get_xfrm_state = @ptrFromInt(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, 66);
-pub const get_stack = @ptrFromInt(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 67);
-pub const skb_load_bytes_relative = @ptrFromInt(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, 68);
-pub const fib_lookup = @ptrFromInt(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, 69);
-pub const sock_hash_update = @ptrFromInt(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 70);
-pub const msg_redirect_hash = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 71);
-pub const sk_redirect_hash = @ptrFromInt(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 72);
-pub const lwt_push_encap = @ptrFromInt(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, 73);
-pub const lwt_seg6_store_bytes = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, 74);
-pub const lwt_seg6_adjust_srh = @ptrFromInt(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, 75);
-pub const lwt_seg6_action = @ptrFromInt(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, 76);
-pub const rc_repeat = @ptrFromInt(*const fn (ctx: ?*anyopaque) c_long, 77);
-pub const rc_keydown = @ptrFromInt(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, 78);
-pub const skb_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff) u64, 79);
-pub const get_current_cgroup_id = @ptrFromInt(*const fn () u64, 80);
-pub const get_local_storage = @ptrFromInt(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, 81);
-pub const sk_select_reuseport = @ptrFromInt(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, 82);
-pub const skb_ancestor_cgroup_id = @ptrFromInt(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, 83);
-pub const sk_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 84);
-pub const sk_lookup_udp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 85);
-pub const sk_release = @ptrFromInt(*const fn (sock: *kern.Sock) c_long, 86);
-pub const map_push_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, 87);
-pub const map_pop_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 88);
-pub const map_peek_elem = @ptrFromInt(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, 89);
-pub const msg_push_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 90);
-pub const msg_pop_data = @ptrFromInt(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 91);
-pub const rc_pointer_rel = @ptrFromInt(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, 92);
-pub const spin_lock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 93);
-pub const spin_unlock = @ptrFromInt(*const fn (lock: *kern.SpinLock) c_long, 94);
-pub const sk_fullsock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*SkFullSock, 95);
-pub const tcp_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.TcpSock, 96);
-pub const skb_ecn_set_ce = @ptrFromInt(*const fn (skb: *kern.SkBuff) c_long, 97);
-pub const get_listener_sock = @ptrFromInt(*const fn (sk: *kern.Sock) ?*kern.Sock, 98);
-pub const skc_lookup_tcp = @ptrFromInt(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 99);
-pub const tcp_check_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, 100);
-pub const sysctl_get_name = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, 101);
-pub const sysctl_get_current_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 102);
-pub const sysctl_get_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 103);
-pub const sysctl_set_new_value = @ptrFromInt(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, 104);
-pub const strtol = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, 105);
-pub const strtoul = @ptrFromInt(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, 106);
-pub const sk_storage_get = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, 107);
-pub const sk_storage_delete = @ptrFromInt(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, 108);
-pub const send_signal = @ptrFromInt(*const fn (sig: u32) c_long, 109);
-pub const tcp_gen_syncookie = @ptrFromInt(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, 110);
-pub const skb_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 111);
-pub const probe_read_user = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 112);
-pub const probe_read_kernel = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 113);
-pub const probe_read_user_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 114);
-pub const probe_read_kernel_str = @ptrFromInt(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, 115);
-pub const tcp_send_ack = @ptrFromInt(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, 116);
-pub const send_signal_thread = @ptrFromInt(*const fn (sig: u32) c_long, 117);
-pub const jiffies64 = @ptrFromInt(*const fn () u64, 118);
-pub const read_branch_records = @ptrFromInt(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, 119);
-pub const get_ns_current_pid_tgid = @ptrFromInt(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, 120);
-pub const xdp_output = @ptrFromInt(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, 121);
-pub const get_netns_cookie = @ptrFromInt(*const fn (ctx: ?*anyopaque) u64, 122);
-pub const get_current_ancestor_cgroup_id = @ptrFromInt(*const fn (ancestor_level: c_int) u64, 123);
-pub const sk_assign = @ptrFromInt(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, 124);
-pub const ktime_get_boot_ns = @ptrFromInt(*const fn () u64, 125);
-pub const seq_printf = @ptrFromInt(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, 126);
-pub const seq_write = @ptrFromInt(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, 127);
-pub const sk_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock) u64, 128);
-pub const sk_ancestor_cgroup_id = @ptrFromInt(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, 129);
-pub const ringbuf_output = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, 130);
-pub const ringbuf_reserve = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, 131);
-pub const ringbuf_submit = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 132);
-pub const ringbuf_discard = @ptrFromInt(*const fn (data: ?*anyopaque, flags: u64) void, 133);
-pub const ringbuf_query = @ptrFromInt(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, 134);
-pub const csum_level = @ptrFromInt(*const fn (skb: *kern.SkBuff, level: u64) c_long, 135);
-pub const skc_to_tcp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, 136);
-pub const skc_to_tcp_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, 137);
-pub const skc_to_tcp_timewait_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, 138);
-pub const skc_to_tcp_request_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, 139);
-pub const skc_to_udp6_sock = @ptrFromInt(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, 140);
-pub const get_task_stack = @ptrFromInt(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, 141);
+pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31));
+pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32));
+pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33));
+pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34));
+pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35));
+pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36));
+pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37));
+pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38));
+pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39));
+pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40));
+pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41));
+pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42));
+pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43));
+pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44));
+pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45));
+pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46));
+pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47));
+pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48));
+pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49));
+pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50));
+pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51));
+pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52));
+pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53));
+pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54));
+pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55));
+pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56));
+pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57));
+pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58));
+pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59));
+pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60));
+pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61));
+pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62));
+pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63));
+pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64));
+pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65));
+pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66));
+pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67));
+pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68));
+pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69));
+pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70));
+pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71));
+pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72));
+pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73));
+pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74));
+pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75));
+pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76));
+pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77));
+pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78));
+pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79));
+pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80));
+pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81));
+pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82));
+pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83));
+pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84));
+pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85));
+pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86));
+pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87));
+pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88));
+pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89));
+pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90));
+pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91));
+pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92));
+pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93));
+pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94));
+pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95));
+pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96));
+pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97));
+pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98));
+pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99));
+pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100));
+pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101));
+pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102));
+pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103));
+pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104));
+pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105));
+pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106));
+pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107));
+pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108));
+pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109));
+pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110));
+pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111));
+pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112));
+pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113));
+pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114));
+pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115));
+pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116));
+pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117));
+pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118));
+pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119));
+pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120));
+pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121));
+pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122));
+pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123));
+pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124));
+pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125));
+pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126));
+pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127));
+pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128));
+pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129));
+pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130));
+pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131));
+pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132));
+pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133));
+pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134));
+pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135));
+pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136));
+pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137));
+pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138));
+pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139));
+pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140));
+pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141));
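
`@ptrFromInt` now takes only the address, so every helper wraps it in `@as` with the full function-pointer type. The shape of the rewrite, shrunk to one fake helper (the address 8 is arbitrary and suitably aligned; real helper IDs come from linux/bpf.h):

const std = @import("std");

// Illustrative only, not a real BPF helper.
const fake_helper = @as(*const fn () void, @ptrFromInt(8));

test "ptrFromInt preserves the address" {
    try std.testing.expectEqual(@as(usize, 8), @intFromPtr(fake_helper));
}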
lib/std/os/linux/bpf.zig
@@ -643,7 +643,7 @@ pub const Insn = packed struct {
             .dst = @intFromEnum(dst),
             .src = @intFromEnum(src),
             .off = 0,
-            .imm = @intCast(i32, @truncate(u32, imm)),
+            .imm = @as(i32, @intCast(@as(u32, @truncate(imm)))),
         };
     }
 
@@ -653,7 +653,7 @@ pub const Insn = packed struct {
             .dst = 0,
             .src = 0,
             .off = 0,
-            .imm = @intCast(i32, @truncate(u32, imm >> 32)),
+            .imm = @as(i32, @intCast(@as(u32, @truncate(imm >> 32)))),
         };
     }
 
@@ -666,11 +666,11 @@ pub const Insn = packed struct {
     }
 
     pub fn ld_map_fd1(dst: Reg, map_fd: fd_t) Insn {
-        return ld_imm_impl1(dst, @enumFromInt(Reg, PSEUDO_MAP_FD), @intCast(u64, map_fd));
+        return ld_imm_impl1(dst, @as(Reg, @enumFromInt(PSEUDO_MAP_FD)), @as(u64, @intCast(map_fd)));
     }
 
     pub fn ld_map_fd2(map_fd: fd_t) Insn {
-        return ld_imm_impl2(@intCast(u64, map_fd));
+        return ld_imm_impl2(@as(u64, @intCast(map_fd)));
     }
 
     pub fn st(comptime size: Size, dst: Reg, off: i16, imm: i32) Insn {
@@ -786,17 +786,17 @@ test "opcodes" {
 
     // TODO: byteswap instructions
     try expect_opcode(0xd4, Insn.le(.half_word, .r1));
-    try expectEqual(@intCast(i32, 16), Insn.le(.half_word, .r1).imm);
+    try expectEqual(@as(i32, @intCast(16)), Insn.le(.half_word, .r1).imm);
     try expect_opcode(0xd4, Insn.le(.word, .r1));
-    try expectEqual(@intCast(i32, 32), Insn.le(.word, .r1).imm);
+    try expectEqual(@as(i32, @intCast(32)), Insn.le(.word, .r1).imm);
     try expect_opcode(0xd4, Insn.le(.double_word, .r1));
-    try expectEqual(@intCast(i32, 64), Insn.le(.double_word, .r1).imm);
+    try expectEqual(@as(i32, @intCast(64)), Insn.le(.double_word, .r1).imm);
     try expect_opcode(0xdc, Insn.be(.half_word, .r1));
-    try expectEqual(@intCast(i32, 16), Insn.be(.half_word, .r1).imm);
+    try expectEqual(@as(i32, @intCast(16)), Insn.be(.half_word, .r1).imm);
     try expect_opcode(0xdc, Insn.be(.word, .r1));
-    try expectEqual(@intCast(i32, 32), Insn.be(.word, .r1).imm);
+    try expectEqual(@as(i32, @intCast(32)), Insn.be(.word, .r1).imm);
     try expect_opcode(0xdc, Insn.be(.double_word, .r1));
-    try expectEqual(@intCast(i32, 64), Insn.be(.double_word, .r1).imm);
+    try expectEqual(@as(i32, @intCast(64)), Insn.be(.double_word, .r1).imm);
 
     // memory instructions
     try expect_opcode(0x18, Insn.ld_dw1(.r1, 0));
@@ -804,7 +804,7 @@ test "opcodes" {
 
     //   loading a map fd
     try expect_opcode(0x18, Insn.ld_map_fd1(.r1, 0));
-    try expectEqual(@intCast(u4, PSEUDO_MAP_FD), Insn.ld_map_fd1(.r1, 0).src);
+    try expectEqual(@as(u4, @intCast(PSEUDO_MAP_FD)), Insn.ld_map_fd1(.r1, 0).src);
     try expect_opcode(0x00, Insn.ld_map_fd2(0));
 
     try expect_opcode(0x38, Insn.ld_abs(.double_word, .r1, .r2, 0));
@@ -1518,7 +1518,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries
 
     const rc = linux.bpf(.map_create, &attr, @sizeOf(MapCreateAttr));
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(fd_t, rc),
+        .SUCCESS => return @as(fd_t, @intCast(rc)),
         .INVAL => return error.MapTypeOrAttrInvalid,
         .NOMEM => return error.SystemResources,
         .PERM => return error.AccessDenied,
@@ -1668,20 +1668,20 @@ pub fn prog_load(
 
     attr.prog_load.prog_type = @intFromEnum(prog_type);
     attr.prog_load.insns = @intFromPtr(insns.ptr);
-    attr.prog_load.insn_cnt = @intCast(u32, insns.len);
+    attr.prog_load.insn_cnt = @as(u32, @intCast(insns.len));
     attr.prog_load.license = @intFromPtr(license.ptr);
     attr.prog_load.kern_version = kern_version;
     attr.prog_load.prog_flags = flags;
 
     if (log) |l| {
         attr.prog_load.log_buf = @intFromPtr(l.buf.ptr);
-        attr.prog_load.log_size = @intCast(u32, l.buf.len);
+        attr.prog_load.log_size = @as(u32, @intCast(l.buf.len));
         attr.prog_load.log_level = l.level;
     }
 
     const rc = linux.bpf(.prog_load, &attr, @sizeOf(ProgLoadAttr));
     return switch (errno(rc)) {
-        .SUCCESS => @intCast(fd_t, rc),
+        .SUCCESS => @as(fd_t, @intCast(rc)),
         .ACCES => error.UnsafeProgram,
         .FAULT => unreachable,
         .INVAL => error.InvalidProgram,
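
The nested casts read inside-out: the inner `@as` types `@truncate` (keep the low 32 bits), the outer `@as` types `@intCast` (safety-checked fit). A worked sketch with a value whose low word fits in an i32:

const std = @import("std");

test "truncate then intCast, inside-out" {
    const imm: u64 = 0xdead_beef_0000_002a;
    // @truncate takes u32 from the inner @as; @intCast takes i32 from the
    // outer one and asserts the truncated value fits (0x2a does).
    const low = @as(i32, @intCast(@as(u32, @truncate(imm))));
    try std.testing.expectEqual(@as(i32, 42), low);
}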
lib/std/os/linux/io_uring.zig
@@ -60,7 +60,7 @@ pub const IO_Uring = struct {
             .NOSYS => return error.SystemOutdated,
             else => |errno| return os.unexpectedErrno(errno),
         }
-        const fd = @intCast(os.fd_t, res);
+        const fd = @as(os.fd_t, @intCast(res));
         assert(fd >= 0);
         errdefer os.close(fd);
 
@@ -198,7 +198,7 @@ pub const IO_Uring = struct {
             .INTR => return error.SignalInterrupt,
             else => |errno| return os.unexpectedErrno(errno),
         }
-        return @intCast(u32, res);
+        return @as(u32, @intCast(res));
     }
 
     /// Sync internal state with kernel ring state on the SQ side.
@@ -937,8 +937,8 @@ pub const IO_Uring = struct {
         const res = linux.io_uring_register(
             self.fd,
             .REGISTER_FILES,
-            @ptrCast(*const anyopaque, fds.ptr),
-            @intCast(u32, fds.len),
+            @as(*const anyopaque, @ptrCast(fds.ptr)),
+            @as(u32, @intCast(fds.len)),
         );
         try handle_registration_result(res);
     }
@@ -968,8 +968,8 @@ pub const IO_Uring = struct {
         const res = linux.io_uring_register(
             self.fd,
             .REGISTER_FILES_UPDATE,
-            @ptrCast(*const anyopaque, &update),
-            @intCast(u32, fds.len),
+            @as(*const anyopaque, @ptrCast(&update)),
+            @as(u32, @intCast(fds.len)),
         );
         try handle_registration_result(res);
     }
@@ -982,7 +982,7 @@ pub const IO_Uring = struct {
         const res = linux.io_uring_register(
             self.fd,
             .REGISTER_EVENTFD,
-            @ptrCast(*const anyopaque, &fd),
+            @as(*const anyopaque, @ptrCast(&fd)),
             1,
         );
         try handle_registration_result(res);
@@ -997,7 +997,7 @@ pub const IO_Uring = struct {
         const res = linux.io_uring_register(
             self.fd,
             .REGISTER_EVENTFD_ASYNC,
-            @ptrCast(*const anyopaque, &fd),
+            @as(*const anyopaque, @ptrCast(&fd)),
             1,
         );
         try handle_registration_result(res);
@@ -1022,7 +1022,7 @@ pub const IO_Uring = struct {
             self.fd,
             .REGISTER_BUFFERS,
             buffers.ptr,
-            @intCast(u32, buffers.len),
+            @as(u32, @intCast(buffers.len)),
         );
         try handle_registration_result(res);
     }
@@ -1122,20 +1122,17 @@ pub const SubmissionQueue = struct {
         errdefer os.munmap(mmap_sqes);
         assert(mmap_sqes.len == size_sqes);
 
-        const array = @ptrCast([*]u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.array]));
-        const sqes = @ptrCast([*]linux.io_uring_sqe, @alignCast(@alignOf(linux.io_uring_sqe), &mmap_sqes[0]));
+        const array: [*]u32 = @ptrCast(@alignCast(&mmap[p.sq_off.array]));
+        const sqes: [*]linux.io_uring_sqe = @ptrCast(@alignCast(&mmap_sqes[0]));
         // We expect the kernel copies p.sq_entries to the u32 pointed to by p.sq_off.ring_entries,
         // see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844.
-        assert(
-            p.sq_entries ==
-                @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*,
-        );
+        assert(p.sq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_entries]))).*);
         return SubmissionQueue{
-            .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.head])),
-            .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.tail])),
-            .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_mask])).*,
-            .flags = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.flags])),
-            .dropped = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.dropped])),
+            .head = @ptrCast(@alignCast(&mmap[p.sq_off.head])),
+            .tail = @ptrCast(@alignCast(&mmap[p.sq_off.tail])),
+            .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.sq_off.ring_mask]))).*,
+            .flags = @ptrCast(@alignCast(&mmap[p.sq_off.flags])),
+            .dropped = @ptrCast(@alignCast(&mmap[p.sq_off.dropped])),
             .array = array[0..p.sq_entries],
             .sqes = sqes[0..p.sq_entries],
             .mmap = mmap,
@@ -1160,17 +1157,13 @@ pub const CompletionQueue = struct {
         assert(fd >= 0);
         assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
         const mmap = sq.mmap;
-        const cqes = @ptrCast(
-            [*]linux.io_uring_cqe,
-            @alignCast(@alignOf(linux.io_uring_cqe), &mmap[p.cq_off.cqes]),
-        );
-        assert(p.cq_entries ==
-            @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*);
+        const cqes: [*]linux.io_uring_cqe = @ptrCast(@alignCast(&mmap[p.cq_off.cqes]));
+        assert(p.cq_entries == @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_entries]))).*);
         return CompletionQueue{
-            .head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.head])),
-            .tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.tail])),
-            .mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_mask])).*,
-            .overflow = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.overflow])),
+            .head = @ptrCast(@alignCast(&mmap[p.cq_off.head])),
+            .tail = @ptrCast(@alignCast(&mmap[p.cq_off.tail])),
+            .mask = @as(*u32, @ptrCast(@alignCast(&mmap[p.cq_off.ring_mask]))).*,
+            .overflow = @ptrCast(@alignCast(&mmap[p.cq_off.overflow])),
             .cqes = cqes[0..p.cq_entries],
         };
     }
@@ -1233,7 +1226,7 @@ pub fn io_uring_prep_rw(
         .fd = fd,
         .off = offset,
         .addr = addr,
-        .len = @intCast(u32, len),
+        .len = @as(u32, @intCast(len)),
         .rw_flags = 0,
         .user_data = 0,
         .buf_index = 0,
@@ -1319,7 +1312,7 @@ pub fn io_uring_prep_epoll_ctl(
     op: u32,
     ev: ?*linux.epoll_event,
 ) void {
-    io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @intCast(u64, fd));
+    io_uring_prep_rw(.EPOLL_CTL, sqe, epfd, @intFromPtr(ev), op, @as(u64, @intCast(fd)));
 }
 
 pub fn io_uring_prep_recv(sqe: *linux.io_uring_sqe, fd: os.fd_t, buffer: []u8, flags: u32) void {
@@ -1459,7 +1452,7 @@ pub fn io_uring_prep_fallocate(
         .fd = fd,
         .off = offset,
         .addr = len,
-        .len = @intCast(u32, mode),
+        .len = @as(u32, @intCast(mode)),
         .rw_flags = 0,
         .user_data = 0,
         .buf_index = 0,
@@ -1514,7 +1507,7 @@ pub fn io_uring_prep_renameat(
         0,
         @intFromPtr(new_path),
     );
-    sqe.len = @bitCast(u32, new_dir_fd);
+    sqe.len = @as(u32, @bitCast(new_dir_fd));
     sqe.rw_flags = flags;
 }
 
@@ -1569,7 +1562,7 @@ pub fn io_uring_prep_linkat(
         0,
         @intFromPtr(new_path),
     );
-    sqe.len = @bitCast(u32, new_dir_fd);
+    sqe.len = @as(u32, @bitCast(new_dir_fd));
     sqe.rw_flags = flags;
 }
 
@@ -1582,8 +1575,8 @@ pub fn io_uring_prep_provide_buffers(
     buffer_id: usize,
 ) void {
     const ptr = @intFromPtr(buffers);
-    io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @intCast(i32, num), ptr, buffer_len, buffer_id);
-    sqe.buf_index = @intCast(u16, group_id);
+    io_uring_prep_rw(.PROVIDE_BUFFERS, sqe, @as(i32, @intCast(num)), ptr, buffer_len, buffer_id);
+    sqe.buf_index = @as(u16, @intCast(group_id));
 }
 
 pub fn io_uring_prep_remove_buffers(
@@ -1591,8 +1584,8 @@ pub fn io_uring_prep_remove_buffers(
     num: usize,
     group_id: usize,
 ) void {
-    io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @intCast(i32, num), 0, 0, 0);
-    sqe.buf_index = @intCast(u16, group_id);
+    io_uring_prep_rw(.REMOVE_BUFFERS, sqe, @as(i32, @intCast(num)), 0, 0, 0);
+    sqe.buf_index = @as(u16, @intCast(group_id));
 }
 
 test "structs/offsets/entries" {
@@ -1886,12 +1879,12 @@ test "write_fixed/read_fixed" {
 
     try testing.expectEqual(linux.io_uring_cqe{
         .user_data = 0x45454545,
-        .res = @intCast(i32, buffers[0].iov_len),
+        .res = @as(i32, @intCast(buffers[0].iov_len)),
         .flags = 0,
     }, cqe_write);
     try testing.expectEqual(linux.io_uring_cqe{
         .user_data = 0x12121212,
-        .res = @intCast(i32, buffers[1].iov_len),
+        .res = @as(i32, @intCast(buffers[1].iov_len)),
         .flags = 0,
     }, cqe_read);
 
@@ -2145,7 +2138,7 @@ test "timeout (after a relative time)" {
     }, cqe);
 
     // Tests should not depend on timings: skip test if outside margin.
-    if (!std.math.approxEqAbs(f64, ms, @floatFromInt(f64, stopped - started), margin)) return error.SkipZigTest;
+    if (!std.math.approxEqAbs(f64, ms, @as(f64, @floatFromInt(stopped - started)), margin)) return error.SkipZigTest;
 }
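
`@floatFromInt` follows the same recipe as the integer casts: pass only the value and let the result location name the float type. In isolation (the timings are made up):

const std = @import("std");

test "floatFromInt with an inferred float type" {
    const started: u64 = 1_000;
    const stopped: u64 = 1_250;
    // Old: @floatFromInt(f64, stopped - started).
    const elapsed: f64 = @floatFromInt(stopped - started);
    try std.testing.expectEqual(@as(f64, 250.0), elapsed);
}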
 
 test "timeout (after a number of completions)" {
@@ -2637,7 +2630,7 @@ test "renameat" {
     );
     try testing.expectEqual(linux.IORING_OP.RENAMEAT, sqe.opcode);
     try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
-    try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
+    try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
     try testing.expectEqual(@as(u32, 1), try ring.submit());
 
     const cqe = try ring.copy_cqe();
@@ -2850,7 +2843,7 @@ test "linkat" {
     );
     try testing.expectEqual(linux.IORING_OP.LINKAT, sqe.opcode);
     try testing.expectEqual(@as(i32, tmp.dir.fd), sqe.fd);
-    try testing.expectEqual(@as(i32, tmp.dir.fd), @bitCast(i32, sqe.len));
+    try testing.expectEqual(@as(i32, tmp.dir.fd), @as(i32, @bitCast(sqe.len)));
     try testing.expectEqual(@as(u32, 1), try ring.submit());
 
     const cqe = try ring.copy_cqe();
@@ -2898,7 +2891,7 @@ test "provide_buffers: read" {
     // Provide 4 buffers
 
     {
-        const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+        const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
         try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
         try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
         try testing.expectEqual(@as(u32, buffers[0].len), sqe.len);
@@ -2939,7 +2932,7 @@ test "provide_buffers: read" {
         try testing.expectEqual(@as(i32, buffer_len), cqe.res);
 
         try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
-        try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+        try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
     }
 
     // This read should fail
@@ -2971,7 +2964,7 @@ test "provide_buffers: read" {
     const reprovided_buffer_id = 2;
 
     {
-        _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id);
+        _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
         try testing.expectEqual(@as(u32, 1), try ring.submit());
 
         const cqe = try ring.copy_cqe();
@@ -3003,7 +2996,7 @@ test "provide_buffers: read" {
         try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
         try testing.expectEqual(@as(i32, buffer_len), cqe.res);
         try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
-        try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+        try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
     }
 }
 
@@ -3030,7 +3023,7 @@ test "remove_buffers" {
     // Provide 4 buffers
 
     {
-        _ = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+        _ = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
         try testing.expectEqual(@as(u32, 1), try ring.submit());
 
         const cqe = try ring.copy_cqe();
@@ -3076,7 +3069,7 @@ test "remove_buffers" {
         try testing.expect(used_buffer_id >= 0 and used_buffer_id < 4);
         try testing.expectEqual(@as(i32, buffer_len), cqe.res);
         try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
-        try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@intCast(usize, cqe.res)]);
+        try testing.expectEqualSlices(u8, &([_]u8{0} ** buffer_len), buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))]);
     }
 
     // Final read should _not_ work
@@ -3119,7 +3112,7 @@ test "provide_buffers: accept/connect/send/recv" {
     // Provide 4 buffers
 
     {
-        const sqe = try ring.provide_buffers(0xcccccccc, @ptrCast([*]u8, &buffers), buffer_len, buffers.len, group_id, buffer_id);
+        const sqe = try ring.provide_buffers(0xcccccccc, @as([*]u8, @ptrCast(&buffers)), buffer_len, buffers.len, group_id, buffer_id);
         try testing.expectEqual(linux.IORING_OP.PROVIDE_BUFFERS, sqe.opcode);
         try testing.expectEqual(@as(i32, buffers.len), sqe.fd);
         try testing.expectEqual(@as(u32, buffer_len), sqe.len);
@@ -3181,7 +3174,7 @@ test "provide_buffers: accept/connect/send/recv" {
         try testing.expectEqual(@as(i32, buffer_len), cqe.res);
 
         try testing.expectEqual(@as(u64, 0xdededede), cqe.user_data);
-        const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)];
+        const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
         try testing.expectEqualSlices(u8, &([_]u8{'z'} ** buffer_len), buffer);
     }
 
@@ -3213,7 +3206,7 @@ test "provide_buffers: accept/connect/send/recv" {
     const reprovided_buffer_id = 2;
 
     {
-        _ = try ring.provide_buffers(0xabababab, @ptrCast([*]u8, &buffers[reprovided_buffer_id]), buffer_len, 1, group_id, reprovided_buffer_id);
+        _ = try ring.provide_buffers(0xabababab, @as([*]u8, @ptrCast(&buffers[reprovided_buffer_id])), buffer_len, 1, group_id, reprovided_buffer_id);
         try testing.expectEqual(@as(u32, 1), try ring.submit());
 
         const cqe = try ring.copy_cqe();
@@ -3259,7 +3252,7 @@ test "provide_buffers: accept/connect/send/recv" {
         try testing.expectEqual(used_buffer_id, reprovided_buffer_id);
         try testing.expectEqual(@as(i32, buffer_len), cqe.res);
         try testing.expectEqual(@as(u64, 0xdfdfdfdf), cqe.user_data);
-        const buffer = buffers[used_buffer_id][0..@intCast(usize, cqe.res)];
+        const buffer = buffers[used_buffer_id][0..@as(usize, @intCast(cqe.res))];
         try testing.expectEqualSlices(u8, &([_]u8{'w'} ** buffer_len), buffer);
     }
 }
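
`sqe.len` is a `u32`, but `renameat`/`linkat` store a signed descriptor in it bit-for-bit; `@bitCast` now takes its direction from the annotation instead of a type argument. Standalone sketch:

const std = @import("std");

test "bitCast a signed fd into a u32 field" {
    const dir_fd: i32 = -2;
    // @bitCast keeps the bit pattern; u32 is inferred from the annotation.
    const len: u32 = @bitCast(dir_fd);
    // Round-tripping recovers the original descriptor.
    try std.testing.expectEqual(dir_fd, @as(i32, @bitCast(len)));
}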
lib/std/os/linux/ioctl.zig
@@ -32,7 +32,7 @@ fn io_impl(dir: Direction, io_type: u8, nr: u8, comptime T: type) u32 {
         .io_type = io_type,
         .nr = nr,
     };
-    return @bitCast(u32, request);
+    return @as(u32, @bitCast(request));
 }
 
 pub fn IO(io_type: u8, nr: u8) u32 {
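
`io_impl` builds the request as a packed struct and bit-casts it to the `u32` the kernel expects; under the new syntax the integer type appears once, at the result. A sketch with the same 32-bit layout (field names mirror the ones above, with `dir` reduced to a bare `u2`):

const std = @import("std");

test "packed struct to integer via inferred bitCast" {
    const Request = packed struct { nr: u8, io_type: u8, size: u14, dir: u2 };
    const req = Request{ .nr = 1, .io_type = 'T', .size = 0, .dir = 0 };
    // The packed struct is exactly 32 bits wide, so @bitCast to u32 is legal.
    const raw: u32 = @bitCast(req);
    // The first field occupies the least significant bits.
    try std.testing.expectEqual(@as(u32, 1), raw & 0xff);
}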
lib/std/os/linux/start_pie.zig
@@ -103,17 +103,17 @@ pub fn relocate(phdrs: []elf.Phdr) void {
 
     // Apply the relocations.
     if (rel_addr != 0) {
-        const rel = std.mem.bytesAsSlice(elf.Rel, @ptrFromInt([*]u8, rel_addr)[0..rel_size]);
+        const rel = std.mem.bytesAsSlice(elf.Rel, @as([*]u8, @ptrFromInt(rel_addr))[0..rel_size]);
         for (rel) |r| {
             if (r.r_type() != R_RELATIVE) continue;
-            @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr;
+            @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr;
         }
     }
     if (rela_addr != 0) {
-        const rela = std.mem.bytesAsSlice(elf.Rela, @ptrFromInt([*]u8, rela_addr)[0..rela_size]);
+        const rela = std.mem.bytesAsSlice(elf.Rela, @as([*]u8, @ptrFromInt(rela_addr))[0..rela_size]);
         for (rela) |r| {
             if (r.r_type() != R_RELATIVE) continue;
-            @ptrFromInt(*usize, base_addr + r.r_offset).* += base_addr + @bitCast(usize, r.r_addend);
+            @as(*usize, @ptrFromInt(base_addr + r.r_offset)).* += base_addr + @as(usize, @bitCast(r.r_addend));
         }
     }
 }
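
`relocate` turns raw addresses into byte slices; `@ptrFromInt` now takes just the address while `@as` carries the many-item pointer type. A safe, self-contained version of the slicing pattern (a local buffer stands in for the program header):

const std = @import("std");

test "slice bytes at a runtime address" {
    var storage = [_]u8{0} ** 16;
    const rel_addr = @intFromPtr(&storage); // stand-in for rel_addr above
    const rel = @as([*]u8, @ptrFromInt(rel_addr))[0..storage.len];
    rel[0] = 7;
    try std.testing.expect(storage[0] == 7);
}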
lib/std/os/linux/test.zig
@@ -50,7 +50,7 @@ test "timer" {
         .it_value = time_interval,
     };
 
-    err = linux.getErrno(linux.timerfd_settime(@intCast(i32, timer_fd), 0, &new_time, null));
+    err = linux.getErrno(linux.timerfd_settime(@as(i32, @intCast(timer_fd)), 0, &new_time, null));
     try expect(err == .SUCCESS);
 
     var event = linux.epoll_event{
@@ -58,13 +58,13 @@ test "timer" {
         .data = linux.epoll_data{ .ptr = 0 },
     };
 
-    err = linux.getErrno(linux.epoll_ctl(@intCast(i32, epoll_fd), linux.EPOLL.CTL_ADD, @intCast(i32, timer_fd), &event));
+    err = linux.getErrno(linux.epoll_ctl(@as(i32, @intCast(epoll_fd)), linux.EPOLL.CTL_ADD, @as(i32, @intCast(timer_fd)), &event));
     try expect(err == .SUCCESS);
 
     const events_one: linux.epoll_event = undefined;
     var events = [_]linux.epoll_event{events_one} ** 8;
 
-    err = linux.getErrno(linux.epoll_wait(@intCast(i32, epoll_fd), &events, 8, -1));
+    err = linux.getErrno(linux.epoll_wait(@as(i32, @intCast(epoll_fd)), &events, 8, -1));
     try expect(err == .SUCCESS);
 }
 
@@ -91,11 +91,11 @@ test "statx" {
     }
 
     try expect(stat_buf.mode == statx_buf.mode);
-    try expect(@bitCast(u32, stat_buf.uid) == statx_buf.uid);
-    try expect(@bitCast(u32, stat_buf.gid) == statx_buf.gid);
-    try expect(@bitCast(u64, @as(i64, stat_buf.size)) == statx_buf.size);
-    try expect(@bitCast(u64, @as(i64, stat_buf.blksize)) == statx_buf.blksize);
-    try expect(@bitCast(u64, @as(i64, stat_buf.blocks)) == statx_buf.blocks);
+    try expect(@as(u32, @bitCast(stat_buf.uid)) == statx_buf.uid);
+    try expect(@as(u32, @bitCast(stat_buf.gid)) == statx_buf.gid);
+    try expect(@as(u64, @bitCast(@as(i64, stat_buf.size))) == statx_buf.size);
+    try expect(@as(u64, @bitCast(@as(i64, stat_buf.blksize))) == statx_buf.blksize);
+    try expect(@as(u64, @bitCast(@as(i64, stat_buf.blocks))) == statx_buf.blocks);
 }
 
 test "user and group ids" {
lib/std/os/linux/tls.zig
@@ -205,7 +205,7 @@ fn initTLS(phdrs: []elf.Phdr) void {
         // the data stored in the PT_TLS segment is p_filesz and may be less
         // than the former
         tls_align_factor = phdr.p_align;
-        tls_data = @ptrFromInt([*]u8, img_base + phdr.p_vaddr)[0..phdr.p_filesz];
+        tls_data = @as([*]u8, @ptrFromInt(img_base + phdr.p_vaddr))[0..phdr.p_filesz];
         tls_data_alloc_size = phdr.p_memsz;
     } else {
         tls_align_factor = @alignOf(usize);
@@ -263,12 +263,12 @@ fn initTLS(phdrs: []elf.Phdr) void {
         .dtv_offset = dtv_offset,
         .data_offset = data_offset,
         .data_size = tls_data_alloc_size,
-        .gdt_entry_number = @bitCast(usize, @as(isize, -1)),
+        .gdt_entry_number = @as(usize, @bitCast(@as(isize, -1))),
     };
 }
 
 inline fn alignPtrCast(comptime T: type, ptr: [*]u8) *T {
-    return @ptrCast(*T, @alignCast(@alignOf(T), ptr));
+    return @ptrCast(@alignCast(ptr));
 }
 
 /// Initializes all the fields of the static TLS area and returns the computed
lib/std/os/linux/vdso.zig
@@ -8,7 +8,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
     const vdso_addr = std.os.system.getauxval(std.elf.AT_SYSINFO_EHDR);
     if (vdso_addr == 0) return 0;
 
-    const eh = @ptrFromInt(*elf.Ehdr, vdso_addr);
+    const eh = @as(*elf.Ehdr, @ptrFromInt(vdso_addr));
     var ph_addr: usize = vdso_addr + eh.e_phoff;
 
     var maybe_dynv: ?[*]usize = null;
@@ -19,14 +19,14 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
             i += 1;
             ph_addr += eh.e_phentsize;
         }) {
-            const this_ph = @ptrFromInt(*elf.Phdr, ph_addr);
+            const this_ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
             switch (this_ph.p_type) {
                 // On WSL1 as well as older kernels, the VDSO ELF image is pre-linked in the upper half
                 // of the memory space (e.g. p_vaddr = 0xffffffffff700000 on WSL1).
                 // Wrapping operations are used on this line as well as subsequent calculations relative to base
                 // (lines 47, 78) to ensure no overflow check is tripped.
                 elf.PT_LOAD => base = vdso_addr +% this_ph.p_offset -% this_ph.p_vaddr,
-                elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, vdso_addr + this_ph.p_offset),
+                elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(vdso_addr + this_ph.p_offset)),
                 else => {},
             }
         }
@@ -45,11 +45,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
         while (dynv[i] != 0) : (i += 2) {
             const p = base +% dynv[i + 1];
             switch (dynv[i]) {
-                elf.DT_STRTAB => maybe_strings = @ptrFromInt([*]u8, p),
-                elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p),
-                elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]linux.Elf_Symndx, p),
-                elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p),
-                elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p),
+                elf.DT_STRTAB => maybe_strings = @as([*]u8, @ptrFromInt(p)),
+                elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)),
+                elf.DT_HASH => maybe_hashtab = @as([*]linux.Elf_Symndx, @ptrFromInt(p)),
+                elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)),
+                elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)),
                 else => {},
             }
         }
@@ -65,10 +65,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
 
     var i: usize = 0;
     while (i < hashtab[1]) : (i += 1) {
-        if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info & 0xf) & OK_TYPES)) continue;
-        if (0 == (@as(u32, 1) << @intCast(u5, syms[i].st_info >> 4) & OK_BINDS)) continue;
+        if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info & 0xf)) & OK_TYPES)) continue;
+        if (0 == (@as(u32, 1) << @as(u5, @intCast(syms[i].st_info >> 4)) & OK_BINDS)) continue;
         if (0 == syms[i].st_shndx) continue;
-        const sym_name = @ptrCast([*:0]u8, strings + syms[i].st_name);
+        const sym_name = @as([*:0]u8, @ptrCast(strings + syms[i].st_name));
         if (!mem.eql(u8, name, mem.sliceTo(sym_name, 0))) continue;
         if (maybe_versym) |versym| {
             if (!checkver(maybe_verdef.?, versym[i], vername, strings))
@@ -82,15 +82,15 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
 
 fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool {
     var def = def_arg;
-    const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+    const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff;
     while (true) {
         if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
             break;
         if (def.vd_next == 0)
             return false;
-        def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next);
+        def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next));
     }
-    const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux);
-    const vda_name = @ptrCast([*:0]u8, strings + aux.vda_name);
+    const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux));
+    const vda_name = @as([*:0]u8, @ptrCast(strings + aux.vda_name));
     return mem.eql(u8, vername, mem.sliceTo(vda_name, 0));
 }
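Throughout vdso.zig the pattern is uniform: `@ptrFromInt` loses its type parameter and the pointer type moves into an enclosing `@as`. A minimal sketch (not part of this commit):

    const std = @import("std");

    test "@ptrFromInt takes only the address" {
        var x: u32 = 42;
        const addr = @intFromPtr(&x);
        // The pointer type is supplied by @as (or any other result
        // location) rather than by a first argument.
        const p = @as(*u32, @ptrFromInt(addr));
        try std.testing.expectEqual(@as(u32, 42), p.*);
    }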
lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -23,10 +23,10 @@ pub const DevicePathProtocol = extern struct {
 
     /// Returns the next DevicePathProtocol node in the sequence, if any.
     pub fn next(self: *DevicePathProtocol) ?*DevicePathProtocol {
-        if (self.type == .End and @enumFromInt(EndDevicePath.Subtype, self.subtype) == .EndEntire)
+        if (self.type == .End and @as(EndDevicePath.Subtype, @enumFromInt(self.subtype)) == .EndEntire)
             return null;
 
-        return @ptrCast(*DevicePathProtocol, @ptrCast([*]u8, self) + self.length);
+        return @as(*DevicePathProtocol, @ptrCast(@as([*]u8, @ptrCast(self)) + self.length));
     }
 
     /// Calculates the total length of the device path structure in bytes, including the end of device path node.
@@ -48,30 +48,30 @@ pub const DevicePathProtocol = extern struct {
         // DevicePathProtocol for the extra node before the end
         var buf = try allocator.alloc(u8, path_size + 2 * (path.len + 1) + @sizeOf(DevicePathProtocol));
 
-        @memcpy(buf[0..path_size.len], @ptrCast([*]const u8, self)[0..path_size]);
+        @memcpy(buf[0..path_size.len], @as([*]const u8, @ptrCast(self))[0..path_size]);
 
         // Pointer to the copy of the end node of the current chain, which is - 4 from the buffer
         // as the end node itself is 4 bytes (type: u8 + subtype: u8 + length: u16).
-        var new = @ptrCast(*MediaDevicePath.FilePathDevicePath, buf.ptr + path_size - 4);
+        var new = @as(*MediaDevicePath.FilePathDevicePath, @ptrCast(buf.ptr + path_size - 4));
 
         new.type = .Media;
         new.subtype = .FilePath;
-        new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@intCast(u16, path.len) + 1);
+        new.length = @sizeOf(MediaDevicePath.FilePathDevicePath) + 2 * (@as(u16, @intCast(path.len)) + 1);
 
         // The same as new.getPath(), but not const as we're filling it in.
-        var ptr = @ptrCast([*:0]align(1) u16, @ptrCast([*]u8, new) + @sizeOf(MediaDevicePath.FilePathDevicePath));
+        var ptr = @as([*:0]align(1) u16, @ptrCast(@as([*]u8, @ptrCast(new)) + @sizeOf(MediaDevicePath.FilePathDevicePath)));
 
         for (path, 0..) |s, i|
             ptr[i] = s;
 
         ptr[path.len] = 0;
 
-        var end = @ptrCast(*EndDevicePath.EndEntireDevicePath, @ptrCast(*DevicePathProtocol, new).next().?);
+        var end = @as(*EndDevicePath.EndEntireDevicePath, @ptrCast(@as(*DevicePathProtocol, @ptrCast(new)).next().?));
         end.type = .End;
         end.subtype = .EndEntire;
         end.length = @sizeOf(EndDevicePath.EndEntireDevicePath);
 
-        return @ptrCast(*DevicePathProtocol, buf.ptr);
+        return @as(*DevicePathProtocol, @ptrCast(buf.ptr));
     }
 
     pub fn getDevicePath(self: *const DevicePathProtocol) ?DevicePath {
@@ -103,7 +103,7 @@ pub const DevicePathProtocol = extern struct {
 
             if (self.subtype == tag_val) {
                 // e.g. expr = .{ .Pci = @ptrCast(...) }
-                return @unionInit(TUnion, subtype.name, @ptrCast(subtype.type, self));
+                return @unionInit(TUnion, subtype.name, @as(subtype.type, @ptrCast(self)));
             }
         }
 
@@ -332,7 +332,7 @@ pub const AcpiDevicePath = union(Subtype) {
         pub fn adrs(self: *const AdrDevicePath) []align(1) const u32 {
             // self.length is a minimum of 8 with one adr which is size 4.
             var entries = (self.length - 4) / @sizeOf(u32);
-            return @ptrCast([*]align(1) const u32, &self.adr)[0..entries];
+            return @as([*]align(1) const u32, @ptrCast(&self.adr))[0..entries];
         }
     };
 
@@ -550,7 +550,7 @@ pub const MessagingDevicePath = union(Subtype) {
 
         pub fn serial_number(self: *const UsbWwidDevicePath) []align(1) const u16 {
             var serial_len = (self.length - @sizeOf(UsbWwidDevicePath)) / @sizeOf(u16);
-            return @ptrCast([*]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(UsbWwidDevicePath))[0..serial_len];
+            return @as([*]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(UsbWwidDevicePath)))[0..serial_len];
         }
     };
 
@@ -943,7 +943,7 @@ pub const MediaDevicePath = union(Subtype) {
         length: u16 align(1),
 
         pub fn getPath(self: *const FilePathDevicePath) [*:0]align(1) const u16 {
-            return @ptrCast([*:0]align(1) const u16, @ptrCast([*]const u8, self) + @sizeOf(FilePathDevicePath));
+            return @as([*:0]align(1) const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FilePathDevicePath)));
         }
     };
 
@@ -1068,7 +1068,7 @@ pub const BiosBootSpecificationDevicePath = union(Subtype) {
         status_flag: u16 align(1),
 
         pub fn getDescription(self: *const BBS101DevicePath) [*:0]const u8 {
-            return @ptrCast([*:0]const u8, self) + @sizeOf(BBS101DevicePath);
+            return @as([*:0]const u8, @ptrCast(self)) + @sizeOf(BBS101DevicePath);
         }
     };
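`@enumFromInt` follows the same shape as the pointer casts: the enum type moves out of the argument list and into the result location. An illustrative sketch with a stand-in enum (`Subtype` below is hypothetical, not the real `EndDevicePath.Subtype`):

    const std = @import("std");

    const Subtype = enum(u8) { instance = 0x01, entire = 0xff };

    test "@enumFromInt with an explicit result type" {
        const raw: u8 = 0xff;
        // Old form named the enum first: @enumFromInt(Subtype, raw).
        // New form takes only the integer; @as pins the result type.
        try std.testing.expect(@as(Subtype, @enumFromInt(raw)) == .entire);
    }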
 
lib/std/os/uefi/protocols/file_protocol.zig
@@ -152,7 +152,7 @@ pub const FileInfo = extern struct {
     attribute: u64,
 
     pub fn getFileName(self: *const FileInfo) [*:0]const u16 {
-        return @ptrCast([*:0]const u16, @ptrCast([*]const u8, self) + @sizeOf(FileInfo));
+        return @as([*:0]const u16, @ptrCast(@as([*]const u8, @ptrCast(self)) + @sizeOf(FileInfo)));
     }
 
     pub const efi_file_read_only: u64 = 0x0000000000000001;
@@ -182,7 +182,7 @@ pub const FileSystemInfo = extern struct {
     _volume_label: u16,
 
     pub fn getVolumeLabel(self: *const FileSystemInfo) [*:0]const u16 {
-        return @ptrCast([*:0]const u16, &self._volume_label);
+        return @as([*:0]const u16, @ptrCast(&self._volume_label));
     }
 
     pub const guid align(8) = Guid{
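The getter rewrites here keep the sentinel-terminated result type, now expressed through `@as`. A simplified sketch using `u8` so that no alignment change is involved (the real getters return `[*:0]const u16`):

    const std = @import("std");

    test "sentinel-terminated pointer via @ptrCast" {
        const label = "volume"; // *const [6:0]u8
        // The [*:0] result type comes from @as rather than from a
        // first argument to @ptrCast.
        const p = @as([*:0]const u8, @ptrCast(label));
        try std.testing.expectEqualStrings("volume", std.mem.sliceTo(p, 0));
    }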
lib/std/os/uefi/protocols/hii.zig
@@ -39,7 +39,7 @@ pub const HIISimplifiedFontPackage = extern struct {
     number_of_wide_glyphs: u16,
 
     pub fn getNarrowGlyphs(self: *HIISimplifiedFontPackage) []NarrowGlyph {
-        return @ptrCast([*]NarrowGlyph, @ptrCast([*]u8, self) + @sizeOf(HIISimplifiedFontPackage))[0..self.number_of_narrow_glyphs];
+        return @as([*]NarrowGlyph, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(HIISimplifiedFontPackage)))[0..self.number_of_narrow_glyphs];
     }
 };
 
lib/std/os/uefi/protocols/managed_network_protocol.zig
@@ -118,7 +118,7 @@ pub const ManagedNetworkTransmitData = extern struct {
     fragment_count: u16,
 
     pub fn getFragments(self: *ManagedNetworkTransmitData) []ManagedNetworkFragmentData {
-        return @ptrCast([*]ManagedNetworkFragmentData, @ptrCast([*]u8, self) + @sizeOf(ManagedNetworkTransmitData))[0..self.fragment_count];
+        return @as([*]ManagedNetworkFragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(ManagedNetworkTransmitData)))[0..self.fragment_count];
     }
 };
 
lib/std/os/uefi/protocols/udp6_protocol.zig
@@ -87,7 +87,7 @@ pub const Udp6ReceiveData = extern struct {
     fragment_count: u32,
 
     pub fn getFragments(self: *Udp6ReceiveData) []Udp6FragmentData {
-        return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6ReceiveData))[0..self.fragment_count];
+        return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6ReceiveData)))[0..self.fragment_count];
     }
 };
 
@@ -97,7 +97,7 @@ pub const Udp6TransmitData = extern struct {
     fragment_count: u32,
 
     pub fn getFragments(self: *Udp6TransmitData) []Udp6FragmentData {
-        return @ptrCast([*]Udp6FragmentData, @ptrCast([*]u8, self) + @sizeOf(Udp6TransmitData))[0..self.fragment_count];
+        return @as([*]Udp6FragmentData, @ptrCast(@as([*]u8, @ptrCast(self)) + @sizeOf(Udp6TransmitData)))[0..self.fragment_count];
     }
 };
 
lib/std/os/uefi/tables/boot_services.zig
@@ -165,7 +165,7 @@ pub const BootServices = extern struct {
         try self.openProtocol(
             handle,
             &protocol.guid,
-            @ptrCast(*?*anyopaque, &ptr),
+            @as(*?*anyopaque, @ptrCast(&ptr)),
             // Invoking handle (loaded image)
             uefi.handle,
             // Control handle (null as not a driver)
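The openProtocol call writes its result through an untyped out parameter, so the cast to `*?*anyopaque` survives the migration with the type moved into `@as`. A minimal sketch (hypothetical types, not the UEFI ones):

    const std = @import("std");

    test "casting an out-parameter pointer" {
        var value: u32 = 7;
        var typed: ?*u32 = &value;
        // APIs that fill in a *?*anyopaque receive the caller's typed
        // pointer reinterpreted; @ptrCast takes its result type from @as.
        const out = @as(*?*anyopaque, @ptrCast(&typed));
        try std.testing.expect(out.* != null);
    }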
lib/std/os/uefi/pool_allocator.zig
@@ -9,7 +9,7 @@ const Allocator = mem.Allocator;
 
 const UefiPoolAllocator = struct {
     fn getHeader(ptr: [*]u8) *[*]align(8) u8 {
-        return @ptrFromInt(*[*]align(8) u8, @intFromPtr(ptr) - @sizeOf(usize));
+        return @as(*[*]align(8) u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize)));
     }
 
     fn alloc(
@@ -22,7 +22,7 @@ const UefiPoolAllocator = struct {
 
         assert(len > 0);
 
-        const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+        const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
 
         const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align);
 
@@ -135,5 +135,5 @@ fn uefi_free(
 ) void {
     _ = log2_old_ptr_align;
     _ = ret_addr;
-    _ = uefi.system_table.boot_services.?.freePool(@alignCast(8, buf.ptr));
+    _ = uefi.system_table.boot_services.?.freePool(@alignCast(buf.ptr));
 }
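The allocator stores alignments as log2 values, so reconstructing the byte alignment needs a shift whose amount is first narrowed with `@intCast`. A sketch of the arithmetic (using `std.math.Log2Int` in place of `Allocator.Log2Align`):

    const std = @import("std");

    test "recovering a pointer alignment from its log2" {
        const log2_ptr_align: u8 = 4;
        // The shift amount must be a log2-sized integer, hence the
        // @intCast before shifting; 1 << 4 recovers 16-byte alignment.
        const ptr_align = @as(usize, 1) << @as(std.math.Log2Int(usize), @intCast(log2_ptr_align));
        try std.testing.expectEqual(@as(usize, 16), ptr_align);
    }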
lib/std/os/windows/user32.zig
@@ -1275,7 +1275,7 @@ pub const WS_EX_LAYERED = 0x00080000;
 pub const WS_EX_OVERLAPPEDWINDOW = WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE;
 pub const WS_EX_PALETTEWINDOW = WS_EX_WINDOWEDGE | WS_EX_TOOLWINDOW | WS_EX_TOPMOST;
 
-pub const CW_USEDEFAULT = @bitCast(i32, @as(u32, 0x80000000));
+pub const CW_USEDEFAULT = @as(i32, @bitCast(@as(u32, 0x80000000)));
 
 pub extern "user32" fn CreateWindowExA(dwExStyle: DWORD, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: DWORD, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?LPVOID) callconv(WINAPI) ?HWND;
 pub fn createWindowExA(dwExStyle: u32, lpClassName: [*:0]const u8, lpWindowName: [*:0]const u8, dwStyle: u32, X: i32, Y: i32, nWidth: i32, nHeight: i32, hWindParent: ?HWND, hMenu: ?HMENU, hInstance: HINSTANCE, lpParam: ?*anyopaque) !HWND {
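CW_USEDEFAULT is a good reminder of why this is `@bitCast` and not `@intCast`. A minimal sketch (not part of this commit):

    const std = @import("std");

    test "@bitCast reinterprets where @intCast would trap" {
        const raw: u32 = 0x8000_0000;
        // 0x80000000 exceeds maxInt(i32), so @intCast would trip a
        // safety check; @bitCast reinterprets the bits as two's
        // complement instead.
        try std.testing.expectEqual(@as(i32, std.math.minInt(i32)), @as(i32, @bitCast(raw)));
    }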
lib/std/os/windows/ws2_32.zig
@@ -21,7 +21,7 @@ const LPARAM = windows.LPARAM;
 const FARPROC = windows.FARPROC;
 
 pub const SOCKET = *opaque {};
-pub const INVALID_SOCKET = @ptrFromInt(SOCKET, ~@as(usize, 0));
+pub const INVALID_SOCKET = @as(SOCKET, @ptrFromInt(~@as(usize, 0)));
 
 pub const GROUP = u32;
 pub const ADDRESS_FAMILY = u16;
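The single most common rewrite in linux.zig below is the file-descriptor argument idiom: widen the `i32` to `isize` so the sign extends across the whole register, then `@bitCast` to the `usize` the syscall ABI expects. A minimal sketch (not part of this commit):

    const std = @import("std");

    test "the fd-to-usize syscall argument idiom" {
        const fd: i32 = -100; // an AT.FDCWD-style sentinel value
        // Sign-extend first, then reinterpret; a direct @intCast would
        // trap on negative descriptors.
        const arg = @as(usize, @bitCast(@as(isize, fd)));
        try std.testing.expectEqual(@as(isize, -100), @as(isize, @bitCast(arg)));
    }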
lib/std/os/linux.zig
@@ -175,62 +175,62 @@ const require_aligned_register_pair =
 // Split a 64bit value into a {LSB,MSB} pair.
 // The LE/BE variants specify the endianness to assume.
 fn splitValueLE64(val: i64) [2]u32 {
-    const u = @bitCast(u64, val);
+    const u = @as(u64, @bitCast(val));
     return [2]u32{
-        @truncate(u32, u),
-        @truncate(u32, u >> 32),
+        @as(u32, @truncate(u)),
+        @as(u32, @truncate(u >> 32)),
     };
 }
 fn splitValueBE64(val: i64) [2]u32 {
-    const u = @bitCast(u64, val);
+    const u = @as(u64, @bitCast(val));
     return [2]u32{
-        @truncate(u32, u >> 32),
-        @truncate(u32, u),
+        @as(u32, @truncate(u >> 32)),
+        @as(u32, @truncate(u)),
     };
 }
 fn splitValue64(val: i64) [2]u32 {
-    const u = @bitCast(u64, val);
+    const u = @as(u64, @bitCast(val));
     switch (native_endian) {
         .Little => return [2]u32{
-            @truncate(u32, u),
-            @truncate(u32, u >> 32),
+            @as(u32, @truncate(u)),
+            @as(u32, @truncate(u >> 32)),
         },
         .Big => return [2]u32{
-            @truncate(u32, u >> 32),
-            @truncate(u32, u),
+            @as(u32, @truncate(u >> 32)),
+            @as(u32, @truncate(u)),
         },
     }
 }
 
 /// Get the errno from a syscall return value, or 0 for no error.
 pub fn getErrno(r: usize) E {
-    const signed_r = @bitCast(isize, r);
+    const signed_r = @as(isize, @bitCast(r));
     const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0;
-    return @enumFromInt(E, int);
+    return @as(E, @enumFromInt(int));
 }
 
 pub fn dup(old: i32) usize {
-    return syscall1(.dup, @bitCast(usize, @as(isize, old)));
+    return syscall1(.dup, @as(usize, @bitCast(@as(isize, old))));
 }
 
 pub fn dup2(old: i32, new: i32) usize {
     if (@hasField(SYS, "dup2")) {
-        return syscall2(.dup2, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)));
+        return syscall2(.dup2, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))));
     } else {
         if (old == new) {
             if (std.debug.runtime_safety) {
-                const rc = syscall2(.fcntl, @bitCast(usize, @as(isize, old)), F.GETFD);
-                if (@bitCast(isize, rc) < 0) return rc;
+                const rc = syscall2(.fcntl, @as(usize, @bitCast(@as(isize, old))), F.GETFD);
+                if (@as(isize, @bitCast(rc)) < 0) return rc;
             }
-            return @intCast(usize, old);
+            return @as(usize, @intCast(old));
         } else {
-            return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), 0);
+            return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), 0);
         }
     }
 }
 
 pub fn dup3(old: i32, new: i32, flags: u32) usize {
-    return syscall3(.dup3, @bitCast(usize, @as(isize, old)), @bitCast(usize, @as(isize, new)), flags);
+    return syscall3(.dup3, @as(usize, @bitCast(@as(isize, old))), @as(usize, @bitCast(@as(isize, new))), flags);
 }
 
 pub fn chdir(path: [*:0]const u8) usize {
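`@truncate` likewise drops its type argument; the result width moves into `@as`. A sketch of the 64-bit split performed by the helpers above (not part of this commit):

    const std = @import("std");

    test "splitting a 64-bit value with @truncate" {
        const u: u64 = 0x1122_3344_5566_7788;
        // The result width (u32) comes from @as; @truncate simply
        // discards the high bits of the operand.
        try std.testing.expectEqual(@as(u32, 0x5566_7788), @as(u32, @truncate(u)));
        try std.testing.expectEqual(@as(u32, 0x1122_3344), @as(u32, @truncate(u >> 32)));
    }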
@@ -238,7 +238,7 @@ pub fn chdir(path: [*:0]const u8) usize {
 }
 
 pub fn fchdir(fd: fd_t) usize {
-    return syscall1(.fchdir, @bitCast(usize, @as(isize, fd)));
+    return syscall1(.fchdir, @as(usize, @bitCast(@as(isize, fd))));
 }
 
 pub fn chroot(path: [*:0]const u8) usize {
@@ -273,7 +273,7 @@ pub fn futimens(fd: i32, times: *const [2]timespec) usize {
 }
 
 pub fn utimensat(dirfd: i32, path: ?[*:0]const u8, times: *const [2]timespec, flags: u32) usize {
-    return syscall4(.utimensat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(times), flags);
+    return syscall4(.utimensat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(times), flags);
 }
 
 pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize {
@@ -282,8 +282,8 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize {
         const length_halves = splitValue64(length);
         return syscall6(
             .fallocate,
-            @bitCast(usize, @as(isize, fd)),
-            @bitCast(usize, @as(isize, mode)),
+            @as(usize, @bitCast(@as(isize, fd))),
+            @as(usize, @bitCast(@as(isize, mode))),
             offset_halves[0],
             offset_halves[1],
             length_halves[0],
@@ -292,20 +292,20 @@ pub fn fallocate(fd: i32, mode: i32, offset: i64, length: i64) usize {
     } else {
         return syscall4(
             .fallocate,
-            @bitCast(usize, @as(isize, fd)),
-            @bitCast(usize, @as(isize, mode)),
-            @bitCast(u64, offset),
-            @bitCast(u64, length),
+            @as(usize, @bitCast(@as(isize, fd))),
+            @as(usize, @bitCast(@as(isize, mode))),
+            @as(u64, @bitCast(offset)),
+            @as(u64, @bitCast(length)),
         );
     }
 }
 
 pub fn futex_wait(uaddr: *const i32, futex_op: u32, val: i32, timeout: ?*const timespec) usize {
-    return syscall4(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val), @intFromPtr(timeout));
+    return syscall4(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val)), @intFromPtr(timeout));
 }
 
 pub fn futex_wake(uaddr: *const i32, futex_op: u32, val: i32) usize {
-    return syscall3(.futex, @intFromPtr(uaddr), futex_op, @bitCast(u32, val));
+    return syscall3(.futex, @intFromPtr(uaddr), futex_op, @as(u32, @bitCast(val)));
 }
 
 pub fn getcwd(buf: [*]u8, size: usize) usize {
@@ -315,7 +315,7 @@ pub fn getcwd(buf: [*]u8, size: usize) usize {
 pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize {
     return syscall3(
         .getdents,
-        @bitCast(usize, @as(isize, fd)),
+        @as(usize, @bitCast(@as(isize, fd))),
         @intFromPtr(dirp),
         @min(len, maxInt(c_int)),
     );
@@ -324,7 +324,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize {
 pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize {
     return syscall3(
         .getdents64,
-        @bitCast(usize, @as(isize, fd)),
+        @as(usize, @bitCast(@as(isize, fd))),
         @intFromPtr(dirp),
         @min(len, maxInt(c_int)),
     );
@@ -335,35 +335,35 @@ pub fn inotify_init1(flags: u32) usize {
 }
 
 pub fn inotify_add_watch(fd: i32, pathname: [*:0]const u8, mask: u32) usize {
-    return syscall3(.inotify_add_watch, @bitCast(usize, @as(isize, fd)), @intFromPtr(pathname), mask);
+    return syscall3(.inotify_add_watch, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(pathname), mask);
 }
 
 pub fn inotify_rm_watch(fd: i32, wd: i32) usize {
-    return syscall2(.inotify_rm_watch, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, wd)));
+    return syscall2(.inotify_rm_watch, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, wd))));
 }
 
 pub fn readlink(noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
     if (@hasField(SYS, "readlink")) {
         return syscall3(.readlink, @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
     } else {
-        return syscall4(.readlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
+        return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
     }
 }
 
 pub fn readlinkat(dirfd: i32, noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
-    return syscall4(.readlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
+    return syscall4(.readlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(buf_ptr), buf_len);
 }
 
 pub fn mkdir(path: [*:0]const u8, mode: u32) usize {
     if (@hasField(SYS, "mkdir")) {
         return syscall2(.mkdir, @intFromPtr(path), mode);
     } else {
-        return syscall3(.mkdirat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode);
+        return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode);
     }
 }
 
 pub fn mkdirat(dirfd: i32, path: [*:0]const u8, mode: u32) usize {
-    return syscall3(.mkdirat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode);
+    return syscall3(.mkdirat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode);
 }
 
 pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize {
@@ -375,7 +375,7 @@ pub fn mknod(path: [*:0]const u8, mode: u32, dev: u32) usize {
 }
 
 pub fn mknodat(dirfd: i32, path: [*:0]const u8, mode: u32, dev: u32) usize {
-    return syscall4(.mknodat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, dev);
+    return syscall4(.mknodat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, dev);
 }
 
 pub fn mount(special: [*:0]const u8, dir: [*:0]const u8, fstype: ?[*:0]const u8, flags: u32, data: usize) usize {
@@ -394,7 +394,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of
     if (@hasField(SYS, "mmap2")) {
         // Make sure the offset is also specified in multiples of page size
         if ((offset & (MMAP2_UNIT - 1)) != 0)
-            return @bitCast(usize, -@as(isize, @intFromEnum(E.INVAL)));
+            return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL))));
 
         return syscall6(
             .mmap2,
@@ -402,8 +402,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of
             length,
             prot,
             flags,
-            @bitCast(usize, @as(isize, fd)),
-            @truncate(usize, @bitCast(u64, offset) / MMAP2_UNIT),
+            @as(usize, @bitCast(@as(isize, fd))),
+            @as(usize, @truncate(@as(u64, @bitCast(offset)) / MMAP2_UNIT)),
         );
     } else {
         return syscall6(
@@ -412,8 +412,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, of
             length,
             prot,
             flags,
-            @bitCast(usize, @as(isize, fd)),
-            @bitCast(u64, offset),
+            @as(usize, @bitCast(@as(isize, fd))),
+            @as(u64, @bitCast(offset)),
         );
     }
 }
@@ -429,7 +429,7 @@ pub const MSF = struct {
 };
 
 pub fn msync(address: [*]const u8, length: usize, flags: i32) usize {
-    return syscall3(.msync, @intFromPtr(address), length, @bitCast(u32, flags));
+    return syscall3(.msync, @intFromPtr(address), length, @as(u32, @bitCast(flags)));
 }
 
 pub fn munmap(address: [*]const u8, length: usize) usize {
@@ -438,7 +438,7 @@ pub fn munmap(address: [*]const u8, length: usize) usize {
 
 pub fn poll(fds: [*]pollfd, n: nfds_t, timeout: i32) usize {
     if (@hasField(SYS, "poll")) {
-        return syscall3(.poll, @intFromPtr(fds), n, @bitCast(u32, timeout));
+        return syscall3(.poll, @intFromPtr(fds), n, @as(u32, @bitCast(timeout)));
     } else {
         return syscall5(
             .ppoll,
@@ -462,69 +462,69 @@ pub fn ppoll(fds: [*]pollfd, n: nfds_t, timeout: ?*timespec, sigmask: ?*const si
 }
 
 pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
-    return syscall3(.read, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count);
+    return syscall3(.read, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count);
 }
 
 pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: i64) usize {
-    const offset_u = @bitCast(u64, offset);
+    const offset_u = @as(u64, @bitCast(offset));
     return syscall5(
         .preadv,
-        @bitCast(usize, @as(isize, fd)),
+        @as(usize, @bitCast(@as(isize, fd))),
         @intFromPtr(iov),
         count,
         // Kernel expects the offset is split into largest natural word-size.
         // See following link for detail:
         // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=601cc11d054ae4b5e9b5babec3d8e4667a2cb9b5
-        @truncate(usize, offset_u),
-        if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+        @as(usize, @truncate(offset_u)),
+        if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
     );
 }
 
 pub fn preadv2(fd: i32, iov: [*]const iovec, count: usize, offset: i64, flags: kernel_rwf) usize {
-    const offset_u = @bitCast(u64, offset);
+    const offset_u = @as(u64, @bitCast(offset));
     return syscall6(
         .preadv2,
-        @bitCast(usize, @as(isize, fd)),
+        @as(usize, @bitCast(@as(isize, fd))),
         @intFromPtr(iov),
         count,
         // See comments in preadv
-        @truncate(usize, offset_u),
-        if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+        @as(usize, @truncate(offset_u)),
+        if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
         flags,
     );
 }
 
 pub fn readv(fd: i32, iov: [*]const iovec, count: usize) usize {
-    return syscall3(.readv, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count);
+    return syscall3(.readv, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count);
 }
 
 pub fn writev(fd: i32, iov: [*]const iovec_const, count: usize) usize {
-    return syscall3(.writev, @bitCast(usize, @as(isize, fd)), @intFromPtr(iov), count);
+    return syscall3(.writev, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(iov), count);
 }
 
 pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64) usize {
-    const offset_u = @bitCast(u64, offset);
+    const offset_u = @as(u64, @bitCast(offset));
     return syscall5(
         .pwritev,
-        @bitCast(usize, @as(isize, fd)),
+        @as(usize, @bitCast(@as(isize, fd))),
         @intFromPtr(iov),
         count,
         // See comments in preadv
-        @truncate(usize, offset_u),
-        if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+        @as(usize, @truncate(offset_u)),
+        if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
     );
 }
 
 pub fn pwritev2(fd: i32, iov: [*]const iovec_const, count: usize, offset: i64, flags: kernel_rwf) usize {
-    const offset_u = @bitCast(u64, offset);
+    const offset_u = @as(u64, @bitCast(offset));
     return syscall6(
         .pwritev2,
-        @bitCast(usize, @as(isize, fd)),
+        @as(usize, @bitCast(@as(isize, fd))),
         @intFromPtr(iov),
         count,
         // See comments in preadv
-        @truncate(usize, offset_u),
-        if (usize_bits < 64) @truncate(usize, offset_u >> 32) else 0,
+        @as(usize, @truncate(offset_u)),
+        if (usize_bits < 64) @as(usize, @truncate(offset_u >> 32)) else 0,
         flags,
     );
 }
@@ -533,7 +533,7 @@ pub fn rmdir(path: [*:0]const u8) usize {
     if (@hasField(SYS, "rmdir")) {
         return syscall1(.rmdir, @intFromPtr(path));
     } else {
-        return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), AT.REMOVEDIR);
+        return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), AT.REMOVEDIR);
     }
 }
 
@@ -541,12 +541,12 @@ pub fn symlink(existing: [*:0]const u8, new: [*:0]const u8) usize {
     if (@hasField(SYS, "symlink")) {
         return syscall2(.symlink, @intFromPtr(existing), @intFromPtr(new));
     } else {
-        return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new));
+        return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new));
     }
 }
 
 pub fn symlinkat(existing: [*:0]const u8, newfd: i32, newpath: [*:0]const u8) usize {
-    return syscall3(.symlinkat, @intFromPtr(existing), @bitCast(usize, @as(isize, newfd)), @intFromPtr(newpath));
+    return syscall3(.symlinkat, @intFromPtr(existing), @as(usize, @bitCast(@as(isize, newfd))), @intFromPtr(newpath));
 }
 
 pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
@@ -555,7 +555,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
         if (require_aligned_register_pair) {
             return syscall6(
                 .pread64,
-                @bitCast(usize, @as(isize, fd)),
+                @as(usize, @bitCast(@as(isize, fd))),
                 @intFromPtr(buf),
                 count,
                 0,
@@ -565,7 +565,7 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
         } else {
             return syscall5(
                 .pread64,
-                @bitCast(usize, @as(isize, fd)),
+                @as(usize, @bitCast(@as(isize, fd))),
                 @intFromPtr(buf),
                 count,
                 offset_halves[0],
@@ -580,10 +580,10 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: i64) usize {
             .pread;
         return syscall4(
             syscall_number,
-            @bitCast(usize, @as(isize, fd)),
+            @as(usize, @bitCast(@as(isize, fd))),
             @intFromPtr(buf),
             count,
-            @bitCast(u64, offset),
+            @as(u64, @bitCast(offset)),
         );
     }
 }
@@ -592,12 +592,12 @@ pub fn access(path: [*:0]const u8, mode: u32) usize {
     if (@hasField(SYS, "access")) {
         return syscall2(.access, @intFromPtr(path), mode);
     } else {
-        return syscall4(.faccessat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), mode, 0);
+        return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), mode, 0);
     }
 }
 
 pub fn faccessat(dirfd: i32, path: [*:0]const u8, mode: u32, flags: u32) usize {
-    return syscall4(.faccessat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), mode, flags);
+    return syscall4(.faccessat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), mode, flags);
 }
 
 pub fn pipe(fd: *[2]i32) usize {
@@ -615,7 +615,7 @@ pub fn pipe2(fd: *[2]i32, flags: u32) usize {
 }
 
 pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
-    return syscall3(.write, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), count);
+    return syscall3(.write, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), count);
 }
 
 pub fn ftruncate(fd: i32, length: i64) usize {
@@ -624,7 +624,7 @@ pub fn ftruncate(fd: i32, length: i64) usize {
         if (require_aligned_register_pair) {
             return syscall4(
                 .ftruncate64,
-                @bitCast(usize, @as(isize, fd)),
+                @as(usize, @bitCast(@as(isize, fd))),
                 0,
                 length_halves[0],
                 length_halves[1],
@@ -632,7 +632,7 @@ pub fn ftruncate(fd: i32, length: i64) usize {
         } else {
             return syscall3(
                 .ftruncate64,
-                @bitCast(usize, @as(isize, fd)),
+                @as(usize, @bitCast(@as(isize, fd))),
                 length_halves[0],
                 length_halves[1],
             );
@@ -640,8 +640,8 @@ pub fn ftruncate(fd: i32, length: i64) usize {
     } else {
         return syscall2(
             .ftruncate,
-            @bitCast(usize, @as(isize, fd)),
-            @bitCast(usize, length),
+            @as(usize, @bitCast(@as(isize, fd))),
+            @as(usize, @bitCast(length)),
         );
     }
 }
@@ -653,7 +653,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize {
         if (require_aligned_register_pair) {
             return syscall6(
                 .pwrite64,
-                @bitCast(usize, @as(isize, fd)),
+                @as(usize, @bitCast(@as(isize, fd))),
                 @intFromPtr(buf),
                 count,
                 0,
@@ -663,7 +663,7 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize {
         } else {
             return syscall5(
                 .pwrite64,
-                @bitCast(usize, @as(isize, fd)),
+                @as(usize, @bitCast(@as(isize, fd))),
                 @intFromPtr(buf),
                 count,
                 offset_halves[0],
@@ -678,10 +678,10 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: i64) usize {
             .pwrite;
         return syscall4(
             syscall_number,
-            @bitCast(usize, @as(isize, fd)),
+            @as(usize, @bitCast(@as(isize, fd))),
             @intFromPtr(buf),
             count,
-            @bitCast(u64, offset),
+            @as(u64, @bitCast(offset)),
         );
     }
 }
@@ -690,9 +690,9 @@ pub fn rename(old: [*:0]const u8, new: [*:0]const u8) usize {
     if (@hasField(SYS, "rename")) {
         return syscall2(.rename, @intFromPtr(old), @intFromPtr(new));
     } else if (@hasField(SYS, "renameat")) {
-        return syscall4(.renameat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new));
+        return syscall4(.renameat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new));
     } else {
-        return syscall5(.renameat2, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(old), @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(new), 0);
+        return syscall5(.renameat2, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(old), @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(new), 0);
     }
 }
 
@@ -700,17 +700,17 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const
     if (@hasField(SYS, "renameat")) {
         return syscall4(
             .renameat,
-            @bitCast(usize, @as(isize, oldfd)),
+            @as(usize, @bitCast(@as(isize, oldfd))),
             @intFromPtr(oldpath),
-            @bitCast(usize, @as(isize, newfd)),
+            @as(usize, @bitCast(@as(isize, newfd))),
             @intFromPtr(newpath),
         );
     } else {
         return syscall5(
             .renameat2,
-            @bitCast(usize, @as(isize, oldfd)),
+            @as(usize, @bitCast(@as(isize, oldfd))),
             @intFromPtr(oldpath),
-            @bitCast(usize, @as(isize, newfd)),
+            @as(usize, @bitCast(@as(isize, newfd))),
             @intFromPtr(newpath),
             0,
         );
@@ -720,9 +720,9 @@ pub fn renameat(oldfd: i32, oldpath: [*]const u8, newfd: i32, newpath: [*]const
 pub fn renameat2(oldfd: i32, oldpath: [*:0]const u8, newfd: i32, newpath: [*:0]const u8, flags: u32) usize {
     return syscall5(
         .renameat2,
-        @bitCast(usize, @as(isize, oldfd)),
+        @as(usize, @bitCast(@as(isize, oldfd))),
         @intFromPtr(oldpath),
-        @bitCast(usize, @as(isize, newfd)),
+        @as(usize, @bitCast(@as(isize, newfd))),
         @intFromPtr(newpath),
         flags,
     );
@@ -734,7 +734,7 @@ pub fn open(path: [*:0]const u8, flags: u32, perm: mode_t) usize {
     } else {
         return syscall4(
             .openat,
-            @bitCast(usize, @as(isize, AT.FDCWD)),
+            @as(usize, @bitCast(@as(isize, AT.FDCWD))),
             @intFromPtr(path),
             flags,
             perm,
@@ -748,7 +748,7 @@ pub fn create(path: [*:0]const u8, perm: mode_t) usize {
 
 pub fn openat(dirfd: i32, path: [*:0]const u8, flags: u32, mode: mode_t) usize {
     // dirfd could be negative, for example AT.FDCWD is -100
-    return syscall4(.openat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags, mode);
+    return syscall4(.openat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags, mode);
 }
 
 /// See also `clone` (from the arch-specific include)
@@ -762,11 +762,11 @@ pub fn clone2(flags: u32, child_stack_ptr: usize) usize {
 }
 
 pub fn close(fd: i32) usize {
-    return syscall1(.close, @bitCast(usize, @as(isize, fd)));
+    return syscall1(.close, @as(usize, @bitCast(@as(isize, fd))));
 }
 
 pub fn fchmod(fd: i32, mode: mode_t) usize {
-    return syscall2(.fchmod, @bitCast(usize, @as(isize, fd)), mode);
+    return syscall2(.fchmod, @as(usize, @bitCast(@as(isize, fd))), mode);
 }
 
 pub fn chmod(path: [*:0]const u8, mode: mode_t) usize {
@@ -775,7 +775,7 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize {
     } else {
         return syscall4(
             .fchmodat,
-            @bitCast(usize, @as(isize, AT.FDCWD)),
+            @as(usize, @bitCast(@as(isize, AT.FDCWD))),
             @intFromPtr(path),
             mode,
             0,
@@ -785,14 +785,14 @@ pub fn chmod(path: [*:0]const u8, mode: mode_t) usize {
 
 pub fn fchown(fd: i32, owner: uid_t, group: gid_t) usize {
     if (@hasField(SYS, "fchown32")) {
-        return syscall3(.fchown32, @bitCast(usize, @as(isize, fd)), owner, group);
+        return syscall3(.fchown32, @as(usize, @bitCast(@as(isize, fd))), owner, group);
     } else {
-        return syscall3(.fchown, @bitCast(usize, @as(isize, fd)), owner, group);
+        return syscall3(.fchown, @as(usize, @bitCast(@as(isize, fd))), owner, group);
     }
 }
 
 pub fn fchmodat(fd: i32, path: [*:0]const u8, mode: mode_t, flags: u32) usize {
-    return syscall4(.fchmodat, @bitCast(usize, @as(isize, fd)), @intFromPtr(path), mode, flags);
+    return syscall4(.fchmodat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(path), mode, flags);
 }
 
 /// Can only be called on 32 bit systems. For 64 bit see `lseek`.
@@ -801,9 +801,9 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize {
     // endianness.
     return syscall5(
         ._llseek,
-        @bitCast(usize, @as(isize, fd)),
-        @truncate(usize, offset >> 32),
-        @truncate(usize, offset),
+        @as(usize, @bitCast(@as(isize, fd))),
+        @as(usize, @truncate(offset >> 32)),
+        @as(usize, @truncate(offset)),
         @intFromPtr(result),
         whence,
     );
@@ -811,16 +811,16 @@ pub fn llseek(fd: i32, offset: u64, result: ?*u64, whence: usize) usize {
 
 /// Can only be called on 64 bit systems. For 32 bit see `llseek`.
 pub fn lseek(fd: i32, offset: i64, whence: usize) usize {
-    return syscall3(.lseek, @bitCast(usize, @as(isize, fd)), @bitCast(usize, offset), whence);
+    return syscall3(.lseek, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(offset)), whence);
 }
 
 pub fn exit(status: i32) noreturn {
-    _ = syscall1(.exit, @bitCast(usize, @as(isize, status)));
+    _ = syscall1(.exit, @as(usize, @bitCast(@as(isize, status))));
     unreachable;
 }
 
 pub fn exit_group(status: i32) noreturn {
-    _ = syscall1(.exit_group, @bitCast(usize, @as(isize, status)));
+    _ = syscall1(.exit_group, @as(usize, @bitCast(@as(isize, status))));
     unreachable;
 }
 
@@ -886,15 +886,15 @@ pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize {
 }
 
 pub fn kill(pid: pid_t, sig: i32) usize {
-    return syscall2(.kill, @bitCast(usize, @as(isize, pid)), @bitCast(usize, @as(isize, sig)));
+    return syscall2(.kill, @as(usize, @bitCast(@as(isize, pid))), @as(usize, @bitCast(@as(isize, sig))));
 }
 
 pub fn tkill(tid: pid_t, sig: i32) usize {
-    return syscall2(.tkill, @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig)));
+    return syscall2(.tkill, @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig))));
 }
 
 pub fn tgkill(tgid: pid_t, tid: pid_t, sig: i32) usize {
-    return syscall3(.tgkill, @bitCast(usize, @as(isize, tgid)), @bitCast(usize, @as(isize, tid)), @bitCast(usize, @as(isize, sig)));
+    return syscall3(.tgkill, @as(usize, @bitCast(@as(isize, tgid))), @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig))));
 }
 
 pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize {
@@ -903,16 +903,16 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize {
             .link,
             @intFromPtr(oldpath),
             @intFromPtr(newpath),
-            @bitCast(usize, @as(isize, flags)),
+            @as(usize, @bitCast(@as(isize, flags))),
         );
     } else {
         return syscall5(
             .linkat,
-            @bitCast(usize, @as(isize, AT.FDCWD)),
+            @as(usize, @bitCast(@as(isize, AT.FDCWD))),
             @intFromPtr(oldpath),
-            @bitCast(usize, @as(isize, AT.FDCWD)),
+            @as(usize, @bitCast(@as(isize, AT.FDCWD))),
             @intFromPtr(newpath),
-            @bitCast(usize, @as(isize, flags)),
+            @as(usize, @bitCast(@as(isize, flags))),
         );
     }
 }
@@ -920,11 +920,11 @@ pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: i32) usize {
 pub fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: i32) usize {
     return syscall5(
         .linkat,
-        @bitCast(usize, @as(isize, oldfd)),
+        @as(usize, @bitCast(@as(isize, oldfd))),
         @intFromPtr(oldpath),
-        @bitCast(usize, @as(isize, newfd)),
+        @as(usize, @bitCast(@as(isize, newfd))),
         @intFromPtr(newpath),
-        @bitCast(usize, @as(isize, flags)),
+        @as(usize, @bitCast(@as(isize, flags))),
     );
 }
 
@@ -932,22 +932,22 @@ pub fn unlink(path: [*:0]const u8) usize {
     if (@hasField(SYS, "unlink")) {
         return syscall1(.unlink, @intFromPtr(path));
     } else {
-        return syscall3(.unlinkat, @bitCast(usize, @as(isize, AT.FDCWD)), @intFromPtr(path), 0);
+        return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, AT.FDCWD))), @intFromPtr(path), 0);
     }
 }
 
 pub fn unlinkat(dirfd: i32, path: [*:0]const u8, flags: u32) usize {
-    return syscall3(.unlinkat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), flags);
+    return syscall3(.unlinkat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), flags);
 }
 
 pub fn waitpid(pid: pid_t, status: *u32, flags: u32) usize {
-    return syscall4(.wait4, @bitCast(usize, @as(isize, pid)), @intFromPtr(status), flags, 0);
+    return syscall4(.wait4, @as(usize, @bitCast(@as(isize, pid))), @intFromPtr(status), flags, 0);
 }
 
 pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize {
     return syscall4(
         .wait4,
-        @bitCast(usize, @as(isize, pid)),
+        @as(usize, @bitCast(@as(isize, pid))),
         @intFromPtr(status),
         flags,
         @intFromPtr(usage),
@@ -955,18 +955,18 @@ pub fn wait4(pid: pid_t, status: *u32, flags: u32, usage: ?*rusage) usize {
 }
 
 pub fn waitid(id_type: P, id: i32, infop: *siginfo_t, flags: u32) usize {
-    return syscall5(.waitid, @intFromEnum(id_type), @bitCast(usize, @as(isize, id)), @intFromPtr(infop), flags, 0);
+    return syscall5(.waitid, @intFromEnum(id_type), @as(usize, @bitCast(@as(isize, id))), @intFromPtr(infop), flags, 0);
 }
 
 pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) usize {
-    return syscall3(.fcntl, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, cmd)), arg);
+    return syscall3(.fcntl, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, cmd))), arg);
 }
 
 pub fn flock(fd: fd_t, operation: i32) usize {
-    return syscall2(.flock, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, operation)));
+    return syscall2(.flock, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, operation))));
 }
 
-var vdso_clock_gettime = @ptrCast(?*const anyopaque, &init_vdso_clock_gettime);
+var vdso_clock_gettime = @as(?*const anyopaque, @ptrCast(&init_vdso_clock_gettime));
 
 // We must follow the C calling convention when we call into the VDSO
 const vdso_clock_gettime_ty = *align(1) const fn (i32, *timespec) callconv(.C) usize;
@@ -975,36 +975,36 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
     if (@hasDecl(VDSO, "CGT_SYM")) {
         const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .Unordered);
         if (ptr) |fn_ptr| {
-            const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
+            const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
             const rc = f(clk_id, tp);
             switch (rc) {
-                0, @bitCast(usize, -@as(isize, @intFromEnum(E.INVAL))) => return rc,
+                0, @as(usize, @bitCast(-@as(isize, @intFromEnum(E.INVAL)))) => return rc,
                 else => {},
             }
         }
     }
-    return syscall2(.clock_gettime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp));
+    return syscall2(.clock_gettime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp));
 }
 
 fn init_vdso_clock_gettime(clk: i32, ts: *timespec) callconv(.C) usize {
-    const ptr = @ptrFromInt(?*const anyopaque, vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM));
+    const ptr = @as(?*const anyopaque, @ptrFromInt(vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM)));
     // Note that we may not have a VDSO at all, update the stub address anyway
     // so that clock_gettime will fall back on the good old (and slow) syscall
     @atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .Monotonic);
     // Call into the VDSO if available
     if (ptr) |fn_ptr| {
-        const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
+        const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
         return f(clk, ts);
     }
-    return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS)));
+    return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS))));
 }
 
 pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
-    return syscall2(.clock_getres, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp));
+    return syscall2(.clock_getres, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp));
 }
 
 pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
-    return syscall2(.clock_settime, @bitCast(usize, @as(isize, clk_id)), @intFromPtr(tp));
+    return syscall2(.clock_settime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp));
 }
 
 pub fn gettimeofday(tv: *timeval, tz: *timezone) usize {
@@ -1053,33 +1053,33 @@ pub fn setregid(rgid: gid_t, egid: gid_t) usize {
 
 pub fn getuid() uid_t {
     if (@hasField(SYS, "getuid32")) {
-        return @intCast(uid_t, syscall0(.getuid32));
+        return @as(uid_t, @intCast(syscall0(.getuid32)));
     } else {
-        return @intCast(uid_t, syscall0(.getuid));
+        return @as(uid_t, @intCast(syscall0(.getuid)));
     }
 }
 
 pub fn getgid() gid_t {
     if (@hasField(SYS, "getgid32")) {
-        return @intCast(gid_t, syscall0(.getgid32));
+        return @as(gid_t, @intCast(syscall0(.getgid32)));
     } else {
-        return @intCast(gid_t, syscall0(.getgid));
+        return @as(gid_t, @intCast(syscall0(.getgid)));
     }
 }
 
 pub fn geteuid() uid_t {
     if (@hasField(SYS, "geteuid32")) {
-        return @intCast(uid_t, syscall0(.geteuid32));
+        return @as(uid_t, @intCast(syscall0(.geteuid32)));
     } else {
-        return @intCast(uid_t, syscall0(.geteuid));
+        return @as(uid_t, @intCast(syscall0(.geteuid)));
     }
 }
 
 pub fn getegid() gid_t {
     if (@hasField(SYS, "getegid32")) {
-        return @intCast(gid_t, syscall0(.getegid32));
+        return @as(gid_t, @intCast(syscall0(.getegid32)));
     } else {
-        return @intCast(gid_t, syscall0(.getegid));
+        return @as(gid_t, @intCast(syscall0(.getegid)));
     }
 }
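Where the value is expected to fit, the id getters use `@intCast` rather than `@truncate`; in safe build modes the cast asserts that the operand is representable in the result type. A minimal sketch (not part of this commit):

    const std = @import("std");

    test "@intCast narrows with a safety check" {
        const raw: usize = 1000; // a syscall return known to be in range
        // Unlike @truncate, @intCast checks (in safe builds) that the
        // value fits the result type taken from @as.
        try std.testing.expectEqual(@as(u32, 1000), @as(u32, @intCast(raw)));
    }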
 
@@ -1154,11 +1154,11 @@ pub fn setgroups(size: usize, list: [*]const gid_t) usize {
 }
 
 pub fn getpid() pid_t {
-    return @bitCast(pid_t, @truncate(u32, syscall0(.getpid)));
+    return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.getpid)))));
 }
 
 pub fn gettid() pid_t {
-    return @bitCast(pid_t, @truncate(u32, syscall0(.gettid)));
+    return @as(pid_t, @bitCast(@as(u32, @truncate(syscall0(.gettid)))));
 }
 
 pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*sigset_t) usize {
@@ -1182,9 +1182,9 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
             .handler = new.handler.handler,
             .flags = new.flags | SA.RESTORER,
             .mask = undefined,
-            .restorer = @ptrCast(k_sigaction_funcs.restorer, restorer_fn),
+            .restorer = @as(k_sigaction_funcs.restorer, @ptrCast(restorer_fn)),
         };
-        @memcpy(@ptrCast([*]u8, &ksa.mask)[0..mask_size], @ptrCast([*]const u8, &new.mask));
+        @memcpy(@as([*]u8, @ptrCast(&ksa.mask))[0..mask_size], @as([*]const u8, @ptrCast(&new.mask)));
     }
 
     const ksa_arg = if (act != null) @intFromPtr(&ksa) else 0;
@@ -1199,8 +1199,8 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
 
     if (oact) |old| {
         old.handler.handler = oldksa.handler;
-        old.flags = @truncate(c_uint, oldksa.flags);
-        @memcpy(@ptrCast([*]u8, &old.mask)[0..mask_size], @ptrCast([*]const u8, &oldksa.mask));
+        old.flags = @as(c_uint, @truncate(oldksa.flags));
+        @memcpy(@as([*]u8, @ptrCast(&old.mask))[0..mask_size], @as([*]const u8, @ptrCast(&oldksa.mask)));
     }
 
     return 0;
@@ -1211,28 +1211,28 @@ const usize_bits = @typeInfo(usize).Int.bits;
 pub fn sigaddset(set: *sigset_t, sig: u6) void {
     const s = sig - 1;
     // shift in musl: s&8*sizeof *set->__bits-1
-    const shift = @intCast(u5, s & (usize_bits - 1));
-    const val = @intCast(u32, 1) << shift;
-    (set.*)[@intCast(usize, s) / usize_bits] |= val;
+    const shift = @as(u5, @intCast(s & (usize_bits - 1)));
+    const val = @as(u32, @intCast(1)) << shift;
+    (set.*)[@as(usize, @intCast(s)) / usize_bits] |= val;
 }
 
 pub fn sigismember(set: *const sigset_t, sig: u6) bool {
     const s = sig - 1;
-    return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0;
+    return ((set.*)[@as(usize, @intCast(s)) / usize_bits] & (@as(usize, @intCast(1)) << (s & (usize_bits - 1)))) != 0;
 }
 
 pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.getsockname, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) });
+        return socketcall(SC.getsockname, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) });
     }
-    return syscall3(.getsockname, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len));
+    return syscall3(.getsockname, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len));
 }
 
 pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.getpeername, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len) });
+        return socketcall(SC.getpeername, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len) });
     }
-    return syscall3(.getpeername, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len));
+    return syscall3(.getpeername, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len));
 }
 
 pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
@@ -1244,20 +1244,20 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
 
 pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: [*]const u8, optlen: socklen_t) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.setsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen) });
+        return socketcall(SC.setsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen)) });
     }
-    return syscall5(.setsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intCast(usize, optlen));
+    return syscall5(.setsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @as(usize, @intCast(optlen)));
 }
 
 pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.getsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intFromPtr(optlen) });
+        return socketcall(SC.getsockopt, &[5]usize{ @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen) });
     }
-    return syscall5(.getsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @intFromPtr(optval), @intFromPtr(optlen));
+    return syscall5(.getsockopt, @as(usize, @bitCast(@as(isize, fd))), level, optname, @intFromPtr(optval), @intFromPtr(optlen));
 }
 
 pub fn sendmsg(fd: i32, msg: *const msghdr_const, flags: u32) usize {
-    const fd_usize = @bitCast(usize, @as(isize, fd));
+    const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
     const msg_usize = @intFromPtr(msg);
     if (native_arch == .x86) {
         return socketcall(SC.sendmsg, &[3]usize{ fd_usize, msg_usize, flags });
@@ -1275,13 +1275,13 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
         var next_unsent: usize = 0;
         for (msgvec[0..kvlen], 0..) |*msg, i| {
             var size: i32 = 0;
-            const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
+            const msg_iovlen = @as(usize, @intCast(msg.msg_hdr.msg_iovlen)); // kernel side this is treated as unsigned
             for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
-                if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @intCast(i32, iov.iov_len))[1] != 0) {
+                if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(size, @as(i32, @intCast(iov.iov_len)))[1] != 0) {
                     // batch-send all messages up to the current message
                     if (next_unsent < i) {
                         const batch_size = i - next_unsent;
-                        const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
+                        const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
                         if (getErrno(r) != 0) return next_unsent;
                         if (r < batch_size) return next_unsent + r;
                     }
@@ -1289,7 +1289,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
                     const r = sendmsg(fd, &msg.msg_hdr, flags);
                     if (getErrno(r) != 0) return r;
                     // Linux limits the total bytes sent by sendmsg to INT_MAX, so this cast is safe.
-                    msg.msg_len = @intCast(u32, r);
+                    msg.msg_len = @as(u32, @intCast(r));
                     next_unsent = i + 1;
                     break;
                 }
@@ -1297,17 +1297,17 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
         }
         if (next_unsent < kvlen or next_unsent == 0) { // want to make sure at least one syscall occurs (e.g. to trigger MSG.EOR)
             const batch_size = kvlen - next_unsent;
-            const r = syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
+            const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
             if (getErrno(r) != 0) return r;
             return next_unsent + r;
         }
         return kvlen;
     }
-    return syscall4(.sendmmsg, @bitCast(usize, @as(isize, fd)), @intFromPtr(msgvec), vlen, flags);
+    return syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(msgvec), vlen, flags);
 }
 
 pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize {
-    const fd_usize = @bitCast(usize, @as(isize, fd));
+    const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
     const addr_usize = @intFromPtr(addr);
     if (native_arch == .x86) {
         return socketcall(SC.connect, &[3]usize{ fd_usize, addr_usize, len });
@@ -1317,7 +1317,7 @@ pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize {
 }
 
 pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
-    const fd_usize = @bitCast(usize, @as(isize, fd));
+    const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
     const msg_usize = @intFromPtr(msg);
     if (native_arch == .x86) {
         return socketcall(SC.recvmsg, &[3]usize{ fd_usize, msg_usize, flags });
@@ -1334,7 +1334,7 @@ pub fn recvfrom(
     noalias addr: ?*sockaddr,
     noalias alen: ?*socklen_t,
 ) usize {
-    const fd_usize = @bitCast(usize, @as(isize, fd));
+    const fd_usize = @as(usize, @bitCast(@as(isize, fd)));
     const buf_usize = @intFromPtr(buf);
     const addr_usize = @intFromPtr(addr);
     const alen_usize = @intFromPtr(alen);
@@ -1347,46 +1347,46 @@ pub fn recvfrom(
 
 pub fn shutdown(fd: i32, how: i32) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.shutdown, &[2]usize{ @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)) });
+        return socketcall(SC.shutdown, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how))) });
     }
-    return syscall2(.shutdown, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)));
+    return syscall2(.shutdown, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, how))));
 }
 
 pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.bind, &[3]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len) });
+        return socketcall(SC.bind, &[3]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len)) });
     }
-    return syscall3(.bind, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intCast(usize, len));
+    return syscall3(.bind, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @as(usize, @intCast(len)));
 }
 
 pub fn listen(fd: i32, backlog: u32) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.listen, &[2]usize{ @bitCast(usize, @as(isize, fd)), backlog });
+        return socketcall(SC.listen, &[2]usize{ @as(usize, @bitCast(@as(isize, fd))), backlog });
     }
-    return syscall2(.listen, @bitCast(usize, @as(isize, fd)), backlog);
+    return syscall2(.listen, @as(usize, @bitCast(@as(isize, fd))), backlog);
 }
 
 pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.sendto, &[6]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen) });
+        return socketcall(SC.sendto, &[6]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), @as(usize, @intCast(alen)) });
     }
-    return syscall6(.sendto, @bitCast(usize, @as(isize, fd)), @intFromPtr(buf), len, flags, @intFromPtr(addr), @intCast(usize, alen));
+    return syscall6(.sendto, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(buf), len, flags, @intFromPtr(addr), @as(usize, @intCast(alen)));
 }
 
 pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize {
     if (@hasField(SYS, "sendfile64")) {
         return syscall4(
             .sendfile64,
-            @bitCast(usize, @as(isize, outfd)),
-            @bitCast(usize, @as(isize, infd)),
+            @as(usize, @bitCast(@as(isize, outfd))),
+            @as(usize, @bitCast(@as(isize, infd))),
             @intFromPtr(offset),
             count,
         );
     } else {
         return syscall4(
             .sendfile,
-            @bitCast(usize, @as(isize, outfd)),
-            @bitCast(usize, @as(isize, infd)),
+            @as(usize, @bitCast(@as(isize, outfd))),
+            @as(usize, @bitCast(@as(isize, infd))),
             @intFromPtr(offset),
             count,
         );
@@ -1395,9 +1395,9 @@ pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize {
 
 pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: *[2]i32) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.socketpair, &[4]usize{ @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd) });
+        return socketcall(SC.socketpair, &[4]usize{ @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd) });
     }
-    return syscall4(.socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @intFromPtr(fd));
+    return syscall4(.socketpair, @as(usize, @intCast(domain)), @as(usize, @intCast(socket_type)), @as(usize, @intCast(protocol)), @intFromPtr(fd));
 }
 
 pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize {
@@ -1409,16 +1409,16 @@ pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize
 
 pub fn accept4(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t, flags: u32) usize {
     if (native_arch == .x86) {
-        return socketcall(SC.accept4, &[4]usize{ @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags });
+        return socketcall(SC.accept4, &[4]usize{ @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags });
     }
-    return syscall4(.accept4, @bitCast(usize, @as(isize, fd)), @intFromPtr(addr), @intFromPtr(len), flags);
+    return syscall4(.accept4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(addr), @intFromPtr(len), flags);
 }
 
 pub fn fstat(fd: i32, stat_buf: *Stat) usize {
     if (@hasField(SYS, "fstat64")) {
-        return syscall2(.fstat64, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf));
+        return syscall2(.fstat64, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf));
     } else {
-        return syscall2(.fstat, @bitCast(usize, @as(isize, fd)), @intFromPtr(stat_buf));
+        return syscall2(.fstat, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(stat_buf));
     }
 }
 
@@ -1440,9 +1440,9 @@ pub fn lstat(pathname: [*:0]const u8, statbuf: *Stat) usize {
 
 pub fn fstatat(dirfd: i32, path: [*:0]const u8, stat_buf: *Stat, flags: u32) usize {
     if (@hasField(SYS, "fstatat64")) {
-        return syscall4(.fstatat64, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags);
+        return syscall4(.fstatat64, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags);
     } else {
-        return syscall4(.fstatat, @bitCast(usize, @as(isize, dirfd)), @intFromPtr(path), @intFromPtr(stat_buf), flags);
+        return syscall4(.fstatat, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(path), @intFromPtr(stat_buf), flags);
     }
 }
 
@@ -1450,14 +1450,14 @@ pub fn statx(dirfd: i32, path: [*]const u8, flags: u32, mask: u32, statx_buf: *S
     if (@hasField(SYS, "statx")) {
         return syscall5(
             .statx,
-            @bitCast(usize, @as(isize, dirfd)),
+            @as(usize, @bitCast(@as(isize, dirfd))),
             @intFromPtr(path),
             flags,
             mask,
             @intFromPtr(statx_buf),
         );
     }
-    return @bitCast(usize, -@as(isize, @intFromEnum(E.NOSYS)));
+    return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS))));
 }
 
 pub fn listxattr(path: [*:0]const u8, list: [*]u8, size: usize) usize {
@@ -1513,9 +1513,9 @@ pub fn sched_yield() usize {
 }
 
 pub fn sched_getaffinity(pid: pid_t, size: usize, set: *cpu_set_t) usize {
-    const rc = syscall3(.sched_getaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set));
-    if (@bitCast(isize, rc) < 0) return rc;
-    if (rc < size) @memset(@ptrCast([*]u8, set)[rc..size], 0);
+    const rc = syscall3(.sched_getaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set));
+    if (@as(isize, @bitCast(rc)) < 0) return rc;
+    if (rc < size) @memset(@as([*]u8, @ptrCast(set))[rc..size], 0);
     return 0;
 }
 
@@ -1526,18 +1526,18 @@ pub fn getcpu(cpu: *u32, node: *u32) usize {
 pub fn sched_getcpu() usize {
     var cpu: u32 = undefined;
     const rc = syscall3(.getcpu, @intFromPtr(&cpu), 0, 0);
-    if (@bitCast(isize, rc) < 0) return rc;
-    return @intCast(usize, cpu);
+    if (@as(isize, @bitCast(rc)) < 0) return rc;
+    return @as(usize, @intCast(cpu));
 }
 
 /// libc has no wrapper for this syscall
 pub fn mbind(addr: ?*anyopaque, len: u32, mode: i32, nodemask: *const u32, maxnode: u32, flags: u32) usize {
-    return syscall6(.mbind, @intFromPtr(addr), len, @bitCast(usize, @as(isize, mode)), @intFromPtr(nodemask), maxnode, flags);
+    return syscall6(.mbind, @intFromPtr(addr), len, @as(usize, @bitCast(@as(isize, mode))), @intFromPtr(nodemask), maxnode, flags);
 }
 
 pub fn sched_setaffinity(pid: pid_t, size: usize, set: *const cpu_set_t) usize {
-    const rc = syscall3(.sched_setaffinity, @bitCast(usize, @as(isize, pid)), size, @intFromPtr(set));
-    if (@bitCast(isize, rc) < 0) return rc;
+    const rc = syscall3(.sched_setaffinity, @as(usize, @bitCast(@as(isize, pid))), size, @intFromPtr(set));
+    if (@as(isize, @bitCast(rc)) < 0) return rc;
     return 0;
 }
 
@@ -1550,7 +1550,7 @@ pub fn epoll_create1(flags: usize) usize {
 }
 
 pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: ?*epoll_event) usize {
-    return syscall4(.epoll_ctl, @bitCast(usize, @as(isize, epoll_fd)), @intCast(usize, op), @bitCast(usize, @as(isize, fd)), @intFromPtr(ev));
+    return syscall4(.epoll_ctl, @as(usize, @bitCast(@as(isize, epoll_fd))), @as(usize, @intCast(op)), @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(ev));
 }
 
 pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32) usize {
@@ -1560,10 +1560,10 @@ pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout
 pub fn epoll_pwait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32, sigmask: ?*const sigset_t) usize {
     return syscall6(
         .epoll_pwait,
-        @bitCast(usize, @as(isize, epoll_fd)),
+        @as(usize, @bitCast(@as(isize, epoll_fd))),
         @intFromPtr(events),
-        @intCast(usize, maxevents),
-        @bitCast(usize, @as(isize, timeout)),
+        @as(usize, @intCast(maxevents)),
+        @as(usize, @bitCast(@as(isize, timeout))),
         @intFromPtr(sigmask),
         @sizeOf(sigset_t),
     );
@@ -1574,7 +1574,7 @@ pub fn eventfd(count: u32, flags: u32) usize {
 }
 
 pub fn timerfd_create(clockid: i32, flags: u32) usize {
-    return syscall2(.timerfd_create, @bitCast(usize, @as(isize, clockid)), flags);
+    return syscall2(.timerfd_create, @as(usize, @bitCast(@as(isize, clockid))), flags);
 }
 
 pub const itimerspec = extern struct {
@@ -1583,11 +1583,11 @@ pub const itimerspec = extern struct {
 };
 
 pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize {
-    return syscall2(.timerfd_gettime, @bitCast(usize, @as(isize, fd)), @intFromPtr(curr_value));
+    return syscall2(.timerfd_gettime, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(curr_value));
 }
 
 pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
-    return syscall4(.timerfd_settime, @bitCast(usize, @as(isize, fd)), flags, @intFromPtr(new_value), @intFromPtr(old_value));
+    return syscall4(.timerfd_settime, @as(usize, @bitCast(@as(isize, fd))), flags, @intFromPtr(new_value), @intFromPtr(old_value));
 }
 
 pub const sigevent = extern struct {
@@ -1609,8 +1609,8 @@ pub const timer_t = ?*anyopaque;
 
 pub fn timer_create(clockid: i32, sevp: *sigevent, timerid: *timer_t) usize {
     var t: timer_t = undefined;
-    const rc = syscall3(.timer_create, @bitCast(usize, @as(isize, clockid)), @intFromPtr(sevp), @intFromPtr(&t));
-    if (@bitCast(isize, rc) < 0) return rc;
+    const rc = syscall3(.timer_create, @as(usize, @bitCast(@as(isize, clockid))), @intFromPtr(sevp), @intFromPtr(&t));
+    if (@as(isize, @bitCast(rc)) < 0) return rc;
     timerid.* = t;
     return rc;
 }
@@ -1624,7 +1624,7 @@ pub fn timer_gettime(timerid: timer_t, curr_value: *itimerspec) usize {
 }
 
 pub fn timer_settime(timerid: timer_t, flags: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
-    return syscall4(.timer_settime, @intFromPtr(timerid), @bitCast(usize, @as(isize, flags)), @intFromPtr(new_value), @intFromPtr(old_value));
+    return syscall4(.timer_settime, @intFromPtr(timerid), @as(usize, @bitCast(@as(isize, flags))), @intFromPtr(new_value), @intFromPtr(old_value));
 }
 
 // Flags for the 'setitimer' system call
@@ -1635,11 +1635,11 @@ pub const ITIMER = enum(i32) {
 };
 
 pub fn getitimer(which: i32, curr_value: *itimerspec) usize {
-    return syscall2(.getitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(curr_value));
+    return syscall2(.getitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(curr_value));
 }
 
 pub fn setitimer(which: i32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
-    return syscall3(.setitimer, @bitCast(usize, @as(isize, which)), @intFromPtr(new_value), @intFromPtr(old_value));
+    return syscall3(.setitimer, @as(usize, @bitCast(@as(isize, which))), @intFromPtr(new_value), @intFromPtr(old_value));
 }
 
 pub fn unshare(flags: usize) usize {
@@ -1667,11 +1667,11 @@ pub fn io_uring_setup(entries: u32, p: *io_uring_params) usize {
 }
 
 pub fn io_uring_enter(fd: i32, to_submit: u32, min_complete: u32, flags: u32, sig: ?*sigset_t) usize {
-    return syscall6(.io_uring_enter, @bitCast(usize, @as(isize, fd)), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8);
+    return syscall6(.io_uring_enter, @as(usize, @bitCast(@as(isize, fd))), to_submit, min_complete, flags, @intFromPtr(sig), NSIG / 8);
 }
 
 pub fn io_uring_register(fd: i32, opcode: IORING_REGISTER, arg: ?*const anyopaque, nr_args: u32) usize {
-    return syscall4(.io_uring_register, @bitCast(usize, @as(isize, fd)), @intFromEnum(opcode), @intFromPtr(arg), nr_args);
+    return syscall4(.io_uring_register, @as(usize, @bitCast(@as(isize, fd))), @intFromEnum(opcode), @intFromPtr(arg), nr_args);
 }
 
 pub fn memfd_create(name: [*:0]const u8, flags: u32) usize {
@@ -1679,43 +1679,43 @@ pub fn memfd_create(name: [*:0]const u8, flags: u32) usize {
 }
 
 pub fn getrusage(who: i32, usage: *rusage) usize {
-    return syscall2(.getrusage, @bitCast(usize, @as(isize, who)), @intFromPtr(usage));
+    return syscall2(.getrusage, @as(usize, @bitCast(@as(isize, who))), @intFromPtr(usage));
 }
 
 pub fn tcgetattr(fd: fd_t, termios_p: *termios) usize {
-    return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CGETS, @intFromPtr(termios_p));
+    return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CGETS, @intFromPtr(termios_p));
 }
 
 pub fn tcsetattr(fd: fd_t, optional_action: TCSA, termios_p: *const termios) usize {
-    return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p));
+    return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSETS + @intFromEnum(optional_action), @intFromPtr(termios_p));
 }
 
 pub fn tcgetpgrp(fd: fd_t, pgrp: *pid_t) usize {
-    return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCGPGRP, @intFromPtr(pgrp));
+    return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCGPGRP, @intFromPtr(pgrp));
 }
 
 pub fn tcsetpgrp(fd: fd_t, pgrp: *const pid_t) usize {
-    return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.IOCSPGRP, @intFromPtr(pgrp));
+    return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.IOCSPGRP, @intFromPtr(pgrp));
 }
 
 pub fn tcdrain(fd: fd_t) usize {
-    return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), T.CSBRK, 1);
+    return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), T.CSBRK, 1);
 }
 
 pub fn ioctl(fd: fd_t, request: u32, arg: usize) usize {
-    return syscall3(.ioctl, @bitCast(usize, @as(isize, fd)), request, arg);
+    return syscall3(.ioctl, @as(usize, @bitCast(@as(isize, fd))), request, arg);
 }
 
 pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) usize {
-    return syscall4(.signalfd4, @bitCast(usize, @as(isize, fd)), @intFromPtr(mask), NSIG / 8, flags);
+    return syscall4(.signalfd4, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(mask), NSIG / 8, flags);
 }
 
 pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64, len: usize, flags: u32) usize {
     return syscall6(
         .copy_file_range,
-        @bitCast(usize, @as(isize, fd_in)),
+        @as(usize, @bitCast(@as(isize, fd_in))),
         @intFromPtr(off_in),
-        @bitCast(usize, @as(isize, fd_out)),
+        @as(usize, @bitCast(@as(isize, fd_out))),
         @intFromPtr(off_out),
         len,
         flags,
@@ -1731,19 +1731,19 @@ pub fn sync() void {
 }
 
 pub fn syncfs(fd: fd_t) usize {
-    return syscall1(.syncfs, @bitCast(usize, @as(isize, fd)));
+    return syscall1(.syncfs, @as(usize, @bitCast(@as(isize, fd))));
 }
 
 pub fn fsync(fd: fd_t) usize {
-    return syscall1(.fsync, @bitCast(usize, @as(isize, fd)));
+    return syscall1(.fsync, @as(usize, @bitCast(@as(isize, fd))));
 }
 
 pub fn fdatasync(fd: fd_t) usize {
-    return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd)));
+    return syscall1(.fdatasync, @as(usize, @bitCast(@as(isize, fd))));
 }
 
 pub fn prctl(option: i32, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
-    return syscall5(.prctl, @bitCast(usize, @as(isize, option)), arg2, arg3, arg4, arg5);
+    return syscall5(.prctl, @as(usize, @bitCast(@as(isize, option))), arg2, arg3, arg4, arg5);
 }
 
 pub fn getrlimit(resource: rlimit_resource, rlim: *rlimit) usize {
@@ -1759,8 +1759,8 @@ pub fn setrlimit(resource: rlimit_resource, rlim: *const rlimit) usize {
 pub fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: ?*const rlimit, old_limit: ?*rlimit) usize {
     return syscall4(
         .prlimit64,
-        @bitCast(usize, @as(isize, pid)),
-        @bitCast(usize, @as(isize, @intFromEnum(resource))),
+        @as(usize, @bitCast(@as(isize, pid))),
+        @as(usize, @bitCast(@as(isize, @intFromEnum(resource)))),
         @intFromPtr(new_limit),
         @intFromPtr(old_limit),
     );
@@ -1775,14 +1775,14 @@ pub fn madvise(address: [*]u8, len: usize, advice: u32) usize {
 }
 
 pub fn pidfd_open(pid: pid_t, flags: u32) usize {
-    return syscall2(.pidfd_open, @bitCast(usize, @as(isize, pid)), flags);
+    return syscall2(.pidfd_open, @as(usize, @bitCast(@as(isize, pid))), flags);
 }
 
 pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize {
     return syscall3(
         .pidfd_getfd,
-        @bitCast(usize, @as(isize, pidfd)),
-        @bitCast(usize, @as(isize, targetfd)),
+        @as(usize, @bitCast(@as(isize, pidfd))),
+        @as(usize, @bitCast(@as(isize, targetfd))),
         flags,
     );
 }
@@ -1790,8 +1790,8 @@ pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize {
 pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) usize {
     return syscall4(
         .pidfd_send_signal,
-        @bitCast(usize, @as(isize, pidfd)),
-        @bitCast(usize, @as(isize, sig)),
+        @as(usize, @bitCast(@as(isize, pidfd))),
+        @as(usize, @bitCast(@as(isize, sig))),
         @intFromPtr(info),
         flags,
     );
@@ -1800,7 +1800,7 @@ pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) u
 pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const, flags: usize) usize {
     return syscall6(
         .process_vm_readv,
-        @bitCast(usize, @as(isize, pid)),
+        @as(usize, @bitCast(@as(isize, pid))),
         @intFromPtr(local.ptr),
         local.len,
         @intFromPtr(remote.ptr),
@@ -1812,7 +1812,7 @@ pub fn process_vm_readv(pid: pid_t, local: []iovec, remote: []const iovec_const,
 pub fn process_vm_writev(pid: pid_t, local: []const iovec_const, remote: []const iovec_const, flags: usize) usize {
     return syscall6(
         .process_vm_writev,
-        @bitCast(usize, @as(isize, pid)),
+        @as(usize, @bitCast(@as(isize, pid))),
         @intFromPtr(local.ptr),
         local.len,
         @intFromPtr(remote.ptr),
@@ -1830,7 +1830,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
 
         return syscall7(
             .fadvise64,
-            @bitCast(usize, @as(isize, fd)),
+            @as(usize, @bitCast(@as(isize, fd))),
             0,
             offset_halves[0],
             offset_halves[1],
@@ -1846,7 +1846,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
 
         return syscall6(
             .fadvise64_64,
-            @bitCast(usize, @as(isize, fd)),
+            @as(usize, @bitCast(@as(isize, fd))),
             advice,
             offset_halves[0],
             offset_halves[1],
@@ -1862,7 +1862,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
 
         return syscall6(
             .fadvise64_64,
-            @bitCast(usize, @as(isize, fd)),
+            @as(usize, @bitCast(@as(isize, fd))),
             offset_halves[0],
             offset_halves[1],
             length_halves[0],
@@ -1872,9 +1872,9 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
     } else {
         return syscall4(
             .fadvise64,
-            @bitCast(usize, @as(isize, fd)),
-            @bitCast(usize, offset),
-            @bitCast(usize, len),
+            @as(usize, @bitCast(@as(isize, fd))),
+            @as(usize, @bitCast(offset)),
+            @as(usize, @bitCast(len)),
             advice,
         );
     }
@@ -1890,9 +1890,9 @@ pub fn perf_event_open(
     return syscall5(
         .perf_event_open,
         @intFromPtr(attr),
-        @bitCast(usize, @as(isize, pid)),
-        @bitCast(usize, @as(isize, cpu)),
-        @bitCast(usize, @as(isize, group_fd)),
+        @as(usize, @bitCast(@as(isize, pid))),
+        @as(usize, @bitCast(@as(isize, cpu))),
+        @as(usize, @bitCast(@as(isize, group_fd))),
         flags,
     );
 }
@@ -1911,7 +1911,7 @@ pub fn ptrace(
     return syscall5(
         .ptrace,
         req,
-        @bitCast(usize, @as(isize, pid)),
+        @as(usize, @bitCast(@as(isize, pid))),
         addr,
         data,
         addr2,
@@ -2057,7 +2057,7 @@ pub const W = struct {
     pub const NOWAIT = 0x1000000;
 
     pub fn EXITSTATUS(s: u32) u8 {
-        return @intCast(u8, (s & 0xff00) >> 8);
+        return @as(u8, @intCast((s & 0xff00) >> 8));
     }
     pub fn TERMSIG(s: u32) u32 {
         return s & 0x7f;
@@ -2069,7 +2069,7 @@ pub const W = struct {
         return TERMSIG(s) == 0;
     }
     pub fn IFSTOPPED(s: u32) bool {
-        return @truncate(u16, ((s & 0xffff) *% 0x10001) >> 8) > 0x7f00;
+        return @as(u16, @truncate(((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
     }
     pub fn IFSIGNALED(s: u32) bool {
         return (s & 0xffff) -% 1 < 0xff;
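
These wait-status helpers are pure bit manipulation; a minimal standalone sketch of the EXITSTATUS case, using a made-up status word, shows how the mask-shift-narrow chain reads under the new syntax:

    const std = @import("std");

    test "exit status extraction" {
        // The exit code occupies bits 8..15 of a wait status word.
        const status: u32 = 0x1700; // encodes exit code 23
        const code = @as(u8, @intCast((status & 0xff00) >> 8));
        try std.testing.expectEqual(@as(u8, 23), code);
    }
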
@@ -2154,9 +2154,9 @@ pub const SIG = if (is_mips) struct {
     pub const SYS = 31;
     pub const UNUSED = SIG.SYS;
 
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
 } else if (is_sparc) struct {
     pub const BLOCK = 1;
     pub const UNBLOCK = 2;
@@ -2198,9 +2198,9 @@ pub const SIG = if (is_mips) struct {
     pub const PWR = LOST;
     pub const IO = SIG.POLL;
 
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
 } else struct {
     pub const BLOCK = 0;
     pub const UNBLOCK = 1;
@@ -2241,9 +2241,9 @@ pub const SIG = if (is_mips) struct {
     pub const SYS = 31;
     pub const UNUSED = SIG.SYS;
 
-    pub const ERR = @ptrFromInt(?Sigaction.handler_fn, maxInt(usize));
-    pub const DFL = @ptrFromInt(?Sigaction.handler_fn, 0);
-    pub const IGN = @ptrFromInt(?Sigaction.handler_fn, 1);
+    pub const ERR = @as(?Sigaction.handler_fn, @ptrFromInt(maxInt(usize)));
+    pub const DFL = @as(?Sigaction.handler_fn, @ptrFromInt(0));
+    pub const IGN = @as(?Sigaction.handler_fn, @ptrFromInt(1));
 };
 
 pub const kernel_rwf = u32;
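
All three SIG variants build their ERR/DFL/IGN sentinels the same way. A minimal sketch, with a hypothetical handler type, of how small integers are smuggled into optional function pointers:

    const std = @import("std");

    const Handler = ?*const fn (i32) callconv(.C) void;

    // Address 0 becomes the null optional; 1 stays a distinguishable
    // non-null sentinel that is compared against but never called.
    const DFL = @as(Handler, @ptrFromInt(0));
    const IGN = @as(Handler, @ptrFromInt(1));

    test "sentinel handlers" {
        try std.testing.expect(DFL == null);
        try std.testing.expect(IGN != null);
    }
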
@@ -3541,7 +3541,7 @@ pub const CAP = struct {
     }
 
     pub fn TO_MASK(cap: u8) u32 {
-        return @as(u32, 1) << @intCast(u5, cap & 31);
+        return @as(u32, 1) << @as(u5, @intCast(cap & 31));
     }
 
     pub fn TO_INDEX(cap: u8) u8 {
@@ -3598,7 +3598,7 @@ pub const cpu_count_t = std.meta.Int(.unsigned, std.math.log2(CPU_SETSIZE * 8));
 
 fn cpu_mask(s: usize) cpu_count_t {
     var x = s & (CPU_SETSIZE * 8);
-    return @intCast(cpu_count_t, 1) << @intCast(u4, x);
+    return @as(cpu_count_t, @intCast(1)) << @as(u4, @intCast(x));
 }
 
 pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
@@ -3999,7 +3999,7 @@ pub const io_uring_cqe = extern struct {
 
     pub fn err(self: io_uring_cqe) E {
         if (self.res > -4096 and self.res < 0) {
-            return @enumFromInt(E, -self.res);
+            return @as(E, @enumFromInt(-self.res));
         }
         return .SUCCESS;
     }
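
Nearly every wrapper in this file uses the same two-step cast to pass an i32 file descriptor as a usize register argument. A minimal self-contained sketch of the idiom:

    const std = @import("std");

    test "fd to syscall register" {
        const fd: i32 = -1;
        // Widen with sign extension first, then reinterpret the bits, so
        // negative descriptors become all-ones rather than zero-extended.
        const arg = @as(usize, @bitCast(@as(isize, fd)));
        try std.testing.expectEqual(@as(usize, std.math.maxInt(usize)), arg);
    }
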
lib/std/os/plan9.zig
@@ -8,9 +8,9 @@ pub const syscall_bits = switch (builtin.cpu.arch) {
 pub const E = @import("plan9/errno.zig").E;
 /// Get the errno from a syscall return value, or 0 for no error.
 pub fn getErrno(r: usize) E {
-    const signed_r = @bitCast(isize, r);
+    const signed_r = @as(isize, @bitCast(r));
     const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0;
-    return @enumFromInt(E, int);
+    return @as(E, @enumFromInt(int));
 }
 pub const SIG = struct {
     /// hangup
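
getErrno above decodes the convention of returning a negated error number in the top 4096 values of usize. A minimal sketch with FakeE, a hypothetical stand-in for the real errno enum:

    const std = @import("std");

    const FakeE = enum(usize) { SUCCESS = 0, INVAL = 22 };

    fn decode(r: usize) FakeE {
        const signed_r = @as(isize, @bitCast(r));
        const int = if (signed_r > -4096 and signed_r < 0) -signed_r else 0;
        return @as(FakeE, @enumFromInt(int));
    }

    test "negated errno decoding" {
        const raw = @as(usize, @bitCast(@as(isize, -22)));
        try std.testing.expectEqual(FakeE.INVAL, decode(raw));
        try std.testing.expectEqual(FakeE.SUCCESS, decode(0));
    }
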
lib/std/os/test.zig
@@ -488,7 +488,7 @@ fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
 
         const reloc_addr = info.dlpi_addr + phdr.p_vaddr;
         // Find the ELF header
-        const elf_header = @ptrFromInt(*elf.Ehdr, reloc_addr - phdr.p_offset);
+        const elf_header = @as(*elf.Ehdr, @ptrFromInt(reloc_addr - phdr.p_offset));
         // Validate the magic
         if (!mem.eql(u8, elf_header.e_ident[0..4], elf.MAGIC)) return error.BadElfMagic;
         // Consistency check
@@ -751,7 +751,7 @@ test "getrlimit and setrlimit" {
     }
 
     inline for (std.meta.fields(os.rlimit_resource)) |field| {
-        const resource = @enumFromInt(os.rlimit_resource, field.value);
+        const resource = @as(os.rlimit_resource, @enumFromInt(field.value));
         const limit = try os.getrlimit(resource);
 
         // On 32 bit MIPS musl includes a fix which changes limits greater than -1UL/2 to RLIM_INFINITY.
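
The rewritten getrlimit test recovers each rlimit_resource tag from its integer field value; the same round trip, sketched with a stand-in enum:

    const std = @import("std");

    const Color = enum(u8) { red = 1, green = 2, blue = 4 };

    test "enum field round-trip" {
        inline for (std.meta.fields(Color)) |field| {
            const tag = @as(Color, @enumFromInt(field.value));
            try std.testing.expectEqual(field.value, @intFromEnum(tag));
        }
    }
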
lib/std/os/uefi.zig
@@ -143,7 +143,7 @@ pub const FileHandle = *opaque {};
 test "GUID formatting" {
     var bytes = [_]u8{ 137, 60, 203, 50, 128, 128, 124, 66, 186, 19, 80, 73, 135, 59, 194, 135 };
 
-    var guid = @bitCast(Guid, bytes);
+    var guid = @as(Guid, @bitCast(bytes));
 
     var str = try std.fmt.allocPrint(std.testing.allocator, "{}", .{guid});
     defer std.testing.allocator.free(str);
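
The GUID test reinterprets 16 raw bytes as a structure in a single @bitCast. A minimal sketch with a hypothetical two-field extern struct (field values shown for little-endian targets):

    const std = @import("std");
    const builtin = @import("builtin");

    const Pair = extern struct { a: u16, b: u16 };

    test "bytes to extern struct" {
        const bytes = [_]u8{ 0x01, 0x00, 0x02, 0x00 };
        const pair = @as(Pair, @bitCast(bytes));
        if (builtin.cpu.arch.endian() == .Little) {
            try std.testing.expectEqual(@as(u16, 1), pair.a);
            try std.testing.expectEqual(@as(u16, 2), pair.b);
        }
    }
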
lib/std/os/wasi.zig
@@ -103,13 +103,13 @@ pub const timespec = extern struct {
         const tv_sec: timestamp_t = tm / 1_000_000_000;
         const tv_nsec = tm - tv_sec * 1_000_000_000;
         return timespec{
-            .tv_sec = @intCast(time_t, tv_sec),
-            .tv_nsec = @intCast(isize, tv_nsec),
+            .tv_sec = @as(time_t, @intCast(tv_sec)),
+            .tv_nsec = @as(isize, @intCast(tv_nsec)),
         };
     }
 
     pub fn toTimestamp(ts: timespec) timestamp_t {
-        const tm = @intCast(timestamp_t, ts.tv_sec * 1_000_000_000) + @intCast(timestamp_t, ts.tv_nsec);
+        const tm = @as(timestamp_t, @intCast(ts.tv_sec * 1_000_000_000)) + @as(timestamp_t, @intCast(ts.tv_nsec));
         return tm;
     }
 };
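
fromTimestamp splits one nanosecond count into whole seconds plus the remainder before the narrowing casts; the arithmetic in isolation:

    const std = @import("std");

    test "timestamp split" {
        const tm: u64 = 2_500_000_000; // 2.5 seconds in nanoseconds
        const tv_sec = tm / 1_000_000_000;
        const tv_nsec = tm - tv_sec * 1_000_000_000;
        try std.testing.expectEqual(@as(u64, 2), tv_sec);
        try std.testing.expectEqual(@as(u64, 500_000_000), tv_nsec);
    }
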
lib/std/os/windows.zig
@@ -30,7 +30,7 @@ pub const gdi32 = @import("windows/gdi32.zig");
 pub const winmm = @import("windows/winmm.zig");
 pub const crypt32 = @import("windows/crypt32.zig");
 
-pub const self_process_handle = @ptrFromInt(HANDLE, maxInt(usize));
+pub const self_process_handle = @as(HANDLE, @ptrFromInt(maxInt(usize)));
 
 const Self = @This();
 
@@ -198,9 +198,9 @@ pub fn DeviceIoControl(
 
     var io: IO_STATUS_BLOCK = undefined;
     const in_ptr = if (in) |i| i.ptr else null;
-    const in_len = if (in) |i| @intCast(ULONG, i.len) else 0;
+    const in_len = if (in) |i| @as(ULONG, @intCast(i.len)) else 0;
     const out_ptr = if (out) |o| o.ptr else null;
-    const out_len = if (out) |o| @intCast(ULONG, o.len) else 0;
+    const out_len = if (out) |o| @as(ULONG, @intCast(o.len)) else 0;
 
     const rc = blk: {
         if (is_fsctl) {
@@ -307,7 +307,7 @@ pub fn WaitForSingleObjectEx(handle: HANDLE, milliseconds: DWORD, alertable: boo
 
 pub fn WaitForMultipleObjectsEx(handles: []const HANDLE, waitAll: bool, milliseconds: DWORD, alertable: bool) !u32 {
     assert(handles.len < MAXIMUM_WAIT_OBJECTS);
-    const nCount: DWORD = @intCast(DWORD, handles.len);
+    const nCount: DWORD = @as(DWORD, @intCast(handles.len));
     switch (kernel32.WaitForMultipleObjectsEx(
         nCount,
         handles.ptr,
@@ -419,7 +419,7 @@ pub fn GetQueuedCompletionStatusEx(
     const success = kernel32.GetQueuedCompletionStatusEx(
         completion_port,
         completion_port_entries.ptr,
-        @intCast(ULONG, completion_port_entries.len),
+        @as(ULONG, @intCast(completion_port_entries.len)),
         &num_entries_removed,
         timeout_ms orelse INFINITE,
         @intFromBool(alertable),
@@ -469,8 +469,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
                     .InternalHigh = 0,
                     .DUMMYUNIONNAME = .{
                         .DUMMYSTRUCTNAME = .{
-                            .Offset = @truncate(u32, off),
-                            .OffsetHigh = @truncate(u32, off >> 32),
+                            .Offset = @as(u32, @truncate(off)),
+                            .OffsetHigh = @as(u32, @truncate(off >> 32)),
                         },
                     },
                     .hEvent = null,
@@ -480,7 +480,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
         loop.beginOneEvent();
         suspend {
             // TODO handle buffer bigger than DWORD can hold
-            _ = kernel32.ReadFile(in_hFile, buffer.ptr, @intCast(DWORD, buffer.len), null, &resume_node.base.overlapped);
+            _ = kernel32.ReadFile(in_hFile, buffer.ptr, @as(DWORD, @intCast(buffer.len)), null, &resume_node.base.overlapped);
         }
         var bytes_transferred: DWORD = undefined;
         if (kernel32.GetOverlappedResult(in_hFile, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
@@ -496,7 +496,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
         if (offset == null) {
             // TODO make setting the file position non-blocking
             const new_off = off + bytes_transferred;
-            try SetFilePointerEx_CURRENT(in_hFile, @bitCast(i64, new_off));
+            try SetFilePointerEx_CURRENT(in_hFile, @as(i64, @bitCast(new_off)));
         }
         return @as(usize, bytes_transferred);
     } else {
@@ -510,8 +510,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo
                     .InternalHigh = 0,
                     .DUMMYUNIONNAME = .{
                         .DUMMYSTRUCTNAME = .{
-                            .Offset = @truncate(u32, off),
-                            .OffsetHigh = @truncate(u32, off >> 32),
+                            .Offset = @as(u32, @truncate(off)),
+                            .OffsetHigh = @as(u32, @truncate(off >> 32)),
                         },
                     },
                     .hEvent = null,
@@ -563,8 +563,8 @@ pub fn WriteFile(
                     .InternalHigh = 0,
                     .DUMMYUNIONNAME = .{
                         .DUMMYSTRUCTNAME = .{
-                            .Offset = @truncate(u32, off),
-                            .OffsetHigh = @truncate(u32, off >> 32),
+                            .Offset = @as(u32, @truncate(off)),
+                            .OffsetHigh = @as(u32, @truncate(off >> 32)),
                         },
                     },
                     .hEvent = null,
@@ -591,7 +591,7 @@ pub fn WriteFile(
         if (offset == null) {
             // TODO make setting the file position non-blocking
             const new_off = off + bytes_transferred;
-            try SetFilePointerEx_CURRENT(handle, @bitCast(i64, new_off));
+            try SetFilePointerEx_CURRENT(handle, @as(i64, @bitCast(new_off)));
         }
         return bytes_transferred;
     } else {
@@ -603,8 +603,8 @@ pub fn WriteFile(
                 .InternalHigh = 0,
                 .DUMMYUNIONNAME = .{
                     .DUMMYSTRUCTNAME = .{
-                        .Offset = @truncate(u32, off),
-                        .OffsetHigh = @truncate(u32, off >> 32),
+                        .Offset = @as(u32, @truncate(off)),
+                        .OffsetHigh = @as(u32, @truncate(off >> 32)),
                     },
                 },
                 .hEvent = null,
@@ -745,19 +745,19 @@ pub fn CreateSymbolicLink(
     const header_len = @sizeOf(ULONG) + @sizeOf(USHORT) * 2;
     const symlink_data = SYMLINK_DATA{
         .ReparseTag = IO_REPARSE_TAG_SYMLINK,
-        .ReparseDataLength = @intCast(u16, buf_len - header_len),
+        .ReparseDataLength = @as(u16, @intCast(buf_len - header_len)),
         .Reserved = 0,
-        .SubstituteNameOffset = @intCast(u16, target_path.len * 2),
-        .SubstituteNameLength = @intCast(u16, target_path.len * 2),
+        .SubstituteNameOffset = @as(u16, @intCast(target_path.len * 2)),
+        .SubstituteNameLength = @as(u16, @intCast(target_path.len * 2)),
         .PrintNameOffset = 0,
-        .PrintNameLength = @intCast(u16, target_path.len * 2),
+        .PrintNameLength = @as(u16, @intCast(target_path.len * 2)),
         .Flags = if (dir) |_| SYMLINK_FLAG_RELATIVE else 0,
     };
 
     @memcpy(buffer[0..@sizeOf(SYMLINK_DATA)], std.mem.asBytes(&symlink_data));
-    @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path));
+    @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0 .. target_path.len * 2], @as([*]const u8, @ptrCast(target_path)));
     const paths_start = @sizeOf(SYMLINK_DATA) + target_path.len * 2;
-    @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @ptrCast([*]const u8, target_path));
+    @memcpy(buffer[paths_start..][0 .. target_path.len * 2], @as([*]const u8, @ptrCast(target_path)));
     _ = try DeviceIoControl(symlink_handle, FSCTL_SET_REPARSE_POINT, buffer[0..buf_len], null);
 }
 
@@ -827,10 +827,10 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
         else => |e| return e,
     };
 
-    const reparse_struct = @ptrCast(*const REPARSE_DATA_BUFFER, @alignCast(@alignOf(REPARSE_DATA_BUFFER), &reparse_buf[0]));
+    const reparse_struct: *const REPARSE_DATA_BUFFER = @ptrCast(@alignCast(&reparse_buf[0]));
     switch (reparse_struct.ReparseTag) {
         IO_REPARSE_TAG_SYMLINK => {
-            const buf = @ptrCast(*const SYMBOLIC_LINK_REPARSE_BUFFER, @alignCast(@alignOf(SYMBOLIC_LINK_REPARSE_BUFFER), &reparse_struct.DataBuffer[0]));
+            const buf: *const SYMBOLIC_LINK_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0]));
             const offset = buf.SubstituteNameOffset >> 1;
             const len = buf.SubstituteNameLength >> 1;
             const path_buf = @as([*]const u16, &buf.PathBuffer);
@@ -838,7 +838,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
             return parseReadlinkPath(path_buf[offset..][0..len], is_relative, out_buffer);
         },
         IO_REPARSE_TAG_MOUNT_POINT => {
-            const buf = @ptrCast(*const MOUNT_POINT_REPARSE_BUFFER, @alignCast(@alignOf(MOUNT_POINT_REPARSE_BUFFER), &reparse_struct.DataBuffer[0]));
+            const buf: *const MOUNT_POINT_REPARSE_BUFFER = @ptrCast(@alignCast(&reparse_struct.DataBuffer[0]));
             const offset = buf.SubstituteNameOffset >> 1;
             const len = buf.SubstituteNameLength >> 1;
             const path_buf = @as([*]const u16, &buf.PathBuffer);
@@ -884,7 +884,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
     else
         FILE_NON_DIRECTORY_FILE | FILE_OPEN_REPARSE_POINT; // would we ever want to delete the target instead?
 
-    const path_len_bytes = @intCast(u16, sub_path_w.len * 2);
+    const path_len_bytes = @as(u16, @intCast(sub_path_w.len * 2));
     var nt_name = UNICODE_STRING{
         .Length = path_len_bytes,
         .MaximumLength = path_len_bytes,
@@ -1020,7 +1020,7 @@ pub fn SetFilePointerEx_BEGIN(handle: HANDLE, offset: u64) SetFilePointerError!v
     // "The starting point is zero or the beginning of the file. If [FILE_BEGIN]
     // is specified, then the liDistanceToMove parameter is interpreted as an unsigned value."
     // https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-setfilepointerex
-    const ipos = @bitCast(LARGE_INTEGER, offset);
+    const ipos = @as(LARGE_INTEGER, @bitCast(offset));
     if (kernel32.SetFilePointerEx(handle, ipos, null, FILE_BEGIN) == 0) {
         switch (kernel32.GetLastError()) {
             .INVALID_PARAMETER => unreachable,
@@ -1064,7 +1064,7 @@ pub fn SetFilePointerEx_CURRENT_get(handle: HANDLE) SetFilePointerError!u64 {
     }
     // Based on the docs for FILE_BEGIN, it seems that the returned signed integer
     // should be interpreted as an unsigned integer.
-    return @bitCast(u64, result);
+    return @as(u64, @bitCast(result));
 }
 
 pub fn QueryObjectName(
@@ -1073,7 +1073,7 @@ pub fn QueryObjectName(
 ) ![]u16 {
     const out_buffer_aligned = mem.alignInSlice(out_buffer, @alignOf(OBJECT_NAME_INFORMATION)) orelse return error.NameTooLong;
 
-    const info = @ptrCast(*OBJECT_NAME_INFORMATION, out_buffer_aligned);
+    const info = @as(*OBJECT_NAME_INFORMATION, @ptrCast(out_buffer_aligned));
     // buffer size is specified in bytes
     const out_buffer_len = std.math.cast(ULONG, out_buffer_aligned.len * 2) orelse std.math.maxInt(ULONG);
     // last argument would return the length required for full_buffer, not exposed here
@@ -1197,26 +1197,26 @@ pub fn GetFinalPathNameByHandle(
             };
             defer CloseHandle(mgmt_handle);
 
-            var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, &input_buf[0]);
+            var input_struct = @as(*MOUNTMGR_MOUNT_POINT, @ptrCast(&input_buf[0]));
             input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT);
-            input_struct.DeviceNameLength = @intCast(USHORT, volume_name_u16.len * 2);
-            @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @ptrCast([*]const u8, volume_name_u16.ptr));
+            input_struct.DeviceNameLength = @as(USHORT, @intCast(volume_name_u16.len * 2));
+            @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0 .. volume_name_u16.len * 2], @as([*]const u8, @ptrCast(volume_name_u16.ptr)));
 
             DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, &input_buf, &output_buf) catch |err| switch (err) {
                 error.AccessDenied => unreachable,
                 else => |e| return e,
             };
-            const mount_points_struct = @ptrCast(*const MOUNTMGR_MOUNT_POINTS, &output_buf[0]);
+            const mount_points_struct = @as(*const MOUNTMGR_MOUNT_POINTS, @ptrCast(&output_buf[0]));
 
-            const mount_points = @ptrCast(
+            const mount_points = @as(
                 [*]const MOUNTMGR_MOUNT_POINT,
-                &mount_points_struct.MountPoints[0],
+                @ptrCast(&mount_points_struct.MountPoints[0]),
             )[0..mount_points_struct.NumberOfMountPoints];
 
             for (mount_points) |mount_point| {
-                const symlink = @ptrCast(
+                const symlink = @as(
                     [*]const u16,
-                    @alignCast(@alignOf(u16), &output_buf[mount_point.SymbolicLinkNameOffset]),
+                    @ptrCast(@alignCast(&output_buf[mount_point.SymbolicLinkNameOffset])),
                 )[0 .. mount_point.SymbolicLinkNameLength / 2];
 
                 // Look for `\DosDevices\` prefix. We don't really care if there is more than one symlink
@@ -1282,7 +1282,7 @@ pub fn GetFileSizeEx(hFile: HANDLE) GetFileSizeError!u64 {
             else => |err| return unexpectedError(err),
         }
     }
-    return @bitCast(u64, file_size);
+    return @as(u64, @bitCast(file_size));
 }
 
 pub const GetFileAttributesError = error{
@@ -1313,7 +1313,7 @@ pub fn WSAStartup(majorVersion: u8, minorVersion: u8) !ws2_32.WSADATA {
     var wsadata: ws2_32.WSADATA = undefined;
     return switch (ws2_32.WSAStartup((@as(WORD, minorVersion) << 8) | majorVersion, &wsadata)) {
         0 => wsadata,
-        else => |err_int| switch (@enumFromInt(ws2_32.WinsockError, @intCast(u16, err_int))) {
+        else => |err_int| switch (@as(ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(err_int))))) {
             .WSASYSNOTREADY => return error.SystemNotAvailable,
             .WSAVERNOTSUPPORTED => return error.VersionNotSupported,
             .WSAEINPROGRESS => return error.BlockingOperationInProgress,
@@ -1408,7 +1408,7 @@ pub fn WSASocketW(
 }
 
 pub fn bind(s: ws2_32.SOCKET, name: *const ws2_32.sockaddr, namelen: ws2_32.socklen_t) i32 {
-    return ws2_32.bind(s, name, @intCast(i32, namelen));
+    return ws2_32.bind(s, name, @as(i32, @intCast(namelen)));
 }
 
 pub fn listen(s: ws2_32.SOCKET, backlog: u31) i32 {
@@ -1427,15 +1427,15 @@ pub fn closesocket(s: ws2_32.SOCKET) !void {
 
 pub fn accept(s: ws2_32.SOCKET, name: ?*ws2_32.sockaddr, namelen: ?*ws2_32.socklen_t) ws2_32.SOCKET {
     assert((name == null) == (namelen == null));
-    return ws2_32.accept(s, name, @ptrCast(?*i32, namelen));
+    return ws2_32.accept(s, name, @as(?*i32, @ptrCast(namelen)));
 }
 
 pub fn getsockname(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
-    return ws2_32.getsockname(s, name, @ptrCast(*i32, namelen));
+    return ws2_32.getsockname(s, name, @as(*i32, @ptrCast(namelen)));
 }
 
 pub fn getpeername(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
-    return ws2_32.getpeername(s, name, @ptrCast(*i32, namelen));
+    return ws2_32.getpeername(s, name, @as(*i32, @ptrCast(namelen)));
 }
 
 pub fn sendmsg(
@@ -1447,28 +1447,28 @@ pub fn sendmsg(
     if (ws2_32.WSASendMsg(s, msg, flags, &bytes_send, null, null) == ws2_32.SOCKET_ERROR) {
         return ws2_32.SOCKET_ERROR;
     } else {
-        return @as(i32, @intCast(u31, bytes_send));
+        return @as(i32, @as(u31, @intCast(bytes_send)));
     }
 }
 
 pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 {
-    var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @constCast(buf) };
+    var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = @constCast(buf) };
     var bytes_send: DWORD = undefined;
-    if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) {
+    if (ws2_32.WSASendTo(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_send, flags, to, @as(i32, @intCast(to_len)), null, null) == ws2_32.SOCKET_ERROR) {
         return ws2_32.SOCKET_ERROR;
     } else {
-        return @as(i32, @intCast(u31, bytes_send));
+        return @as(i32, @as(u31, @intCast(bytes_send)));
     }
 }
 
 pub fn recvfrom(s: ws2_32.SOCKET, buf: [*]u8, len: usize, flags: u32, from: ?*ws2_32.sockaddr, from_len: ?*ws2_32.socklen_t) i32 {
-    var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = buf };
+    var buffer = ws2_32.WSABUF{ .len = @as(u31, @truncate(len)), .buf = buf };
     var bytes_received: DWORD = undefined;
     var flags_inout = flags;
-    if (ws2_32.WSARecvFrom(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_received, &flags_inout, from, @ptrCast(?*i32, from_len), null, null) == ws2_32.SOCKET_ERROR) {
+    if (ws2_32.WSARecvFrom(s, @as([*]ws2_32.WSABUF, @ptrCast(&buffer)), 1, &bytes_received, &flags_inout, from, @as(?*i32, @ptrCast(from_len)), null, null) == ws2_32.SOCKET_ERROR) {
         return ws2_32.SOCKET_ERROR;
     } else {
-        return @as(i32, @intCast(u31, bytes_received));
+        return @as(i32, @as(u31, @intCast(bytes_received)));
     }
 }
 
@@ -1489,9 +1489,9 @@ pub fn WSAIoctl(
         s,
         dwIoControlCode,
         if (inBuffer) |i| i.ptr else null,
-        if (inBuffer) |i| @intCast(DWORD, i.len) else 0,
+        if (inBuffer) |i| @as(DWORD, @intCast(i.len)) else 0,
         outBuffer.ptr,
-        @intCast(DWORD, outBuffer.len),
+        @as(DWORD, @intCast(outBuffer.len)),
         &bytes,
         overlapped,
         completionRoutine,
@@ -1741,7 +1741,7 @@ pub fn QueryPerformanceFrequency() u64 {
     var result: LARGE_INTEGER = undefined;
     assert(kernel32.QueryPerformanceFrequency(&result) != 0);
     // The kernel treats this integer as unsigned.
-    return @bitCast(u64, result);
+    return @as(u64, @bitCast(result));
 }
 
 pub fn QueryPerformanceCounter() u64 {
@@ -1750,7 +1750,7 @@ pub fn QueryPerformanceCounter() u64 {
     var result: LARGE_INTEGER = undefined;
     assert(kernel32.QueryPerformanceCounter(&result) != 0);
     // The kernel treats this integer as unsigned.
-    return @bitCast(u64, result);
+    return @as(u64, @bitCast(result));
 }
 
 pub fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter: ?*anyopaque, Context: ?*anyopaque) void {
@@ -1852,7 +1852,7 @@ pub fn teb() *TEB {
     return switch (native_arch) {
         .x86 => blk: {
             if (builtin.zig_backend == .stage2_c) {
-                break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_windows_teb()));
+                break :blk @ptrCast(@alignCast(zig_x86_windows_teb()));
             } else {
                 break :blk asm volatile (
                     \\ movl %%fs:0x18, %[ptr]
@@ -1862,7 +1862,7 @@ pub fn teb() *TEB {
         },
         .x86_64 => blk: {
             if (builtin.zig_backend == .stage2_c) {
-                break :blk @ptrCast(*TEB, @alignCast(@alignOf(TEB), zig_x86_64_windows_teb()));
+                break :blk @ptrCast(@alignCast(zig_x86_64_windows_teb()));
             } else {
                 break :blk asm volatile (
                     \\ movq %%gs:0x30, %[ptr]
@@ -1894,7 +1894,7 @@ pub fn fromSysTime(hns: i64) i128 {
 
 pub fn toSysTime(ns: i128) i64 {
     const hns = @divFloor(ns, 100);
-    return @intCast(i64, hns) - std.time.epoch.windows * (std.time.ns_per_s / 100);
+    return @as(i64, @intCast(hns)) - std.time.epoch.windows * (std.time.ns_per_s / 100);
 }
 
 pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 {
@@ -1904,22 +1904,22 @@ pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 {
 
 /// Converts a number of nanoseconds since the POSIX epoch to a Windows FILETIME.
 pub fn nanoSecondsToFileTime(ns: i128) FILETIME {
-    const adjusted = @bitCast(u64, toSysTime(ns));
+    const adjusted = @as(u64, @bitCast(toSysTime(ns)));
     return FILETIME{
-        .dwHighDateTime = @truncate(u32, adjusted >> 32),
-        .dwLowDateTime = @truncate(u32, adjusted),
+        .dwHighDateTime = @as(u32, @truncate(adjusted >> 32)),
+        .dwLowDateTime = @as(u32, @truncate(adjusted)),
     };
 }
 
 /// Compares two WTF16 strings using RtlEqualUnicodeString
 pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
-    const a_bytes = @intCast(u16, a.len * 2);
+    const a_bytes = @as(u16, @intCast(a.len * 2));
     const a_string = UNICODE_STRING{
         .Length = a_bytes,
         .MaximumLength = a_bytes,
         .Buffer = @constCast(a.ptr),
     };
-    const b_bytes = @intCast(u16, b.len * 2);
+    const b_bytes = @as(u16, @intCast(b.len * 2));
     const b_string = UNICODE_STRING{
         .Length = b_bytes,
         .MaximumLength = b_bytes,
@@ -2117,7 +2117,7 @@ pub fn wToPrefixedFileW(path: [:0]const u16) !PathSpace {
                 .unc_absolute => nt_prefix.len + 2,
                 else => nt_prefix.len,
             };
-            const buf_len = @intCast(u32, path_space.data.len - path_buf_offset);
+            const buf_len = @as(u32, @intCast(path_space.data.len - path_buf_offset));
             const path_byte_len = ntdll.RtlGetFullPathName_U(
                 path.ptr,
                 buf_len * 2,
@@ -2263,7 +2263,7 @@ test getUnprefixedPathType {
 }
 
 fn getFullPathNameW(path: [*:0]const u16, out: []u16) !usize {
-    const result = kernel32.GetFullPathNameW(path, @intCast(u32, out.len), out.ptr, null);
+    const result = kernel32.GetFullPathNameW(path, @as(u32, @intCast(out.len)), out.ptr, null);
     if (result == 0) {
         switch (kernel32.GetLastError()) {
             else => |err| return unexpectedError(err),
@@ -2284,9 +2284,9 @@ pub fn loadWinsockExtensionFunction(comptime T: type, sock: ws2_32.SOCKET, guid:
     const rc = ws2_32.WSAIoctl(
         sock,
         ws2_32.SIO_GET_EXTENSION_FUNCTION_POINTER,
-        @ptrCast(*const anyopaque, &guid),
+        @as(*const anyopaque, @ptrCast(&guid)),
         @sizeOf(GUID),
-        @ptrFromInt(?*anyopaque, @intFromPtr(&function)),
+        @as(?*anyopaque, @ptrFromInt(@intFromPtr(&function))),
         @sizeOf(T),
         &num_bytes,
         null,
@@ -2332,7 +2332,7 @@ pub fn unexpectedError(err: Win32Error) std.os.UnexpectedError {
 }
 
 pub fn unexpectedWSAError(err: ws2_32.WinsockError) std.os.UnexpectedError {
-    return unexpectedError(@enumFromInt(Win32Error, @intFromEnum(err)));
+    return unexpectedError(@as(Win32Error, @enumFromInt(@intFromEnum(err))));
 }
 
 /// Call this when you made a windows NtDll call
@@ -2530,7 +2530,7 @@ pub fn CTL_CODE(deviceType: u16, function: u12, method: TransferType, access: u2
         @intFromEnum(method);
 }
 
-pub const INVALID_HANDLE_VALUE = @ptrFromInt(HANDLE, maxInt(usize));
+pub const INVALID_HANDLE_VALUE = @as(HANDLE, @ptrFromInt(maxInt(usize)));
 
 pub const INVALID_FILE_ATTRIBUTES = @as(DWORD, maxInt(DWORD));
 
@@ -3119,7 +3119,7 @@ pub const GUID = extern struct {
             bytes[i] = (try std.fmt.charToDigit(s[hex_offset], 16)) << 4 |
                 try std.fmt.charToDigit(s[hex_offset + 1], 16);
         }
-        return @bitCast(GUID, bytes);
+        return @as(GUID, @bitCast(bytes));
     }
 };
 
@@ -3150,16 +3150,16 @@ pub const KF_FLAG_SIMPLE_IDLIST = 256;
 pub const KF_FLAG_ALIAS_ONLY = -2147483648;
 
 pub const S_OK = 0;
-pub const E_NOTIMPL = @bitCast(c_long, @as(c_ulong, 0x80004001));
-pub const E_NOINTERFACE = @bitCast(c_long, @as(c_ulong, 0x80004002));
-pub const E_POINTER = @bitCast(c_long, @as(c_ulong, 0x80004003));
-pub const E_ABORT = @bitCast(c_long, @as(c_ulong, 0x80004004));
-pub const E_FAIL = @bitCast(c_long, @as(c_ulong, 0x80004005));
-pub const E_UNEXPECTED = @bitCast(c_long, @as(c_ulong, 0x8000FFFF));
-pub const E_ACCESSDENIED = @bitCast(c_long, @as(c_ulong, 0x80070005));
-pub const E_HANDLE = @bitCast(c_long, @as(c_ulong, 0x80070006));
-pub const E_OUTOFMEMORY = @bitCast(c_long, @as(c_ulong, 0x8007000E));
-pub const E_INVALIDARG = @bitCast(c_long, @as(c_ulong, 0x80070057));
+pub const E_NOTIMPL = @as(c_long, @bitCast(@as(c_ulong, 0x80004001)));
+pub const E_NOINTERFACE = @as(c_long, @bitCast(@as(c_ulong, 0x80004002)));
+pub const E_POINTER = @as(c_long, @bitCast(@as(c_ulong, 0x80004003)));
+pub const E_ABORT = @as(c_long, @bitCast(@as(c_ulong, 0x80004004)));
+pub const E_FAIL = @as(c_long, @bitCast(@as(c_ulong, 0x80004005)));
+pub const E_UNEXPECTED = @as(c_long, @bitCast(@as(c_ulong, 0x8000FFFF)));
+pub const E_ACCESSDENIED = @as(c_long, @bitCast(@as(c_ulong, 0x80070005)));
+pub const E_HANDLE = @as(c_long, @bitCast(@as(c_ulong, 0x80070006)));
+pub const E_OUTOFMEMORY = @as(c_long, @bitCast(@as(c_ulong, 0x8007000E)));
+pub const E_INVALIDARG = @as(c_long, @bitCast(@as(c_ulong, 0x80070057)));
 
 pub const FILE_FLAG_BACKUP_SEMANTICS = 0x02000000;
 pub const FILE_FLAG_DELETE_ON_CLOSE = 0x04000000;
@@ -3221,7 +3221,7 @@ pub const LSTATUS = LONG;
 
 pub const HKEY = *opaque {};
 
-pub const HKEY_LOCAL_MACHINE: HKEY = @ptrFromInt(HKEY, 0x80000002);
+pub const HKEY_LOCAL_MACHINE: HKEY = @as(HKEY, @ptrFromInt(0x80000002));
 
 /// Combines the STANDARD_RIGHTS_REQUIRED, KEY_QUERY_VALUE, KEY_SET_VALUE, KEY_CREATE_SUB_KEY,
 /// KEY_ENUMERATE_SUB_KEYS, KEY_NOTIFY, and KEY_CREATE_LINK access rights.
@@ -4685,7 +4685,7 @@ pub const KUSER_SHARED_DATA = extern struct {
 /// Read-only user-mode address for the shared data.
 /// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
 /// https://msrc-blog.microsoft.com/2022/04/05/randomizing-the-kuser_shared_data-structure-on-windows/
-pub const SharedUserData: *const KUSER_SHARED_DATA = @ptrFromInt(*const KUSER_SHARED_DATA, 0x7FFE0000);
+pub const SharedUserData: *const KUSER_SHARED_DATA = @as(*const KUSER_SHARED_DATA, @ptrFromInt(0x7FFE0000));
 
 pub fn IsProcessorFeaturePresent(feature: PF) bool {
     if (@intFromEnum(feature) >= PROCESSOR_FEATURE_MAX) return false;
@@ -4886,7 +4886,7 @@ pub fn WriteProcessMemory(handle: HANDLE, addr: ?LPVOID, buffer: []const u8) Wri
     switch (ntdll.NtWriteVirtualMemory(
         handle,
         addr,
-        @ptrCast(*const anyopaque, buffer.ptr),
+        @as(*const anyopaque, @ptrCast(buffer.ptr)),
         buffer.len,
         &nwritten,
     )) {
@@ -4919,6 +4919,6 @@ pub fn ProcessBaseAddress(handle: HANDLE) ProcessBaseAddressError!HMODULE {
 
     var peb_buf: [@sizeOf(PEB)]u8 align(@alignOf(PEB)) = undefined;
     const peb_out = try ReadProcessMemory(handle, info.PebBaseAddress, &peb_buf);
-    const ppeb = @ptrCast(*const PEB, @alignCast(@alignOf(PEB), peb_out.ptr));
+    const ppeb: *const PEB = @ptrCast(@alignCast(peb_out.ptr));
     return ppeb.ImageBaseAddress;
 }
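
The @alignCast sites in this file are among those the commit message calls out as manual fixes: the new builtin takes no alignment argument and infers it from the result type instead. A minimal sketch of the rewritten chain:

    const std = @import("std");

    test "ptrCast with inferred alignCast" {
        var buf: [@sizeOf(u32)]u8 align(@alignOf(u32)) = undefined;
        // Old: @ptrCast(*u32, @alignCast(@alignOf(u32), &buf))
        // New: both casts infer from the annotated result type.
        const word: *u32 = @ptrCast(@alignCast(&buf));
        word.* = 0xdeadbeef;
        try std.testing.expectEqual(@as(u32, 0xdeadbeef), word.*);
    }
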
lib/std/rand/benchmark.zig
@@ -91,8 +91,8 @@ pub fn benchmark(comptime H: anytype, bytes: usize, comptime block_size: usize)
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
 
     std.debug.assert(rng.random().int(u64) != 0);
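
The throughput computation round-trips through f64 with the new float-cast builtins; in isolation:

    const std = @import("std");

    test "throughput arithmetic" {
        const bytes: usize = 1_000_000;
        const elapsed_ns: u64 = 500_000_000; // half a second
        const elapsed_s = @as(f64, @floatFromInt(elapsed_ns)) / std.time.ns_per_s;
        const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
        try std.testing.expectEqual(@as(u64, 2_000_000), throughput);
    }
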
 
lib/std/rand/Isaac64.zig
@@ -38,10 +38,10 @@ fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2:
     const x = self.m[base + m1];
     self.a = mix +% self.m[base + m2];
 
-    const y = self.a +% self.b +% self.m[@intCast(usize, (x >> 3) % self.m.len)];
+    const y = self.a +% self.b +% self.m[@as(usize, @intCast((x >> 3) % self.m.len))];
     self.m[base + m1] = y;
 
-    self.b = x +% self.m[@intCast(usize, (y >> 11) % self.m.len)];
+    self.b = x +% self.m[@as(usize, @intCast((y >> 11) % self.m.len))];
     self.r[self.r.len - 1 - base - m1] = self.b;
 }
 
@@ -159,7 +159,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void {
         var n = self.next();
         comptime var j: usize = 0;
         inline while (j < 8) : (j += 1) {
-            buf[i + j] = @truncate(u8, n);
+            buf[i + j] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
@@ -168,7 +168,7 @@ pub fn fill(self: *Isaac64, buf: []u8) void {
     if (i != buf.len) {
         var n = self.next();
         while (i < buf.len) : (i += 1) {
-            buf[i] = @truncate(u8, n);
+            buf[i] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
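
The fill loops in this and the following PRNG files all peel bytes off a word the same way; a minimal standalone sketch:

    const std = @import("std");

    test "word to bytes" {
        var out: [8]u8 = undefined;
        var n: u64 = 0x0807_0605_0403_0201;
        for (&out) |*b| {
            b.* = @as(u8, @truncate(n)); // low byte first
            n >>= 8;
        }
        try std.testing.expectEqual(@as(u8, 0x01), out[0]);
        try std.testing.expectEqual(@as(u8, 0x08), out[7]);
    }
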
lib/std/rand/Pcg.zig
@@ -29,10 +29,10 @@ fn next(self: *Pcg) u32 {
     const l = self.s;
     self.s = l *% default_multiplier +% (self.i | 1);
 
-    const xor_s = @truncate(u32, ((l >> 18) ^ l) >> 27);
-    const rot = @intCast(u32, l >> 59);
+    const xor_s = @as(u32, @truncate(((l >> 18) ^ l) >> 27));
+    const rot = @as(u32, @intCast(l >> 59));
 
-    return (xor_s >> @intCast(u5, rot)) | (xor_s << @intCast(u5, (0 -% rot) & 31));
+    return (xor_s >> @as(u5, @intCast(rot))) | (xor_s << @as(u5, @intCast((0 -% rot) & 31)));
 }
 
 fn seed(self: *Pcg, init_s: u64) void {
@@ -58,7 +58,7 @@ pub fn fill(self: *Pcg, buf: []u8) void {
         var n = self.next();
         comptime var j: usize = 0;
         inline while (j < 4) : (j += 1) {
-            buf[i + j] = @truncate(u8, n);
+            buf[i + j] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
@@ -67,7 +67,7 @@ pub fn fill(self: *Pcg, buf: []u8) void {
     if (i != buf.len) {
         var n = self.next();
         while (i < buf.len) : (i += 1) {
-            buf[i] = @truncate(u8, n);
+            buf[i] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
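
Pcg's output step ends in a 32-bit rotate right; written as a helper, with the shift amounts narrowed to u5 at the rewritten @intCast sites:

    const std = @import("std");

    fn rotr32(x: u32, rot: u32) u32 {
        // (0 -% rot) & 31 computes (32 - rot) mod 32 without overflowing at rot == 0;
        // rot is assumed to be in [0, 31], as it is in Pcg's next().
        return (x >> @as(u5, @intCast(rot))) | (x << @as(u5, @intCast((0 -% rot) & 31)));
    }

    test "rotate right" {
        try std.testing.expectEqual(@as(u32, 0x8000_0000), rotr32(1, 1));
        try std.testing.expectEqual(@as(u32, 1), rotr32(1, 0));
    }
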
lib/std/rand/RomuTrio.zig
@@ -34,7 +34,7 @@ fn next(self: *RomuTrio) u64 {
 }
 
 pub fn seedWithBuf(self: *RomuTrio, buf: [24]u8) void {
-    const seed_buf = @bitCast([3]u64, buf);
+    const seed_buf = @as([3]u64, @bitCast(buf));
     self.x_state = seed_buf[0];
     self.y_state = seed_buf[1];
     self.z_state = seed_buf[2];
@@ -58,7 +58,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void {
         var n = self.next();
         comptime var j: usize = 0;
         inline while (j < 8) : (j += 1) {
-            buf[i + j] = @truncate(u8, n);
+            buf[i + j] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
@@ -67,7 +67,7 @@ pub fn fill(self: *RomuTrio, buf: []u8) void {
     if (i != buf.len) {
         var n = self.next();
         while (i < buf.len) : (i += 1) {
-            buf[i] = @truncate(u8, n);
+            buf[i] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
@@ -122,7 +122,7 @@ test "RomuTrio fill" {
 }
 
 test "RomuTrio buf seeding test" {
-    const buf0 = @bitCast([24]u8, [3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 });
+    const buf0 = @as([24]u8, @bitCast([3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 }));
     const resulting_state = .{ .x = 16294208416658607535, .y = 13964609475759908645, .z = 4703697494102998476 };
     var r = RomuTrio.init(0);
     r.seedWithBuf(buf0);
lib/std/rand/Sfc64.zig
@@ -56,7 +56,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void {
         var n = self.next();
         comptime var j: usize = 0;
         inline while (j < 8) : (j += 1) {
-            buf[i + j] = @truncate(u8, n);
+            buf[i + j] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
@@ -65,7 +65,7 @@ pub fn fill(self: *Sfc64, buf: []u8) void {
     if (i != buf.len) {
         var n = self.next();
         while (i < buf.len) : (i += 1) {
-            buf[i] = @truncate(u8, n);
+            buf[i] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
lib/std/rand/test.zig
@@ -332,13 +332,13 @@ test "Random float chi-square goodness of fit" {
     while (i < num_numbers) : (i += 1) {
         const rand_f32 = random.float(f32);
         const rand_f64 = random.float(f64);
-        var f32_put = try f32_hist.getOrPut(@intFromFloat(u32, rand_f32 * @floatFromInt(f32, num_buckets)));
+        var f32_put = try f32_hist.getOrPut(@as(u32, @intFromFloat(rand_f32 * @as(f32, @floatFromInt(num_buckets)))));
         if (f32_put.found_existing) {
             f32_put.value_ptr.* += 1;
         } else {
             f32_put.value_ptr.* = 1;
         }
-        var f64_put = try f64_hist.getOrPut(@intFromFloat(u32, rand_f64 * @floatFromInt(f64, num_buckets)));
+        var f64_put = try f64_hist.getOrPut(@as(u32, @intFromFloat(rand_f64 * @as(f64, @floatFromInt(num_buckets)))));
         if (f64_put.found_existing) {
             f64_put.value_ptr.* += 1;
         } else {
@@ -352,8 +352,8 @@ test "Random float chi-square goodness of fit" {
     {
         var j: u32 = 0;
         while (j < num_buckets) : (j += 1) {
-            const count = @floatFromInt(f64, (if (f32_hist.get(j)) |v| v else 0));
-            const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets);
+            const count = @as(f64, @floatFromInt((if (f32_hist.get(j)) |v| v else 0)));
+            const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets));
             const delta = count - expected;
             const variance = (delta * delta) / expected;
             f32_total_variance += variance;
@@ -363,8 +363,8 @@ test "Random float chi-square goodness of fit" {
     {
         var j: u64 = 0;
         while (j < num_buckets) : (j += 1) {
-            const count = @floatFromInt(f64, (if (f64_hist.get(j)) |v| v else 0));
-            const expected = @floatFromInt(f64, num_numbers) / @floatFromInt(f64, num_buckets);
+            const count = @as(f64, @floatFromInt((if (f64_hist.get(j)) |v| v else 0)));
+            const expected = @as(f64, @floatFromInt(num_numbers)) / @as(f64, @floatFromInt(num_buckets));
             const delta = count - expected;
             const variance = (delta * delta) / expected;
             f64_total_variance += variance;
@@ -421,13 +421,13 @@ fn testRange(r: Random, start: i8, end: i8) !void {
     try testRangeBias(r, start, end, false);
 }
 fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) !void {
-    const count = @intCast(usize, @as(i32, end) - @as(i32, start));
+    const count = @as(usize, @intCast(@as(i32, end) - @as(i32, start)));
     var values_buffer = [_]bool{false} ** 0x100;
     const values = values_buffer[0..count];
     var i: usize = 0;
     while (i < count) {
         const value: i32 = if (biased) r.intRangeLessThanBiased(i8, start, end) else r.intRangeLessThan(i8, start, end);
-        const index = @intCast(usize, value - start);
+        const index = @as(usize, @intCast(value - start));
         if (!values[index]) {
             i += 1;
             values[index] = true;
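The histogram hunks above derive a bucket index from a float in [0, 1) by scaling and truncating; a sketch (hypothetical helper):

    fn bucketIndex(x: f32, num_buckets: u32) u32 {
        // Scale [0, 1) into [0, num_buckets) and truncate toward zero.
        return @as(u32, @intFromFloat(x * @as(f32, @floatFromInt(num_buckets))));
    }

    test "bucketIndex" {
        try @import("std").testing.expectEqual(@as(u32, 2), bucketIndex(0.25, 8));
    }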
lib/std/rand/Xoroshiro128.zig
@@ -45,7 +45,7 @@ pub fn jump(self: *Xoroshiro128) void {
     inline for (table) |entry| {
         var b: usize = 0;
         while (b < 64) : (b += 1) {
-            if ((entry & (@as(u64, 1) << @intCast(u6, b))) != 0) {
+            if ((entry & (@as(u64, 1) << @as(u6, @intCast(b)))) != 0) {
                 s0 ^= self.s[0];
                 s1 ^= self.s[1];
             }
@@ -74,7 +74,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void {
         var n = self.next();
         comptime var j: usize = 0;
         inline while (j < 8) : (j += 1) {
-            buf[i + j] = @truncate(u8, n);
+            buf[i + j] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
@@ -83,7 +83,7 @@ pub fn fill(self: *Xoroshiro128, buf: []u8) void {
     if (i != buf.len) {
         var n = self.next();
         while (i < buf.len) : (i += 1) {
-            buf[i] = @truncate(u8, n);
+            buf[i] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
lib/std/rand/Xoshiro256.zig
@@ -46,13 +46,13 @@ pub fn jump(self: *Xoshiro256) void {
     var table: u256 = 0x39abdc4529b1661ca9582618e03fc9aad5a61266f0c9392c180ec6d33cfd0aba;
 
     while (table != 0) : (table >>= 1) {
-        if (@truncate(u1, table) != 0) {
-            s ^= @bitCast(u256, self.s);
+        if (@as(u1, @truncate(table)) != 0) {
+            s ^= @as(u256, @bitCast(self.s));
         }
         _ = self.next();
     }
 
-    self.s = @bitCast([4]u64, s);
+    self.s = @as([4]u64, @bitCast(s));
 }
 
 pub fn seed(self: *Xoshiro256, init_s: u64) void {
@@ -74,7 +74,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void {
         var n = self.next();
         comptime var j: usize = 0;
         inline while (j < 8) : (j += 1) {
-            buf[i + j] = @truncate(u8, n);
+            buf[i + j] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
@@ -83,7 +83,7 @@ pub fn fill(self: *Xoshiro256, buf: []u8) void {
     if (i != buf.len) {
         var n = self.next();
         while (i < buf.len) : (i += 1) {
-            buf[i] = @truncate(u8, n);
+            buf[i] = @as(u8, @truncate(n));
             n >>= 8;
         }
     }
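The jump hunk treats the whole `[4]u64` state as a single 256-bit integer so it can XOR-accumulate it in one operation, and tests the low bit of the jump constant with `@truncate` to `u1`. A sketch of both idioms (hypothetical helper):

    fn accumulateIfLowBitSet(acc: u256, table: u256, state: [4]u64) u256 {
        var s = acc;
        if (@as(u1, @truncate(table)) != 0) {
            // Reinterpret the four words as one wide integer: same bytes, new type.
            s ^= @as(u256, @bitCast(state));
        }
        return s;
    }

    test "accumulateIfLowBitSet" {
        // The low bit of 2 is clear, so the accumulator passes through unchanged.
        const state = [4]u64{ 1, 2, 3, 4 };
        try @import("std").testing.expectEqual(@as(u256, 5), accumulateIfLowBitSet(5, 2, state));
    }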
lib/std/rand/ziggurat.zig
@@ -18,17 +18,17 @@ pub fn next_f64(random: Random, comptime tables: ZigTable) f64 {
         // We manually construct a float from parts as we can avoid an extra random lookup here by
         // using the unused exponent for the lookup table entry.
         const bits = random.int(u64);
-        const i = @as(usize, @truncate(u8, bits));
+        const i = @as(usize, @as(u8, @truncate(bits)));
 
         const u = blk: {
             if (tables.is_symmetric) {
                 // Generate a value in the range [2, 4) and scale into [-1, 1)
                 const repr = ((0x3ff + 1) << 52) | (bits >> 12);
-                break :blk @bitCast(f64, repr) - 3.0;
+                break :blk @as(f64, @bitCast(repr)) - 3.0;
             } else {
                 // Generate a value in the range [1, 2) and scale into (0, 1)
                 const repr = (0x3ff << 52) | (bits >> 12);
-                break :blk @bitCast(f64, repr) - (1.0 - math.floatEps(f64) / 2.0);
+                break :blk @as(f64, @bitCast(repr)) - (1.0 - math.floatEps(f64) / 2.0);
             }
         };
 
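The bit trick this hunk migrates deserves a standalone look: splicing 52 random bits under a fixed exponent of 0x3ff yields a float uniform in [1, 2), and subtracting 1.0 maps it into [0, 1). (The diff's asymmetric branch subtracts 1 - eps/2 instead, to keep the interval open at 0.) A minimal sketch:

    const std = @import("std");

    fn f64FromBits(bits: u64) f64 {
        const repr = (@as(u64, 0x3ff) << 52) | (bits >> 12);
        return @as(f64, @bitCast(repr)) - 1.0;
    }

    test "f64FromBits stays in [0, 1)" {
        try std.testing.expect(f64FromBits(0) == 0.0);
        try std.testing.expect(f64FromBits(std.math.maxInt(u64)) < 1.0);
    }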
lib/std/sort/pdq.zig
@@ -251,7 +251,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void {
     const len = b - a;
     if (len < 8) return;
 
-    var rand = @intCast(u64, len);
+    var rand = @as(u64, @intCast(len));
     const modulus = math.ceilPowerOfTwoAssert(u64, len);
 
     var i = a + (len / 4) * 2 - 1;
@@ -261,7 +261,7 @@ fn breakPatterns(a: usize, b: usize, context: anytype) void {
         rand ^= rand >> 7;
         rand ^= rand << 17;
 
-        var other = @intCast(usize, rand & (modulus - 1));
+        var other = @as(usize, @intCast(rand & (modulus - 1)));
         if (other >= len) other -= len;
         context.swap(i, a + other);
     }
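`breakPatterns` perturbs likely-pathological inputs with an inline xorshift generator; because `modulus` is a power of two, `rand & (modulus - 1)` replaces a division. A sketch of the generator step (the first shift sits above the hunk and is assumed to be the classic `<< 13`):

    fn xorshift64(state: u64) u64 {
        var rand = state;
        rand ^= rand << 13;
        rand ^= rand >> 7;
        rand ^= rand << 17;
        return rand;
    }

    test "xorshift64 maps nonzero to nonzero" {
        try @import("std").testing.expect(xorshift64(42) != 0);
    }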
lib/std/testing/failing_allocator.zig
@@ -63,7 +63,7 @@ pub const FailingAllocator = struct {
         log2_ptr_align: u8,
         return_address: usize,
     ) ?[*]u8 {
-        const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
+        const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
         if (self.index == self.fail_index) {
             if (!self.has_induced_failure) {
                 @memset(&self.stack_addresses, 0);
@@ -91,7 +91,7 @@ pub const FailingAllocator = struct {
         new_len: usize,
         ra: usize,
     ) bool {
-        const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
+        const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
         if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra))
             return false;
         if (new_len < old_mem.len) {
@@ -108,7 +108,7 @@ pub const FailingAllocator = struct {
         log2_old_align: u8,
         ra: usize,
     ) void {
-        const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
+        const self: *FailingAllocator = @ptrCast(@alignCast(ctx));
         self.internal_allocator.rawFree(old_mem, log2_old_align, ra);
         self.deallocations += 1;
         self.freed_bytes += old_mem.len;
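These three hunks show the rewrite the commit message calls out as manual: `@alignCast` no longer takes an alignment operand, so the destination type must come from a result location, here the annotated `const`. A sketch of the pattern for any allocator-style context pointer:

    fn contextToSelf(comptime T: type, ctx: *anyopaque) *T {
        // @alignCast checks the runtime alignment and @ptrCast changes the
        // child type; both infer their targets from the *T result type.
        const self: *T = @ptrCast(@alignCast(ctx));
        return self;
    }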
lib/std/Thread/Futex.zig
@@ -128,14 +128,14 @@ const WindowsImpl = struct {
         // NTDLL functions work with time in units of 100 nanoseconds.
         // Positive values are absolute deadlines while negative values are relative durations.
         if (timeout) |delay| {
-            timeout_value = @intCast(os.windows.LARGE_INTEGER, delay / 100);
+            timeout_value = @as(os.windows.LARGE_INTEGER, @intCast(delay / 100));
             timeout_value = -timeout_value;
             timeout_ptr = &timeout_value;
         }
 
         const rc = os.windows.ntdll.RtlWaitOnAddress(
-            @ptrCast(?*const anyopaque, ptr),
-            @ptrCast(?*const anyopaque, &expect),
+            @as(?*const anyopaque, @ptrCast(ptr)),
+            @as(?*const anyopaque, @ptrCast(&expect)),
             @sizeOf(@TypeOf(expect)),
             timeout_ptr,
         );
@@ -151,7 +151,7 @@ const WindowsImpl = struct {
     }
 
     fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
-        const address = @ptrCast(?*const anyopaque, ptr);
+        const address = @as(?*const anyopaque, @ptrCast(ptr));
         assert(max_waiters != 0);
 
         switch (max_waiters) {
@@ -186,7 +186,7 @@ const DarwinImpl = struct {
        // true so that we know to ignore the ETIMEDOUT result.
         var timeout_overflowed = false;
 
-        const addr = @ptrCast(*const anyopaque, ptr);
+        const addr = @as(*const anyopaque, @ptrCast(ptr));
         const flags = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO;
         const status = blk: {
             if (supports_ulock_wait2) {
@@ -202,7 +202,7 @@ const DarwinImpl = struct {
         };
 
         if (status >= 0) return;
-        switch (@enumFromInt(std.os.E, -status)) {
+        switch (@as(std.os.E, @enumFromInt(-status))) {
             // Wait was interrupted by the OS or other spurious signalling.
             .INTR => {},
             // Address of the futex was paged out. This is unlikely, but possible in theory, and
@@ -225,11 +225,11 @@ const DarwinImpl = struct {
         }
 
         while (true) {
-            const addr = @ptrCast(*const anyopaque, ptr);
+            const addr = @as(*const anyopaque, @ptrCast(ptr));
             const status = os.darwin.__ulock_wake(flags, addr, 0);
 
             if (status >= 0) return;
-            switch (@enumFromInt(std.os.E, -status)) {
+            switch (@as(std.os.E, @enumFromInt(-status))) {
                 .INTR => continue, // spurious wake()
                 .FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t
                 .NOENT => return, // nothing was woken up
@@ -245,14 +245,14 @@ const LinuxImpl = struct {
     fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
         var ts: os.timespec = undefined;
         if (timeout) |timeout_ns| {
-            ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
-            ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
+            ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+            ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
         }
 
         const rc = os.linux.futex_wait(
-            @ptrCast(*const i32, &ptr.value),
+            @as(*const i32, @ptrCast(&ptr.value)),
             os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAIT,
-            @bitCast(i32, expect),
+            @as(i32, @bitCast(expect)),
             if (timeout != null) &ts else null,
         );
 
@@ -272,7 +272,7 @@ const LinuxImpl = struct {
 
     fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
         const rc = os.linux.futex_wake(
-            @ptrCast(*const i32, &ptr.value),
+            @as(*const i32, @ptrCast(&ptr.value)),
             os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAKE,
             std.math.cast(i32, max_waiters) orelse std.math.maxInt(i32),
         );
@@ -299,8 +299,8 @@ const FreebsdImpl = struct {
 
             tm._flags = 0; // use relative time not UMTX_ABSTIME
             tm._clockid = os.CLOCK.MONOTONIC;
-            tm._timeout.tv_sec = @intCast(@TypeOf(tm._timeout.tv_sec), timeout_ns / std.time.ns_per_s);
-            tm._timeout.tv_nsec = @intCast(@TypeOf(tm._timeout.tv_nsec), timeout_ns % std.time.ns_per_s);
+            tm._timeout.tv_sec = @as(@TypeOf(tm._timeout.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+            tm._timeout.tv_nsec = @as(@TypeOf(tm._timeout.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
         }
 
         const rc = os.freebsd._umtx_op(
@@ -347,14 +347,14 @@ const OpenbsdImpl = struct {
     fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
         var ts: os.timespec = undefined;
         if (timeout) |timeout_ns| {
-            ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
-            ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
+            ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+            ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
         }
 
         const rc = os.openbsd.futex(
-            @ptrCast(*const volatile u32, &ptr.value),
+            @as(*const volatile u32, @ptrCast(&ptr.value)),
             os.openbsd.FUTEX_WAIT | os.openbsd.FUTEX_PRIVATE_FLAG,
-            @bitCast(c_int, expect),
+            @as(c_int, @bitCast(expect)),
             if (timeout != null) &ts else null,
             null, // FUTEX_WAIT takes no requeue address
         );
@@ -377,7 +377,7 @@ const OpenbsdImpl = struct {
 
     fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
         const rc = os.openbsd.futex(
-            @ptrCast(*const volatile u32, &ptr.value),
+            @as(*const volatile u32, @ptrCast(&ptr.value)),
             os.openbsd.FUTEX_WAKE | os.openbsd.FUTEX_PRIVATE_FLAG,
             std.math.cast(c_int, max_waiters) orelse std.math.maxInt(c_int),
             null, // FUTEX_WAKE takes no timeout ptr
@@ -411,8 +411,8 @@ const DragonflyImpl = struct {
             }
         }
 
-        const value = @bitCast(c_int, expect);
-        const addr = @ptrCast(*const volatile c_int, &ptr.value);
+        const value = @as(c_int, @bitCast(expect));
+        const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
         const rc = os.dragonfly.umtx_sleep(addr, value, timeout_us);
 
         switch (os.errno(rc)) {
@@ -441,7 +441,7 @@ const DragonflyImpl = struct {
         // https://man.dragonflybsd.org/?command=umtx&section=2
         // > umtx_wakeup() will generally return 0 unless the address is bad.
         // We are fine with the address being bad (e.g. for Semaphore.post() where Semaphore.wait() frees the Semaphore)
-        const addr = @ptrCast(*const volatile c_int, &ptr.value);
+        const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
         _ = os.dragonfly.umtx_wakeup(addr, to_wake);
     }
 };
@@ -488,8 +488,8 @@ const PosixImpl = struct {
             var ts: os.timespec = undefined;
             if (timeout) |timeout_ns| {
                 os.clock_gettime(os.CLOCK.REALTIME, &ts) catch unreachable;
-                ts.tv_sec +|= @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
-                ts.tv_nsec += @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
+                ts.tv_sec +|= @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
+                ts.tv_nsec += @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
 
                 if (ts.tv_nsec >= std.time.ns_per_s) {
                     ts.tv_sec +|= 1;
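Five futex backends above repeat the same timeout conversion; `@TypeOf` keeps it portable across each platform's differing `timespec` field types. A sketch, assuming a POSIX target where `std.os.timespec` is defined:

    const std = @import("std");

    fn fillTimespec(ts: *std.os.timespec, timeout_ns: u64) void {
        // Split nanoseconds into whole seconds and the sub-second remainder;
        // @intCast picks up each field's platform-specific type via @TypeOf.
        ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
        ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
    }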
lib/std/Thread/Mutex.zig
@@ -242,12 +242,12 @@ const NonAtomicCounter = struct {
     value: [2]u64 = [_]u64{ 0, 0 },
 
     fn get(self: NonAtomicCounter) u128 {
-        return @bitCast(u128, self.value);
+        return @as(u128, @bitCast(self.value));
     }
 
     fn inc(self: *NonAtomicCounter) void {
-        for (@bitCast([2]u64, self.get() + 1), 0..) |v, i| {
-            @ptrCast(*volatile u64, &self.value[i]).* = v;
+        for (@as([2]u64, @bitCast(self.get() + 1)), 0..) |v, i| {
+            @as(*volatile u64, @ptrCast(&self.value[i])).* = v;
         }
     }
 };
lib/std/time/epoch.zig
@@ -122,9 +122,9 @@ pub const YearAndDay = struct {
             if (days_left < days_in_month)
                 break;
             days_left -= days_in_month;
-            month = @enumFromInt(Month, @intFromEnum(month) + 1);
+            month = @as(Month, @enumFromInt(@intFromEnum(month) + 1));
         }
-        return .{ .month = month, .day_index = @intCast(u5, days_left) };
+        return .{ .month = month, .day_index = @as(u5, @intCast(days_left)) };
     }
 };
 
@@ -146,7 +146,7 @@ pub const EpochDay = struct {
             year_day -= year_size;
             year += 1;
         }
-        return .{ .year = year, .day = @intCast(u9, year_day) };
+        return .{ .year = year, .day = @as(u9, @intCast(year_day)) };
     }
 };
 
@@ -156,11 +156,11 @@ pub const DaySeconds = struct {
 
     /// the number of hours past the start of the day (0 to 23)
     pub fn getHoursIntoDay(self: DaySeconds) u5 {
-        return @intCast(u5, @divTrunc(self.secs, 3600));
+        return @as(u5, @intCast(@divTrunc(self.secs, 3600)));
     }
     /// the number of minutes past the hour (0 to 59)
     pub fn getMinutesIntoHour(self: DaySeconds) u6 {
-        return @intCast(u6, @divTrunc(@mod(self.secs, 3600), 60));
+        return @as(u6, @intCast(@divTrunc(@mod(self.secs, 3600), 60)));
     }
     /// the number of seconds past the start of the minute (0 to 59)
     pub fn getSecondsIntoMinute(self: DaySeconds) u6 {
@@ -175,7 +175,7 @@ pub const EpochSeconds = struct {
     /// Returns the number of days since the epoch as an EpochDay.
     /// Use EpochDay to get information about the day of this time.
     pub fn getEpochDay(self: EpochSeconds) EpochDay {
-        return EpochDay{ .day = @intCast(u47, @divTrunc(self.secs, secs_per_day)) };
+        return EpochDay{ .day = @as(u47, @intCast(@divTrunc(self.secs, secs_per_day))) };
     }
 
     /// Returns the number of seconds into the day as DaySeconds.
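As a sanity check of the day-second arithmetic these hunks migrate, 3725 seconds into a day decomposes as 1 hour, 2 minutes, 5 seconds:

    test "day-second decomposition" {
        const secs: u17 = 3725; // 01:02:05
        try @import("std").testing.expectEqual(@as(u17, 1), @divTrunc(secs, 3600));
        try @import("std").testing.expectEqual(@as(u17, 2), @divTrunc(@mod(secs, 3600), 60));
        try @import("std").testing.expectEqual(@as(u17, 5), @mod(secs, 60));
    }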
lib/std/unicode/throughput_test.zig
@@ -32,8 +32,8 @@ fn benchmarkCodepointCount(buf: []const u8) !ResultCount {
     }
     const end = timer.read();
 
-    const elapsed_s = @floatFromInt(f64, end - start) / time.ns_per_s;
-    const throughput = @intFromFloat(u64, @floatFromInt(f64, bytes) / elapsed_s);
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / time.ns_per_s;
+    const throughput = @as(u64, @intFromFloat(@as(f64, @floatFromInt(bytes)) / elapsed_s));
 
     return ResultCount{ .count = r, .throughput = throughput };
 }
lib/std/valgrind/callgrind.zig
@@ -11,7 +11,7 @@ pub const CallgrindClientRequest = enum(usize) {
 };
 
 fn doCallgrindClientRequestExpr(default: usize, request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
-    return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5);
+    return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
 }
 
 fn doCallgrindClientRequestStmt(request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
lib/std/valgrind/memcheck.zig
@@ -21,7 +21,7 @@ pub const MemCheckClientRequest = enum(usize) {
 };
 
 fn doMemCheckClientRequestExpr(default: usize, request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
-    return valgrind.doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5);
+    return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
 }
 
 fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
@@ -31,24 +31,24 @@ fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: us
 /// Mark memory at qzz.ptr as unaddressable for qzz.len bytes.
 /// This returns -1 when run on Valgrind and 0 otherwise.
 pub fn makeMemNoAccess(qzz: []u8) i1 {
-    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
-        .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+    return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+        .MakeMemNoAccess, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
 }
 
 /// Similarly, mark memory at qzz.ptr as addressable but undefined
 /// for qzz.len bytes.
 /// This returns -1 when run on Valgrind and 0 otherwise.
 pub fn makeMemUndefined(qzz: []u8) i1 {
-    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
-        .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+    return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+        .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
 }
 
 /// Similarly, mark memory at qzz.ptr as addressable and defined
 /// for qzz.len bytes.
 pub fn makeMemDefined(qzz: []u8) i1 {
     // This returns -1 when run on Valgrind and 0 otherwise.
-    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
-        .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+    return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+        .MakeMemDefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
 }
 
 /// Similar to makeMemDefined except that addressability is
@@ -56,8 +56,8 @@ pub fn makeMemDefined(qzz: []u8) i1 {
 /// but those which are not addressable are left unchanged.
 /// This returns -1 when run on Valgrind and 0 otherwise.
 pub fn makeMemDefinedIfAddressable(qzz: []u8) i1 {
-    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
-        .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
+    return @as(i1, @intCast(doMemCheckClientRequestExpr(0, // default return
+        .MakeMemDefinedIfAddressable, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0)));
 }
 
 /// Create a block-description handle.  The description is an ascii
@@ -195,7 +195,7 @@ test "countLeakBlocks" {
 /// impossible to segfault your system by using this call.
 pub fn getVbits(zza: []u8, zzvbits: []u8) u2 {
     std.debug.assert(zzvbits.len >= zza.len / 8);
-    return @intCast(u2, doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0));
+    return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .GetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)));
 }
 
 /// Set the validity data for addresses zza, copying it
@@ -208,7 +208,7 @@ pub fn getVbits(zza: []u8, zzvbits: []u8) u2 {
 /// impossible to segfault your system by using this call.
 pub fn setVbits(zzvbits: []u8, zza: []u8) u2 {
     std.debug.assert(zzvbits.len >= zza.len / 8);
-    return @intCast(u2, doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0));
+    return @as(u2, @intCast(doMemCheckClientRequestExpr(0, .SetVbits, @intFromPtr(zza.ptr), @intFromPtr(zzvbits), zza.len, 0, 0)));
 }
 
 /// Disable and re-enable reporting of addressing errors in the
lib/std/zig/system/arm.zig
@@ -141,7 +141,7 @@ pub const aarch64 = struct {
     }
 
     inline fn bitField(input: u64, offset: u6) u4 {
-        return @truncate(u4, input >> offset);
+        return @as(u4, @truncate(input >> offset));
     }
 
     /// Input array should consist of readouts from 12 system registers such that:
@@ -176,23 +176,23 @@ pub const aarch64 = struct {
     /// Takes readout of MIDR_EL1 register as input.
     fn detectNativeCoreInfo(midr: u64) CoreInfo {
         var info = CoreInfo{
-            .implementer = @truncate(u8, midr >> 24),
-            .part = @truncate(u12, midr >> 4),
+            .implementer = @as(u8, @truncate(midr >> 24)),
+            .part = @as(u12, @truncate(midr >> 4)),
         };
 
         blk: {
             if (info.implementer == 0x41) {
                 // ARM Ltd.
-                const special_bits = @truncate(u4, info.part >> 8);
+                const special_bits = @as(u4, @truncate(info.part >> 8));
                 if (special_bits == 0x0 or special_bits == 0x7) {
                     // TODO Variant and arch encoded differently.
                     break :blk;
                 }
             }
 
-            info.variant |= @intCast(u8, @truncate(u4, midr >> 20)) << 4;
-            info.variant |= @truncate(u4, midr);
-            info.architecture = @truncate(u4, midr >> 16);
+            info.variant |= @as(u8, @intCast(@as(u4, @truncate(midr >> 20)))) << 4;
+            info.variant |= @as(u4, @truncate(midr));
+            info.architecture = @as(u4, @truncate(midr >> 16));
         }
 
         return info;
lib/std/zig/system/NativeTargetInfo.zig
@@ -479,8 +479,8 @@ fn glibcVerFromRPath(rpath: []const u8) !std.SemanticVersion {
 fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
     var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined;
     _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len);
-    const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf);
-    const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf);
+    const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf));
+    const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf));
     if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
     const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
         elf.ELFDATA2LSB => .Little,
@@ -503,8 +503,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
     if (sh_buf.len < shentsize) return error.InvalidElfFile;
 
     _ = try preadMin(file, &sh_buf, str_section_off, shentsize);
-    const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf));
-    const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf));
+    const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf));
+    const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf));
     const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
     const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
     var strtab_buf: [4096:0]u8 = undefined;
@@ -529,14 +529,8 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
             shoff += shentsize;
             sh_buf_i += shentsize;
         }) {
-            const sh32 = @ptrCast(
-                *elf.Elf32_Shdr,
-                @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]),
-            );
-            const sh64 = @ptrCast(
-                *elf.Elf64_Shdr,
-                @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]),
-            );
+            const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
+            const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
             const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name);
             const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0);
             if (mem.eql(u8, sh_name, ".dynstr")) {
@@ -558,7 +552,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
     var buf: [80000]u8 = undefined;
     if (buf.len < dynstr.size) return error.InvalidGnuLibCVersion;
 
-    const dynstr_size = @intCast(usize, dynstr.size);
+    const dynstr_size = @as(usize, @intCast(dynstr.size));
     const dynstr_bytes = buf[0..dynstr_size];
     _ = try preadMin(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len);
     var it = mem.splitScalar(u8, dynstr_bytes, 0);
@@ -621,8 +615,8 @@ pub fn abiAndDynamicLinkerFromFile(
 ) AbiAndDynamicLinkerFromFileError!NativeTargetInfo {
     var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined;
     _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len);
-    const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf);
-    const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf);
+    const hdr32 = @as(*elf.Elf32_Ehdr, @ptrCast(&hdr_buf));
+    const hdr64 = @as(*elf.Elf64_Ehdr, @ptrCast(&hdr_buf));
     if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
     const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
         elf.ELFDATA2LSB => .Little,
@@ -668,21 +662,21 @@ pub fn abiAndDynamicLinkerFromFile(
             phoff += phentsize;
             ph_buf_i += phentsize;
         }) {
-            const ph32 = @ptrCast(*elf.Elf32_Phdr, @alignCast(@alignOf(elf.Elf32_Phdr), &ph_buf[ph_buf_i]));
-            const ph64 = @ptrCast(*elf.Elf64_Phdr, @alignCast(@alignOf(elf.Elf64_Phdr), &ph_buf[ph_buf_i]));
+            const ph32: *elf.Elf32_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i]));
+            const ph64: *elf.Elf64_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i]));
             const p_type = elfInt(is_64, need_bswap, ph32.p_type, ph64.p_type);
             switch (p_type) {
                 elf.PT_INTERP => if (look_for_ld) {
                     const p_offset = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset);
                     const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz);
                     if (p_filesz > result.dynamic_linker.buffer.len) return error.NameTooLong;
-                    const filesz = @intCast(usize, p_filesz);
+                    const filesz = @as(usize, @intCast(p_filesz));
                     _ = try preadMin(file, result.dynamic_linker.buffer[0..filesz], p_offset, filesz);
                     // PT_INTERP includes a null byte in filesz.
                     const len = filesz - 1;
                     // dynamic_linker.max_byte is "max", not "len".
                     // We know it will fit in u8 because we check against dynamic_linker.buffer.len above.
-                    result.dynamic_linker.max_byte = @intCast(u8, len - 1);
+                    result.dynamic_linker.max_byte = @as(u8, @intCast(len - 1));
 
                     // Use it to determine ABI.
                     const full_ld_path = result.dynamic_linker.buffer[0..len];
@@ -720,14 +714,8 @@ pub fn abiAndDynamicLinkerFromFile(
                             dyn_off += dyn_size;
                             dyn_buf_i += dyn_size;
                         }) {
-                            const dyn32 = @ptrCast(
-                                *elf.Elf32_Dyn,
-                                @alignCast(@alignOf(elf.Elf32_Dyn), &dyn_buf[dyn_buf_i]),
-                            );
-                            const dyn64 = @ptrCast(
-                                *elf.Elf64_Dyn,
-                                @alignCast(@alignOf(elf.Elf64_Dyn), &dyn_buf[dyn_buf_i]),
-                            );
+                            const dyn32: *elf.Elf32_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i]));
+                            const dyn64: *elf.Elf64_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i]));
                             const tag = elfInt(is_64, need_bswap, dyn32.d_tag, dyn64.d_tag);
                             const val = elfInt(is_64, need_bswap, dyn32.d_val, dyn64.d_val);
                             if (tag == elf.DT_RUNPATH) {
@@ -755,8 +743,8 @@ pub fn abiAndDynamicLinkerFromFile(
         if (sh_buf.len < shentsize) return error.InvalidElfFile;
 
         _ = try preadMin(file, &sh_buf, str_section_off, shentsize);
-        const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf));
-        const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf));
+        const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf));
+        const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf));
         const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
         const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
         var strtab_buf: [4096:0]u8 = undefined;
@@ -782,14 +770,8 @@ pub fn abiAndDynamicLinkerFromFile(
                 shoff += shentsize;
                 sh_buf_i += shentsize;
             }) {
-                const sh32 = @ptrCast(
-                    *elf.Elf32_Shdr,
-                    @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]),
-                );
-                const sh64 = @ptrCast(
-                    *elf.Elf64_Shdr,
-                    @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]),
-                );
+                const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
+                const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
                 const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name);
                 const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0);
                 if (mem.eql(u8, sh_name, ".dynstr")) {
lib/std/zig/system/windows.zig
@@ -26,8 +26,8 @@ pub fn detectRuntimeVersion() WindowsVersion {
     //   `---` `` ``--> Sub-version (Starting from Windows 10 onwards)
     //     \    `--> Service pack (Always zero in the constants defined)
     //      `--> OS version (Major & minor)
-    const os_ver: u16 = @intCast(u16, version_info.dwMajorVersion & 0xff) << 8 |
-        @intCast(u16, version_info.dwMinorVersion & 0xff);
+    const os_ver: u16 = @as(u16, @intCast(version_info.dwMajorVersion & 0xff)) << 8 |
+        @as(u16, @intCast(version_info.dwMinorVersion & 0xff));
     const sp_ver: u8 = 0;
     const sub_ver: u8 = if (os_ver >= 0x0A00) subver: {
         // There's no other way to obtain this info beside
@@ -38,12 +38,12 @@ pub fn detectRuntimeVersion() WindowsVersion {
             if (version_info.dwBuildNumber >= build)
                 last_idx = i;
         }
-        break :subver @truncate(u8, last_idx);
+        break :subver @as(u8, @truncate(last_idx));
     } else 0;
 
     const version: u32 = @as(u32, os_ver) << 16 | @as(u16, sp_ver) << 8 | sub_ver;
 
-    return @enumFromInt(WindowsVersion, version);
+    return @as(WindowsVersion, @enumFromInt(version));
 }
 
 // Technically, a registry value can be as long as 1MB. However, MS recommends storing
@@ -100,11 +100,11 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
                 REG.MULTI_SZ,
                 => {
                     comptime assert(@sizeOf(std.os.windows.UNICODE_STRING) % 2 == 0);
-                    const unicode = @ptrCast(*std.os.windows.UNICODE_STRING, &tmp_bufs[i]);
+                    const unicode = @as(*std.os.windows.UNICODE_STRING, @ptrCast(&tmp_bufs[i]));
                     unicode.* = .{
                         .Length = 0,
                         .MaximumLength = max_value_len - @sizeOf(std.os.windows.UNICODE_STRING),
-                        .Buffer = @ptrCast([*]u16, tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..]),
+                        .Buffer = @as([*]u16, @ptrCast(tmp_bufs[i][@sizeOf(std.os.windows.UNICODE_STRING)..])),
                     };
                     break :blk unicode;
                 },
@@ -159,7 +159,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
                 REG.MULTI_SZ,
                 => {
                     var buf = @field(args, field.name).value_buf;
-                    const entry = @ptrCast(*align(1) const std.os.windows.UNICODE_STRING, table[i + 1].EntryContext);
+                    const entry = @as(*align(1) const std.os.windows.UNICODE_STRING, @ptrCast(table[i + 1].EntryContext));
                     const len = try std.unicode.utf16leToUtf8(buf, entry.Buffer[0 .. entry.Length / 2]);
                     buf[len] = 0;
                 },
@@ -168,7 +168,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
                 REG.DWORD_BIG_ENDIAN,
                 REG.QWORD,
                 => {
-                    const entry = @ptrCast([*]align(1) const u8, table[i + 1].EntryContext);
+                    const entry = @as([*]align(1) const u8, @ptrCast(table[i + 1].EntryContext));
                     switch (@field(args, field.name).value_type) {
                         REG.DWORD, REG.DWORD_BIG_ENDIAN => {
                             @memcpy(@field(args, field.name).value_buf[0..4], entry[0..4]);
@@ -254,18 +254,18 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
                 // CP 4039 -> ID_AA64MMFR1_EL1
                 // CP 403A -> ID_AA64MMFR2_EL1
                 getCpuInfoFromRegistry(i, .{
-                    .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[0]) },
-                    .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[1]) },
-                    .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[2]) },
-                    .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[3]) },
-                    .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[4]) },
-                    .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[5]) },
-                    .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[6]) },
-                    .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[7]) },
-                    .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[8]) },
-                    .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[9]) },
-                    .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[10]) },
-                    .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @ptrCast(*[8]u8, &registers[11]) },
+                    .{ .key = "CP 4000", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[0])) },
+                    .{ .key = "CP 4020", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[1])) },
+                    .{ .key = "CP 4021", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[2])) },
+                    .{ .key = "CP 4028", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[3])) },
+                    .{ .key = "CP 4029", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[4])) },
+                    .{ .key = "CP 402C", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[5])) },
+                    .{ .key = "CP 402D", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[6])) },
+                    .{ .key = "CP 4030", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[7])) },
+                    .{ .key = "CP 4031", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[8])) },
+                    .{ .key = "CP 4038", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[9])) },
+                    .{ .key = "CP 4039", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[10])) },
+                    .{ .key = "CP 403A", .value_type = REG.QWORD, .value_buf = @as(*[8]u8, @ptrCast(&registers[11])) },
                 }) catch break :blk null;
 
                 cores[i] = @import("arm.zig").aarch64.detectNativeCpuAndFeatures(current_arch, registers) orelse
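The version packing at the top of this file combines four byte-sized components into one `u32` laid out as 0xMMmmSSuu (major, minor, service pack, sub-version). A sketch of just the packing (hypothetical helper):

    fn packVersion(major: u32, minor: u32, sp: u8, sub: u8) u32 {
        const os_ver: u16 = @as(u16, @intCast(major & 0xff)) << 8 |
            @as(u16, @intCast(minor & 0xff));
        return @as(u32, os_ver) << 16 | @as(u16, sp) << 8 | sub;
    }

    test "packVersion" {
        try @import("std").testing.expectEqual(@as(u32, 0x0A000001), packVersion(10, 0, 0, 1));
    }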
lib/std/zig/Ast.zig
@@ -62,7 +62,7 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A
         const token = tokenizer.next();
         try tokens.append(gpa, .{
             .tag = token.tag,
-            .start = @intCast(u32, token.loc.start),
+            .start = @as(u32, @intCast(token.loc.start)),
         });
         if (token.tag == .eof) break;
     }
@@ -123,7 +123,7 @@ pub fn renderToArrayList(tree: Ast, buffer: *std.ArrayList(u8)) RenderError!void
 /// should point after the token in the error message.
 pub fn errorOffset(tree: Ast, parse_error: Error) u32 {
     return if (parse_error.token_is_prev)
-        @intCast(u32, tree.tokenSlice(parse_error.token).len)
+        @as(u32, @intCast(tree.tokenSlice(parse_error.token).len))
     else
         0;
 }
@@ -772,7 +772,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
     var n = node;
     var end_offset: TokenIndex = 0;
     while (true) switch (tags[n]) {
-        .root => return @intCast(TokenIndex, tree.tokens.len - 1),
+        .root => return @as(TokenIndex, @intCast(tree.tokens.len - 1)),
 
         .@"usingnamespace",
         .bool_not,
@@ -1288,7 +1288,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex {
             n = extra.else_expr;
         },
         .@"for" => {
-            const extra = @bitCast(Node.For, datas[n].rhs);
+            const extra = @as(Node.For, @bitCast(datas[n].rhs));
             n = tree.extra_data[datas[n].lhs + extra.inputs + @intFromBool(extra.has_else)];
         },
         .@"suspend" => {
@@ -1955,7 +1955,7 @@ pub fn forSimple(tree: Ast, node: Node.Index) full.For {
 
 pub fn forFull(tree: Ast, node: Node.Index) full.For {
     const data = tree.nodes.items(.data)[node];
-    const extra = @bitCast(Node.For, data.rhs);
+    const extra = @as(Node.For, @bitCast(data.rhs));
     const inputs = tree.extra_data[data.lhs..][0..extra.inputs];
     const then_expr = tree.extra_data[data.lhs + extra.inputs];
     const else_expr = if (extra.has_else) tree.extra_data[data.lhs + extra.inputs + 1] else 0;
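The `For` hunks reinterpret a packed `u32` data word as a struct of bit fields via `@bitCast`. A sketch with a hypothetical layout (the real `Node.For` fields may differ):

    const Flags = packed struct(u32) {
        inputs: u31,
        has_else: bool,
    };

    test "u32 round-trips through a packed struct" {
        // The first field occupies the least significant bits.
        const f = @as(Flags, @bitCast(@as(u32, 5)));
        try @import("std").testing.expectEqual(@as(u31, 5), f.inputs);
        try @import("std").testing.expect(!f.has_else);
    }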
lib/std/zig/c_builtins.zig
@@ -20,19 +20,19 @@ pub inline fn __builtin_signbitf(val: f32) c_int {
 pub inline fn __builtin_popcount(val: c_uint) c_int {
     // popcount of a c_uint will never exceed the capacity of a c_int
     @setRuntimeSafety(false);
-    return @bitCast(c_int, @as(c_uint, @popCount(val)));
+    return @as(c_int, @bitCast(@as(c_uint, @popCount(val))));
 }
 pub inline fn __builtin_ctz(val: c_uint) c_int {
     // Returns the number of trailing 0-bits in val, starting at the least significant bit position.
     // In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
     @setRuntimeSafety(false);
-    return @bitCast(c_int, @as(c_uint, @ctz(val)));
+    return @as(c_int, @bitCast(@as(c_uint, @ctz(val))));
 }
 pub inline fn __builtin_clz(val: c_uint) c_int {
     // Returns the number of leading 0-bits in x, starting at the most significant bit position.
     // In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
     @setRuntimeSafety(false);
-    return @bitCast(c_int, @as(c_uint, @clz(val)));
+    return @as(c_int, @bitCast(@as(c_uint, @clz(val))));
 }
 
 pub inline fn __builtin_sqrt(val: f64) f64 {
@@ -135,7 +135,7 @@ pub inline fn __builtin_object_size(ptr: ?*const anyopaque, ty: c_int) usize {
     // If it is not possible to determine which objects ptr points to at compile time,
     // __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0
     // for type 2 or 3.
-    if (ty == 0 or ty == 1) return @bitCast(usize, -@as(isize, 1));
+    if (ty == 0 or ty == 1) return @as(usize, @bitCast(-@as(isize, 1)));
     if (ty == 2 or ty == 3) return 0;
     unreachable;
 }
@@ -151,8 +151,8 @@ pub inline fn __builtin___memset_chk(
 }
 
 pub inline fn __builtin_memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque {
-    const dst_cast = @ptrCast([*c]u8, dst);
-    @memset(dst_cast[0..len], @bitCast(u8, @truncate(i8, val)));
+    const dst_cast = @as([*c]u8, @ptrCast(dst));
+    @memset(dst_cast[0..len], @as(u8, @bitCast(@as(i8, @truncate(val)))));
     return dst;
 }
 
@@ -172,8 +172,8 @@ pub inline fn __builtin_memcpy(
     len: usize,
 ) ?*anyopaque {
     if (len > 0) @memcpy(
-        @ptrCast([*]u8, dst.?)[0..len],
-        @ptrCast([*]const u8, src.?),
+        @as([*]u8, @ptrCast(dst.?))[0..len],
+        @as([*]const u8, @ptrCast(src.?)),
     );
     return dst;
 }
@@ -202,8 +202,8 @@ pub inline fn __builtin_expect(expr: c_long, c: c_long) c_long {
 /// If tagp is empty, the function returns a NaN whose significand is zero.
 pub inline fn __builtin_nanf(tagp: []const u8) f32 {
     const parsed = std.fmt.parseUnsigned(c_ulong, tagp, 0) catch 0;
-    const bits = @truncate(u23, parsed); // single-precision float trailing significand is 23 bits
-    return @bitCast(f32, @as(u32, bits) | std.math.qnan_u32);
+    const bits = @as(u23, @truncate(parsed)); // single-precision float trailing significand is 23 bits
+    return @as(f32, @bitCast(@as(u32, bits) | std.math.qnan_u32));
 }
 
 pub inline fn __builtin_huge_valf() f32 {
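`__builtin_nanf` builds a quiet NaN carrying a payload by OR-ing parsed bits into the trailing significand. A sketch, with the payload restricted to the low 22 bits so it cannot collide with the quiet bit:

    const std = @import("std");

    fn qnanWithPayload(payload: u22) f32 {
        // qnan_u32 sets the exponent bits plus the quiet bit (bit 22);
        // the payload occupies bits 0..21 of the significand.
        return @as(f32, @bitCast(@as(u32, payload) | std.math.qnan_u32));
    }

    test "payload survives the bit round trip" {
        const x = qnanWithPayload(42);
        try std.testing.expect(std.math.isNan(x));
        try std.testing.expectEqual(@as(u22, 42), @as(u22, @truncate(@as(u32, @bitCast(x)))));
    }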
lib/std/zig/c_translation.zig
@@ -42,9 +42,9 @@ pub fn cast(comptime DestType: type, target: anytype) DestType {
         },
         .Float => {
             switch (@typeInfo(SourceType)) {
-                .Int => return @floatFromInt(DestType, target),
-                .Float => return @floatCast(DestType, target),
-                .Bool => return @floatFromInt(DestType, @intFromBool(target)),
+                .Int => return @as(DestType, @floatFromInt(target)),
+                .Float => return @as(DestType, @floatCast(target)),
+                .Bool => return @as(DestType, @floatFromInt(@intFromBool(target))),
                 else => {},
             }
         },
@@ -65,36 +65,25 @@ fn castInt(comptime DestType: type, target: anytype) DestType {
     const source = @typeInfo(@TypeOf(target)).Int;
 
     if (dest.bits < source.bits)
-        return @bitCast(DestType, @truncate(std.meta.Int(source.signedness, dest.bits), target))
+        return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), @truncate(target))))
     else
-        return @bitCast(DestType, @as(std.meta.Int(source.signedness, dest.bits), target));
+        return @as(DestType, @bitCast(@as(std.meta.Int(source.signedness, dest.bits), target)));
 }
 
 fn castPtr(comptime DestType: type, target: anytype) DestType {
-    const dest = ptrInfo(DestType);
-    const source = ptrInfo(@TypeOf(target));
-
-    if (source.is_const and !dest.is_const)
-        return @constCast(target)
-    else if (source.is_volatile and !dest.is_volatile)
-        return @volatileCast(target)
-    else if (@typeInfo(dest.child) == .Opaque)
-        // dest.alignment would error out
-        return @ptrCast(DestType, target)
-    else
-        return @ptrCast(DestType, @alignCast(dest.alignment, target));
+    return @constCast(@volatileCast(@alignCast(@ptrCast(target))));
 }
 
 fn castToPtr(comptime DestType: type, comptime SourceType: type, target: anytype) DestType {
     switch (@typeInfo(SourceType)) {
         .Int => {
-            return @ptrFromInt(DestType, castInt(usize, target));
+            return @as(DestType, @ptrFromInt(castInt(usize, target)));
         },
         .ComptimeInt => {
             if (target < 0)
-                return @ptrFromInt(DestType, @bitCast(usize, @intCast(isize, target)))
+                return @as(DestType, @ptrFromInt(@as(usize, @bitCast(@as(isize, @intCast(target))))))
             else
-                return @ptrFromInt(DestType, @intCast(usize, target));
+                return @as(DestType, @ptrFromInt(@as(usize, @intCast(target))));
         },
         .Pointer => {
             return castPtr(DestType, target);
@@ -120,34 +109,34 @@ fn ptrInfo(comptime PtrType: type) std.builtin.Type.Pointer {
 test "cast" {
     var i = @as(i64, 10);
 
-    try testing.expect(cast(*u8, 16) == @ptrFromInt(*u8, 16));
+    try testing.expect(cast(*u8, 16) == @as(*u8, @ptrFromInt(16)));
     try testing.expect(cast(*u64, &i).* == @as(u64, 10));
     try testing.expect(cast(*i64, @as(?*align(1) i64, &i)) == &i);
 
-    try testing.expect(cast(?*u8, 2) == @ptrFromInt(*u8, 2));
+    try testing.expect(cast(?*u8, 2) == @as(*u8, @ptrFromInt(2)));
     try testing.expect(cast(?*i64, @as(*align(1) i64, &i)) == &i);
     try testing.expect(cast(?*i64, @as(?*align(1) i64, &i)) == &i);
 
-    try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(*u32, 4)));
-    try testing.expectEqual(@as(u32, 4), cast(u32, @ptrFromInt(?*u32, 4)));
+    try testing.expectEqual(@as(u32, 4), cast(u32, @as(*u32, @ptrFromInt(4))));
+    try testing.expectEqual(@as(u32, 4), cast(u32, @as(?*u32, @ptrFromInt(4))));
     try testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10)));
 
-    try testing.expectEqual(@bitCast(i32, @as(u32, 0x8000_0000)), cast(i32, @as(u32, 0x8000_0000)));
+    try testing.expectEqual(@as(i32, @bitCast(@as(u32, 0x8000_0000))), cast(i32, @as(u32, 0x8000_0000)));
 
-    try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*const u8, 2)));
-    try testing.expectEqual(@ptrFromInt(*u8, 2), cast(*u8, @ptrFromInt(*volatile u8, 2)));
+    try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*const u8, @ptrFromInt(2))));
+    try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*volatile u8, @ptrFromInt(2))));
 
-    try testing.expectEqual(@ptrFromInt(?*anyopaque, 2), cast(?*anyopaque, @ptrFromInt(*u8, 2)));
+    try testing.expectEqual(@as(?*anyopaque, @ptrFromInt(2)), cast(?*anyopaque, @as(*u8, @ptrFromInt(2))));
 
     var foo: c_int = -1;
-    try testing.expect(cast(*anyopaque, -1) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))));
-    try testing.expect(cast(*anyopaque, foo) == @ptrFromInt(*anyopaque, @bitCast(usize, @as(isize, -1))));
-    try testing.expect(cast(?*anyopaque, -1) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1))));
-    try testing.expect(cast(?*anyopaque, foo) == @ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1))));
+    try testing.expect(cast(*anyopaque, -1) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
+    try testing.expect(cast(*anyopaque, foo) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
+    try testing.expect(cast(?*anyopaque, -1) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
+    try testing.expect(cast(?*anyopaque, foo) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
 
     const FnPtr = ?*align(1) const fn (*anyopaque) void;
-    try testing.expect(cast(FnPtr, 0) == @ptrFromInt(FnPtr, @as(usize, 0)));
-    try testing.expect(cast(FnPtr, foo) == @ptrFromInt(FnPtr, @bitCast(usize, @as(isize, -1))));
+    try testing.expect(cast(FnPtr, 0) == @as(FnPtr, @ptrFromInt(@as(usize, 0))));
+    try testing.expect(cast(FnPtr, foo) == @as(FnPtr, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
 }
 
 /// Given a value returns its size as C's sizeof operator would.
@@ -192,7 +181,7 @@ pub fn sizeof(target: anytype) usize {
                 const array_info = @typeInfo(ptr.child).Array;
                 if ((array_info.child == u8 or array_info.child == u16) and
                     array_info.sentinel != null and
-                    @ptrCast(*align(1) const array_info.child, array_info.sentinel.?).* == 0)
+                    @as(*align(1) const array_info.child, @ptrCast(array_info.sentinel.?)).* == 0)
                 {
                     // length of the string plus one for the null terminator.
                     return (array_info.len + 1) * @sizeOf(array_info.child);
@@ -325,10 +314,10 @@ test "promoteIntLiteral" {
 pub fn shuffleVectorIndex(comptime this_index: c_int, comptime source_vector_len: usize) i32 {
     if (this_index <= 0) return 0;
 
-    const positive_index = @intCast(usize, this_index);
-    if (positive_index < source_vector_len) return @intCast(i32, this_index);
+    const positive_index = @as(usize, @intCast(this_index));
+    if (positive_index < source_vector_len) return @as(i32, @intCast(this_index));
     const b_index = positive_index - source_vector_len;
-    return ~@intCast(i32, b_index);
+    return ~@as(i32, @intCast(b_index));
 }
 
 test "shuffleVectorIndex" {
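The `castPtr` hunk above is the most instructive rewrite in this file: four special cases collapse into one chain because result-type inference lets each builtin peel off a single qualifier. A sketch on a concrete pair of pointer types (the `@alignCast` is safety-checked at runtime):

    fn stripQualifiers(p: *align(1) const volatile u32) *u32 {
        // @ptrCast adjusts the child type, @alignCast re-checks alignment,
        // @volatileCast and @constCast drop the qualifiers; the *u32
        // result type flows through the entire chain.
        return @constCast(@volatileCast(@alignCast(@ptrCast(p))));
    }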
lib/std/zig/CrossTarget.zig
@@ -317,7 +317,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
             }
             const feature_name = cpu_features[start..index];
             for (all_features, 0..) |feature, feat_index_usize| {
-                const feat_index = @intCast(Target.Cpu.Feature.Set.Index, feat_index_usize);
+                const feat_index = @as(Target.Cpu.Feature.Set.Index, @intCast(feat_index_usize));
                 if (mem.eql(u8, feature_name, feature.name)) {
                     set.addFeature(feat_index);
                     break;
lib/std/zig/ErrorBundle.zig
@@ -94,7 +94,7 @@ pub fn getErrorMessageList(eb: ErrorBundle) ErrorMessageList {
 
 pub fn getMessages(eb: ErrorBundle) []const MessageIndex {
     const list = eb.getErrorMessageList();
-    return @ptrCast([]const MessageIndex, eb.extra[list.start..][0..list.len]);
+    return @as([]const MessageIndex, @ptrCast(eb.extra[list.start..][0..list.len]));
 }
 
 pub fn getErrorMessage(eb: ErrorBundle, index: MessageIndex) ErrorMessage {
@@ -109,7 +109,7 @@ pub fn getSourceLocation(eb: ErrorBundle, index: SourceLocationIndex) SourceLoca
 pub fn getNotes(eb: ErrorBundle, index: MessageIndex) []const MessageIndex {
     const notes_len = eb.getErrorMessage(index).notes_len;
     const start = @intFromEnum(index) + @typeInfo(ErrorMessage).Struct.fields.len;
-    return @ptrCast([]const MessageIndex, eb.extra[start..][0..notes_len]);
+    return @as([]const MessageIndex, @ptrCast(eb.extra[start..][0..notes_len]));
 }
 
 pub fn getCompileLogOutput(eb: ErrorBundle) [:0]const u8 {
@@ -125,8 +125,8 @@ fn extraData(eb: ErrorBundle, comptime T: type, index: usize) struct { data: T,
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => eb.extra[i],
-            MessageIndex => @enumFromInt(MessageIndex, eb.extra[i]),
-            SourceLocationIndex => @enumFromInt(SourceLocationIndex, eb.extra[i]),
+            MessageIndex => @as(MessageIndex, @enumFromInt(eb.extra[i])),
+            SourceLocationIndex => @as(SourceLocationIndex, @enumFromInt(eb.extra[i])),
             else => @compileError("bad field type"),
         };
         i += 1;
@@ -202,7 +202,7 @@ fn renderErrorMessageToWriter(
         try counting_stderr.writeAll(": ");
         // This is the length of the part before the error message:
         // e.g. "file.zig:4:5: error: "
-        const prefix_len = @intCast(usize, counting_stderr.context.bytes_written);
+        const prefix_len = @as(usize, @intCast(counting_stderr.context.bytes_written));
         try ttyconf.setColor(stderr, .reset);
         try ttyconf.setColor(stderr, .bold);
         if (err_msg.count == 1) {
@@ -357,7 +357,7 @@ pub const Wip = struct {
         }
 
         const compile_log_str_index = if (compile_log_text.len == 0) 0 else str: {
-            const str = @intCast(u32, wip.string_bytes.items.len);
+            const str = @as(u32, @intCast(wip.string_bytes.items.len));
             try wip.string_bytes.ensureUnusedCapacity(gpa, compile_log_text.len + 1);
             wip.string_bytes.appendSliceAssumeCapacity(compile_log_text);
             wip.string_bytes.appendAssumeCapacity(0);
@@ -365,11 +365,11 @@ pub const Wip = struct {
         };
 
         wip.setExtra(0, ErrorMessageList{
-            .len = @intCast(u32, wip.root_list.items.len),
-            .start = @intCast(u32, wip.extra.items.len),
+            .len = @as(u32, @intCast(wip.root_list.items.len)),
+            .start = @as(u32, @intCast(wip.extra.items.len)),
             .compile_log_text = compile_log_str_index,
         });
-        try wip.extra.appendSlice(gpa, @ptrCast([]const u32, wip.root_list.items));
+        try wip.extra.appendSlice(gpa, @as([]const u32, @ptrCast(wip.root_list.items)));
         wip.root_list.clearAndFree(gpa);
         return .{
             .string_bytes = try wip.string_bytes.toOwnedSlice(gpa),
@@ -386,7 +386,7 @@ pub const Wip = struct {
 
     pub fn addString(wip: *Wip, s: []const u8) !u32 {
         const gpa = wip.gpa;
-        const index = @intCast(u32, wip.string_bytes.items.len);
+        const index = @as(u32, @intCast(wip.string_bytes.items.len));
         try wip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
         wip.string_bytes.appendSliceAssumeCapacity(s);
         wip.string_bytes.appendAssumeCapacity(0);
@@ -395,7 +395,7 @@ pub const Wip = struct {
 
     pub fn printString(wip: *Wip, comptime fmt: []const u8, args: anytype) !u32 {
         const gpa = wip.gpa;
-        const index = @intCast(u32, wip.string_bytes.items.len);
+        const index = @as(u32, @intCast(wip.string_bytes.items.len));
         try wip.string_bytes.writer(gpa).print(fmt, args);
         try wip.string_bytes.append(gpa, 0);
         return index;
@@ -407,15 +407,15 @@ pub const Wip = struct {
     }
 
     pub fn addErrorMessage(wip: *Wip, em: ErrorMessage) !MessageIndex {
-        return @enumFromInt(MessageIndex, try addExtra(wip, em));
+        return @as(MessageIndex, @enumFromInt(try addExtra(wip, em)));
     }
 
     pub fn addErrorMessageAssumeCapacity(wip: *Wip, em: ErrorMessage) MessageIndex {
-        return @enumFromInt(MessageIndex, addExtraAssumeCapacity(wip, em));
+        return @as(MessageIndex, @enumFromInt(addExtraAssumeCapacity(wip, em)));
     }
 
     pub fn addSourceLocation(wip: *Wip, sl: SourceLocation) !SourceLocationIndex {
-        return @enumFromInt(SourceLocationIndex, try addExtra(wip, sl));
+        return @as(SourceLocationIndex, @enumFromInt(try addExtra(wip, sl)));
     }
 
     pub fn addReferenceTrace(wip: *Wip, rt: ReferenceTrace) !void {
@@ -431,7 +431,7 @@ pub const Wip = struct {
         const other_list = other.getMessages();
 
         // The ensureUnusedCapacity call above guarantees this.
-        const notes_start = wip.reserveNotes(@intCast(u32, other_list.len)) catch unreachable;
+        const notes_start = wip.reserveNotes(@as(u32, @intCast(other_list.len))) catch unreachable;
         for (notes_start.., other_list) |note, message| {
             wip.extra.items[note] = @intFromEnum(wip.addOtherMessage(other, message) catch unreachable);
         }
@@ -441,7 +441,7 @@ pub const Wip = struct {
         try wip.extra.ensureUnusedCapacity(wip.gpa, notes_len +
             notes_len * @typeInfo(ErrorBundle.ErrorMessage).Struct.fields.len);
         wip.extra.items.len += notes_len;
-        return @intCast(u32, wip.extra.items.len - notes_len);
+        return @as(u32, @intCast(wip.extra.items.len - notes_len));
     }
 
     fn addOtherMessage(wip: *Wip, other: ErrorBundle, msg_index: MessageIndex) !MessageIndex {
@@ -493,7 +493,7 @@ pub const Wip = struct {
 
     fn addExtraAssumeCapacity(wip: *Wip, extra: anytype) u32 {
         const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
-        const result = @intCast(u32, wip.extra.items.len);
+        const result = @as(u32, @intCast(wip.extra.items.len));
         wip.extra.items.len += fields.len;
         setExtra(wip, result, extra);
         return result;
lib/std/zig/number_literal.zig
@@ -141,7 +141,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result {
             'a'...'z' => c - 'a' + 10,
             else => return .{ .failure = .{ .invalid_character = i } },
         };
-        if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @enumFromInt(Base, base) } } };
+        if (digit >= base) return .{ .failure = .{ .invalid_digit = .{ .i = i, .base = @as(Base, @enumFromInt(base)) } } };
         if (exponent and digit >= 10) return .{ .failure = .{ .invalid_digit_exponent = i } };
         underscore = false;
         special = 0;
@@ -159,7 +159,7 @@ pub fn parseNumberLiteral(bytes: []const u8) Result {
     if (underscore) return .{ .failure = .{ .trailing_underscore = bytes.len - 1 } };
     if (special != 0) return .{ .failure = .{ .trailing_special = bytes.len - 1 } };
 
-    if (float) return .{ .float = @enumFromInt(FloatBase, base) };
-    if (overflow) return .{ .big_int = @enumFromInt(Base, base) };
+    if (float) return .{ .float = @as(FloatBase, @enumFromInt(base)) };
+    if (overflow) return .{ .big_int = @as(Base, @enumFromInt(base)) };
     return .{ .int = x };
 }
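
`@enumFromInt` follows the same shape as the integer casts: the enum type is no longer the first argument but comes from the result location. A self-contained sketch (this `Base` enum is a stand-in for the one used by number_literal.zig, not the real declaration):

const std = @import("std");

const Base = enum(u8) { binary = 2, octal = 8, decimal = 10, hex = 16 };

test "enumFromInt via result location" {
    const raw: u8 = 16;
    const base = @as(Base, @enumFromInt(raw)); // safety-checked: 16 must name a Base value
    try std.testing.expectEqual(Base.hex, base);
    try std.testing.expectEqual(raw, @intFromEnum(base));
}
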
lib/std/zig/Parse.zig
@@ -36,20 +36,20 @@ const Members = struct {
 fn listToSpan(p: *Parse, list: []const Node.Index) !Node.SubRange {
     try p.extra_data.appendSlice(p.gpa, list);
     return Node.SubRange{
-        .start = @intCast(Node.Index, p.extra_data.items.len - list.len),
-        .end = @intCast(Node.Index, p.extra_data.items.len),
+        .start = @as(Node.Index, @intCast(p.extra_data.items.len - list.len)),
+        .end = @as(Node.Index, @intCast(p.extra_data.items.len)),
     };
 }
 
 fn addNode(p: *Parse, elem: Ast.Node) Allocator.Error!Node.Index {
-    const result = @intCast(Node.Index, p.nodes.len);
+    const result = @as(Node.Index, @intCast(p.nodes.len));
     try p.nodes.append(p.gpa, elem);
     return result;
 }
 
 fn setNode(p: *Parse, i: usize, elem: Ast.Node) Node.Index {
     p.nodes.set(i, elem);
-    return @intCast(Node.Index, i);
+    return @as(Node.Index, @intCast(i));
 }
 
 fn reserveNode(p: *Parse, tag: Ast.Node.Tag) !usize {
@@ -72,7 +72,7 @@ fn unreserveNode(p: *Parse, node_index: usize) void {
 fn addExtra(p: *Parse, extra: anytype) Allocator.Error!Node.Index {
     const fields = std.meta.fields(@TypeOf(extra));
     try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
-    const result = @intCast(u32, p.extra_data.items.len);
+    const result = @as(u32, @intCast(p.extra_data.items.len));
     inline for (fields) |field| {
         comptime assert(field.type == Node.Index);
         p.extra_data.appendAssumeCapacity(@field(extra, field.name));
@@ -1202,10 +1202,10 @@ fn parseForStatement(p: *Parse) !Node.Index {
         .main_token = for_token,
         .data = .{
             .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
-            .rhs = @bitCast(u32, Node.For{
-                .inputs = @intCast(u31, inputs),
+            .rhs = @as(u32, @bitCast(Node.For{
+                .inputs = @as(u31, @intCast(inputs)),
                 .has_else = has_else,
-            }),
+            })),
         },
     });
 }
@@ -1486,7 +1486,7 @@ fn parseExprPrecedence(p: *Parse, min_prec: i32) Error!Node.Index {
 
     while (true) {
         const tok_tag = p.token_tags[p.tok_i];
-        const info = operTable[@intCast(usize, @intFromEnum(tok_tag))];
+        const info = operTable[@as(usize, @intCast(@intFromEnum(tok_tag)))];
         if (info.prec < min_prec) {
             break;
         }
@@ -2087,10 +2087,10 @@ fn parseForExpr(p: *Parse) !Node.Index {
         .main_token = for_token,
         .data = .{
             .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
-            .rhs = @bitCast(u32, Node.For{
-                .inputs = @intCast(u31, inputs),
+            .rhs = @as(u32, @bitCast(Node.For{
+                .inputs = @as(u31, @intCast(inputs)),
                 .has_else = has_else,
-            }),
+            })),
         },
     });
 }
@@ -2862,10 +2862,10 @@ fn parseForTypeExpr(p: *Parse) !Node.Index {
         .main_token = for_token,
         .data = .{
             .lhs = (try p.listToSpan(p.scratch.items[scratch_top..])).start,
-            .rhs = @bitCast(u32, Node.For{
-                .inputs = @intCast(u31, inputs),
+            .rhs = @as(u32, @bitCast(Node.For{
+                .inputs = @as(u31, @intCast(inputs)),
                 .has_else = has_else,
-            }),
+            })),
         },
     });
 }
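
The three `Node.For` hunks above all pack a `u31` count plus a `bool` flag into the 32-bit `rhs` slot, so the migrated code nests `@intCast` inside `@bitCast`. A round-trip sketch with a stand-in packed struct (field names mirror the diff; this is not the real `Ast.Node.For`):

const std = @import("std");

const For = packed struct(u32) {
    inputs: u31,
    has_else: bool,
};

test "pack a For into u32 and back" {
    const inputs: usize = 3;
    const rhs = @as(u32, @bitCast(For{
        .inputs = @as(u31, @intCast(inputs)),
        .has_else = true,
    }));
    const back = @as(For, @bitCast(rhs));
    try std.testing.expectEqual(@as(u31, 3), back.inputs);
    try std.testing.expect(back.has_else);
}
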
lib/std/zig/parser_test.zig
@@ -166,10 +166,10 @@ test "zig fmt: respect line breaks after var declarations" {
         \\    lookup_tables[1][p[6]] ^
         \\    lookup_tables[2][p[5]] ^
         \\    lookup_tables[3][p[4]] ^
-        \\    lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
-        \\    lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
-        \\    lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
-        \\    lookup_tables[7][@truncate(u8, self.crc >> 0)];
+        \\    lookup_tables[4][@as(u8, self.crc >> 24)] ^
+        \\    lookup_tables[5][@as(u8, self.crc >> 16)] ^
+        \\    lookup_tables[6][@as(u8, self.crc >> 8)] ^
+        \\    lookup_tables[7][@as(u8, self.crc >> 0)];
         \\
     );
 }
@@ -1108,7 +1108,7 @@ test "zig fmt: async function" {
         \\    handleRequestFn: fn (*Server, *const std.net.Address, File) callconv(.Async) void,
         \\};
         \\test "hi" {
-        \\    var ptr = @ptrCast(fn (i32) callconv(.Async) void, other);
+        \\    var ptr: fn (i32) callconv(.Async) void = @ptrCast(other);
         \\}
         \\
     );
@@ -1825,10 +1825,10 @@ test "zig fmt: respect line breaks after infix operators" {
         \\        lookup_tables[1][p[6]] ^
         \\        lookup_tables[2][p[5]] ^
         \\        lookup_tables[3][p[4]] ^
-        \\        lookup_tables[4][@truncate(u8, self.crc >> 24)] ^
-        \\        lookup_tables[5][@truncate(u8, self.crc >> 16)] ^
-        \\        lookup_tables[6][@truncate(u8, self.crc >> 8)] ^
-        \\        lookup_tables[7][@truncate(u8, self.crc >> 0)];
+        \\        lookup_tables[4][@as(u8, self.crc >> 24)] ^
+        \\        lookup_tables[5][@as(u8, self.crc >> 16)] ^
+        \\        lookup_tables[6][@as(u8, self.crc >> 8)] ^
+        \\        lookup_tables[7][@as(u8, self.crc >> 0)];
         \\}
         \\
     );
@@ -4814,7 +4814,7 @@ test "zig fmt: use of comments and multiline string literals may force the param
         \\        \\ unknown-length pointers and C pointers cannot be hashed deeply.
         \\        \\ Consider providing your own hash function.
         \\    );
-        \\    return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
+        \\    return @intCast(doMemCheckClientRequestExpr(0, // default return
         \\        .MakeMemUndefined, @intFromPtr(qzz.ptr), qzz.len, 0, 0, 0));
         \\}
         \\
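
The async-function test above also shows the second spelling the new syntax allows: a declaration's own type annotation can serve as the result location, so `@ptrCast(T, x)` becomes `var p: T = @ptrCast(x);` with no `@as` at all. A small sketch of that form with an ordinary pointer reinterpretation:

const std = @import("std");

test "ptrCast via a type annotation instead of @as" {
    var word: u32 = 0x01020304;
    // The annotation alone provides the result type.
    const bytes: *[4]u8 = @ptrCast(&word);
    var sum: u32 = 0;
    for (bytes) |b| sum += b; // 1 + 2 + 3 + 4, independent of endianness
    try std.testing.expectEqual(@as(u32, 10), sum);
}
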
lib/std/zig/perf_test.zig
@@ -18,9 +18,9 @@ pub fn main() !void {
     }
     const end = timer.read();
     memory_used /= iterations;
-    const elapsed_s = @floatFromInt(f64, end - start) / std.time.ns_per_s;
-    const bytes_per_sec_float = @floatFromInt(f64, source.len * iterations) / elapsed_s;
-    const bytes_per_sec = @intFromFloat(u64, @floor(bytes_per_sec_float));
+    const elapsed_s = @as(f64, @floatFromInt(end - start)) / std.time.ns_per_s;
+    const bytes_per_sec_float = @as(f64, @floatFromInt(source.len * iterations)) / elapsed_s;
+    const bytes_per_sec = @as(u64, @intFromFloat(@floor(bytes_per_sec_float)));
 
     var stdout_file = std.io.getStdOut();
     const stdout = stdout_file.writer();
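
The perf_test.zig hunk is a compact tour of the float builtins: `@floatFromInt` and `@intFromFloat` likewise read their target type from the result location. A sketch of the same throughput arithmetic with made-up numbers:

const std = @import("std");

test "float conversions via result location" {
    const bytes: usize = 1_000_000;
    const elapsed_ns: u64 = 250_000_000;
    const elapsed_s = @as(f64, @floatFromInt(elapsed_ns)) / std.time.ns_per_s;
    const rate = @as(f64, @floatFromInt(bytes)) / elapsed_s;
    const whole = @as(u64, @intFromFloat(@floor(rate)));
    try std.testing.expectEqual(@as(u64, 4_000_000), whole);
}
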
lib/std/zig/render.zig
@@ -2719,7 +2719,7 @@ fn renderIdentifier(ais: *Ais, tree: Ast, token_index: Ast.TokenIndex, space: Sp
     while (contents_i < contents.len and buf_i < longest_keyword_or_primitive_len) {
         if (contents[contents_i] == '\\') {
             const res = std.zig.string_literal.parseEscapeSequence(contents, &contents_i).success;
-            buf[buf_i] = @intCast(u8, res);
+            buf[buf_i] = @as(u8, @intCast(res));
             buf_i += 1;
         } else {
             buf[buf_i] = contents[contents_i];
@@ -2773,7 +2773,7 @@ fn renderIdentifierContents(writer: anytype, bytes: []const u8) !void {
                 switch (res) {
                     .success => |codepoint| {
                         if (codepoint <= 0x7f) {
-                            const buf = [1]u8{@intCast(u8, codepoint)};
+                            const buf = [1]u8{@as(u8, @intCast(codepoint))};
                             try std.fmt.format(writer, "{}", .{std.zig.fmtEscapes(&buf)});
                         } else {
                             try writer.writeAll(escape_sequence);
lib/std/zig/Server.zig
@@ -132,7 +132,7 @@ pub fn receiveMessage(s: *Server) !InMessage.Header {
 pub fn receiveBody_u32(s: *Server) !u32 {
     const fifo = &s.receive_fifo;
     const buf = fifo.readableSlice(0);
-    const result = @ptrCast(*align(1) const u32, buf[0..4]).*;
+    const result = @as(*align(1) const u32, @ptrCast(buf[0..4])).*;
     fifo.discard(4);
     return bswap(result);
 }
@@ -140,7 +140,7 @@ pub fn receiveBody_u32(s: *Server) !u32 {
 pub fn serveStringMessage(s: *Server, tag: OutMessage.Tag, msg: []const u8) !void {
     return s.serveMessage(.{
         .tag = tag,
-        .bytes_len = @intCast(u32, msg.len),
+        .bytes_len = @as(u32, @intCast(msg.len)),
     }, &.{msg});
 }
 
@@ -152,7 +152,7 @@ pub fn serveMessage(
     var iovecs: [10]std.os.iovec_const = undefined;
     const header_le = bswap(header);
     iovecs[0] = .{
-        .iov_base = @ptrCast([*]const u8, &header_le),
+        .iov_base = @as([*]const u8, @ptrCast(&header_le)),
         .iov_len = @sizeOf(OutMessage.Header),
     };
     for (bufs, iovecs[1 .. bufs.len + 1]) |buf, *iovec| {
@@ -171,7 +171,7 @@ pub fn serveEmitBinPath(
 ) !void {
     try s.serveMessage(.{
         .tag = .emit_bin_path,
-        .bytes_len = @intCast(u32, fs_path.len + @sizeOf(OutMessage.EmitBinPath)),
+        .bytes_len = @as(u32, @intCast(fs_path.len + @sizeOf(OutMessage.EmitBinPath))),
     }, &.{
         std.mem.asBytes(&header),
         fs_path,
@@ -185,7 +185,7 @@ pub fn serveTestResults(
     const msg_le = bswap(msg);
     try s.serveMessage(.{
         .tag = .test_results,
-        .bytes_len = @intCast(u32, @sizeOf(OutMessage.TestResults)),
+        .bytes_len = @as(u32, @intCast(@sizeOf(OutMessage.TestResults))),
     }, &.{
         std.mem.asBytes(&msg_le),
     });
@@ -193,14 +193,14 @@ pub fn serveTestResults(
 
 pub fn serveErrorBundle(s: *Server, error_bundle: std.zig.ErrorBundle) !void {
     const eb_hdr: OutMessage.ErrorBundle = .{
-        .extra_len = @intCast(u32, error_bundle.extra.len),
-        .string_bytes_len = @intCast(u32, error_bundle.string_bytes.len),
+        .extra_len = @as(u32, @intCast(error_bundle.extra.len)),
+        .string_bytes_len = @as(u32, @intCast(error_bundle.string_bytes.len)),
     };
     const bytes_len = @sizeOf(OutMessage.ErrorBundle) +
         4 * error_bundle.extra.len + error_bundle.string_bytes.len;
     try s.serveMessage(.{
         .tag = .error_bundle,
-        .bytes_len = @intCast(u32, bytes_len),
+        .bytes_len = @as(u32, @intCast(bytes_len)),
     }, &.{
         std.mem.asBytes(&eb_hdr),
         // TODO: implement @ptrCast between slices changing the length
@@ -218,8 +218,8 @@ pub const TestMetadata = struct {
 
 pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
     const header: OutMessage.TestMetadata = .{
-        .tests_len = bswap(@intCast(u32, test_metadata.names.len)),
-        .string_bytes_len = bswap(@intCast(u32, test_metadata.string_bytes.len)),
+        .tests_len = bswap(@as(u32, @intCast(test_metadata.names.len))),
+        .string_bytes_len = bswap(@as(u32, @intCast(test_metadata.string_bytes.len))),
     };
     const bytes_len = @sizeOf(OutMessage.TestMetadata) +
         3 * 4 * test_metadata.names.len + test_metadata.string_bytes.len;
@@ -237,7 +237,7 @@ pub fn serveTestMetadata(s: *Server, test_metadata: TestMetadata) !void {
 
     return s.serveMessage(.{
         .tag = .test_metadata,
-        .bytes_len = @intCast(u32, bytes_len),
+        .bytes_len = @as(u32, @intCast(bytes_len)),
     }, &.{
         std.mem.asBytes(&header),
         // TODO: implement @ptrCast between slices changing the length
@@ -253,7 +253,7 @@ fn bswap(x: anytype) @TypeOf(x) {
 
     const T = @TypeOf(x);
     switch (@typeInfo(T)) {
-        .Enum => return @enumFromInt(T, @byteSwap(@intFromEnum(x))),
+        .Enum => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))),
         .Int => return @byteSwap(x),
         .Struct => |info| switch (info.layout) {
             .Extern => {
@@ -265,7 +265,7 @@ fn bswap(x: anytype) @TypeOf(x) {
             },
             .Packed => {
                 const I = info.backing_integer.?;
-                return @bitCast(T, @byteSwap(@bitCast(I, x)));
+                return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x)))));
             },
             .Auto => @compileError("auto layout struct"),
         },
@@ -286,7 +286,7 @@ fn bswap_and_workaround_u32(bytes_ptr: *const [4]u8) u32 {
 /// workaround for https://github.com/ziglang/zig/issues/14904
 fn bswap_and_workaround_tag(bytes_ptr: *const [4]u8) InMessage.Tag {
     const int = std.mem.readIntLittle(u32, bytes_ptr);
-    return @enumFromInt(InMessage.Tag, int);
+    return @as(InMessage.Tag, @enumFromInt(int));
 }
 
 const OutMessage = std.zig.Server.Message;
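
The `bswap` hunk above covers the packed-struct case: byte-swapping goes through the backing integer with a pair of `@bitCast`s around `@byteSwap`. A sketch of the same shape with a hypothetical header type:

const std = @import("std");

const Header = packed struct(u32) {
    tag: u8,
    len: u24,
};

fn bswap(x: Header) Header {
    const I = u32; // the backing integer; the real code gets this from info.backing_integer.?
    return @as(Header, @bitCast(@byteSwap(@as(I, @bitCast(x)))));
}

test "byte-swap a packed struct via its backing integer" {
    const h: Header = .{ .tag = 0x01, .len = 0x020304 };
    // As a u32, h is 0x02030401 (tag in the low byte); swapped it is 0x01040302.
    try std.testing.expectEqual(@as(u32, 0x01040302), @as(u32, @bitCast(bswap(h))));
}
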
lib/std/zig/string_literal.zig
@@ -142,7 +142,7 @@ pub fn parseEscapeSequence(slice: []const u8, offset: *usize) ParsedCharLiteral
                 return .{ .failure = .{ .expected_rbrace = i } };
             }
             offset.* = i;
-            return .{ .success = @intCast(u21, value) };
+            return .{ .success = @as(u21, @intCast(value)) };
         },
         else => return .{ .failure = .{ .invalid_escape_character = offset.* - 1 } },
     }
@@ -253,7 +253,7 @@ pub fn parseWrite(writer: anytype, bytes: []const u8) error{OutOfMemory}!Result
                             };
                             try writer.writeAll(buf[0..len]);
                         } else {
-                            try writer.writeByte(@intCast(u8, codepoint));
+                            try writer.writeByte(@as(u8, @intCast(codepoint)));
                         }
                     },
                     .failure => |err| return Result{ .failure = err },
lib/std/zig/tokenizer.zig
@@ -1290,7 +1290,7 @@ pub const Tokenizer = struct {
             // check utf8-encoded character.
             const length = std.unicode.utf8ByteSequenceLength(c0) catch return 1;
             if (self.index + length > self.buffer.len) {
-                return @intCast(u3, self.buffer.len - self.index);
+                return @as(u3, @intCast(self.buffer.len - self.index));
             }
             const bytes = self.buffer[self.index .. self.index + length];
             switch (length) {
lib/std/array_hash_map.zig
@@ -49,7 +49,7 @@ pub fn eqlString(a: []const u8, b: []const u8) bool {
 }
 
 pub fn hashString(s: []const u8) u32 {
-    return @truncate(u32, std.hash.Wyhash.hash(0, s));
+    return @as(u32, @truncate(std.hash.Wyhash.hash(0, s)));
 }
 
 /// Insertion order is preserved.
@@ -617,7 +617,7 @@ pub fn ArrayHashMapUnmanaged(
             return .{
                 .keys = slice.items(.key).ptr,
                 .values = slice.items(.value).ptr,
-                .len = @intCast(u32, slice.len),
+                .len = @as(u32, @intCast(slice.len)),
             };
         }
         pub const Iterator = struct {
@@ -1409,7 +1409,7 @@ pub fn ArrayHashMapUnmanaged(
             indexes: []Index(I),
         ) void {
             const slot = self.getSlotByIndex(old_entry_index, ctx, header, I, indexes);
-            indexes[slot].entry_index = @intCast(I, new_entry_index);
+            indexes[slot].entry_index = @as(I, @intCast(new_entry_index));
         }
 
         fn removeFromIndexByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader) void {
@@ -1508,7 +1508,7 @@ pub fn ArrayHashMapUnmanaged(
                     const new_index = self.entries.addOneAssumeCapacity();
                     indexes[slot] = .{
                         .distance_from_start_index = distance_from_start_index,
-                        .entry_index = @intCast(I, new_index),
+                        .entry_index = @as(I, @intCast(new_index)),
                     };
 
                     // update the hash if applicable
@@ -1549,7 +1549,7 @@ pub fn ArrayHashMapUnmanaged(
                     const new_index = self.entries.addOneAssumeCapacity();
                     if (store_hash) hashes_array.ptr[new_index] = h;
                     indexes[slot] = .{
-                        .entry_index = @intCast(I, new_index),
+                        .entry_index = @as(I, @intCast(new_index)),
                         .distance_from_start_index = distance_from_start_index,
                     };
                     distance_from_start_index = slot_data.distance_from_start_index;
@@ -1639,7 +1639,7 @@ pub fn ArrayHashMapUnmanaged(
                 const start_index = safeTruncate(usize, h);
                 const end_index = start_index +% indexes.len;
                 var index = start_index;
-                var entry_index = @intCast(I, i);
+                var entry_index = @as(I, @intCast(i));
                 var distance_from_start_index: I = 0;
                 while (index != end_index) : ({
                     index +%= 1;
@@ -1776,7 +1776,7 @@ fn capacityIndexSize(bit_index: u8) usize {
 fn safeTruncate(comptime T: type, val: anytype) T {
     if (@bitSizeOf(T) >= @bitSizeOf(@TypeOf(val)))
         return val;
-    return @truncate(T, val);
+    return @as(T, @truncate(val));
 }
 
 /// A single entry in the lookup acceleration structure.  These structs
@@ -1852,13 +1852,13 @@ const IndexHeader = struct {
     fn constrainIndex(header: IndexHeader, i: usize) usize {
         // This is an optimization for modulo of power of two integers;
         // it requires `indexes_len` to always be a power of two.
-        return @intCast(usize, i & header.mask());
+        return @as(usize, @intCast(i & header.mask()));
     }
 
     /// Returns the attached array of indexes.  I must match the type
     /// returned by capacityIndexType.
     fn indexes(header: *IndexHeader, comptime I: type) []Index(I) {
-        const start_ptr = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader));
+        const start_ptr: [*]Index(I) = @alignCast(@ptrCast(@as([*]u8, @ptrCast(header)) + @sizeOf(IndexHeader)));
         return start_ptr[0..header.length()];
     }
 
@@ -1871,15 +1871,15 @@ const IndexHeader = struct {
         return index_capacities[self.bit_index];
     }
     fn length(self: IndexHeader) usize {
-        return @as(usize, 1) << @intCast(math.Log2Int(usize), self.bit_index);
+        return @as(usize, 1) << @as(math.Log2Int(usize), @intCast(self.bit_index));
     }
     fn mask(self: IndexHeader) u32 {
-        return @intCast(u32, self.length() - 1);
+        return @as(u32, @intCast(self.length() - 1));
     }
 
     fn findBitIndex(desired_capacity: usize) !u8 {
         if (desired_capacity > max_capacity) return error.OutOfMemory;
-        var new_bit_index = @intCast(u8, std.math.log2_int_ceil(usize, desired_capacity));
+        var new_bit_index = @as(u8, @intCast(std.math.log2_int_ceil(usize, desired_capacity)));
         if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1;
         if (new_bit_index < min_bit_index) new_bit_index = min_bit_index;
         assert(desired_capacity <= index_capacities[new_bit_index]);
@@ -1889,12 +1889,12 @@ const IndexHeader = struct {
     /// Allocates an index header, and fills the entryIndexes array with empty.
     /// The distance array contents are undefined.
     fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader {
-        const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
+        const len = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(new_bit_index));
         const index_size = hash_map.capacityIndexSize(new_bit_index);
         const nbytes = @sizeOf(IndexHeader) + index_size * len;
         const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes);
         @memset(bytes[@sizeOf(IndexHeader)..], 0xff);
-        const result = @ptrCast(*IndexHeader, bytes.ptr);
+        const result: *IndexHeader = @alignCast(@ptrCast(bytes.ptr));
         result.* = .{
             .bit_index = new_bit_index,
         };
@@ -1904,7 +1904,7 @@ const IndexHeader = struct {
     /// Releases the memory for a header and its associated arrays.
     fn free(header: *IndexHeader, allocator: Allocator) void {
         const index_size = hash_map.capacityIndexSize(header.bit_index);
-        const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
+        const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header);
         const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];
         allocator.free(slice);
     }
@@ -1912,7 +1912,7 @@ const IndexHeader = struct {
     /// Puts an IndexHeader into the state that it would be in after being freshly allocated.
     fn reset(header: *IndexHeader) void {
         const index_size = hash_map.capacityIndexSize(header.bit_index);
-        const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
+        const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header);
         const nbytes = @sizeOf(IndexHeader) + header.length() * index_size;
         @memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff);
     }
@@ -2020,25 +2020,25 @@ test "iterator hash map" {
 
     var count: usize = 0;
     while (it.next()) |entry| : (count += 1) {
-        buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*;
+        buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*;
     }
     try testing.expect(count == 3);
     try testing.expect(it.next() == null);
 
     for (buffer, 0..) |_, i| {
-        try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
+        try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]);
     }
 
     it.reset();
     count = 0;
     while (it.next()) |entry| {
-        buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*;
+        buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*;
         count += 1;
         if (count >= 2) break;
     }
 
     for (buffer[0..2], 0..) |_, i| {
-        try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
+        try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]);
     }
 
     it.reset();
@@ -2336,11 +2336,11 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
         fn hash(ctx: Context, key: K) u32 {
             _ = ctx;
             if (comptime trait.hasUniqueRepresentation(K)) {
-                return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key)));
+                return @as(u32, @truncate(Wyhash.hash(0, std.mem.asBytes(&key))));
             } else {
                 var hasher = Wyhash.init(0);
                 autoHash(&hasher, key);
-                return @truncate(u32, hasher.final());
+                return @as(u32, @truncate(hasher.final()));
             }
         }
     }.hash;
@@ -2380,7 +2380,7 @@ pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime str
             _ = ctx;
             var hasher = Wyhash.init(0);
             std.hash.autoHashStrat(&hasher, key, strategy);
-            return @truncate(u32, hasher.final());
+            return @as(u32, @truncate(hasher.final()));
         }
     }.hash;
 }
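
The `IndexHeader` hunks are the one place in this file where the result type is written as an annotation rather than `@as`: since `@alignCast` now takes no alignment argument, the old two-argument `@ptrCast` becomes an `@alignCast(@ptrCast(...))` chain whose pointee type and alignment both come from the annotated result. A sketch of a header followed by a trailing array, with hypothetical types (the buffer is over-aligned so the `@alignCast` check always passes):

const std = @import("std");

const Header = struct {
    count: u32,
};

test "alignCast + ptrCast chain with a result type" {
    var buf: [@sizeOf(Header) + 4 * @sizeOf(u32)]u8 align(@alignOf(Header)) = undefined;
    const header: *Header = @alignCast(@ptrCast(&buf));
    header.count = 4;
    // The index array sits directly after the header, as in IndexHeader.indexes.
    const items: [*]u32 = @alignCast(@ptrCast(@as([*]u8, @ptrCast(header)) + @sizeOf(Header)));
    items[0] = 123;
    try std.testing.expectEqual(@as(u32, 123), items[0]);
}
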
lib/std/array_list.zig
@@ -1123,19 +1123,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
         {
             var i: usize = 0;
             while (i < 10) : (i += 1) {
-                list.append(@intCast(i32, i + 1)) catch unreachable;
+                list.append(@as(i32, @intCast(i + 1))) catch unreachable;
             }
         }
 
         {
             var i: usize = 0;
             while (i < 10) : (i += 1) {
-                try testing.expect(list.items[i] == @intCast(i32, i + 1));
+                try testing.expect(list.items[i] == @as(i32, @intCast(i + 1)));
             }
         }
 
         for (list.items, 0..) |v, i| {
-            try testing.expect(v == @intCast(i32, i + 1));
+            try testing.expect(v == @as(i32, @intCast(i + 1)));
         }
 
         try testing.expect(list.pop() == 10);
@@ -1173,19 +1173,19 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
         {
             var i: usize = 0;
             while (i < 10) : (i += 1) {
-                list.append(a, @intCast(i32, i + 1)) catch unreachable;
+                list.append(a, @as(i32, @intCast(i + 1))) catch unreachable;
             }
         }
 
         {
             var i: usize = 0;
             while (i < 10) : (i += 1) {
-                try testing.expect(list.items[i] == @intCast(i32, i + 1));
+                try testing.expect(list.items[i] == @as(i32, @intCast(i + 1)));
             }
         }
 
         for (list.items, 0..) |v, i| {
-            try testing.expect(v == @intCast(i32, i + 1));
+            try testing.expect(v == @as(i32, @intCast(i + 1)));
         }
 
         try testing.expect(list.pop() == 10);
lib/std/base64.zig
@@ -108,12 +108,12 @@ pub const Base64Encoder = struct {
             acc_len += 8;
             while (acc_len >= 6) {
                 acc_len -= 6;
-                dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc >> acc_len))];
+                dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc >> acc_len)))];
                 out_idx += 1;
             }
         }
         if (acc_len > 0) {
-            dest[out_idx] = encoder.alphabet_chars[@truncate(u6, (acc << 6 - acc_len))];
+            dest[out_idx] = encoder.alphabet_chars[@as(u6, @truncate((acc << 6 - acc_len)))];
             out_idx += 1;
         }
         if (encoder.pad_char) |pad_char| {
@@ -144,7 +144,7 @@ pub const Base64Decoder = struct {
             assert(!char_in_alphabet[c]);
             assert(pad_char == null or c != pad_char.?);
 
-            result.char_to_index[c] = @intCast(u8, i);
+            result.char_to_index[c] = @as(u8, @intCast(i));
             char_in_alphabet[c] = true;
         }
         return result;
@@ -196,7 +196,7 @@ pub const Base64Decoder = struct {
             acc_len += 6;
             if (acc_len >= 8) {
                 acc_len -= 8;
-                dest[dest_idx] = @truncate(u8, acc >> acc_len);
+                dest[dest_idx] = @as(u8, @truncate(acc >> acc_len));
                 dest_idx += 1;
             }
         }
@@ -271,7 +271,7 @@ pub const Base64DecoderWithIgnore = struct {
             if (acc_len >= 8) {
                 if (dest_idx == dest.len) return error.NoSpaceLeft;
                 acc_len -= 8;
-                dest[dest_idx] = @truncate(u8, acc >> acc_len);
+                dest[dest_idx] = @as(u8, @truncate(acc >> acc_len));
                 dest_idx += 1;
             }
         }
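
`@truncate` gets the same treatment, and the base64 hunks show the usual accumulator idiom: shift, then truncate to the narrow lane type. A minimal sketch:

const std = @import("std");

test "truncate via result location" {
    const acc: u32 = 0xAABBCCDD;
    const hi = @as(u8, @truncate(acc >> 24)); // keeps the low 8 bits of the shifted value
    const lo: u8 = @truncate(acc); // annotation form
    try std.testing.expectEqual(@as(u8, 0xAA), hi);
    try std.testing.expectEqual(@as(u8, 0xDD), lo);
}
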
lib/std/bit_set.zig
@@ -119,19 +119,19 @@ pub fn IntegerBitSet(comptime size: u16) type {
             if (range.start == range.end) return;
             if (MaskInt == u0) return;
 
-            const start_bit = @intCast(ShiftInt, range.start);
+            const start_bit = @as(ShiftInt, @intCast(range.start));
 
             var mask = std.math.boolMask(MaskInt, true) << start_bit;
             if (range.end != bit_length) {
-                const end_bit = @intCast(ShiftInt, range.end);
-                mask &= std.math.boolMask(MaskInt, true) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit));
+                const end_bit = @as(ShiftInt, @intCast(range.end));
+                mask &= std.math.boolMask(MaskInt, true) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)));
             }
             self.mask &= ~mask;
 
             mask = std.math.boolMask(MaskInt, value) << start_bit;
             if (range.end != bit_length) {
-                const end_bit = @intCast(ShiftInt, range.end);
-                mask &= std.math.boolMask(MaskInt, value) >> @truncate(ShiftInt, @as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit));
+                const end_bit = @as(ShiftInt, @intCast(range.end));
+                mask &= std.math.boolMask(MaskInt, value) >> @as(ShiftInt, @truncate(@as(usize, @bitSizeOf(MaskInt)) - @as(usize, end_bit)));
             }
             self.mask |= mask;
         }
@@ -292,7 +292,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
                         .reverse => {
                             const leading_zeroes = @clz(self.bits_remain);
                             const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
-                            self.bits_remain &= (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+                            self.bits_remain &= (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1;
                             return top_bit;
                         },
                     }
@@ -302,11 +302,11 @@ pub fn IntegerBitSet(comptime size: u16) type {
 
         fn maskBit(index: usize) MaskInt {
             if (MaskInt == u0) return 0;
-            return @as(MaskInt, 1) << @intCast(ShiftInt, index);
+            return @as(MaskInt, 1) << @as(ShiftInt, @intCast(index));
         }
         fn boolMaskBit(index: usize, value: bool) MaskInt {
             if (MaskInt == u0) return 0;
-            return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index);
+            return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index));
         }
     };
 }
@@ -442,10 +442,10 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
             if (num_masks == 0) return;
 
             const start_mask_index = maskIndex(range.start);
-            const start_bit = @truncate(ShiftInt, range.start);
+            const start_bit = @as(ShiftInt, @truncate(range.start));
 
             const end_mask_index = maskIndex(range.end);
-            const end_bit = @truncate(ShiftInt, range.end);
+            const end_bit = @as(ShiftInt, @truncate(range.end));
 
             if (start_mask_index == end_mask_index) {
                 var mask1 = std.math.boolMask(MaskInt, true) << start_bit;
@@ -634,13 +634,13 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
         }
 
         fn maskBit(index: usize) MaskInt {
-            return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+            return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index));
         }
         fn maskIndex(index: usize) usize {
             return index >> @bitSizeOf(ShiftInt);
         }
         fn boolMaskBit(index: usize, value: bool) MaskInt {
-            return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index);
+            return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index));
         }
     };
 }
@@ -731,7 +731,7 @@ pub const DynamicBitSetUnmanaged = struct {
             // set the padding bits in the old last item to 1
             if (fill and old_masks > 0) {
                 const old_padding_bits = old_masks * @bitSizeOf(MaskInt) - old_len;
-                const old_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, old_padding_bits);
+                const old_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(old_padding_bits));
                 self.masks[old_masks - 1] |= ~old_mask;
             }
 
@@ -745,7 +745,7 @@ pub const DynamicBitSetUnmanaged = struct {
         // Zero out the padding bits
         if (new_len > 0) {
             const padding_bits = new_masks * @bitSizeOf(MaskInt) - new_len;
-            const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+            const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits));
             self.masks[new_masks - 1] &= last_item_mask;
         }
 
@@ -816,10 +816,10 @@ pub const DynamicBitSetUnmanaged = struct {
         if (range.start == range.end) return;
 
         const start_mask_index = maskIndex(range.start);
-        const start_bit = @truncate(ShiftInt, range.start);
+        const start_bit = @as(ShiftInt, @truncate(range.start));
 
         const end_mask_index = maskIndex(range.end);
-        const end_bit = @truncate(ShiftInt, range.end);
+        const end_bit = @as(ShiftInt, @truncate(range.end));
 
         if (start_mask_index == end_mask_index) {
             var mask1 = std.math.boolMask(MaskInt, true) << start_bit;
@@ -887,7 +887,7 @@ pub const DynamicBitSetUnmanaged = struct {
         }
 
         const padding_bits = num_masks * @bitSizeOf(MaskInt) - bit_length;
-        const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+        const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits));
         self.masks[num_masks - 1] &= last_item_mask;
     }
 
@@ -996,7 +996,7 @@ pub const DynamicBitSetUnmanaged = struct {
     pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) {
         const num_masks = numMasks(self.bit_length);
         const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length;
-        const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
+        const last_item_mask = (~@as(MaskInt, 0)) >> @as(ShiftInt, @intCast(padding_bits));
         return Iterator(options).init(self.masks[0..num_masks], last_item_mask);
     }
 
@@ -1005,13 +1005,13 @@ pub const DynamicBitSetUnmanaged = struct {
     }
 
     fn maskBit(index: usize) MaskInt {
-        return @as(MaskInt, 1) << @truncate(ShiftInt, index);
+        return @as(MaskInt, 1) << @as(ShiftInt, @truncate(index));
     }
     fn maskIndex(index: usize) usize {
         return index >> @bitSizeOf(ShiftInt);
     }
     fn boolMaskBit(index: usize, value: bool) MaskInt {
-        return @as(MaskInt, @intFromBool(value)) << @intCast(ShiftInt, index);
+        return @as(MaskInt, @intFromBool(value)) << @as(ShiftInt, @intCast(index));
     }
     fn numMasks(bit_length: usize) usize {
         return (bit_length + (@bitSizeOf(MaskInt) - 1)) / @bitSizeOf(MaskInt);
@@ -1255,7 +1255,7 @@ fn BitSetIterator(comptime MaskInt: type, comptime options: IteratorOptions) typ
                 .reverse => {
                     const leading_zeroes = @clz(self.bits_remain);
                     const top_bit = (@bitSizeOf(MaskInt) - 1) - leading_zeroes;
-                    const no_top_bit_mask = (@as(MaskInt, 1) << @intCast(ShiftInt, top_bit)) - 1;
+                    const no_top_bit_mask = (@as(MaskInt, 1) << @as(ShiftInt, @intCast(top_bit))) - 1;
                     self.bits_remain &= no_top_bit_mask;
                     return top_bit + self.bit_offset;
                 },
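
Nearly every bit_set.zig hunk is a shift amount: the right-hand side of `<<` and `>>` must be the log2-sized integer for the operand type, so each count is now wrapped as `@as(ShiftInt, @intCast(...))` or `@as(ShiftInt, @truncate(...))`. A sketch with `ShiftInt` derived the same way the file derives it:

const std = @import("std");

test "shift counts need the log2-sized integer" {
    const MaskInt = u64;
    const ShiftInt = std.math.Log2Int(MaskInt); // u6 for a u64 operand
    const index: usize = 37;
    const bit = @as(MaskInt, 1) << @as(ShiftInt, @intCast(index));
    try std.testing.expectEqual(@as(MaskInt, 1) << 37, bit);
}
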
lib/std/bounded_array.zig
@@ -394,7 +394,7 @@ test "BoundedArrayAligned" {
     try a.append(255);
     try a.append(255);
 
-    const b = @ptrCast(*const [2]u16, a.constSlice().ptr);
+    const b = @as(*const [2]u16, @ptrCast(a.constSlice().ptr));
     try testing.expectEqual(@as(u16, 0), b[0]);
     try testing.expectEqual(@as(u16, 65535), b[1]);
 }
lib/std/Build.zig
@@ -1111,7 +1111,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
             var populated_cpu_features = whitelist_cpu.model.features;
             populated_cpu_features.populateDependencies(all_features);
             for (all_features, 0..) |feature, i_usize| {
-                const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+                const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize));
                 const in_cpu_set = populated_cpu_features.isEnabled(i);
                 if (in_cpu_set) {
                     log.err("{s} ", .{feature.name});
@@ -1119,7 +1119,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
             }
             log.err("  Remove: ", .{});
             for (all_features, 0..) |feature, i_usize| {
-                const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+                const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize));
                 const in_cpu_set = populated_cpu_features.isEnabled(i);
                 const in_actual_set = selected_cpu.features.isEnabled(i);
                 if (in_actual_set and !in_cpu_set) {
@@ -1442,13 +1442,13 @@ pub fn execAllowFail(
     switch (term) {
         .Exited => |code| {
             if (code != 0) {
-                out_code.* = @truncate(u8, code);
+                out_code.* = @as(u8, @truncate(code));
                 return error.ExitCodeFailure;
             }
             return stdout;
         },
         .Signal, .Stopped, .Unknown => |code| {
-            out_code.* = @truncate(u8, code);
+            out_code.* = @as(u8, @truncate(code));
             return error.ProcessTerminated;
         },
     }
@@ -1815,7 +1815,7 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 {
         try mcpu_buffer.appendSlice(cpu.model.name);
 
         for (all_features, 0..) |feature, i_usize| {
-            const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+            const i = @as(std.Target.Cpu.Feature.Set.Index, @intCast(i_usize));
             const in_cpu_set = populated_cpu_features.isEnabled(i);
             const in_actual_set = cpu.features.isEnabled(i);
             if (in_cpu_set and !in_actual_set) {
@@ -1852,7 +1852,7 @@ pub fn hex64(x: u64) [16]u8 {
     var result: [16]u8 = undefined;
     var i: usize = 0;
     while (i < 8) : (i += 1) {
-        const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
+        const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i))));
         result[i * 2 + 0] = hex_charset[byte >> 4];
         result[i * 2 + 1] = hex_charset[byte & 15];
     }
lib/std/builtin.zig
@@ -784,7 +784,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
 
                     exit_size.* = 256;
 
-                    return @ptrCast([*:0]u16, utf16.ptr);
+                    return @as([*:0]u16, @ptrCast(utf16.ptr));
                 }
             };
 
lib/std/c.zig
@@ -113,7 +113,7 @@ pub usingnamespace switch (builtin.os.tag) {
 
 pub fn getErrno(rc: anytype) c.E {
     if (rc == -1) {
-        return @enumFromInt(c.E, c._errno().*);
+        return @as(c.E, @enumFromInt(c._errno().*));
     } else {
         return .SUCCESS;
     }
lib/std/child_process.zig
@@ -93,7 +93,7 @@ pub const ChildProcess = struct {
             switch (builtin.os.tag) {
                 .linux => {
                     if (rus.rusage) |ru| {
-                        return @intCast(usize, ru.maxrss) * 1024;
+                        return @as(usize, @intCast(ru.maxrss)) * 1024;
                     } else {
                         return null;
                     }
@@ -108,7 +108,7 @@ pub const ChildProcess = struct {
                 .macos, .ios => {
                     if (rus.rusage) |ru| {
                         // Darwin oddly reports in bytes instead of kilobytes.
-                        return @intCast(usize, ru.maxrss);
+                        return @as(usize, @intCast(ru.maxrss));
                     } else {
                         return null;
                     }
@@ -376,7 +376,7 @@ pub const ChildProcess = struct {
             if (windows.kernel32.GetExitCodeProcess(self.id, &exit_code) == 0) {
                 break :x Term{ .Unknown = 0 };
             } else {
-                break :x Term{ .Exited = @truncate(u8, exit_code) };
+                break :x Term{ .Exited = @as(u8, @truncate(exit_code)) };
             }
         });
 
@@ -449,7 +449,7 @@ pub const ChildProcess = struct {
                 // has a value greater than 0
                 if ((fd[0].revents & std.os.POLL.IN) != 0) {
                     const err_int = try readIntFd(err_pipe[0]);
-                    return @errSetCast(SpawnError, @errorFromInt(err_int));
+                    return @as(SpawnError, @errSetCast(@errorFromInt(err_int)));
                 }
             } else {
                 // Write maxInt(ErrInt) to the write end of the err_pipe. This is after
@@ -462,7 +462,7 @@ pub const ChildProcess = struct {
                 // Here we potentially return the fork child's error from the parent
                 // pid.
                 if (err_int != maxInt(ErrInt)) {
-                    return @errSetCast(SpawnError, @errorFromInt(err_int));
+                    return @as(SpawnError, @errSetCast(@errorFromInt(err_int)));
                 }
             }
         }
@@ -542,7 +542,7 @@ pub const ChildProcess = struct {
             } else if (builtin.output_mode == .Exe) {
                 // Then we have Zig start code and this works.
                 // TODO type-safety for null-termination of `os.environ`.
-                break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr);
+                break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr));
             } else {
                 // TODO come up with a solution for this.
                 @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");
@@ -605,7 +605,7 @@ pub const ChildProcess = struct {
         }
 
         // we are the parent
-        const pid = @intCast(i32, pid_result);
+        const pid = @as(i32, @intCast(pid_result));
         if (self.stdin_behavior == StdIo.Pipe) {
             self.stdin = File{ .handle = stdin_pipe[1] };
         } else {
@@ -1015,11 +1015,11 @@ fn windowsCreateProcessPathExt(
             else => return windows.unexpectedStatus(rc),
         }
 
-        const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf);
+        const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf));
         if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) {
             break :found_name null;
         }
-        break :found_name @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2];
+        break :found_name @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2];
     };
 
     const unappended_err = unappended: {
@@ -1104,7 +1104,7 @@ fn windowsCreateProcessPathExt(
             else => return windows.unexpectedStatus(rc),
         }
 
-        const dir_info = @ptrCast(*windows.FILE_DIRECTORY_INFORMATION, &file_information_buf);
+        const dir_info = @as(*windows.FILE_DIRECTORY_INFORMATION, @ptrCast(&file_information_buf));
         // Skip directories
         if (dir_info.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY != 0) continue;
 
@@ -1164,7 +1164,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1
         null,
         windows.TRUE,
         windows.CREATE_UNICODE_ENVIRONMENT,
-        @ptrCast(?*anyopaque, envp_ptr),
+        @as(?*anyopaque, @ptrCast(envp_ptr)),
         cwd_ptr,
         lpStartupInfo,
         lpProcessInformation,
@@ -1376,7 +1376,7 @@ fn writeIntFd(fd: i32, value: ErrInt) !void {
         .capable_io_mode = .blocking,
         .intended_io_mode = .blocking,
     };
-    file.writer().writeIntNative(u64, @intCast(u64, value)) catch return error.SystemResources;
+    file.writer().writeIntNative(u64, @as(u64, @intCast(value))) catch return error.SystemResources;
 }
 
 fn readIntFd(fd: i32) !ErrInt {
@@ -1385,7 +1385,7 @@ fn readIntFd(fd: i32) !ErrInt {
         .capable_io_mode = .blocking,
         .intended_io_mode = .blocking,
     };
-    return @intCast(ErrInt, file.reader().readIntNative(u64) catch return error.SystemResources);
+    return @as(ErrInt, @intCast(file.reader().readIntNative(u64) catch return error.SystemResources));
 }
 
 /// Caller must free result.
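
child_process.zig round-trips a spawn error through an integer written over a pipe, which after migration reads as a chain of three builtins: `@intFromError` to encode, then `@errorFromInt` plus `@errSetCast` to decode. A sketch of that encode/decode under a hypothetical error set:

const std = @import("std");

const SpawnError = error{ SystemResources, FileNotFound };
const ErrInt = std.meta.Int(.unsigned, @bitSizeOf(anyerror));

fn encode(err: SpawnError) ErrInt {
    return @intFromError(err);
}

fn decode(int: ErrInt) SpawnError {
    // Safety-checked: the integer must name an error in SpawnError.
    return @errSetCast(@errorFromInt(int));
}

test "error to integer and back" {
    const e: SpawnError = error.FileNotFound;
    try std.testing.expectEqual(e, decode(encode(e)));
}
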
lib/std/coff.zig
@@ -457,12 +457,12 @@ pub const ImportLookupEntry32 = struct {
 
     pub fn getImportByName(raw: u32) ?ByName {
         if (mask & raw != 0) return null;
-        return @bitCast(ByName, raw);
+        return @as(ByName, @bitCast(raw));
     }
 
     pub fn getImportByOrdinal(raw: u32) ?ByOrdinal {
         if (mask & raw == 0) return null;
-        return @bitCast(ByOrdinal, raw);
+        return @as(ByOrdinal, @bitCast(raw));
     }
 };
 
@@ -483,12 +483,12 @@ pub const ImportLookupEntry64 = struct {
 
     pub fn getImportByName(raw: u64) ?ByName {
         if (mask & raw != 0) return null;
-        return @bitCast(ByName, raw);
+        return @as(ByName, @bitCast(raw));
     }
 
     pub fn getImportByOrdinal(raw: u64) ?ByOrdinal {
         if (mask & raw == 0) return null;
-        return @bitCast(ByOrdinal, raw);
+        return @as(ByOrdinal, @bitCast(raw));
     }
 };
 
@@ -1146,25 +1146,25 @@ pub const Coff = struct {
     }
 
     pub fn getCoffHeader(self: Coff) CoffHeader {
-        return @ptrCast(*align(1) const CoffHeader, self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)]).*;
+        return @as(*align(1) const CoffHeader, @ptrCast(self.data[self.coff_header_offset..][0..@sizeOf(CoffHeader)])).*;
     }
 
     pub fn getOptionalHeader(self: Coff) OptionalHeader {
         assert(self.is_image);
         const offset = self.coff_header_offset + @sizeOf(CoffHeader);
-        return @ptrCast(*align(1) const OptionalHeader, self.data[offset..][0..@sizeOf(OptionalHeader)]).*;
+        return @as(*align(1) const OptionalHeader, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeader)])).*;
     }
 
     pub fn getOptionalHeader32(self: Coff) OptionalHeaderPE32 {
         assert(self.is_image);
         const offset = self.coff_header_offset + @sizeOf(CoffHeader);
-        return @ptrCast(*align(1) const OptionalHeaderPE32, self.data[offset..][0..@sizeOf(OptionalHeaderPE32)]).*;
+        return @as(*align(1) const OptionalHeaderPE32, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE32)])).*;
     }
 
     pub fn getOptionalHeader64(self: Coff) OptionalHeaderPE64 {
         assert(self.is_image);
         const offset = self.coff_header_offset + @sizeOf(CoffHeader);
-        return @ptrCast(*align(1) const OptionalHeaderPE64, self.data[offset..][0..@sizeOf(OptionalHeaderPE64)]).*;
+        return @as(*align(1) const OptionalHeaderPE64, @ptrCast(self.data[offset..][0..@sizeOf(OptionalHeaderPE64)])).*;
     }
 
     pub fn getImageBase(self: Coff) u64 {
@@ -1193,7 +1193,7 @@ pub const Coff = struct {
             else => unreachable, // We assume we have validated the header already
         };
         const offset = self.coff_header_offset + @sizeOf(CoffHeader) + size;
-        return @ptrCast([*]align(1) const ImageDataDirectory, self.data[offset..])[0..self.getNumberOfDataDirectories()];
+        return @as([*]align(1) const ImageDataDirectory, @ptrCast(self.data[offset..]))[0..self.getNumberOfDataDirectories()];
     }
 
     pub fn getSymtab(self: *const Coff) ?Symtab {
@@ -1217,7 +1217,7 @@ pub const Coff = struct {
     pub fn getSectionHeaders(self: *const Coff) []align(1) const SectionHeader {
         const coff_header = self.getCoffHeader();
         const offset = self.coff_header_offset + @sizeOf(CoffHeader) + coff_header.size_of_optional_header;
-        return @ptrCast([*]align(1) const SectionHeader, self.data.ptr + offset)[0..coff_header.number_of_sections];
+        return @as([*]align(1) const SectionHeader, @ptrCast(self.data.ptr + offset))[0..coff_header.number_of_sections];
     }
 
     pub fn getSectionHeadersAlloc(self: *const Coff, allocator: mem.Allocator) ![]SectionHeader {
@@ -1303,9 +1303,9 @@ pub const Symtab = struct {
         return .{
             .name = raw[0..8].*,
             .value = mem.readIntLittle(u32, raw[8..12]),
-            .section_number = @enumFromInt(SectionNumber, mem.readIntLittle(u16, raw[12..14])),
-            .type = @bitCast(SymType, mem.readIntLittle(u16, raw[14..16])),
-            .storage_class = @enumFromInt(StorageClass, raw[16]),
+            .section_number = @as(SectionNumber, @enumFromInt(mem.readIntLittle(u16, raw[12..14]))),
+            .type = @as(SymType, @bitCast(mem.readIntLittle(u16, raw[14..16]))),
+            .storage_class = @as(StorageClass, @enumFromInt(raw[16])),
             .number_of_aux_symbols = raw[17],
         };
     }
@@ -1333,7 +1333,7 @@ pub const Symtab = struct {
     fn asWeakExtDef(raw: []const u8) WeakExternalDefinition {
         return .{
             .tag_index = mem.readIntLittle(u32, raw[0..4]),
-            .flag = @enumFromInt(WeakExternalFlag, mem.readIntLittle(u32, raw[4..8])),
+            .flag = @as(WeakExternalFlag, @enumFromInt(mem.readIntLittle(u32, raw[4..8]))),
             .unused = raw[8..18].*,
         };
     }
@@ -1351,7 +1351,7 @@ pub const Symtab = struct {
             .number_of_linenumbers = mem.readIntLittle(u16, raw[6..8]),
             .checksum = mem.readIntLittle(u32, raw[8..12]),
             .number = mem.readIntLittle(u16, raw[12..14]),
-            .selection = @enumFromInt(ComdatSelection, raw[14]),
+            .selection = @as(ComdatSelection, @enumFromInt(raw[14])),
             .unused = raw[15..18].*,
         };
     }
@@ -1384,6 +1384,6 @@ pub const Strtab = struct {
 
     pub fn get(self: Strtab, off: u32) []const u8 {
         assert(off < self.buffer.len);
-        return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.ptr + off), 0);
+        return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.ptr + off)), 0);
     }
 };
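
The coff.zig hunks all reinterpret records that sit at arbitrary offsets in a byte buffer, which is why every result type is spelled `*align(1) const T`: `@ptrCast` changes the pointee, and the `align(1)` sidesteps any alignment check before the value is copied out with `.*`. A sketch with a hypothetical record:

const std = @import("std");

const Record = extern struct {
    a: u16,
    b: u16,
};

test "unaligned record read through an align(1) pointer" {
    const bytes = [_]u8{ 0x11, 0x22, 0x33, 0x44 };
    const rec = @as(*align(1) const Record, @ptrCast(bytes[0..@sizeOf(Record)])).*;
    try std.testing.expectEqual(std.mem.readIntNative(u16, bytes[0..2]), rec.a);
}
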
lib/std/cstr.zig
@@ -89,12 +89,12 @@ pub const NullTerminated2DArray = struct {
         return NullTerminated2DArray{
             .allocator = allocator,
             .byte_count = byte_count,
-            .ptr = @ptrCast(?[*:null]?[*:0]u8, buf.ptr),
+            .ptr = @as(?[*:null]?[*:0]u8, @ptrCast(buf.ptr)),
         };
     }
 
     pub fn deinit(self: *NullTerminated2DArray) void {
-        const buf = @ptrCast([*]u8, self.ptr);
+        const buf = @as([*]u8, @ptrCast(self.ptr));
         self.allocator.free(buf[0..self.byte_count]);
     }
 };
lib/std/debug.zig
@@ -460,8 +460,8 @@ pub const StackIterator = struct {
         // We are unable to determine validity of memory for freestanding targets
         if (native_os == .freestanding) return true;
 
-        const aligned_address = address & ~@intCast(usize, (mem.page_size - 1));
-        const aligned_memory = @ptrFromInt([*]align(mem.page_size) u8, aligned_address)[0..mem.page_size];
+        const aligned_address = address & ~@as(usize, @intCast((mem.page_size - 1)));
+        const aligned_memory = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_address))[0..mem.page_size];
 
         if (native_os != .windows) {
             if (native_os != .wasi) {
@@ -511,7 +511,7 @@ pub const StackIterator = struct {
         if (fp == 0 or !mem.isAligned(fp, @alignOf(usize)) or !isValidMemory(fp))
             return null;
 
-        const new_fp = math.add(usize, @ptrFromInt(*const usize, fp).*, fp_bias) catch return null;
+        const new_fp = math.add(usize, @as(*const usize, @ptrFromInt(fp)).*, fp_bias) catch return null;
 
         // Sanity check: the stack grows down, thus all the parent frames must
         // be at addresses that are greater than (or equal to) the previous one.
@@ -520,9 +520,9 @@ pub const StackIterator = struct {
         if (new_fp != 0 and new_fp < self.fp)
             return null;
 
-        const new_pc = @ptrFromInt(
+        const new_pc = @as(
             *const usize,
-            math.add(usize, fp, pc_offset) catch return null,
+            @ptrFromInt(math.add(usize, fp, pc_offset) catch return null),
         ).*;
 
         self.fp = new_fp;
@@ -555,10 +555,10 @@ pub fn writeCurrentStackTrace(
 pub noinline fn walkStackWindows(addresses: []usize) usize {
     if (builtin.cpu.arch == .x86) {
         // RtlVirtualUnwind doesn't exist on x86
-        return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @ptrCast(**anyopaque, addresses.ptr), null);
+        return windows.ntdll.RtlCaptureStackBackTrace(0, addresses.len, @as(**anyopaque, @ptrCast(addresses.ptr)), null);
     }
 
-    const tib = @ptrCast(*const windows.NT_TIB, &windows.teb().Reserved1);
+    const tib = @as(*const windows.NT_TIB, @ptrCast(&windows.teb().Reserved1));
 
     var context: windows.CONTEXT = std.mem.zeroes(windows.CONTEXT);
     windows.ntdll.RtlCaptureContext(&context);
@@ -584,7 +584,7 @@ pub noinline fn walkStackWindows(addresses: []usize) usize {
             );
         } else {
             // leaf function
-            context.setIp(@ptrFromInt(*u64, current_regs.sp).*);
+            context.setIp(@as(*u64, @ptrFromInt(current_regs.sp)).*);
             context.setSp(current_regs.sp + @sizeOf(usize));
         }
 
@@ -734,7 +734,7 @@ fn printLineInfo(
             if (printLineFromFile(out_stream, li)) {
                 if (li.column > 0) {
                     // The caret already takes one char
-                    const space_needed = @intCast(usize, li.column - 1);
+                    const space_needed = @as(usize, @intCast(li.column - 1));
 
                     try out_stream.writeByteNTimes(' ', space_needed);
                     try tty_config.setColor(out_stream, .green);
@@ -883,7 +883,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) error{Overflow}![]const u8
 pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo {
     nosuspend {
         const mapped_mem = try mapWholeFile(elf_file);
-        const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
+        const hdr = @as(*const elf.Ehdr, @ptrCast(&mapped_mem[0]));
         if (!mem.eql(u8, hdr.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
         if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
 
@@ -896,14 +896,13 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
 
         const shoff = hdr.e_shoff;
         const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx);
-        const str_shdr = @ptrCast(
-            *const elf.Shdr,
-            @alignCast(@alignOf(elf.Shdr), &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow]),
-        );
+        const str_shdr: *const elf.Shdr = @ptrCast(@alignCast(
+            &mapped_mem[math.cast(usize, str_section_off) orelse return error.Overflow],
+        ));
         const header_strings = mapped_mem[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size];
-        const shdrs = @ptrCast(
+        const shdrs = @as(
             [*]const elf.Shdr,
-            @alignCast(@alignOf(elf.Shdr), &mapped_mem[shoff]),
+            @ptrCast(@alignCast(&mapped_mem[shoff])),
         )[0..hdr.e_shnum];
 
         var opt_debug_info: ?[]const u8 = null;
@@ -982,10 +981,7 @@ pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugIn
 fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo {
     const mapped_mem = try mapWholeFile(macho_file);
 
-    const hdr = @ptrCast(
-        *const macho.mach_header_64,
-        @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr),
-    );
+    const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr));
     if (hdr.magic != macho.MH_MAGIC_64)
         return error.InvalidDebugInfo;
 
@@ -998,9 +994,9 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
         else => {},
     } else return error.MissingDebugInfo;
 
-    const syms = @ptrCast(
+    const syms = @as(
         [*]const macho.nlist_64,
-        @alignCast(@alignOf(macho.nlist_64), &mapped_mem[symtab.symoff]),
+        @ptrCast(@alignCast(&mapped_mem[symtab.symoff])),
     )[0..symtab.nsyms];
     const strings = mapped_mem[symtab.stroff..][0 .. symtab.strsize - 1 :0];
 
@@ -1055,7 +1051,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
                     },
                     .fun_strx => {
                         state = .fun_size;
-                        last_sym.size = @intCast(u32, sym.n_value);
+                        last_sym.size = @as(u32, @intCast(sym.n_value));
                     },
                     else => return error.InvalidDebugInfo,
                 }
@@ -1283,10 +1279,10 @@ pub const DebugInfo = struct {
 
             var it = macho.LoadCommandIterator{
                 .ncmds = header.ncmds,
-                .buffer = @alignCast(@alignOf(u64), @ptrFromInt(
+                .buffer = @alignCast(@as(
                     [*]u8,
-                    @intFromPtr(header) + @sizeOf(macho.mach_header_64),
-                ))[0..header.sizeofcmds],
+                    @ptrFromInt(@intFromPtr(header) + @sizeOf(macho.mach_header_64)),
+                )[0..header.sizeofcmds]),
             };
             while (it.next()) |cmd| switch (cmd.cmd()) {
                 .SEGMENT_64 => {
@@ -1332,7 +1328,7 @@ pub const DebugInfo = struct {
                     return obj_di;
                 }
 
-                const mapped_module = @ptrFromInt([*]const u8, module.base_address)[0..module.size];
+                const mapped_module = @as([*]const u8, @ptrFromInt(module.base_address))[0..module.size];
                 const obj_di = try self.allocator.create(ModuleDebugInfo);
                 errdefer self.allocator.destroy(obj_di);
 
@@ -1465,10 +1461,7 @@ pub const ModuleDebugInfo = switch (native_os) {
             const o_file = try fs.cwd().openFile(o_file_path, .{ .intended_io_mode = .blocking });
             const mapped_mem = try mapWholeFile(o_file);
 
-            const hdr = @ptrCast(
-                *const macho.mach_header_64,
-                @alignCast(@alignOf(macho.mach_header_64), mapped_mem.ptr),
-            );
+            const hdr: *const macho.mach_header_64 = @ptrCast(@alignCast(mapped_mem.ptr));
             if (hdr.magic != std.macho.MH_MAGIC_64)
                 return error.InvalidDebugInfo;
 
@@ -1487,21 +1480,18 @@ pub const ModuleDebugInfo = switch (native_os) {
             if (segcmd == null or symtabcmd == null) return error.MissingDebugInfo;
 
             // Parse symbols
-            const strtab = @ptrCast(
+            const strtab = @as(
                 [*]const u8,
-                &mapped_mem[symtabcmd.?.stroff],
+                @ptrCast(&mapped_mem[symtabcmd.?.stroff]),
             )[0 .. symtabcmd.?.strsize - 1 :0];
-            const symtab = @ptrCast(
+            const symtab = @as(
                 [*]const macho.nlist_64,
-                @alignCast(
-                    @alignOf(macho.nlist_64),
-                    &mapped_mem[symtabcmd.?.symoff],
-                ),
+                @ptrCast(@alignCast(&mapped_mem[symtabcmd.?.symoff])),
             )[0..symtabcmd.?.nsyms];
 
             // TODO handle tentative (common) symbols
             var addr_table = std.StringHashMap(u64).init(allocator);
-            try addr_table.ensureTotalCapacity(@intCast(u32, symtab.len));
+            try addr_table.ensureTotalCapacity(@as(u32, @intCast(symtab.len)));
             for (symtab) |sym| {
                 if (sym.n_strx == 0) continue;
                 if (sym.undf() or sym.tentative() or sym.abs()) continue;
@@ -1943,49 +1933,49 @@ fn dumpSegfaultInfoPosix(sig: i32, addr: usize, ctx_ptr: ?*const anyopaque) void
 
     switch (native_arch) {
         .x86 => {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
-            const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
-            const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+            const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP]));
+            const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP]));
             dumpStackTraceFromBase(bp, ip);
         },
         .x86_64 => {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
             const ip = switch (native_os) {
-                .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
-                .freebsd => @intCast(usize, ctx.mcontext.rip),
-                .openbsd => @intCast(usize, ctx.sc_rip),
-                .macos => @intCast(usize, ctx.mcontext.ss.rip),
+                .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.rip)),
+                .openbsd => @as(usize, @intCast(ctx.sc_rip)),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)),
                 else => unreachable,
             };
             const bp = switch (native_os) {
-                .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
-                .openbsd => @intCast(usize, ctx.sc_rbp),
-                .freebsd => @intCast(usize, ctx.mcontext.rbp),
-                .macos => @intCast(usize, ctx.mcontext.ss.rbp),
+                .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])),
+                .openbsd => @as(usize, @intCast(ctx.sc_rbp)),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)),
                 else => unreachable,
             };
             dumpStackTraceFromBase(bp, ip);
         },
         .arm => {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
-            const ip = @intCast(usize, ctx.mcontext.arm_pc);
-            const bp = @intCast(usize, ctx.mcontext.arm_fp);
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+            const ip = @as(usize, @intCast(ctx.mcontext.arm_pc));
+            const bp = @as(usize, @intCast(ctx.mcontext.arm_fp));
             dumpStackTraceFromBase(bp, ip);
         },
         .aarch64 => {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
             const ip = switch (native_os) {
-                .macos => @intCast(usize, ctx.mcontext.ss.pc),
-                .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]),
-                .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr),
-                else => @intCast(usize, ctx.mcontext.pc),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)),
+                .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)),
+                else => @as(usize, @intCast(ctx.mcontext.pc)),
             };
             // x29 is the ABI-designated frame pointer
             const bp = switch (native_os) {
-                .macos => @intCast(usize, ctx.mcontext.ss.fp),
-                .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]),
-                .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]),
-                else => @intCast(usize, ctx.mcontext.regs[29]),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)),
+                .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])),
+                else => @as(usize, @intCast(ctx.mcontext.regs[29])),
             };
             dumpStackTraceFromBase(bp, ip);
         },
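
@alignCast no longer takes an @alignOf(...) argument: the required alignment is inferred from the result type, which is why the @ptrCast(@alignCast(...)) chains above hang the type on the declaration instead. A minimal sketch, assuming a byte buffer known to hold a suitably aligned header (Header is hypothetical):

const std = @import("std");

const Header = extern struct { magic: u32, len: u32 };

test "ptrCast + alignCast with an inferred result type" {
    const buf align(@alignOf(Header)) = [_]u8{0} ** @sizeOf(Header);
    // The declared type drives both builtins: @alignCast asserts
    // @alignOf(Header), and @ptrCast reinterprets the bytes.
    const hdr: *const Header = @ptrCast(@alignCast(&buf));
    try std.testing.expect(hdr.len == 0);
}
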
lib/std/dwarf.zig
@@ -462,7 +462,7 @@ const LineNumberProgram = struct {
             });
 
             return debug.LineInfo{
-                .line = if (self.prev_line >= 0) @intCast(u64, self.prev_line) else 0,
+                .line = if (self.prev_line >= 0) @as(u64, @intCast(self.prev_line)) else 0,
                 .column = self.prev_column,
                 .file_name = file_name,
             };
@@ -533,7 +533,7 @@ fn parseFormValueConstant(in_stream: anytype, signed: bool, endian: std.builtin.
                 -1 => blk: {
                     if (signed) {
                         const x = try nosuspend leb.readILEB128(i64, in_stream);
-                        break :blk @bitCast(u64, x);
+                        break :blk @as(u64, @bitCast(x));
                     } else {
                         const x = try nosuspend leb.readULEB128(u64, in_stream);
                         break :blk x;
@@ -939,12 +939,12 @@ pub const DwarfInfo = struct {
                 .Const => |c| try c.asUnsignedLe(),
                 .RangeListOffset => |idx| off: {
                     if (compile_unit.is_64) {
-                        const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx);
+                        const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 8 * idx));
                         if (offset_loc + 8 > debug_ranges.len) return badDwarf();
                         const offset = mem.readInt(u64, debug_ranges[offset_loc..][0..8], di.endian);
                         break :off compile_unit.rnglists_base + offset;
                     } else {
-                        const offset_loc = @intCast(usize, compile_unit.rnglists_base + 4 * idx);
+                        const offset_loc = @as(usize, @intCast(compile_unit.rnglists_base + 4 * idx));
                         if (offset_loc + 4 > debug_ranges.len) return badDwarf();
                         const offset = mem.readInt(u32, debug_ranges[offset_loc..][0..4], di.endian);
                         break :off compile_unit.rnglists_base + offset;
@@ -1134,7 +1134,7 @@ pub const DwarfInfo = struct {
                 ),
             };
             if (attr.form_id == FORM.implicit_const) {
-                result.attrs.items[i].value.Const.payload = @bitCast(u64, attr.payload);
+                result.attrs.items[i].value.Const.payload = @as(u64, @bitCast(attr.payload));
             }
         }
         return result;
@@ -1438,7 +1438,7 @@ pub const DwarfInfo = struct {
         const addr_size = debug_addr[compile_unit.addr_base - 2];
         const seg_size = debug_addr[compile_unit.addr_base - 1];
 
-        const byte_offset = @intCast(usize, compile_unit.addr_base + (addr_size + seg_size) * index);
+        const byte_offset = @as(usize, @intCast(compile_unit.addr_base + (addr_size + seg_size) * index));
         if (byte_offset + addr_size > debug_addr.len) return badDwarf();
         return switch (addr_size) {
             1 => debug_addr[byte_offset],
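
The dwarf changes mix the two integer casts: @bitCast reinterprets bits at the same width, while @intCast converts the value with a safety check against the target range; in both cases @as now names the destination. A minimal sketch:

const std = @import("std");

test "bitCast vs intCast under @as" {
    const x: i64 = -1;
    // Same 64 bits, reinterpreted; no range check.
    try std.testing.expect(@as(u64, @bitCast(x)) == std.math.maxInt(u64));
    // Value conversion; safety-checked to fit the destination type.
    try std.testing.expect(@as(usize, @intCast(@as(i64, 42))) == 42);
}
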
lib/std/dynamic_library.zig
@@ -71,18 +71,18 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
         while (_DYNAMIC[i].d_tag != elf.DT_NULL) : (i += 1) {
             switch (_DYNAMIC[i].d_tag) {
                 elf.DT_DEBUG => {
-                    const ptr = @ptrFromInt(?*RDebug, _DYNAMIC[i].d_val);
+                    const ptr = @as(?*RDebug, @ptrFromInt(_DYNAMIC[i].d_val));
                     if (ptr) |r_debug| {
                         if (r_debug.r_version != 1) return error.InvalidExe;
                         break :init r_debug.r_map;
                     }
                 },
                 elf.DT_PLTGOT => {
-                    const ptr = @ptrFromInt(?[*]usize, _DYNAMIC[i].d_val);
+                    const ptr = @as(?[*]usize, @ptrFromInt(_DYNAMIC[i].d_val));
                     if (ptr) |got_table| {
                         // The address of the link_map structure is stored in
                         // the second slot
-                        break :init @ptrFromInt(?*LinkMap, got_table[1]);
+                        break :init @as(?*LinkMap, @ptrFromInt(got_table[1]));
                     }
                 },
                 else => {},
@@ -132,7 +132,7 @@ pub const ElfDynLib = struct {
         );
         defer os.munmap(file_bytes);
 
-        const eh = @ptrCast(*elf.Ehdr, file_bytes.ptr);
+        const eh = @as(*elf.Ehdr, @ptrCast(file_bytes.ptr));
         if (!mem.eql(u8, eh.e_ident[0..4], elf.MAGIC)) return error.NotElfFile;
         if (eh.e_type != elf.ET.DYN) return error.NotDynamicLibrary;
 
@@ -149,10 +149,10 @@ pub const ElfDynLib = struct {
                 i += 1;
                 ph_addr += eh.e_phentsize;
             }) {
-                const ph = @ptrFromInt(*elf.Phdr, ph_addr);
+                const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
                 switch (ph.p_type) {
                     elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz),
-                    elf.PT_DYNAMIC => maybe_dynv = @ptrFromInt([*]usize, elf_addr + ph.p_offset),
+                    elf.PT_DYNAMIC => maybe_dynv = @as([*]usize, @ptrFromInt(elf_addr + ph.p_offset)),
                     else => {},
                 }
             }
@@ -180,7 +180,7 @@ pub const ElfDynLib = struct {
                 i += 1;
                 ph_addr += eh.e_phentsize;
             }) {
-                const ph = @ptrFromInt(*elf.Phdr, ph_addr);
+                const ph = @as(*elf.Phdr, @ptrFromInt(ph_addr));
                 switch (ph.p_type) {
                     elf.PT_LOAD => {
                         // The VirtAddr may not be page-aligned; in such case there will be
@@ -188,7 +188,7 @@ pub const ElfDynLib = struct {
                         const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1);
                         const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
                         const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size);
-                        const ptr = @ptrFromInt([*]align(mem.page_size) u8, aligned_addr);
+                        const ptr = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_addr));
                         const prot = elfToMmapProt(ph.p_flags);
                         if ((ph.p_flags & elf.PF_W) == 0) {
                             // If it does not need write access, it can be mapped from the fd.
@@ -228,11 +228,11 @@ pub const ElfDynLib = struct {
             while (dynv[i] != 0) : (i += 2) {
                 const p = base + dynv[i + 1];
                 switch (dynv[i]) {
-                    elf.DT_STRTAB => maybe_strings = @ptrFromInt([*:0]u8, p),
-                    elf.DT_SYMTAB => maybe_syms = @ptrFromInt([*]elf.Sym, p),
-                    elf.DT_HASH => maybe_hashtab = @ptrFromInt([*]os.Elf_Symndx, p),
-                    elf.DT_VERSYM => maybe_versym = @ptrFromInt([*]u16, p),
-                    elf.DT_VERDEF => maybe_verdef = @ptrFromInt(*elf.Verdef, p),
+                    elf.DT_STRTAB => maybe_strings = @as([*:0]u8, @ptrFromInt(p)),
+                    elf.DT_SYMTAB => maybe_syms = @as([*]elf.Sym, @ptrFromInt(p)),
+                    elf.DT_HASH => maybe_hashtab = @as([*]os.Elf_Symndx, @ptrFromInt(p)),
+                    elf.DT_VERSYM => maybe_versym = @as([*]u16, @ptrFromInt(p)),
+                    elf.DT_VERDEF => maybe_verdef = @as(*elf.Verdef, @ptrFromInt(p)),
                     else => {},
                 }
             }
@@ -261,7 +261,7 @@ pub const ElfDynLib = struct {
 
     pub fn lookup(self: *ElfDynLib, comptime T: type, name: [:0]const u8) ?T {
         if (self.lookupAddress("", name)) |symbol| {
-            return @ptrFromInt(T, symbol);
+            return @as(T, @ptrFromInt(symbol));
         } else {
             return null;
         }
@@ -276,8 +276,8 @@ pub const ElfDynLib = struct {
 
         var i: usize = 0;
         while (i < self.hashtab[1]) : (i += 1) {
-            if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info & 0xf) & OK_TYPES)) continue;
-            if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info >> 4) & OK_BINDS)) continue;
+            if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info & 0xf)) & OK_TYPES)) continue;
+            if (0 == (@as(u32, 1) << @as(u5, @intCast(self.syms[i].st_info >> 4)) & OK_BINDS)) continue;
             if (0 == self.syms[i].st_shndx) continue;
             if (!mem.eql(u8, name, mem.sliceTo(self.strings + self.syms[i].st_name, 0))) continue;
             if (maybe_versym) |versym| {
@@ -301,15 +301,15 @@ pub const ElfDynLib = struct {
 
 fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*:0]u8) bool {
     var def = def_arg;
-    const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
+    const vsym = @as(u32, @bitCast(vsym_arg)) & 0x7fff;
     while (true) {
         if (0 == (def.vd_flags & elf.VER_FLG_BASE) and (def.vd_ndx & 0x7fff) == vsym)
             break;
         if (def.vd_next == 0)
             return false;
-        def = @ptrFromInt(*elf.Verdef, @intFromPtr(def) + def.vd_next);
+        def = @as(*elf.Verdef, @ptrFromInt(@intFromPtr(def) + def.vd_next));
     }
-    const aux = @ptrFromInt(*elf.Verdaux, @intFromPtr(def) + def.vd_aux);
+    const aux = @as(*elf.Verdaux, @ptrFromInt(@intFromPtr(def) + def.vd_aux));
     return mem.eql(u8, vername, mem.sliceTo(strings + aux.vda_name, 0));
 }
 
@@ -347,7 +347,7 @@ pub const WindowsDynLib = struct {
 
     pub fn lookup(self: *WindowsDynLib, comptime T: type, name: [:0]const u8) ?T {
         if (windows.kernel32.GetProcAddress(self.dll, name.ptr)) |addr| {
-            return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), addr));
+            return @as(T, @ptrCast(@alignCast(addr)));
         } else {
             return null;
         }
@@ -381,7 +381,7 @@ pub const DlDynlib = struct {
         // dlsym (and other dl-functions) secretly take a shadow parameter: the return address on the stack
         // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66826
         if (@call(.never_tail, system.dlsym, .{ self.handle, name.ptr })) |symbol| {
-            return @ptrCast(T, @alignCast(@alignOf(@typeInfo(T).Pointer.child), symbol));
+            return @as(T, @ptrCast(@alignCast(symbol)));
         } else {
             return null;
         }
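
@ptrFromInt follows the same shape: the pointer type moves out of the builtin into the surrounding @as or declaration. A minimal sketch round-tripping an address through @intFromPtr:

const std = @import("std");

test "ptrFromInt round trip" {
    var word: usize = 0;
    const addr = @intFromPtr(&word);
    // The pointer type comes from @as; non-null and alignment are
    // still safety-checked at the cast.
    const p = @as(*usize, @ptrFromInt(addr));
    p.* = 123;
    try std.testing.expect(word == 123);
}
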
lib/std/elf.zig
@@ -434,8 +434,8 @@ pub const Header = struct {
     }
 
     pub fn parse(hdr_buf: *align(@alignOf(Elf64_Ehdr)) const [@sizeOf(Elf64_Ehdr)]u8) !Header {
-        const hdr32 = @ptrCast(*const Elf32_Ehdr, hdr_buf);
-        const hdr64 = @ptrCast(*const Elf64_Ehdr, hdr_buf);
+        const hdr32 = @as(*const Elf32_Ehdr, @ptrCast(hdr_buf));
+        const hdr64 = @as(*const Elf64_Ehdr, @ptrCast(hdr_buf));
         if (!mem.eql(u8, hdr32.e_ident[0..4], MAGIC)) return error.InvalidElfMagic;
         if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
 
@@ -454,7 +454,7 @@ pub const Header = struct {
 
         const machine = if (need_bswap) blk: {
             const value = @intFromEnum(hdr32.e_machine);
-            break :blk @enumFromInt(EM, @byteSwap(value));
+            break :blk @as(EM, @enumFromInt(@byteSwap(value)));
         } else hdr32.e_machine;
 
         return @as(Header, .{
@@ -725,10 +725,10 @@ pub const Elf32_Sym = extern struct {
     st_shndx: Elf32_Section,
 
     pub inline fn st_type(self: @This()) u4 {
-        return @truncate(u4, self.st_info);
+        return @as(u4, @truncate(self.st_info));
     }
     pub inline fn st_bind(self: @This()) u4 {
-        return @truncate(u4, self.st_info >> 4);
+        return @as(u4, @truncate(self.st_info >> 4));
     }
 };
 pub const Elf64_Sym = extern struct {
@@ -740,10 +740,10 @@ pub const Elf64_Sym = extern struct {
     st_size: Elf64_Xword,
 
     pub inline fn st_type(self: @This()) u4 {
-        return @truncate(u4, self.st_info);
+        return @as(u4, @truncate(self.st_info));
     }
     pub inline fn st_bind(self: @This()) u4 {
-        return @truncate(u4, self.st_info >> 4);
+        return @as(u4, @truncate(self.st_info >> 4));
     }
 };
 pub const Elf32_Syminfo = extern struct {
@@ -759,10 +759,10 @@ pub const Elf32_Rel = extern struct {
     r_info: Elf32_Word,
 
     pub inline fn r_sym(self: @This()) u24 {
-        return @truncate(u24, self.r_info >> 8);
+        return @as(u24, @truncate(self.r_info >> 8));
     }
     pub inline fn r_type(self: @This()) u8 {
-        return @truncate(u8, self.r_info);
+        return @as(u8, @truncate(self.r_info));
     }
 };
 pub const Elf64_Rel = extern struct {
@@ -770,10 +770,10 @@ pub const Elf64_Rel = extern struct {
     r_info: Elf64_Xword,
 
     pub inline fn r_sym(self: @This()) u32 {
-        return @truncate(u32, self.r_info >> 32);
+        return @as(u32, @truncate(self.r_info >> 32));
     }
     pub inline fn r_type(self: @This()) u32 {
-        return @truncate(u32, self.r_info);
+        return @as(u32, @truncate(self.r_info));
     }
 };
 pub const Elf32_Rela = extern struct {
@@ -782,10 +782,10 @@ pub const Elf32_Rela = extern struct {
     r_addend: Elf32_Sword,
 
     pub inline fn r_sym(self: @This()) u24 {
-        return @truncate(u24, self.r_info >> 8);
+        return @as(u24, @truncate(self.r_info >> 8));
     }
     pub inline fn r_type(self: @This()) u8 {
-        return @truncate(u8, self.r_info);
+        return @as(u8, @truncate(self.r_info));
     }
 };
 pub const Elf64_Rela = extern struct {
@@ -794,10 +794,10 @@ pub const Elf64_Rela = extern struct {
     r_addend: Elf64_Sxword,
 
     pub inline fn r_sym(self: @This()) u32 {
-        return @truncate(u32, self.r_info >> 32);
+        return @as(u32, @truncate(self.r_info >> 32));
     }
     pub inline fn r_type(self: @This()) u32 {
-        return @truncate(u32, self.r_info);
+        return @as(u32, @truncate(self.r_info));
     }
 };
 pub const Elf32_Dyn = extern struct {
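
The r_sym/r_type accessors show the new @truncate: it keeps only as many low bits of the operand as the result type holds, so the width annotation now lives on the @as. A minimal sketch mirroring the 64-bit relocation split:

const std = @import("std");

test "truncate splits packed fields" {
    const r_info: u64 = 0x0000_0001_0000_002a;
    const sym = @as(u32, @truncate(r_info >> 32)); // high half
    const kind = @as(u32, @truncate(r_info)); // low bits only
    try std.testing.expect(sym == 1);
    try std.testing.expect(kind == 0x2a);
}
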
lib/std/enums.zig
@@ -16,7 +16,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
         fields = fields ++ &[_]StructField{.{
             .name = field.name,
             .type = Data,
-            .default_value = if (field_default) |d| @ptrCast(?*const anyopaque, &d) else null,
+            .default_value = if (field_default) |d| @as(?*const anyopaque, @ptrCast(&d)) else null,
             .is_comptime = false,
             .alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0,
         }};
@@ -61,7 +61,7 @@ test tagName {
     const E = enum(u8) { a, b, _ };
     try testing.expect(tagName(E, .a) != null);
     try testing.expectEqualStrings("a", tagName(E, .a).?);
-    try testing.expect(tagName(E, @enumFromInt(E, 42)) == null);
+    try testing.expect(tagName(E, @as(E, @enumFromInt(42))) == null);
 }
 
 /// Determines the length of a direct-mapped enum array, indexed by
@@ -156,7 +156,7 @@ pub fn directEnumArrayDefault(
     var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined;
     inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| {
         const enum_value = @field(E, f.name);
-        const index = @intCast(usize, @intFromEnum(enum_value));
+        const index = @as(usize, @intCast(@intFromEnum(enum_value)));
         result[index] = @field(init_values, f.name);
     }
     return result;
@@ -341,7 +341,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
             var self = initWithCount(0);
             inline for (@typeInfo(E).Enum.fields) |field| {
                 const c = @field(init_counts, field.name);
-                const key = @enumFromInt(E, field.value);
+                const key = @as(E, @enumFromInt(field.value));
                 self.counts.set(key, c);
             }
             return self;
@@ -412,7 +412,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
         /// asserts operation will not overflow any key.
         pub fn addSetAssertSafe(self: *Self, other: Self) void {
             inline for (@typeInfo(E).Enum.fields) |field| {
-                const key = @enumFromInt(E, field.value);
+                const key = @as(E, @enumFromInt(field.value));
                 self.addAssertSafe(key, other.getCount(key));
             }
         }
@@ -420,7 +420,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
         /// Increases all key counts by the given multiset.
         pub fn addSet(self: *Self, other: Self) error{Overflow}!void {
             inline for (@typeInfo(E).Enum.fields) |field| {
-                const key = @enumFromInt(E, field.value);
+                const key = @as(E, @enumFromInt(field.value));
                 try self.add(key, other.getCount(key));
             }
         }
@@ -430,7 +430,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
         /// then that key will have a key count of zero.
         pub fn removeSet(self: *Self, other: Self) void {
             inline for (@typeInfo(E).Enum.fields) |field| {
-                const key = @enumFromInt(E, field.value);
+                const key = @as(E, @enumFromInt(field.value));
                 self.remove(key, other.getCount(key));
             }
         }
@@ -439,7 +439,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
         /// given multiset.
         pub fn eql(self: Self, other: Self) bool {
             inline for (@typeInfo(E).Enum.fields) |field| {
-                const key = @enumFromInt(E, field.value);
+                const key = @as(E, @enumFromInt(field.value));
                 if (self.getCount(key) != other.getCount(key)) {
                     return false;
                 }
@@ -451,7 +451,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
         /// equal to the given multiset.
         pub fn subsetOf(self: Self, other: Self) bool {
             inline for (@typeInfo(E).Enum.fields) |field| {
-                const key = @enumFromInt(E, field.value);
+                const key = @as(E, @enumFromInt(field.value));
                 if (self.getCount(key) > other.getCount(key)) {
                     return false;
                 }
@@ -463,7 +463,7 @@ pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type {
         /// equal to the given multiset.
         pub fn supersetOf(self: Self, other: Self) bool {
             inline for (@typeInfo(E).Enum.fields) |field| {
-                const key = @enumFromInt(E, field.value);
+                const key = @as(E, @enumFromInt(field.value));
                 if (self.getCount(key) < other.getCount(key)) {
                     return false;
                 }
@@ -1281,10 +1281,10 @@ test "std.enums.ensureIndexer" {
         pub const Key = u32;
         pub const count: usize = 8;
         pub fn indexOf(k: Key) usize {
-            return @intCast(usize, k);
+            return @as(usize, @intCast(k));
         }
         pub fn keyForIndex(index: usize) Key {
-            return @intCast(Key, index);
+            return @as(Key, @intCast(index));
         }
     });
 }
@@ -1323,14 +1323,14 @@ pub fn EnumIndexer(comptime E: type) type {
             pub const Key = E;
             pub const count = fields_len;
             pub fn indexOf(e: E) usize {
-                return @intCast(usize, @intFromEnum(e) - min);
+                return @as(usize, @intCast(@intFromEnum(e) - min));
             }
             pub fn keyForIndex(i: usize) E {
                 // TODO fix addition semantics.  This calculation
                 // gives up some safety to avoid artificially limiting
                 // the range of signed enum values to max_isize.
-                const enum_value = if (min < 0) @bitCast(isize, i) +% min else i + min;
-                return @enumFromInt(E, @intCast(std.meta.Tag(E), enum_value));
+                const enum_value = if (min < 0) @as(isize, @bitCast(i)) +% min else i + min;
+                return @as(E, @enumFromInt(@as(std.meta.Tag(E), @intCast(enum_value))));
             }
         };
     }
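
@enumFromInt likewise infers the enum type from context, which is why the loops above spell it @as(E, @enumFromInt(field.value)). A minimal sketch with a non-exhaustive enum, where any value of the backing integer is representable:

const std = @import("std");

test "enumFromInt with an inferred enum type" {
    const E = enum(u8) { a, b, _ };
    const e = @as(E, @enumFromInt(42));
    try std.testing.expect(@intFromEnum(e) == 42);
}
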
lib/std/fmt.zig
@@ -396,7 +396,7 @@ pub const ArgState = struct {
         }
 
         // Mark this argument as used
-        self.used_args |= @as(ArgSetType, 1) << @intCast(u5, next_index);
+        self.used_args |= @as(ArgSetType, 1) << @as(u5, @intCast(next_index));
         return next_index;
     }
 };
@@ -1056,7 +1056,7 @@ pub fn formatFloatScientific(
     options: FormatOptions,
     writer: anytype,
 ) !void {
-    var x = @floatCast(f64, value);
+    var x = @as(f64, @floatCast(value));
 
     // Errol doesn't handle these special cases.
     if (math.signbit(x)) {
@@ -1167,9 +1167,9 @@ pub fn formatFloatHexadecimal(
     const exponent_mask = (1 << exponent_bits) - 1;
     const exponent_bias = (1 << (exponent_bits - 1)) - 1;
 
-    const as_bits = @bitCast(TU, value);
+    const as_bits = @as(TU, @bitCast(value));
     var mantissa = as_bits & mantissa_mask;
-    var exponent: i32 = @truncate(u16, (as_bits >> mantissa_bits) & exponent_mask);
+    var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask));
 
     const is_denormal = exponent == 0 and mantissa != 0;
     const is_zero = exponent == 0 and mantissa == 0;
@@ -1218,7 +1218,7 @@ pub fn formatFloatHexadecimal(
             // Drop the excess bits.
             mantissa >>= 2;
             // Restore the alignment.
-            mantissa <<= @intCast(math.Log2Int(TU), (mantissa_digits - precision) * 4);
+            mantissa <<= @as(math.Log2Int(TU), @intCast((mantissa_digits - precision) * 4));
 
             const overflow = mantissa & (1 << 1 + mantissa_digits * 4) != 0;
             // Prefer a normalized result in case of overflow.
@@ -1296,7 +1296,7 @@ pub fn formatFloatDecimal(
         errol.roundToPrecision(&float_decimal, precision, errol.RoundMode.Decimal);
 
         // exp < 0 means the leading digit is always 0, as the errol result is normalized.
-        var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
+        var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0;
 
         // The actual slice into the buffer; we may need to zero-pad between num_digits_whole and this.
         var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
@@ -1325,7 +1325,7 @@ pub fn formatFloatDecimal(
 
         // Zero-fill until we reach significant digits or run out of precision.
         if (float_decimal.exp <= 0) {
-            const zero_digit_count = @intCast(usize, -float_decimal.exp);
+            const zero_digit_count = @as(usize, @intCast(-float_decimal.exp));
             const zeros_to_print = @min(zero_digit_count, precision);
 
             var i: usize = 0;
@@ -1354,7 +1354,7 @@ pub fn formatFloatDecimal(
         }
     } else {
         // exp < 0 means the leading digit is always 0, as the errol result is normalized.
-        var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0;
+        var num_digits_whole = if (float_decimal.exp > 0) @as(usize, @intCast(float_decimal.exp)) else 0;
 
         // The actual slice into the buffer; we may need to zero-pad between num_digits_whole and this.
         var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len);
@@ -1380,7 +1380,7 @@ pub fn formatFloatDecimal(
 
         // Zero-fill until we reach significant digits or run out of precision.
         if (float_decimal.exp < 0) {
-            const zero_digit_count = @intCast(usize, -float_decimal.exp);
+            const zero_digit_count = @as(usize, @intCast(-float_decimal.exp));
 
             var i: usize = 0;
             while (i < zero_digit_count) : (i += 1) {
@@ -1423,21 +1423,21 @@ pub fn formatInt(
     if (base == 10) {
         while (a >= 100) : (a = @divTrunc(a, 100)) {
             index -= 2;
-            buf[index..][0..2].* = digits2(@intCast(usize, a % 100));
+            buf[index..][0..2].* = digits2(@as(usize, @intCast(a % 100)));
         }
 
         if (a < 10) {
             index -= 1;
-            buf[index] = '0' + @intCast(u8, a);
+            buf[index] = '0' + @as(u8, @intCast(a));
         } else {
             index -= 2;
-            buf[index..][0..2].* = digits2(@intCast(usize, a));
+            buf[index..][0..2].* = digits2(@as(usize, @intCast(a)));
         }
     } else {
         while (true) {
             const digit = a % base;
             index -= 1;
-            buf[index] = digitToChar(@intCast(u8, digit), case);
+            buf[index] = digitToChar(@as(u8, @intCast(digit)), case);
             a /= base;
             if (a == 0) break;
         }
@@ -1595,10 +1595,10 @@ test "fmtDuration" {
 
 fn formatDurationSigned(ns: i64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
     if (ns < 0) {
-        const data = FormatDurationData{ .ns = @intCast(u64, -ns), .negative = true };
+        const data = FormatDurationData{ .ns = @as(u64, @intCast(-ns)), .negative = true };
         try formatDuration(data, fmt, options, writer);
     } else {
-        const data = FormatDurationData{ .ns = @intCast(u64, ns) };
+        const data = FormatDurationData{ .ns = @as(u64, @intCast(ns)) };
         try formatDuration(data, fmt, options, writer);
     }
 }
@@ -1846,7 +1846,7 @@ fn parseWithSign(
             // The first digit of a negative number.
             // Consider parsing "-4" as an i3.
             // This should work, but positive 4 overflows i3, so we can't cast the digit to T and subtract.
-            x = math.cast(T, -@intCast(i8, digit)) orelse return error.Overflow;
+            x = math.cast(T, -@as(i8, @intCast(digit))) orelse return error.Overflow;
             continue;
         }
         x = try add(T, x, math.cast(T, digit) orelse return error.Overflow);
@@ -2099,7 +2099,7 @@ test "optional" {
         try expectFmt("optional: null\n", "optional: {?}\n", .{value});
     }
     {
-        const value = @ptrFromInt(?*i32, 0xf000d000);
+        const value = @as(?*i32, @ptrFromInt(0xf000d000));
         try expectFmt("optional: *i32@f000d000\n", "optional: {*}\n", .{value});
     }
 }
@@ -2218,7 +2218,7 @@ test "slice" {
     }
     {
         var runtime_zero: usize = 0;
-        const value = @ptrFromInt([*]align(1) const []const u8, 0xdeadbeef)[runtime_zero..runtime_zero];
+        const value = @as([*]align(1) const []const u8, @ptrFromInt(0xdeadbeef))[runtime_zero..runtime_zero];
         try expectFmt("slice: []const u8@deadbeef\n", "slice: {*}\n", .{value});
     }
     {
@@ -2248,17 +2248,17 @@ test "escape non-printable" {
 
 test "pointer" {
     {
-        const value = @ptrFromInt(*align(1) i32, 0xdeadbeef);
+        const value = @as(*align(1) i32, @ptrFromInt(0xdeadbeef));
         try expectFmt("pointer: i32@deadbeef\n", "pointer: {}\n", .{value});
         try expectFmt("pointer: i32@deadbeef\n", "pointer: {*}\n", .{value});
     }
     const FnPtr = *align(1) const fn () void;
     {
-        const value = @ptrFromInt(FnPtr, 0xdeadbeef);
+        const value = @as(FnPtr, @ptrFromInt(0xdeadbeef));
         try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value});
     }
     {
-        const value = @ptrFromInt(FnPtr, 0xdeadbeef);
+        const value = @as(FnPtr, @ptrFromInt(0xdeadbeef));
         try expectFmt("pointer: fn() void@deadbeef\n", "pointer: {}\n", .{value});
     }
 }
@@ -2267,12 +2267,12 @@ test "cstr" {
     try expectFmt(
         "cstr: Test C\n",
         "cstr: {s}\n",
-        .{@ptrCast([*c]const u8, "Test C")},
+        .{@as([*c]const u8, @ptrCast("Test C"))},
     );
     try expectFmt(
         "cstr:     Test C\n",
         "cstr: {s:10}\n",
-        .{@ptrCast([*c]const u8, "Test C")},
+        .{@as([*c]const u8, @ptrCast("Test C"))},
     );
 }
 
@@ -2360,11 +2360,11 @@ test "non-exhaustive enum" {
     };
     try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {}\n", .{Enum.One});
     try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {}\n", .{Enum.Two});
-    try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@enumFromInt(Enum, 0x1234)});
+    try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(4660)\n", "enum: {}\n", .{@as(Enum, @enumFromInt(0x1234))});
     try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.One\n", "enum: {x}\n", .{Enum.One});
     try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {x}\n", .{Enum.Two});
     try expectFmt("enum: fmt.test.non-exhaustive enum.Enum.Two\n", "enum: {X}\n", .{Enum.Two});
-    try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@enumFromInt(Enum, 0x1234)});
+    try expectFmt("enum: fmt.test.non-exhaustive enum.Enum(1234)\n", "enum: {x}\n", .{@as(Enum, @enumFromInt(0x1234))});
 }
 
 test "float.scientific" {
@@ -2376,11 +2376,11 @@ test "float.scientific" {
 
 test "float.scientific.precision" {
     try expectFmt("f64: 1.40971e-42", "f64: {e:.5}", .{@as(f64, 1.409706e-42)});
-    try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 814313563)))});
-    try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1006632960)))});
+    try expectFmt("f64: 1.00000e-09", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 814313563))))});
+    try expectFmt("f64: 7.81250e-03", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1006632960))))});
     // libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05.
     // In fact, libc fails to round up many values ending in 5 one place past the precision point.
-    try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1203982400)))});
+    try expectFmt("f64: 1.00001e+05", "f64: {e:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1203982400))))});
 }
 
 test "float.special" {
@@ -2472,22 +2472,22 @@ test "float.decimal" {
 }
 
 test "float.libc.sanity" {
-    try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 916964781)))});
-    try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 925353389)))});
-    try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1036831278)))});
-    try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1065353133)))});
-    try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1092616192)))});
+    try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 916964781))))});
+    try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 925353389))))});
+    try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1036831278))))});
+    try expectFmt("f64: 1.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1065353133))))});
+    try expectFmt("f64: 10.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1092616192))))});
 
     // libc differences
     //
     // This is 0.015625 exactly according to gdb. We thus round down,
     // however glibc rounds up for some reason. This occurs for all
     // floats of the form x.yyyy25 on a precision point.
-    try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1015021568)))});
+    try expectFmt("f64: 0.01563", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1015021568))))});
     // errol3 rounds to ... 630 but libc rounds to ...632. Grisu3
     // also rounds to 630 so I'm inclined to believe libc is not
     // optimal here.
-    try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1518338049)))});
+    try expectFmt("f64: 18014400656965630.00000", "f64: {d:.5}", .{@as(f64, @as(f32, @bitCast(@as(u32, 1518338049))))});
 }
 
 test "custom" {
lib/std/fs.zig
@@ -373,13 +373,13 @@ pub const IterableDir = struct {
                             }
                         }
                         self.index = 0;
-                        self.end_index = @intCast(usize, rc);
+                        self.end_index = @as(usize, @intCast(rc));
                     }
-                    const darwin_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+                    const darwin_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
                     const next_index = self.index + darwin_entry.reclen();
                     self.index = next_index;
 
-                    const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen];
+                    const name = @as([*]u8, @ptrCast(&darwin_entry.d_name))[0..darwin_entry.d_namlen];
 
                     if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (darwin_entry.d_ino == 0)) {
                         continue :start_over;
@@ -421,13 +421,13 @@ pub const IterableDir = struct {
                         }
                         if (rc == 0) return null;
                         self.index = 0;
-                        self.end_index = @intCast(usize, rc);
+                        self.end_index = @as(usize, @intCast(rc));
                     }
-                    const entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+                    const entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
                     const next_index = self.index + entry.reclen();
                     self.index = next_index;
 
-                    const name = mem.sliceTo(@ptrCast([*:0]u8, &entry.d_name), 0);
+                    const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&entry.d_name)), 0);
                     if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
                         continue :start_over;
 
@@ -485,13 +485,13 @@ pub const IterableDir = struct {
                         }
                         if (rc == 0) return null;
                         self.index = 0;
-                        self.end_index = @intCast(usize, rc);
+                        self.end_index = @as(usize, @intCast(rc));
                     }
-                    const bsd_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+                    const bsd_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
                     const next_index = self.index + bsd_entry.reclen();
                     self.index = next_index;
 
-                    const name = @ptrCast([*]u8, &bsd_entry.d_name)[0..bsd_entry.d_namlen];
+                    const name = @as([*]u8, @ptrCast(&bsd_entry.d_name))[0..bsd_entry.d_namlen];
 
                     const skip_zero_fileno = switch (builtin.os.tag) {
                         // d_fileno=0 is used to mark invalid entries or deleted files.
@@ -567,12 +567,12 @@ pub const IterableDir = struct {
                             }
                         }
                         self.index = 0;
-                        self.end_index = @intCast(usize, rc);
+                        self.end_index = @as(usize, @intCast(rc));
                     }
-                    const haiku_entry = @ptrCast(*align(1) os.system.dirent, &self.buf[self.index]);
+                    const haiku_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index]));
                     const next_index = self.index + haiku_entry.reclen();
                     self.index = next_index;
-                    const name = mem.sliceTo(@ptrCast([*:0]u8, &haiku_entry.d_name), 0);
+                    const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&haiku_entry.d_name)), 0);
 
                     if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..") or (haiku_entry.d_ino == 0)) {
                         continue :start_over;
@@ -672,11 +672,11 @@ pub const IterableDir = struct {
                         self.index = 0;
                         self.end_index = rc;
                     }
-                    const linux_entry = @ptrCast(*align(1) linux.dirent64, &self.buf[self.index]);
+                    const linux_entry = @as(*align(1) linux.dirent64, @ptrCast(&self.buf[self.index]));
                     const next_index = self.index + linux_entry.reclen();
                     self.index = next_index;
 
-                    const name = mem.sliceTo(@ptrCast([*:0]u8, &linux_entry.d_name), 0);
+                    const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&linux_entry.d_name)), 0);
 
                     // skip . and .. entries
                     if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
@@ -750,15 +750,14 @@ pub const IterableDir = struct {
                         }
                     }
 
-                    const aligned_ptr = @alignCast(@alignOf(w.FILE_BOTH_DIR_INFORMATION), &self.buf[self.index]);
-                    const dir_info = @ptrCast(*w.FILE_BOTH_DIR_INFORMATION, aligned_ptr);
+                    const dir_info: *w.FILE_BOTH_DIR_INFORMATION = @ptrCast(@alignCast(&self.buf[self.index]));
                     if (dir_info.NextEntryOffset != 0) {
                         self.index += dir_info.NextEntryOffset;
                     } else {
                         self.index = self.buf.len;
                     }
 
-                    const name_utf16le = @ptrCast([*]u16, &dir_info.FileName)[0 .. dir_info.FileNameLength / 2];
+                    const name_utf16le = @as([*]u16, @ptrCast(&dir_info.FileName))[0 .. dir_info.FileNameLength / 2];
 
                     if (mem.eql(u16, name_utf16le, &[_]u16{'.'}) or mem.eql(u16, name_utf16le, &[_]u16{ '.', '.' }))
                         continue;
@@ -835,7 +834,7 @@ pub const IterableDir = struct {
                         self.index = 0;
                         self.end_index = bufused;
                     }
-                    const entry = @ptrCast(*align(1) w.dirent_t, &self.buf[self.index]);
+                    const entry = @as(*align(1) w.dirent_t, @ptrCast(&self.buf[self.index]));
                     const entry_size = @sizeOf(w.dirent_t);
                     const name_index = self.index + entry_size;
                     if (name_index + entry.d_namlen > self.end_index) {
@@ -1789,7 +1788,7 @@ pub const Dir = struct {
             .fd = undefined,
         };
 
-        const path_len_bytes = @intCast(u16, mem.sliceTo(sub_path_w, 0).len * 2);
+        const path_len_bytes = @as(u16, @intCast(mem.sliceTo(sub_path_w, 0).len * 2));
         var nt_name = w.UNICODE_STRING{
             .Length = path_len_bytes,
             .MaximumLength = path_len_bytes,
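
The directory iterators above repeatedly turn a fixed-size name field into a NUL-terminated many-item pointer before slicing it; with the new syntax the sentinel pointer type sits in the @as. A minimal sketch, with a hypothetical buffer standing in for a dirent name field:

const std = @import("std");

test "sentinel pointer from a fixed buffer" {
    const name = [8]u8{ 'h', 'e', 'l', 'l', 'o', 0, 0, 0 };
    // Unchecked assertion that a 0 terminator exists in the buffer.
    const p = @as([*:0]const u8, @ptrCast(&name));
    try std.testing.expectEqualStrings("hello", std.mem.sliceTo(p, 0));
}
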
lib/std/hash_map.zig
@@ -101,7 +101,7 @@ pub const StringIndexContext = struct {
     }
 
     pub fn hash(self: @This(), x: u32) u64 {
-        const x_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + x, 0);
+        const x_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + x, 0);
         return hashString(x_slice);
     }
 };
@@ -110,7 +110,7 @@ pub const StringIndexAdapter = struct {
     bytes: *std.ArrayListUnmanaged(u8),
 
     pub fn eql(self: @This(), a_slice: []const u8, b: u32) bool {
-        const b_slice = mem.sliceTo(@ptrCast([*:0]const u8, self.bytes.items.ptr) + b, 0);
+        const b_slice = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.bytes.items.ptr)) + b, 0);
         return mem.eql(u8, a_slice, b_slice);
     }
 
@@ -777,25 +777,25 @@ pub fn HashMapUnmanaged(
             fingerprint: FingerPrint = free,
             used: u1 = 0,
 
-            const slot_free = @bitCast(u8, Metadata{ .fingerprint = free });
-            const slot_tombstone = @bitCast(u8, Metadata{ .fingerprint = tombstone });
+            const slot_free = @as(u8, @bitCast(Metadata{ .fingerprint = free }));
+            const slot_tombstone = @as(u8, @bitCast(Metadata{ .fingerprint = tombstone }));
 
             pub fn isUsed(self: Metadata) bool {
                 return self.used == 1;
             }
 
             pub fn isTombstone(self: Metadata) bool {
-                return @bitCast(u8, self) == slot_tombstone;
+                return @as(u8, @bitCast(self)) == slot_tombstone;
             }
 
             pub fn isFree(self: Metadata) bool {
-                return @bitCast(u8, self) == slot_free;
+                return @as(u8, @bitCast(self)) == slot_free;
             }
 
             pub fn takeFingerprint(hash: Hash) FingerPrint {
                 const hash_bits = @typeInfo(Hash).Int.bits;
                 const fp_bits = @typeInfo(FingerPrint).Int.bits;
-                return @truncate(FingerPrint, hash >> (hash_bits - fp_bits));
+                return @as(FingerPrint, @truncate(hash >> (hash_bits - fp_bits)));
             }
 
             pub fn fill(self: *Metadata, fp: FingerPrint) void {
@@ -899,7 +899,7 @@ pub fn HashMapUnmanaged(
         }
 
         fn capacityForSize(size: Size) Size {
-            var new_cap = @truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1);
+            var new_cap = @as(u32, @truncate((@as(u64, size) * 100) / max_load_percentage + 1));
             new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable;
             return new_cap;
         }
@@ -927,7 +927,7 @@ pub fn HashMapUnmanaged(
             if (self.metadata) |_| {
                 self.initMetadatas();
                 self.size = 0;
-                self.available = @truncate(u32, (self.capacity() * max_load_percentage) / 100);
+                self.available = @as(u32, @truncate((self.capacity() * max_load_percentage) / 100));
             }
         }
 
@@ -942,7 +942,7 @@ pub fn HashMapUnmanaged(
         }
 
         fn header(self: *const Self) *Header {
-            return @ptrCast(*Header, @ptrCast([*]Header, @alignCast(@alignOf(Header), self.metadata.?)) - 1);
+            return @ptrCast(@as([*]Header, @ptrCast(@alignCast(self.metadata.?))) - 1);
         }
 
         fn keys(self: *const Self) [*]K {
@@ -1033,7 +1033,7 @@ pub fn HashMapUnmanaged(
 
             const hash = ctx.hash(key);
             const mask = self.capacity() - 1;
-            var idx = @truncate(usize, hash & mask);
+            var idx = @as(usize, @truncate(hash & mask));
 
             var metadata = self.metadata.? + idx;
             while (metadata[0].isUsed()) {
@@ -1147,7 +1147,7 @@ pub fn HashMapUnmanaged(
             const fingerprint = Metadata.takeFingerprint(hash);
             // Don't loop indefinitely when there are no empty slots.
             var limit = self.capacity();
-            var idx = @truncate(usize, hash & mask);
+            var idx = @as(usize, @truncate(hash & mask));
 
             var metadata = self.metadata.? + idx;
             while (!metadata[0].isFree() and limit != 0) {
@@ -1325,7 +1325,7 @@ pub fn HashMapUnmanaged(
             const mask = self.capacity() - 1;
             const fingerprint = Metadata.takeFingerprint(hash);
             var limit = self.capacity();
-            var idx = @truncate(usize, hash & mask);
+            var idx = @as(usize, @truncate(hash & mask));
 
             var first_tombstone_idx: usize = self.capacity(); // invalid index
             var metadata = self.metadata.? + idx;
@@ -1450,7 +1450,7 @@ pub fn HashMapUnmanaged(
         }
 
         fn initMetadatas(self: *Self) void {
-            @memset(@ptrCast([*]u8, self.metadata.?)[0 .. @sizeOf(Metadata) * self.capacity()], 0);
+            @memset(@as([*]u8, @ptrCast(self.metadata.?))[0 .. @sizeOf(Metadata) * self.capacity()], 0);
         }
 
         // This counts the number of occupied slots (not counting tombstones), which is
@@ -1458,7 +1458,7 @@ pub fn HashMapUnmanaged(
         fn load(self: *const Self) Size {
             const max_load = (self.capacity() * max_load_percentage) / 100;
             assert(max_load >= self.available);
-            return @truncate(Size, max_load - self.available);
+            return @as(Size, @truncate(max_load - self.available));
         }
 
         fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) Allocator.Error!void {
@@ -1480,7 +1480,7 @@ pub fn HashMapUnmanaged(
             const new_cap = capacityForSize(self.size);
             try other.allocate(allocator, new_cap);
             other.initMetadatas();
-            other.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
+            other.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100));
 
             var i: Size = 0;
             var metadata = self.metadata.?;
@@ -1515,7 +1515,7 @@ pub fn HashMapUnmanaged(
             defer map.deinit(allocator);
             try map.allocate(allocator, new_cap);
             map.initMetadatas();
-            map.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
+            map.available = @as(u32, @truncate((new_cap * max_load_percentage) / 100));
 
             if (self.size != 0) {
                 const old_capacity = self.capacity();
@@ -1558,15 +1558,15 @@ pub fn HashMapUnmanaged(
 
             const metadata = ptr + @sizeOf(Header);
 
-            const hdr = @ptrFromInt(*Header, ptr);
+            const hdr = @as(*Header, @ptrFromInt(ptr));
             if (@sizeOf([*]V) != 0) {
-                hdr.values = @ptrFromInt([*]V, ptr + vals_start);
+                hdr.values = @as([*]V, @ptrFromInt(ptr + vals_start));
             }
             if (@sizeOf([*]K) != 0) {
-                hdr.keys = @ptrFromInt([*]K, ptr + keys_start);
+                hdr.keys = @as([*]K, @ptrFromInt(ptr + keys_start));
             }
             hdr.capacity = new_capacity;
-            self.metadata = @ptrFromInt([*]Metadata, metadata);
+            self.metadata = @as([*]Metadata, @ptrFromInt(metadata));
         }
 
         fn deallocate(self: *Self, allocator: Allocator) void {
@@ -1589,7 +1589,7 @@ pub fn HashMapUnmanaged(
 
             const total_size = std.mem.alignForward(usize, vals_end, max_align);
 
-            const slice = @ptrFromInt([*]align(max_align) u8, @intFromPtr(self.header()))[0..total_size];
+            const slice = @as([*]align(max_align) u8, @ptrFromInt(@intFromPtr(self.header())))[0..total_size];
             allocator.free(slice);
 
             self.metadata = null;
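
Note: every rewrite in the hash map hunks above is the same mechanical pattern of the new
builtin syntax: the destination type moves out of the cast's argument list and is supplied by
`@as` (or by any other result location). A minimal sketch of the probing-index case, with
hypothetical values:

    const std = @import("std");

    test "cast builtins now take their result type from the result location" {
        const hash: u64 = 0xDEADBEEF;
        const mask: u64 = 0xFF;
        // Old syntax: @truncate(usize, hash & mask)
        const idx: usize = @truncate(hash & mask); // inferred from the annotation on `idx`
        const idx2 = @as(usize, @truncate(hash & mask)); // what `zig fmt` emits
        try std.testing.expectEqual(idx, idx2);
    }
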
lib/std/heap.zig
@@ -61,11 +61,11 @@ const CAllocator = struct {
     pub const supports_posix_memalign = @hasDecl(c, "posix_memalign");
 
     fn getHeader(ptr: [*]u8) *[*]u8 {
-        return @ptrFromInt(*[*]u8, @intFromPtr(ptr) - @sizeOf(usize));
+        return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize)));
     }
 
     fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
-        const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align);
+        const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
         if (supports_posix_memalign) {
             // The posix_memalign only accepts alignment values that are a
             // multiple of the pointer size
@@ -75,13 +75,13 @@ const CAllocator = struct {
             if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
                 return null;
 
-            return @ptrCast([*]u8, aligned_ptr);
+            return @as([*]u8, @ptrCast(aligned_ptr));
         }
 
         // Thin wrapper around regular malloc, overallocate to account for
         // alignment padding and store the original malloc()'ed pointer before
         // the aligned address.
-        var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null);
+        var unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null));
         const unaligned_addr = @intFromPtr(unaligned_ptr);
         const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment);
         var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
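
The fallback path above implements the scheme the comment describes: malloc over-allocates by
`alignment - 1 + @sizeOf(usize)`, an aligned pointer is carved out of that block, and the raw
malloc() pointer is stashed in the usize slot just below the aligned address so `getHeader` can
recover it on free/resize. A reduced sketch of that fallback only (assuming libc is linked; the
posix_memalign path is omitted):

    const std = @import("std");
    const c = @cImport(@cInclude("stdlib.h"));

    fn allocAligned(len: usize, alignment: usize) ?[*]u8 {
        const raw = c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null;
        const unaligned: [*]u8 = @ptrCast(raw);
        const addr = std.mem.alignForward(usize, @intFromPtr(unaligned) + @sizeOf(usize), alignment);
        const aligned = unaligned + (addr - @intFromPtr(unaligned));
        // Stash the original pointer in the word just below the aligned address.
        const header: *align(1) [*]u8 = @ptrFromInt(addr - @sizeOf(usize));
        header.* = unaligned;
        return aligned;
    }
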
@@ -195,7 +195,7 @@ fn rawCAlloc(
     // type in C that is size 8 and has 16 byte alignment, so the alignment may
     // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc
     // is allowed to return a 1-byte aligned pointer.
-    return @ptrCast(?[*]u8, c.malloc(len));
+    return @as(?[*]u8, @ptrCast(c.malloc(len)));
 }
 
 fn rawCResize(
@@ -283,7 +283,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         }
 
         fn getRecordPtr(buf: []u8) *align(1) usize {
-            return @ptrFromInt(*align(1) usize, @intFromPtr(buf.ptr) + buf.len);
+            return @as(*align(1) usize, @ptrFromInt(@intFromPtr(buf.ptr) + buf.len));
         }
 
         fn alloc(
@@ -293,9 +293,9 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             return_address: usize,
         ) ?[*]u8 {
             _ = return_address;
-            const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
+            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
 
-            const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+            const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
             const amt = n + ptr_align - 1 + @sizeOf(usize);
             const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
             const heap_handle = optional_heap_handle orelse blk: {
@@ -308,7 +308,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null;
             const root_addr = @intFromPtr(ptr);
             const aligned_addr = mem.alignForward(usize, root_addr, ptr_align);
-            const buf = @ptrFromInt([*]u8, aligned_addr)[0..n];
+            const buf = @as([*]u8, @ptrFromInt(aligned_addr))[0..n];
             getRecordPtr(buf).* = root_addr;
             return buf.ptr;
         }
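
`getRecordPtr` above is the same trick at the other end of the buffer: `alloc` reserves
`ptr_align - 1 + @sizeOf(usize)` extra bytes and records the original `HeapAlloc` address in a
usize placed immediately after the user slice, so resize/free can find the real allocation. A
hypothetical round-trip, independent of the Windows API:

    const std = @import("std");

    fn getRecordPtr(buf: []u8) *align(1) usize {
        return @ptrFromInt(@intFromPtr(buf.ptr) + buf.len);
    }

    test "record word sits immediately past the buffer" {
        var backing: [64]u8 = undefined;
        const buf = backing[0..32];
        getRecordPtr(buf).* = 0x1234; // writes into backing[32..40]
        try std.testing.expectEqual(@as(usize, 0x1234), getRecordPtr(buf).*);
    }
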
@@ -322,7 +322,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         ) bool {
             _ = log2_buf_align;
             _ = return_address;
-            const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
+            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
 
             const root_addr = getRecordPtr(buf).*;
             const align_offset = @intFromPtr(buf.ptr) - root_addr;
@@ -330,10 +330,10 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             const new_ptr = os.windows.kernel32.HeapReAlloc(
                 self.heap_handle.?,
                 os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
-                @ptrFromInt(*anyopaque, root_addr),
+                @as(*anyopaque, @ptrFromInt(root_addr)),
                 amt,
             ) orelse return false;
-            assert(new_ptr == @ptrFromInt(*anyopaque, root_addr));
+            assert(new_ptr == @as(*anyopaque, @ptrFromInt(root_addr)));
             getRecordPtr(buf.ptr[0..new_size]).* = root_addr;
             return true;
         }
@@ -346,8 +346,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         ) void {
             _ = log2_buf_align;
             _ = return_address;
-            const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
-            os.windows.HeapFree(self.heap_handle.?, 0, @ptrFromInt(*anyopaque, getRecordPtr(buf).*));
+            const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
+            os.windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*)));
         }
     },
     else => @compileError("Unsupported OS"),
@@ -415,9 +415,9 @@ pub const FixedBufferAllocator = struct {
     }
 
     fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-        const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
         _ = ra;
-        const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+        const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
         const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
         const adjusted_index = self.end_index + adjust_off;
         const new_end_index = adjusted_index + n;
@@ -433,7 +433,7 @@ pub const FixedBufferAllocator = struct {
         new_size: usize,
         return_address: usize,
     ) bool {
-        const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
         _ = log2_buf_align;
         _ = return_address;
         assert(self.ownsSlice(buf)); // sanity check
@@ -462,7 +462,7 @@ pub const FixedBufferAllocator = struct {
         log2_buf_align: u8,
         return_address: usize,
     ) void {
-        const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
         _ = log2_buf_align;
         _ = return_address;
         assert(self.ownsSlice(buf)); // sanity check
@@ -473,9 +473,9 @@ pub const FixedBufferAllocator = struct {
     }
 
     fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
-        const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
+        const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
         _ = ra;
-        const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
+        const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
         var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
         while (true) {
             const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
@@ -537,7 +537,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             log2_ptr_align: u8,
             ra: usize,
         ) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse
                 return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra);
         }
@@ -549,7 +549,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             new_len: usize,
             ra: usize,
         ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                 return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra);
             } else {
@@ -563,7 +563,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             log2_buf_align: u8,
             ra: usize,
         ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                 return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra);
             } else {
@@ -728,14 +728,14 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
     try testing.expect(slice.len == 100);
     for (slice, 0..) |*item, i| {
         item.* = try allocator.create(i32);
-        item.*.* = @intCast(i32, i);
+        item.*.* = @as(i32, @intCast(i));
     }
 
     slice = try allocator.realloc(slice, 20000);
     try testing.expect(slice.len == 20000);
 
     for (slice[0..100], 0..) |item, i| {
-        try testing.expect(item.* == @intCast(i32, i));
+        try testing.expect(item.* == @as(i32, @intCast(i)));
         allocator.destroy(item);
     }
 
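
A pattern worth calling out across this file: the allocator vtable casts are among the spots the
commit message flags as manual fixes, because `@alignCast` no longer takes an alignment operand.
`@ptrCast(*T, @alignCast(@alignOf(T), ctx))` collapses into a type-annotated declaration, with
both casts inferring their target from the result location. Sketch with a hypothetical `Self`:

    const Self = struct { count: usize };

    fn fromOpaque(ctx: *anyopaque) *Self {
        // @ptrCast changes the pointee type; @alignCast raises the alignment
        // from 1 to @alignOf(Self), checked at runtime in safe builds. Both
        // read their destination from the `*Self` annotation.
        const self: *Self = @ptrCast(@alignCast(ctx));
        return self;
    }
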
lib/std/io.zig
@@ -275,7 +275,7 @@ pub fn Poller(comptime StreamEnum: type) type {
                     )) {
                         .pending => {
                             self.windows.active.handles_buf[self.windows.active.count] = handle;
-                            self.windows.active.stream_map[self.windows.active.count] = @enumFromInt(StreamEnum, i);
+                            self.windows.active.stream_map[self.windows.active.count] = @as(StreamEnum, @enumFromInt(i));
                             self.windows.active.count += 1;
                         },
                         .closed => {}, // don't add to the wait_objects list
lib/std/leb128.zig
@@ -30,17 +30,17 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T {
         if (value > std.math.maxInt(T)) return error.Overflow;
     }
 
-    return @truncate(T, value);
+    return @as(T, @truncate(value));
 }
 
 /// Write a single unsigned integer as unsigned LEB128 to the given writer.
 pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
     const T = @TypeOf(uint_value);
     const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
-    var value = @intCast(U, uint_value);
+    var value = @as(U, @intCast(uint_value));
 
     while (true) {
-        const byte = @truncate(u8, value & 0x7f);
+        const byte = @as(u8, @truncate(value & 0x7f));
         value >>= 7;
         if (value == 0) {
             try writer.writeByte(byte);
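
For context on the loop being edited: ULEB128 emits seven bits per byte, least-significant group
first, with bit 7 set on every byte except the last. A worked round-trip (values chosen for
illustration):

    const std = @import("std");

    test "ULEB128 encoding of 300" {
        // 300 = 0b1_0010_1100 -> low seven bits 0b010_1100 plus the
        // continuation bit (0xAC), then the remaining 0b000_0010 (0x02).
        var buf: [2]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try std.leb.writeULEB128(fbs.writer(), @as(u32, 300));
        try std.testing.expectEqualSlices(u8, &.{ 0xAC, 0x02 }, fbs.getWritten());
    }
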
@@ -71,18 +71,18 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
         if (ov[1] != 0) {
             // Overflow is ok so long as the sign bit is set and this is the last byte
             if (byte & 0x80 != 0) return error.Overflow;
-            if (@bitCast(S, ov[0]) >= 0) return error.Overflow;
+            if (@as(S, @bitCast(ov[0])) >= 0) return error.Overflow;
 
             // and all the overflowed bits are 1
-            const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
-            const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
+            const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift)));
+            const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift;
             if (remaining_bits != -1) return error.Overflow;
         } else {
             // If we don't overflow and this is the last byte and the number being decoded
             // is negative, check that the remaining bits are 1
-            if ((byte & 0x80 == 0) and (@bitCast(S, ov[0]) < 0)) {
-                const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
-                const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
+            if ((byte & 0x80 == 0) and (@as(S, @bitCast(ov[0])) < 0)) {
+                const remaining_shift = @as(u3, @intCast(@typeInfo(U).Int.bits - @as(u16, shift)));
+                const remaining_bits = @as(i8, @bitCast(byte | 0x80)) >> remaining_shift;
                 if (remaining_bits != -1) return error.Overflow;
             }
         }
@@ -92,7 +92,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
             const needs_sign_ext = group + 1 < max_group;
             if (byte & 0x40 != 0 and needs_sign_ext) {
                 const ones = @as(S, -1);
-                value |= @bitCast(U, ones) << (shift + 7);
+                value |= @as(U, @bitCast(ones)) << (shift + 7);
             }
             break;
         }
@@ -100,13 +100,13 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
         return error.Overflow;
     }
 
-    const result = @bitCast(S, value);
+    const result = @as(S, @bitCast(value));
     // Only applies if we extended to i8
     if (S != T) {
         if (result > std.math.maxInt(T) or result < std.math.minInt(T)) return error.Overflow;
     }
 
-    return @truncate(T, result);
+    return @as(T, @truncate(result));
 }
 
 /// Write a single signed integer as signed LEB128 to the given writer.
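
The overflow handling above exists because signed LEB128 sign-extends from the final group: a
one-byte `0x7f` already means -1, and a longer encoding is only legal when the extra bits are
pure sign. A small illustration:

    const std = @import("std");

    test "SLEB128 sign extension" {
        var fbs = std.io.fixedBufferStream("\x7f");
        try std.testing.expectEqual(@as(i8, -1), try std.leb.readILEB128(i8, fbs.reader()));
        // -128 needs two bytes: 0x80 (seven zero bits, continue), then 0x7f (all sign bits).
        var fbs2 = std.io.fixedBufferStream("\x80\x7f");
        try std.testing.expectEqual(@as(i8, -128), try std.leb.readILEB128(i8, fbs2.reader()));
    }
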
@@ -115,11 +115,11 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
     const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
     const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits);
 
-    var value = @intCast(S, int_value);
+    var value = @as(S, @intCast(int_value));
 
     while (true) {
-        const uvalue = @bitCast(U, value);
-        const byte = @truncate(u8, uvalue);
+        const uvalue = @as(U, @bitCast(value));
+        const byte = @as(u8, @truncate(uvalue));
         value >>= 6;
         if (value == -1 or value == 0) {
             try writer.writeByte(byte & 0x7F);
@@ -141,15 +141,15 @@ pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
 pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.unsigned, l * 7)) void {
     const T = @TypeOf(int);
     const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
-    var value = @intCast(U, int);
+    var value = @as(U, @intCast(int));
 
     comptime var i = 0;
     inline while (i < (l - 1)) : (i += 1) {
-        const byte = @truncate(u8, value) | 0b1000_0000;
+        const byte = @as(u8, @truncate(value)) | 0b1000_0000;
         value >>= 7;
         ptr[i] = byte;
     }
-    ptr[i] = @truncate(u8, value);
+    ptr[i] = @as(u8, @truncate(value));
 }
 
 test "writeUnsignedFixed" {
@@ -245,7 +245,7 @@ test "deserialize signed LEB128" {
     try testing.expect((try test_read_ileb128(i16, "\xff\xff\x7f")) == -1);
     try testing.expect((try test_read_ileb128(i32, "\xff\xff\xff\xff\x7f")) == -1);
     try testing.expect((try test_read_ileb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000);
-    try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @bitCast(i64, @intCast(u64, 0x8000000000000000)));
+    try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))));
     try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000);
     try testing.expect((try test_read_ileb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000);
 
@@ -356,7 +356,7 @@ test "serialize unsigned LEB128" {
         const max = std.math.maxInt(T);
         var i = @as(std.meta.Int(.unsigned, @typeInfo(T).Int.bits + 1), min);
 
-        while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
+        while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
     }
 }
 
@@ -374,6 +374,6 @@ test "serialize signed LEB128" {
         const max = std.math.maxInt(T);
         var i = @as(std.meta.Int(.signed, @typeInfo(T).Int.bits + 1), min);
 
-        while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
+        while (i <= max) : (i += 1) try test_write_leb128(@as(T, @intCast(i)));
     }
 }
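
`writeUnsignedFixed` (also touched above) pads the encoding out to exactly `l` bytes by forcing
the continuation bit on the first `l - 1` groups, which is what makes the field patchable in
place. For example, with `l = 4`:

    const std = @import("std");

    test "writeUnsignedFixed pads to the requested width" {
        var buf: [4]u8 = undefined;
        std.leb.writeUnsignedFixed(4, &buf, 1);
        // 1 is spread over four groups: 0x81 0x80 0x80 0x00.
        try std.testing.expectEqualSlices(u8, &.{ 0x81, 0x80, 0x80, 0x00 }, &buf);
    }
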
lib/std/macho.zig
@@ -787,7 +787,7 @@ pub const section_64 = extern struct {
     }
 
     pub fn @"type"(sect: section_64) u8 {
-        return @truncate(u8, sect.flags & 0xff);
+        return @as(u8, @truncate(sect.flags & 0xff));
     }
 
     pub fn attrs(sect: section_64) u32 {
@@ -1870,7 +1870,7 @@ pub const LoadCommandIterator = struct {
 
         pub fn cast(lc: LoadCommand, comptime Cmd: type) ?Cmd {
             if (lc.data.len < @sizeOf(Cmd)) return null;
-            return @ptrCast(*const Cmd, @alignCast(@alignOf(Cmd), &lc.data[0])).*;
+            return @as(*const Cmd, @ptrCast(@alignCast(&lc.data[0]))).*;
         }
 
         /// Asserts LoadCommand is of type segment_command_64.
@@ -1878,9 +1878,9 @@ pub const LoadCommandIterator = struct {
             const segment_lc = lc.cast(segment_command_64).?;
             if (segment_lc.nsects == 0) return &[0]section_64{};
             const data = lc.data[@sizeOf(segment_command_64)..];
-            const sections = @ptrCast(
+            const sections = @as(
                 [*]const section_64,
-                @alignCast(@alignOf(section_64), &data[0]),
+                @ptrCast(@alignCast(&data[0])),
             )[0..segment_lc.nsects];
             return sections;
         }
@@ -1903,16 +1903,16 @@ pub const LoadCommandIterator = struct {
     pub fn next(it: *LoadCommandIterator) ?LoadCommand {
         if (it.index >= it.ncmds) return null;
 
-        const hdr = @ptrCast(
+        const hdr = @as(
             *const load_command,
-            @alignCast(@alignOf(load_command), &it.buffer[0]),
+            @ptrCast(@alignCast(&it.buffer[0])),
         ).*;
         const cmd = LoadCommand{
             .hdr = hdr,
             .data = it.buffer[0..hdr.cmdsize],
         };
 
-        it.buffer = @alignCast(@alignOf(u64), it.buffer[hdr.cmdsize..]);
+        it.buffer = @alignCast(it.buffer[hdr.cmdsize..]);
         it.index += 1;
 
         return cmd;
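
The iterator hunks show the other manual-fix shape: a typed header is read out of a raw byte
buffer via `@ptrCast` + `@alignCast`, with the result type supplied by `@as`. A reduced sketch of
the same read, using a hypothetical header type (assumes a little-endian host):

    const std = @import("std");

    const Hdr = extern struct { cmd: u32, cmdsize: u32 };

    test "read a typed header out of an aligned byte buffer" {
        var buffer: [8]u8 align(@alignOf(u64)) = .{ 1, 0, 0, 0, 16, 0, 0, 0 };
        const hdr = @as(*const Hdr, @ptrCast(@alignCast(&buffer[0]))).*;
        try std.testing.expectEqual(@as(u32, 1), hdr.cmd); // little-endian assumed
        try std.testing.expectEqual(@as(u32, 16), hdr.cmdsize);
    }
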
lib/std/math.zig
@@ -85,31 +85,31 @@ pub const inf_f128 = @compileError("Deprecated: use `inf(f128)` instead");
 pub const epsilon = @compileError("Deprecated: use `floatEps` instead");
 
 pub const nan_u16 = @as(u16, 0x7C01);
-pub const nan_f16 = @bitCast(f16, nan_u16);
+pub const nan_f16 = @as(f16, @bitCast(nan_u16));
 
 pub const qnan_u16 = @as(u16, 0x7E00);
-pub const qnan_f16 = @bitCast(f16, qnan_u16);
+pub const qnan_f16 = @as(f16, @bitCast(qnan_u16));
 
 pub const nan_u32 = @as(u32, 0x7F800001);
-pub const nan_f32 = @bitCast(f32, nan_u32);
+pub const nan_f32 = @as(f32, @bitCast(nan_u32));
 
 pub const qnan_u32 = @as(u32, 0x7FC00000);
-pub const qnan_f32 = @bitCast(f32, qnan_u32);
+pub const qnan_f32 = @as(f32, @bitCast(qnan_u32));
 
 pub const nan_u64 = @as(u64, 0x7FF << 52) | 1;
-pub const nan_f64 = @bitCast(f64, nan_u64);
+pub const nan_f64 = @as(f64, @bitCast(nan_u64));
 
 pub const qnan_u64 = @as(u64, 0x7ff8000000000000);
-pub const qnan_f64 = @bitCast(f64, qnan_u64);
+pub const qnan_f64 = @as(f64, @bitCast(qnan_u64));
 
 pub const nan_f80 = make_f80(F80{ .fraction = 0xA000000000000000, .exp = 0x7fff });
 pub const qnan_f80 = make_f80(F80{ .fraction = 0xC000000000000000, .exp = 0x7fff });
 
 pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001);
-pub const nan_f128 = @bitCast(f128, nan_u128);
+pub const nan_f128 = @as(f128, @bitCast(nan_u128));
 
 pub const qnan_u128 = @as(u128, 0x7fff8000000000000000000000000000);
-pub const qnan_f128 = @bitCast(f128, qnan_u128);
+pub const qnan_f128 = @as(f128, @bitCast(qnan_u128));
 
 pub const nan = @import("math/nan.zig").nan;
 pub const snan = @import("math/nan.zig").snan;
@@ -508,10 +508,10 @@ pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
             const C = @typeInfo(T).Vector.child;
             const len = @typeInfo(T).Vector.len;
             if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0));
-            break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt));
+            break :blk @splat(len, @as(Log2Int(C), @intCast(abs_shift_amt)));
         } else {
             if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0;
-            break :blk @intCast(Log2Int(T), abs_shift_amt);
+            break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
         }
     };
 
@@ -552,10 +552,10 @@ pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
             const C = @typeInfo(T).Vector.child;
             const len = @typeInfo(T).Vector.len;
             if (abs_shift_amt >= @typeInfo(C).Int.bits) return @splat(len, @as(C, 0));
-            break :blk @splat(len, @intCast(Log2Int(C), abs_shift_amt));
+            break :blk @splat(len, @as(Log2Int(C), @intCast(abs_shift_amt)));
         } else {
             if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0;
-            break :blk @intCast(Log2Int(T), abs_shift_amt);
+            break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
         }
     };
 
@@ -596,7 +596,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T {
         if (@typeInfo(C).Int.signedness == .signed) {
             @compileError("cannot rotate signed integers");
         }
-        const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits));
+        const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits)));
         return (x >> @splat(@typeInfo(T).Vector.len, ar)) | (x << @splat(@typeInfo(T).Vector.len, 1 + ~ar));
     } else if (@typeInfo(T).Int.signedness == .signed) {
         @compileError("cannot rotate signed integer");
@@ -604,7 +604,7 @@ pub fn rotr(comptime T: type, x: T, r: anytype) T {
         if (T == u0) return 0;
 
         if (isPowerOfTwo(@typeInfo(T).Int.bits)) {
-            const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits));
+            const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits)));
             return x >> ar | x << (1 +% ~ar);
         } else {
             const ar = @mod(r, @typeInfo(T).Int.bits);
@@ -640,7 +640,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T {
         if (@typeInfo(C).Int.signedness == .signed) {
             @compileError("cannot rotate signed integers");
         }
-        const ar = @intCast(Log2Int(C), @mod(r, @typeInfo(C).Int.bits));
+        const ar = @as(Log2Int(C), @intCast(@mod(r, @typeInfo(C).Int.bits)));
         return (x << @splat(@typeInfo(T).Vector.len, ar)) | (x >> @splat(@typeInfo(T).Vector.len, 1 +% ~ar));
     } else if (@typeInfo(T).Int.signedness == .signed) {
         @compileError("cannot rotate signed integer");
@@ -648,7 +648,7 @@ pub fn rotl(comptime T: type, x: T, r: anytype) T {
         if (T == u0) return 0;
 
         if (isPowerOfTwo(@typeInfo(T).Int.bits)) {
-            const ar = @intCast(Log2Int(T), @mod(r, @typeInfo(T).Int.bits));
+            const ar = @as(Log2Int(T), @intCast(@mod(r, @typeInfo(T).Int.bits)));
             return x << ar | x >> 1 +% ~ar;
         } else {
             const ar = @mod(r, @typeInfo(T).Int.bits);
@@ -1029,9 +1029,9 @@ pub fn absCast(x: anytype) switch (@typeInfo(@TypeOf(x))) {
             if (int_info.signedness == .unsigned) return x;
             const Uint = std.meta.Int(.unsigned, int_info.bits);
             if (x < 0) {
-                return ~@bitCast(Uint, x +% -1);
+                return ~@as(Uint, @bitCast(x +% -1));
             } else {
-                return @intCast(Uint, x);
+                return @as(Uint, @intCast(x));
             }
         },
         else => unreachable,
@@ -1056,7 +1056,7 @@ pub fn negateCast(x: anytype) !std.meta.Int(.signed, @bitSizeOf(@TypeOf(x))) {
 
     if (x == -minInt(int)) return minInt(int);
 
-    return -@intCast(int, x);
+    return -@as(int, @intCast(x));
 }
 
 test "negateCast" {
@@ -1080,7 +1080,7 @@ pub fn cast(comptime T: type, x: anytype) ?T {
     } else if ((is_comptime or minInt(@TypeOf(x)) < minInt(T)) and x < minInt(T)) {
         return null;
     } else {
-        return @intCast(T, x);
+        return @as(T, @intCast(x));
     }
 }
 
@@ -1102,13 +1102,19 @@ test "cast" {
 
 pub const AlignCastError = error{UnalignedMemory};
 
+fn AlignCastResult(comptime alignment: u29, comptime Ptr: type) type {
+    var ptr_info = @typeInfo(Ptr);
+    ptr_info.Pointer.alignment = alignment;
+    return @Type(ptr_info);
+}
+
 /// Align cast a pointer but return an error if it's the wrong alignment
-pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!@TypeOf(@alignCast(alignment, ptr)) {
+pub fn alignCast(comptime alignment: u29, ptr: anytype) AlignCastError!AlignCastResult(alignment, @TypeOf(ptr)) {
     const addr = @intFromPtr(ptr);
     if (addr % alignment != 0) {
         return error.UnalignedMemory;
     }
-    return @alignCast(alignment, ptr);
+    return @alignCast(ptr);
 }
 
 /// Asserts `int > 0`.
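
The new `AlignCastResult` helper above is needed because the old return type was written as
`@TypeOf(@alignCast(alignment, ptr))`, and `@alignCast` no longer accepts an alignment operand:
the helper rebuilds the pointer type with the requested alignment so the bare `@alignCast(ptr)`
in the body can infer it from the return type. Callers are unaffected:

    const std = @import("std");

    test "math.alignCast reports misalignment as an error" {
        var words: [2]u32 align(8) = .{ 1, 2 };
        const byte_ptr: [*]u8 = @ptrCast(&words);
        // Aligned address: succeeds, yielding a [*]align(8) u8.
        _ = try std.math.alignCast(8, byte_ptr);
        // Off-by-one address: fails the `addr % alignment` check.
        try std.testing.expectError(error.UnalignedMemory, std.math.alignCast(8, byte_ptr + 1));
    }
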
@@ -1172,7 +1178,7 @@ pub inline fn floor(value: anytype) @TypeOf(value) {
 pub fn floorPowerOfTwo(comptime T: type, value: T) T {
     const uT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
     if (value <= 0) return 0;
-    return @as(T, 1) << log2_int(uT, @intCast(uT, value));
+    return @as(T, 1) << log2_int(uT, @as(uT, @intCast(value)));
 }
 
 test "floorPowerOfTwo" {
@@ -1211,7 +1217,7 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(
     assert(value != 0);
     const PromotedType = std.meta.Int(@typeInfo(T).Int.signedness, @typeInfo(T).Int.bits + 1);
     const ShiftType = std.math.Log2Int(PromotedType);
-    return @as(PromotedType, 1) << @intCast(ShiftType, @typeInfo(T).Int.bits - @clz(value - 1));
+    return @as(PromotedType, 1) << @as(ShiftType, @intCast(@typeInfo(T).Int.bits - @clz(value - 1)));
 }
 
 /// Returns the next power of two (if the value is not already a power of two).
@@ -1227,7 +1233,7 @@ pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
     if (overflowBit & x != 0) {
         return error.Overflow;
     }
-    return @intCast(T, x);
+    return @as(T, @intCast(x));
 }
 
 /// Returns the next power of two (if the value is not already a power
@@ -1277,7 +1283,7 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
     if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
         @compileError("log2_int requires an unsigned integer, found " ++ @typeName(T));
     assert(x != 0);
-    return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(x));
+    return @as(Log2Int(T), @intCast(@typeInfo(T).Int.bits - 1 - @clz(x)));
 }
 
 /// Return the log base 2 of integer value x, rounding up to the
@@ -1311,8 +1317,8 @@ pub fn lossyCast(comptime T: type, value: anytype) T {
     switch (@typeInfo(T)) {
         .Float => {
             switch (@typeInfo(@TypeOf(value))) {
-                .Int => return @floatFromInt(T, value),
-                .Float => return @floatCast(T, value),
+                .Int => return @as(T, @floatFromInt(value)),
+                .Float => return @as(T, @floatCast(value)),
                 .ComptimeInt => return @as(T, value),
                 .ComptimeFloat => return @as(T, value),
                 else => @compileError("bad type"),
@@ -1326,7 +1332,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T {
                     } else if (value <= minInt(T)) {
                         return @as(T, minInt(T));
                     } else {
-                        return @intCast(T, value);
+                        return @as(T, @intCast(value));
                     }
                 },
                 .Float, .ComptimeFloat => {
@@ -1335,7 +1341,7 @@ pub fn lossyCast(comptime T: type, value: anytype) T {
                     } else if (value <= minInt(T)) {
                         return @as(T, minInt(T));
                     } else {
-                        return @intFromFloat(T, value);
+                        return @as(T, @intFromFloat(value));
                     }
                 },
                 else => @compileError("bad type"),
@@ -1594,7 +1600,7 @@ test "compare between signed and unsigned" {
     try testing.expect(compare(@as(u8, 255), .gt, @as(i9, -1)));
     try testing.expect(!compare(@as(u8, 255), .lte, @as(i9, -1)));
     try testing.expect(compare(@as(u8, 1), .lt, @as(u8, 2)));
-    try testing.expect(@bitCast(u8, @as(i8, -1)) == @as(u8, 255));
+    try testing.expect(@as(u8, @bitCast(@as(i8, -1))) == @as(u8, 255));
     try testing.expect(!compare(@as(u8, 255), .eq, @as(i8, -1)));
     try testing.expect(compare(@as(u8, 1), .eq, @as(u8, 1)));
 }
@@ -1624,7 +1630,7 @@ test "order.compare" {
 
 test "compare.reverse" {
     inline for (@typeInfo(CompareOperator).Enum.fields) |op_field| {
-        const op = @enumFromInt(CompareOperator, op_field.value);
+        const op = @as(CompareOperator, @enumFromInt(op_field.value));
         try testing.expect(compare(2, op, 3) == compare(3, op.reverse(), 2));
         try testing.expect(compare(3, op, 3) == compare(3, op.reverse(), 3));
         try testing.expect(compare(4, op, 3) == compare(3, op.reverse(), 4));
@@ -1646,10 +1652,10 @@ pub inline fn boolMask(comptime MaskInt: type, value: bool) MaskInt {
     if (MaskInt == u1) return @intFromBool(value);
     if (MaskInt == i1) {
         // The @as here is a workaround for #7950
-        return @bitCast(i1, @as(u1, @intFromBool(value)));
+        return @as(i1, @bitCast(@as(u1, @intFromBool(value))));
     }
 
-    return -%@intCast(MaskInt, @intFromBool(value));
+    return -%@as(MaskInt, @intCast(@intFromBool(value)));
 }
 
 test "boolMask" {
@@ -1680,7 +1686,7 @@ test "boolMask" {
 
 /// Return the mod of `num` with the smallest integer type
 pub fn comptimeMod(num: anytype, comptime denom: comptime_int) IntFittingRange(0, denom - 1) {
-    return @intCast(IntFittingRange(0, denom - 1), @mod(num, denom));
+    return @as(IntFittingRange(0, denom - 1), @intCast(@mod(num, denom)));
 }
 
 pub const F80 = struct {
@@ -1690,14 +1696,14 @@ pub const F80 = struct {
 
 pub fn make_f80(repr: F80) f80 {
     const int = (@as(u80, repr.exp) << 64) | repr.fraction;
-    return @bitCast(f80, int);
+    return @as(f80, @bitCast(int));
 }
 
 pub fn break_f80(x: f80) F80 {
-    const int = @bitCast(u80, x);
+    const int = @as(u80, @bitCast(x));
     return .{
-        .fraction = @truncate(u64, int),
-        .exp = @truncate(u16, int >> 64),
+        .fraction = @as(u64, @truncate(int)),
+        .exp = @as(u16, @truncate(int >> 64)),
     };
 }
 
@@ -1709,7 +1715,7 @@ pub inline fn sign(i: anytype) @TypeOf(i) {
     const T = @TypeOf(i);
     return switch (@typeInfo(T)) {
         .Int, .ComptimeInt => @as(T, @intFromBool(i > 0)) - @as(T, @intFromBool(i < 0)),
-        .Float, .ComptimeFloat => @floatFromInt(T, @intFromBool(i > 0)) - @floatFromInt(T, @intFromBool(i < 0)),
+        .Float, .ComptimeFloat => @as(T, @floatFromInt(@intFromBool(i > 0))) - @as(T, @floatFromInt(@intFromBool(i < 0))),
         .Vector => |vinfo| blk: {
             switch (@typeInfo(vinfo.child)) {
                 .Int, .Float => {
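
`boolMask` above leans on wrapping negation: `@intFromBool` yields 0 or 1, and `-%1` in an N-bit
integer wraps around to all ones, so the function produces an all-zero or all-one mask without
branching. For instance:

    const std = @import("std");

    test "boolMask yields zero or all ones" {
        try std.testing.expectEqual(@as(u8, 0xff), std.math.boolMask(u8, true));
        try std.testing.expectEqual(@as(u8, 0x00), std.math.boolMask(u8, false));
        try std.testing.expectEqual(@as(i16, -1), std.math.boolMask(i16, true));
    }
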
lib/std/mem.zig
@@ -69,7 +69,7 @@ pub fn ValidationAllocator(comptime T: type) type {
             ret_addr: usize,
         ) ?[*]u8 {
             assert(n > 0);
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             const underlying = self.getUnderlyingAllocatorPtr();
             const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse
                 return null;
@@ -84,7 +84,7 @@ pub fn ValidationAllocator(comptime T: type) type {
             new_len: usize,
             ret_addr: usize,
         ) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             assert(buf.len > 0);
             const underlying = self.getUnderlyingAllocatorPtr();
             return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr);
@@ -96,7 +96,7 @@ pub fn ValidationAllocator(comptime T: type) type {
             log2_buf_align: u8,
             ret_addr: usize,
         ) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
+            const self: *Self = @ptrCast(@alignCast(ctx));
             assert(buf.len > 0);
             const underlying = self.getUnderlyingAllocatorPtr();
             underlying.rawFree(buf, log2_buf_align, ret_addr);
@@ -169,7 +169,7 @@ test "Allocator.resize" {
         var values = try testing.allocator.alloc(T, 100);
         defer testing.allocator.free(values);
 
-        for (values, 0..) |*v, i| v.* = @intCast(T, i);
+        for (values, 0..) |*v, i| v.* = @as(T, @intCast(i));
         if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
         values = values.ptr[0 .. values.len + 10];
         try testing.expect(values.len == 110);
@@ -185,7 +185,7 @@ test "Allocator.resize" {
         var values = try testing.allocator.alloc(T, 100);
         defer testing.allocator.free(values);
 
-        for (values, 0..) |*v, i| v.* = @floatFromInt(T, i);
+        for (values, 0..) |*v, i| v.* = @as(T, @floatFromInt(i));
         if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
         values = values.ptr[0 .. values.len + 10];
         try testing.expect(values.len == 110);
@@ -233,7 +233,7 @@ pub fn zeroes(comptime T: type) T {
             return @as(T, 0);
         },
         .Enum, .EnumLiteral => {
-            return @enumFromInt(T, 0);
+            return @as(T, @enumFromInt(0));
         },
         .Void => {
             return {};
@@ -264,7 +264,7 @@ pub fn zeroes(comptime T: type) T {
             switch (ptr_info.size) {
                 .Slice => {
                     if (ptr_info.sentinel) |sentinel| {
-                        if (ptr_info.child == u8 and @ptrCast(*const u8, sentinel).* == 0) {
+                        if (ptr_info.child == u8 and @as(*const u8, @ptrCast(sentinel)).* == 0) {
                             return ""; // A special case for the most common use-case: null-terminated strings.
                         }
                         @compileError("Can't set a sentinel slice to zero. This would require allocating memory.");
@@ -282,7 +282,7 @@ pub fn zeroes(comptime T: type) T {
         },
         .Array => |info| {
             if (info.sentinel) |sentinel_ptr| {
-                const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*;
+                const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*;
                 return [_:sentinel]info.child{zeroes(info.child)} ** info.len;
             }
             return [_]info.child{zeroes(info.child)} ** info.len;
@@ -456,7 +456,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T {
                                 },
                             }
                         } else if (field.default_value) |default_value_ptr| {
-                            const default_value = @ptrCast(*align(1) const field.type, default_value_ptr).*;
+                            const default_value = @as(*align(1) const field.type, @ptrCast(default_value_ptr)).*;
                             @field(value, field.name) = default_value;
                         } else {
                             switch (@typeInfo(field.type)) {
@@ -709,7 +709,7 @@ pub fn span(ptr: anytype) Span(@TypeOf(ptr)) {
     const l = len(ptr);
     const ptr_info = @typeInfo(Result).Pointer;
     if (ptr_info.sentinel) |s_ptr| {
-        const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*;
+        const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*;
         return ptr[0..l :s];
     } else {
         return ptr[0..l];
@@ -740,7 +740,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type {
                         // to find the value searched for, which is only the case if it matches
                         // the sentinel of the type passed.
                         if (array_info.sentinel) |sentinel_ptr| {
-                            const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*;
+                            const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*;
                             if (end == sentinel) {
                                 new_ptr_info.sentinel = &end;
                             } else {
@@ -755,7 +755,7 @@ fn SliceTo(comptime T: type, comptime end: meta.Elem(T)) type {
                     // to find the value searched for, which is only the case if it matches
                     // the sentinel of the type passed.
                     if (ptr_info.sentinel) |sentinel_ptr| {
-                        const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*;
+                        const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*;
                         if (end == sentinel) {
                             new_ptr_info.sentinel = &end;
                         } else {
@@ -793,7 +793,7 @@ pub fn sliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) SliceTo(@Typ
     const length = lenSliceTo(ptr, end);
     const ptr_info = @typeInfo(Result).Pointer;
     if (ptr_info.sentinel) |s_ptr| {
-        const s = @ptrCast(*align(1) const ptr_info.child, s_ptr).*;
+        const s = @as(*align(1) const ptr_info.child, @ptrCast(s_ptr)).*;
         return ptr[0..length :s];
     } else {
         return ptr[0..length];
@@ -810,11 +810,11 @@ test "sliceTo" {
         try testing.expectEqualSlices(u16, array[0..2], sliceTo(&array, 3));
         try testing.expectEqualSlices(u16, array[0..2], sliceTo(array[0..3], 3));
 
-        const sentinel_ptr = @ptrCast([*:5]u16, &array);
+        const sentinel_ptr = @as([*:5]u16, @ptrCast(&array));
         try testing.expectEqualSlices(u16, array[0..2], sliceTo(sentinel_ptr, 3));
         try testing.expectEqualSlices(u16, array[0..4], sliceTo(sentinel_ptr, 99));
 
-        const optional_sentinel_ptr = @ptrCast(?[*:5]u16, &array);
+        const optional_sentinel_ptr = @as(?[*:5]u16, @ptrCast(&array));
         try testing.expectEqualSlices(u16, array[0..2], sliceTo(optional_sentinel_ptr, 3).?);
         try testing.expectEqualSlices(u16, array[0..4], sliceTo(optional_sentinel_ptr, 99).?);
 
@@ -846,7 +846,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize {
             .One => switch (@typeInfo(ptr_info.child)) {
                 .Array => |array_info| {
                     if (array_info.sentinel) |sentinel_ptr| {
-                        const sentinel = @ptrCast(*align(1) const array_info.child, sentinel_ptr).*;
+                        const sentinel = @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*;
                         if (sentinel == end) {
                             return indexOfSentinel(array_info.child, end, ptr);
                         }
@@ -856,7 +856,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize {
                 else => {},
             },
             .Many => if (ptr_info.sentinel) |sentinel_ptr| {
-                const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*;
+                const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*;
                 // We may be looking for something other than the sentinel,
                 // but iterating past the sentinel would be a bug so we need
                 // to check for both.
@@ -870,7 +870,7 @@ fn lenSliceTo(ptr: anytype, comptime end: meta.Elem(@TypeOf(ptr))) usize {
             },
             .Slice => {
                 if (ptr_info.sentinel) |sentinel_ptr| {
-                    const sentinel = @ptrCast(*align(1) const ptr_info.child, sentinel_ptr).*;
+                    const sentinel = @as(*align(1) const ptr_info.child, @ptrCast(sentinel_ptr)).*;
                     if (sentinel == end) {
                         return indexOfSentinel(ptr_info.child, sentinel, ptr);
                     }
@@ -893,7 +893,7 @@ test "lenSliceTo" {
         try testing.expectEqual(@as(usize, 2), lenSliceTo(&array, 3));
         try testing.expectEqual(@as(usize, 2), lenSliceTo(array[0..3], 3));
 
-        const sentinel_ptr = @ptrCast([*:5]u16, &array);
+        const sentinel_ptr = @as([*:5]u16, @ptrCast(&array));
         try testing.expectEqual(@as(usize, 2), lenSliceTo(sentinel_ptr, 3));
         try testing.expectEqual(@as(usize, 4), lenSliceTo(sentinel_ptr, 99));
 
@@ -925,7 +925,7 @@ pub fn len(value: anytype) usize {
             .Many => {
                 const sentinel_ptr = info.sentinel orelse
                     @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value)));
-                const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*;
+                const sentinel = @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*;
                 return indexOfSentinel(info.child, sentinel, value);
             },
             .C => {
@@ -1331,7 +1331,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian)
         .Little => {
             const ShiftType = math.Log2Int(ReturnType);
             for (bytes, 0..) |b, index| {
-                result = result | (@as(ReturnType, b) << @intCast(ShiftType, index * 8));
+                result = result | (@as(ReturnType, b) << @as(ShiftType, @intCast(index * 8)));
             }
         },
     }
@@ -1359,8 +1359,8 @@ pub fn readVarPackedInt(
     const Log2N = std.math.Log2Int(T);
 
     const read_size = (bit_count + (bit_offset % 8) + 7) / 8;
-    const bit_shift = @intCast(u3, bit_offset % 8);
-    const pad = @intCast(Log2N, @bitSizeOf(T) - bit_count);
+    const bit_shift = @as(u3, @intCast(bit_offset % 8));
+    const pad = @as(Log2N, @intCast(@bitSizeOf(T) - bit_count));
 
     const lowest_byte = switch (endian) {
         .Big => bytes.len - (bit_offset / 8) - read_size,
@@ -1372,17 +1372,17 @@ pub fn readVarPackedInt(
        // These are the same shifts/masks we perform below, but add `@truncate`/`@intCast`
         // where needed since int is smaller than a byte.
         const value = if (read_size == 1) b: {
-            break :b @truncate(uN, read_bytes[0] >> bit_shift);
+            break :b @as(uN, @truncate(read_bytes[0] >> bit_shift));
         } else b: {
             const i: u1 = @intFromBool(endian == .Big);
-            const head = @truncate(uN, read_bytes[i] >> bit_shift);
-            const tail_shift = @intCast(Log2N, @as(u4, 8) - bit_shift);
-            const tail = @truncate(uN, read_bytes[1 - i]);
+            const head = @as(uN, @truncate(read_bytes[i] >> bit_shift));
+            const tail_shift = @as(Log2N, @intCast(@as(u4, 8) - bit_shift));
+            const tail = @as(uN, @truncate(read_bytes[1 - i]));
             break :b (tail << tail_shift) | head;
         };
         switch (signedness) {
-            .signed => return @intCast(T, (@bitCast(iN, value) << pad) >> pad),
-            .unsigned => return @intCast(T, (@bitCast(uN, value) << pad) >> pad),
+            .signed => return @as(T, @intCast((@as(iN, @bitCast(value)) << pad) >> pad)),
+            .unsigned => return @as(T, @intCast((@as(uN, @bitCast(value)) << pad) >> pad)),
         }
     }
 
@@ -1398,13 +1398,13 @@ pub fn readVarPackedInt(
         .Little => {
             int = read_bytes[0] >> bit_shift;
             for (read_bytes[1..], 0..) |elem, i| {
-                int |= (@as(uN, elem) << @intCast(Log2N, (8 * (i + 1) - bit_shift)));
+                int |= (@as(uN, elem) << @as(Log2N, @intCast((8 * (i + 1) - bit_shift))));
             }
         },
     }
     switch (signedness) {
-        .signed => return @intCast(T, (@bitCast(iN, int) << pad) >> pad),
-        .unsigned => return @intCast(T, (@bitCast(uN, int) << pad) >> pad),
+        .signed => return @as(T, @intCast((@as(iN, @bitCast(int)) << pad) >> pad)),
+        .unsigned => return @as(T, @intCast((@as(uN, @bitCast(int)) << pad) >> pad)),
     }
 }
 
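
The `(value << pad) >> pad` pair in both return paths above is the standard sign/zero-extension
idiom: shifting the N-bit field up to the top of the integer and arithmetically shifting it back
replicates the field's top bit. Worked example for a 5-bit signed field stored in 8 bits
(hypothetical values):

    const std = @import("std");

    test "shift pair sign-extends a narrow field" {
        const raw: u8 = 0b10110; // 5-bit two's-complement pattern for -10
        const pad: u3 = 3; // @bitSizeOf(u8) - 5
        const shifted: i8 = @bitCast(raw << pad); // 0b1011_0000, top bit is the field's sign
        try std.testing.expectEqual(@as(i8, -10), shifted >> pad); // arithmetic shift back
    }
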
@@ -1414,7 +1414,7 @@ pub fn readVarPackedInt(
 /// Assumes the endianness of memory is native. This means the function can
 /// simply pointer cast memory.
 pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
-    return @ptrCast(*align(1) const T, bytes).*;
+    return @as(*align(1) const T, @ptrCast(bytes)).*;
 }
 
 /// Reads an integer from memory with bit count specified by T.
@@ -1480,10 +1480,10 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T
     const Log2N = std.math.Log2Int(T);
 
     const bit_count = @as(usize, @bitSizeOf(T));
-    const bit_shift = @intCast(u3, bit_offset % 8);
+    const bit_shift = @as(u3, @intCast(bit_offset % 8));
 
     const load_size = (bit_count + 7) / 8;
-    const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count);
+    const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count));
     const LoadInt = std.meta.Int(.unsigned, load_size * 8);
 
     if (bit_count == 0)
@@ -1492,13 +1492,13 @@ fn readPackedIntLittle(comptime T: type, bytes: []const u8, bit_offset: usize) T
     // Read by loading a LoadInt, and then follow it up with a 1-byte read
     // of the tail if bit_offset pushed us over a byte boundary.
     const read_bytes = bytes[bit_offset / 8 ..];
-    const val = @truncate(uN, readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift);
+    const val = @as(uN, @truncate(readIntLittle(LoadInt, read_bytes[0..load_size]) >> bit_shift));
     if (bit_shift > load_tail_bits) {
-        const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits);
+        const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits));
         const tail_byte = read_bytes[load_size];
-        const tail_truncated = if (bit_count < 8) @truncate(uN, tail_byte) else @as(uN, tail_byte);
-        return @bitCast(T, val | (tail_truncated << (@truncate(Log2N, bit_count) -% tail_bits)));
-    } else return @bitCast(T, val);
+        const tail_truncated = if (bit_count < 8) @as(uN, @truncate(tail_byte)) else @as(uN, tail_byte);
+        return @as(T, @bitCast(val | (tail_truncated << (@as(Log2N, @truncate(bit_count)) -% tail_bits))));
+    } else return @as(T, @bitCast(val));
 }
 
 fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
@@ -1506,11 +1506,11 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
     const Log2N = std.math.Log2Int(T);
 
     const bit_count = @as(usize, @bitSizeOf(T));
-    const bit_shift = @intCast(u3, bit_offset % 8);
+    const bit_shift = @as(u3, @intCast(bit_offset % 8));
     const byte_count = (@as(usize, bit_shift) + bit_count + 7) / 8;
 
     const load_size = (bit_count + 7) / 8;
-    const load_tail_bits = @intCast(u3, (load_size * 8) - bit_count);
+    const load_tail_bits = @as(u3, @intCast((load_size * 8) - bit_count));
     const LoadInt = std.meta.Int(.unsigned, load_size * 8);
 
     if (bit_count == 0)
@@ -1520,12 +1520,12 @@ fn readPackedIntBig(comptime T: type, bytes: []const u8, bit_offset: usize) T {
     // of the tail if bit_offset pushed us over a byte boundary.
     const end = bytes.len - (bit_offset / 8);
     const read_bytes = bytes[(end - byte_count)..end];
-    const val = @truncate(uN, readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift);
+    const val = @as(uN, @truncate(readIntBig(LoadInt, bytes[(end - load_size)..end][0..load_size]) >> bit_shift));
     if (bit_shift > load_tail_bits) {
-        const tail_bits = @intCast(Log2N, bit_shift - load_tail_bits);
-        const tail_byte = if (bit_count < 8) @truncate(uN, read_bytes[0]) else @as(uN, read_bytes[0]);
-        return @bitCast(T, val | (tail_byte << (@truncate(Log2N, bit_count) -% tail_bits)));
-    } else return @bitCast(T, val);
+        const tail_bits = @as(Log2N, @intCast(bit_shift - load_tail_bits));
+        const tail_byte = if (bit_count < 8) @as(uN, @truncate(read_bytes[0])) else @as(uN, read_bytes[0]);
+        return @as(T, @bitCast(val | (tail_byte << (@as(Log2N, @truncate(bit_count)) -% tail_bits))));
+    } else return @as(T, @bitCast(val));
 }
 
 pub const readPackedIntNative = switch (native_endian) {
@@ -1605,7 +1605,7 @@ test "readIntBig and readIntLittle" {
 /// This function stores in native endian, which means it is implemented as a simple
 /// memory store.
 pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void {
-    @ptrCast(*align(1) T, buf).* = value;
+    @as(*align(1) T, @ptrCast(buf)).* = value;
 }
 
 /// Writes an integer to memory, storing it in twos-complement.
@@ -1642,10 +1642,10 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value:
     const Log2N = std.math.Log2Int(T);
 
     const bit_count = @as(usize, @bitSizeOf(T));
-    const bit_shift = @intCast(u3, bit_offset % 8);
+    const bit_shift = @as(u3, @intCast(bit_offset % 8));
 
     const store_size = (@bitSizeOf(T) + 7) / 8;
-    const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count);
+    const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count));
     const StoreInt = std.meta.Int(.unsigned, store_size * 8);
 
     if (bit_count == 0)
@@ -1656,11 +1656,11 @@ fn writePackedIntLittle(comptime T: type, bytes: []u8, bit_offset: usize, value:
     const write_bytes = bytes[bit_offset / 8 ..];
     const head = write_bytes[0] & ((@as(u8, 1) << bit_shift) - 1);
 
-    var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head);
+    var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head));
     if (bit_shift > store_tail_bits) {
-        const tail_len = @intCast(Log2N, bit_shift - store_tail_bits);
-        write_bytes[store_size] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1);
-        write_bytes[store_size] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len)));
+        const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits));
+        write_bytes[store_size] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1);
+        write_bytes[store_size] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len))));
     } else if (bit_shift < store_tail_bits) {
         const tail_len = store_tail_bits - bit_shift;
         const tail = write_bytes[store_size - 1] & (@as(u8, 0xfe) << (7 - tail_len));
@@ -1675,11 +1675,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T)
     const Log2N = std.math.Log2Int(T);
 
     const bit_count = @as(usize, @bitSizeOf(T));
-    const bit_shift = @intCast(u3, bit_offset % 8);
+    const bit_shift = @as(u3, @intCast(bit_offset % 8));
     const byte_count = (bit_shift + bit_count + 7) / 8;
 
     const store_size = (@bitSizeOf(T) + 7) / 8;
-    const store_tail_bits = @intCast(u3, (store_size * 8) - bit_count);
+    const store_tail_bits = @as(u3, @intCast((store_size * 8) - bit_count));
     const StoreInt = std.meta.Int(.unsigned, store_size * 8);
 
     if (bit_count == 0)
@@ -1691,11 +1691,11 @@ fn writePackedIntBig(comptime T: type, bytes: []u8, bit_offset: usize, value: T)
     const write_bytes = bytes[(end - byte_count)..end];
     const head = write_bytes[byte_count - 1] & ((@as(u8, 1) << bit_shift) - 1);
 
-    var write_value = (@as(StoreInt, @bitCast(uN, value)) << bit_shift) | @intCast(StoreInt, head);
+    var write_value = (@as(StoreInt, @as(uN, @bitCast(value))) << bit_shift) | @as(StoreInt, @intCast(head));
     if (bit_shift > store_tail_bits) {
-        const tail_len = @intCast(Log2N, bit_shift - store_tail_bits);
-        write_bytes[0] &= ~((@as(u8, 1) << @intCast(u3, tail_len)) - 1);
-        write_bytes[0] |= @intCast(u8, (@bitCast(uN, value) >> (@truncate(Log2N, bit_count) -% tail_len)));
+        const tail_len = @as(Log2N, @intCast(bit_shift - store_tail_bits));
+        write_bytes[0] &= ~((@as(u8, 1) << @as(u3, @intCast(tail_len))) - 1);
+        write_bytes[0] |= @as(u8, @intCast((@as(uN, @bitCast(value)) >> (@as(Log2N, @truncate(bit_count)) -% tail_len))));
     } else if (bit_shift < store_tail_bits) {
         const tail_len = store_tail_bits - bit_shift;
         const tail = write_bytes[0] & (@as(u8, 0xfe) << (7 - tail_len));
@@ -1744,14 +1744,14 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
         return @memset(buffer, 0);
     } else if (@typeInfo(T).Int.bits == 8) {
         @memset(buffer, 0);
-        buffer[0] = @bitCast(u8, value);
+        buffer[0] = @as(u8, @bitCast(value));
         return;
     }
     // TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
     const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
-    var bits = @bitCast(uint, value);
+    var bits = @as(uint, @bitCast(value));
     for (buffer) |*b| {
-        b.* = @truncate(u8, bits);
+        b.* = @as(u8, @truncate(bits));
         bits >>= 8;
     }
 }
@@ -1768,17 +1768,17 @@ pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
         return @memset(buffer, 0);
     } else if (@typeInfo(T).Int.bits == 8) {
         @memset(buffer, 0);
-        buffer[buffer.len - 1] = @bitCast(u8, value);
+        buffer[buffer.len - 1] = @as(u8, @bitCast(value));
         return;
     }
 
     // TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
     const uint = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
-    var bits = @bitCast(uint, value);
+    var bits = @as(uint, @bitCast(value));
     var index: usize = buffer.len;
     while (index != 0) {
         index -= 1;
-        buffer[index] = @truncate(u8, bits);
+        buffer[index] = @as(u8, @truncate(bits));
         bits >>= 8;
     }
 }
@@ -1822,7 +1822,7 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
     const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
     const Log2N = std.math.Log2Int(T);
 
-    const bit_shift = @intCast(u3, bit_offset % 8);
+    const bit_shift = @as(u3, @intCast(bit_offset % 8));
     const write_size = (bit_count + bit_shift + 7) / 8;
     const lowest_byte = switch (endian) {
         .Big => bytes.len - (bit_offset / 8) - write_size,
@@ -1833,8 +1833,8 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
     if (write_size == 1) {
         // Single byte writes are handled specially, since we need to mask bits
         // on both ends of the byte.
-        const mask = (@as(u8, 0xff) >> @intCast(u3, 8 - bit_count));
-        const new_bits = @intCast(u8, @bitCast(uN, value) & mask) << bit_shift;
+        const mask = (@as(u8, 0xff) >> @as(u3, @intCast(8 - bit_count)));
+        const new_bits = @as(u8, @intCast(@as(uN, @bitCast(value)) & mask)) << bit_shift;
         write_bytes[0] = (write_bytes[0] & ~(mask << bit_shift)) | new_bits;
         return;
     }
@@ -1843,31 +1843,31 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
 
     // Iterate bytes forward for Little-endian, backward for Big-endian
     const delta: i2 = if (endian == .Big) -1 else 1;
-    const start = if (endian == .Big) @intCast(isize, write_bytes.len - 1) else 0;
+    const start = if (endian == .Big) @as(isize, @intCast(write_bytes.len - 1)) else 0;
 
     var i: isize = start; // isize for signed index arithmetic
 
     // Write first byte, using a mask to protect bits preceding bit_offset
     const head_mask = @as(u8, 0xff) >> bit_shift;
-    write_bytes[@intCast(usize, i)] &= ~(head_mask << bit_shift);
-    write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & head_mask) << bit_shift;
-    remaining >>= @intCast(Log2N, @as(u4, 8) - bit_shift);
+    write_bytes[@as(usize, @intCast(i))] &= ~(head_mask << bit_shift);
+    write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift;
+    remaining >>= @as(Log2N, @intCast(@as(u4, 8) - bit_shift));
     i += delta;
 
     // Write bytes[1..bytes.len - 1]
     if (@bitSizeOf(T) > 8) {
-        const loop_end = start + delta * (@intCast(isize, write_size) - 1);
+        const loop_end = start + delta * (@as(isize, @intCast(write_size)) - 1);
         while (i != loop_end) : (i += delta) {
-            write_bytes[@intCast(usize, i)] = @truncate(u8, @bitCast(uN, remaining));
+            write_bytes[@as(usize, @intCast(i))] = @as(u8, @truncate(@as(uN, @bitCast(remaining))));
             remaining >>= 8;
         }
     }
 
     // Write last byte, using a mask to protect bits following bit_offset + bit_count
-    const following_bits = -%@truncate(u3, bit_shift + bit_count);
+    const following_bits = -%@as(u3, @truncate(bit_shift + bit_count));
     const tail_mask = (@as(u8, 0xff) << following_bits) >> following_bits;
-    write_bytes[@intCast(usize, i)] &= ~tail_mask;
-    write_bytes[@intCast(usize, i)] |= @intCast(u8, @bitCast(uN, remaining) & tail_mask);
+    write_bytes[@as(usize, @intCast(i))] &= ~tail_mask;
+    write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & tail_mask));
 }
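
A worked check of the masking above, under hypothetical values not taken from
this commit: with bit_shift = 3 and bit_count = 12, write_size is 2, the head
byte carries 5 bits, the tail byte carries 7, and the masks come out as follows.

    const std = @import("std");

    test "writeVarPackedInt mask arithmetic, worked example" {
        var bit_shift: u3 = 3;
        var bit_count: usize = 12;
        // Head mask: protects the `bit_shift` bits preceding bit_offset.
        const head_mask = @as(u8, 0xff) >> bit_shift;
        try std.testing.expect(head_mask == 0b0001_1111);
        // Tail: -% wraps (3 + 12) % 8 == 7 into one protected top bit.
        const following_bits = -%@as(u3, @truncate(bit_shift + bit_count));
        const tail_mask = (@as(u8, 0xff) << following_bits) >> following_bits;
        try std.testing.expect(following_bits == 1 and tail_mask == 0b0111_1111);
    }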
 
 test "writeIntBig and writeIntLittle" {
@@ -3799,15 +3799,14 @@ pub fn alignPointerOffset(ptr: anytype, align_to: usize) ?usize {
 ///   type.
 pub fn alignPointer(ptr: anytype, align_to: usize) ?@TypeOf(ptr) {
     const adjust_off = alignPointerOffset(ptr, align_to) orelse return null;
-    const T = @TypeOf(ptr);
     // Avoid the use of ptrFromInt to avoid losing the pointer provenance info.
-    return @alignCast(@typeInfo(T).Pointer.alignment, ptr + adjust_off);
+    return @alignCast(ptr + adjust_off);
 }
 
 test "alignPointer" {
     const S = struct {
         fn checkAlign(comptime T: type, base: usize, align_to: usize, expected: usize) !void {
-            var ptr = @ptrFromInt(T, base);
+            var ptr = @as(T, @ptrFromInt(base));
             var aligned = alignPointer(ptr, align_to);
             try testing.expectEqual(expected, @intFromPtr(aligned));
         }
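
Unlike the @as-wrapped rewrites elsewhere in this diff, `@alignCast` no longer
takes an alignment argument at all: the result type supplies it, which is why
the `T` lookup above could be deleted. A minimal sketch of the new form
(hypothetical types, not from this commit):

    const std = @import("std");

    test "alignCast takes its alignment from the result type" {
        var x: u32 = 0xDEADBEEF;
        const unaligned: *align(1) u32 = &x; // deliberately forget the alignment
        // Old form: @alignCast(4, unaligned). New form infers 4 from *u32.
        const p: *u32 = @alignCast(unaligned);
        try std.testing.expect(p.* == 0xDEADBEEF);
    }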
@@ -3854,9 +3853,7 @@ fn AsBytesReturnType(comptime P: type) type {
 
 /// Given a pointer to a single item, returns a slice of the underlying bytes, preserving pointer attributes.
 pub fn asBytes(ptr: anytype) AsBytesReturnType(@TypeOf(ptr)) {
-    const P = @TypeOf(ptr);
-    const T = AsBytesReturnType(P);
-    return @ptrCast(T, @alignCast(meta.alignment(T), ptr));
+    return @ptrCast(@alignCast(ptr));
 }
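
With the new syntax the two chained builtins share a single inferred result
type, which is why both intermediate type computations above could be dropped.
A small sketch of the pattern on a concrete type (hypothetical values):

    const std = @import("std");

    test "chained ptrCast/alignCast with one inferred result type" {
        const arr align(4) = [4]u8{ 1, 2, 3, 4 };
        // The annotation on `word` drives both casts; no type arguments needed.
        const word: *align(4) const u32 = @ptrCast(@alignCast(&arr));
        try std.testing.expect(word.* == std.mem.readIntNative(u32, &arr));
    }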
 
 test "asBytes" {
@@ -3902,7 +3899,7 @@ test "asBytes" {
 
 test "asBytes preserves pointer attributes" {
     const inArr: u32 align(16) = 0xDEADBEEF;
-    const inPtr = @ptrCast(*align(16) const volatile u32, &inArr);
+    const inPtr = @as(*align(16) const volatile u32, @ptrCast(&inArr));
     const outSlice = asBytes(inPtr);
 
     const in = @typeInfo(@TypeOf(inPtr)).Pointer;
@@ -3948,7 +3945,7 @@ fn BytesAsValueReturnType(comptime T: type, comptime B: type) type {
 /// Given a pointer to an array of bytes, returns a pointer to a value of the specified type
 /// backed by those bytes, preserving pointer attributes.
 pub fn bytesAsValue(comptime T: type, bytes: anytype) BytesAsValueReturnType(T, @TypeOf(bytes)) {
-    return @ptrCast(BytesAsValueReturnType(T, @TypeOf(bytes)), bytes);
+    return @as(BytesAsValueReturnType(T, @TypeOf(bytes)), @ptrCast(bytes));
 }
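
A brief usage sketch of the API described above (hypothetical values; the
returned pointer writes through to the original bytes):

    const std = @import("std");

    test "bytesAsValue gives a writable view over the bytes" {
        var bytes align(@alignOf(u32)) = [_]u8{0} ** 4;
        const value = std.mem.bytesAsValue(u32, &bytes);
        value.* = 0xDEADBEEF; // stores through to `bytes`
        try std.testing.expect(std.mem.readIntNative(u32, &bytes) == 0xDEADBEEF);
    }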
 
 test "bytesAsValue" {
@@ -3993,7 +3990,7 @@ test "bytesAsValue" {
 
 test "bytesAsValue preserves pointer attributes" {
     const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF };
-    const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..];
+    const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..];
     const outPtr = bytesAsValue(u32, inSlice);
 
     const in = @typeInfo(@TypeOf(inSlice)).Pointer;
@@ -4043,7 +4040,7 @@ pub fn bytesAsSlice(comptime T: type, bytes: anytype) BytesAsSliceReturnType(T,
 
     const cast_target = CopyPtrAttrs(@TypeOf(bytes), .Many, T);
 
-    return @ptrCast(cast_target, bytes)[0..@divExact(bytes.len, @sizeOf(T))];
+    return @as(cast_target, @ptrCast(bytes))[0..@divExact(bytes.len, @sizeOf(T))];
 }
 
 test "bytesAsSlice" {
@@ -4101,7 +4098,7 @@ test "bytesAsSlice with specified alignment" {
 
 test "bytesAsSlice preserves pointer attributes" {
     const inArr align(16) = [4]u8{ 0xDE, 0xAD, 0xBE, 0xEF };
-    const inSlice = @ptrCast(*align(16) const volatile [4]u8, &inArr)[0..];
+    const inSlice = @as(*align(16) const volatile [4]u8, @ptrCast(&inArr))[0..];
     const outSlice = bytesAsSlice(u16, inSlice);
 
     const in = @typeInfo(@TypeOf(inSlice)).Pointer;
@@ -4133,7 +4130,7 @@ pub fn sliceAsBytes(slice: anytype) SliceAsBytesReturnType(@TypeOf(slice)) {
 
     const cast_target = CopyPtrAttrs(Slice, .Many, u8);
 
-    return @ptrCast(cast_target, slice)[0 .. slice.len * @sizeOf(meta.Elem(Slice))];
+    return @as(cast_target, @ptrCast(slice))[0 .. slice.len * @sizeOf(meta.Elem(Slice))];
 }
 
 test "sliceAsBytes" {
@@ -4197,7 +4194,7 @@ test "sliceAsBytes and bytesAsSlice back" {
 
 test "sliceAsBytes preserves pointer attributes" {
     const inArr align(16) = [2]u16{ 0xDEAD, 0xBEEF };
-    const inSlice = @ptrCast(*align(16) const volatile [2]u16, &inArr)[0..];
+    const inSlice = @as(*align(16) const volatile [2]u16, @ptrCast(&inArr))[0..];
     const outSlice = sliceAsBytes(inSlice);
 
     const in = @typeInfo(@TypeOf(inSlice)).Pointer;
@@ -4218,7 +4215,7 @@ pub fn alignForward(comptime T: type, addr: T, alignment: T) T {
 }
 
 pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize {
-    const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment);
+    const alignment = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(log2_alignment));
     return alignForward(usize, addr, alignment);
 }
 
@@ -4282,7 +4279,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
 /// .stage2_c doesn't support asm blocks yet, so use volatile stores instead
 var deopt_target: if (builtin.zig_backend == .stage2_c) u8 else void = undefined;
 fn doNotOptimizeAwayC(ptr: anytype) void {
-    const dest = @ptrCast(*volatile u8, &deopt_target);
+    const dest = @as(*volatile u8, @ptrCast(&deopt_target));
     for (asBytes(ptr)) |b| {
         dest.* = b;
     }
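
The volatile store is the whole trick: it counts as an observable side effect,
so the computation feeding it cannot be deleted. A standalone sketch of the
same barrier idea (hypothetical helper, not the std implementation):

    var sink: u8 = undefined;

    fn keepAlive(byte: u8) void {
        // The volatile qualifier forbids eliding or reordering this store,
        // so whatever computed `byte` cannot be optimized away either.
        const dest: *volatile u8 = @ptrCast(&sink);
        dest.* = byte;
    }

    test "volatile store as an optimization barrier" {
        keepAlive(42);
    }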
@@ -4433,7 +4430,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali
         error.Overflow => return null,
     };
     const alignment_offset = begin_address_aligned - begin_address;
-    return @alignCast(new_alignment, bytes[alignment_offset .. alignment_offset + new_length]);
+    return @alignCast(bytes[alignment_offset .. alignment_offset + new_length]);
 }
 
 /// Returns the largest sub-slice within the given slice that conforms to the new alignment,
@@ -4445,7 +4442,7 @@ pub fn alignInSlice(slice: anytype, comptime new_alignment: usize) ?AlignedSlice
     const Element = @TypeOf(slice[0]);
     const slice_length_bytes = aligned_bytes.len - (aligned_bytes.len % @sizeOf(Element));
     const aligned_slice = bytesAsSlice(Element, aligned_bytes[0..slice_length_bytes]);
-    return @alignCast(new_alignment, aligned_slice);
+    return @alignCast(aligned_slice);
 }
 
 test "read/write(Var)PackedInt" {
@@ -4490,8 +4487,8 @@ test "read/write(Var)PackedInt" {
                     for ([_]PackedType{
                         ~@as(PackedType, 0), // all ones: -1 iN / maxInt uN
                         @as(PackedType, 0), // all zeros: 0 iN / 0 uN
-                        @bitCast(PackedType, @as(iPackedType, math.maxInt(iPackedType))), // maxInt iN
-                        @bitCast(PackedType, @as(iPackedType, math.minInt(iPackedType))), // maxInt iN
+                        @as(PackedType, @bitCast(@as(iPackedType, math.maxInt(iPackedType)))), // maxInt iN
+                        @as(PackedType, @bitCast(@as(iPackedType, math.minInt(iPackedType)))), // minInt iN
                         random.int(PackedType), // random
                         random.int(PackedType), // random
                     }) |write_value| {
@@ -4502,11 +4499,11 @@ test "read/write(Var)PackedInt" {
 
                             // Read
                             const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, native_endian);
-                            try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+                            try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
 
                             // Write
                             writePackedInt(PackedType, asBytes(&value), offset, write_value, native_endian);
-                            try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+                            try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
 
                             // Read again
                             const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, native_endian);
@@ -4515,9 +4512,9 @@ test "read/write(Var)PackedInt" {
                             // Verify bits outside of the target integer are unmodified
                             const diff_bits = init_value ^ value;
                             if (offset != offset_at_end)
-                                try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+                                try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
                             if (offset != 0)
-                                try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+                                try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
                         }
 
                         { // Fixed-size Read/Write (Foreign-endian)
@@ -4527,11 +4524,11 @@ test "read/write(Var)PackedInt" {
 
                             // Read
                             const read_value1 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian);
-                            try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+                            try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
 
                             // Write
                             writePackedInt(PackedType, asBytes(&value), offset, write_value, foreign_endian);
-                            try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+                            try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
 
                             // Read again
                             const read_value2 = readPackedInt(PackedType, asBytes(&value), offset, foreign_endian);
@@ -4540,9 +4537,9 @@ test "read/write(Var)PackedInt" {
                             // Verify bits outside of the target integer are unmodified
                             const diff_bits = init_value ^ @byteSwap(value);
                             if (offset != offset_at_end)
-                                try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+                                try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
                             if (offset != 0)
-                                try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+                                try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
                         }
 
                         const signedness = @typeInfo(PackedType).Int.signedness;
@@ -4559,11 +4556,11 @@ test "read/write(Var)PackedInt" {
 
                                 // Read
                                 const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness);
-                                try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+                                try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
 
                                 // Write
                                 writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), native_endian);
-                                try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, value >> @intCast(Log2T, offset))));
+                                try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(value >> @as(Log2T, @intCast(offset)))))));
 
                                 // Read again
                                 const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), native_endian, signedness);
@@ -4572,9 +4569,9 @@ test "read/write(Var)PackedInt" {
                                 // Verify bits outside of the target integer are unmodified
                                 const diff_bits = init_value ^ value;
                                 if (offset != offset_at_end)
-                                    try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+                                    try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
                                 if (offset != 0)
-                                    try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+                                    try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
                             }
 
                             { // Variable-size Read/Write (Foreign-endian)
@@ -4587,11 +4584,11 @@ test "read/write(Var)PackedInt" {
 
                                 // Read
                                 const read_value1 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness);
-                                try expect(read_value1 == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+                                try expect(read_value1 == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
 
                                 // Write
                                 writeVarPackedInt(asBytes(&value), offset, @bitSizeOf(PackedType), @as(U, write_value), foreign_endian);
-                                try expect(write_value == @bitCast(PackedType, @truncate(uPackedType, @byteSwap(value) >> @intCast(Log2T, offset))));
+                                try expect(write_value == @as(PackedType, @bitCast(@as(uPackedType, @truncate(@byteSwap(value) >> @as(Log2T, @intCast(offset)))))));
 
                                 // Read again
                                 const read_value2 = readVarPackedInt(U, asBytes(&value), offset, @bitSizeOf(PackedType), foreign_endian, signedness);
@@ -4600,9 +4597,9 @@ test "read/write(Var)PackedInt" {
                                 // Verify bits outside of the target integer are unmodified
                                 const diff_bits = init_value ^ @byteSwap(value);
                                 if (offset != offset_at_end)
-                                    try expect(diff_bits >> @intCast(Log2T, offset + @bitSizeOf(PackedType)) == 0);
+                                    try expect(diff_bits >> @as(Log2T, @intCast(offset + @bitSizeOf(PackedType))) == 0);
                                 if (offset != 0)
-                                    try expect(diff_bits << @intCast(Log2T, @bitSizeOf(BackingType) - offset) == 0);
+                                    try expect(diff_bits << @as(Log2T, @intCast(@bitSizeOf(BackingType) - offset)) == 0);
                             }
                         }
                     }
lib/std/meta.zig
@@ -185,18 +185,18 @@ pub fn sentinel(comptime T: type) ?Elem(T) {
     switch (@typeInfo(T)) {
         .Array => |info| {
             const sentinel_ptr = info.sentinel orelse return null;
-            return @ptrCast(*const info.child, sentinel_ptr).*;
+            return @as(*const info.child, @ptrCast(sentinel_ptr)).*;
         },
         .Pointer => |info| {
             switch (info.size) {
                 .Many, .Slice => {
                     const sentinel_ptr = info.sentinel orelse return null;
-                    return @ptrCast(*align(1) const info.child, sentinel_ptr).*;
+                    return @as(*align(1) const info.child, @ptrCast(sentinel_ptr)).*;
                 },
                 .One => switch (@typeInfo(info.child)) {
                     .Array => |array_info| {
                         const sentinel_ptr = array_info.sentinel orelse return null;
-                        return @ptrCast(*align(1) const array_info.child, sentinel_ptr).*;
+                        return @as(*align(1) const array_info.child, @ptrCast(sentinel_ptr)).*;
                     },
                     else => {},
                 },
@@ -241,7 +241,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
                             .Array = .{
                                 .len = array_info.len,
                                 .child = array_info.child,
-                                .sentinel = @ptrCast(?*const anyopaque, &sentinel_val),
+                                .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
                             },
                         }),
                         .is_allowzero = info.is_allowzero,
@@ -259,7 +259,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
                     .address_space = info.address_space,
                     .child = info.child,
                     .is_allowzero = info.is_allowzero,
-                    .sentinel = @ptrCast(?*const anyopaque, &sentinel_val),
+                    .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
                 },
             }),
             else => {},
@@ -277,7 +277,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
                                 .address_space = ptr_info.address_space,
                                 .child = ptr_info.child,
                                 .is_allowzero = ptr_info.is_allowzero,
-                                .sentinel = @ptrCast(?*const anyopaque, &sentinel_val),
+                                .sentinel = @as(?*const anyopaque, @ptrCast(&sentinel_val)),
                             },
                         }),
                     },
@@ -929,8 +929,8 @@ test "intToEnum with error return" {
     try testing.expect(intToEnum(E1, zero) catch unreachable == E1.A);
     try testing.expect(intToEnum(E2, one) catch unreachable == E2.B);
     try testing.expect(intToEnum(E3, zero) catch unreachable == E3.A);
-    try testing.expect(intToEnum(E3, 127) catch unreachable == @enumFromInt(E3, 127));
-    try testing.expect(intToEnum(E3, -128) catch unreachable == @enumFromInt(E3, -128));
+    try testing.expect(intToEnum(E3, 127) catch unreachable == @as(E3, @enumFromInt(127)));
+    try testing.expect(intToEnum(E3, -128) catch unreachable == @as(E3, @enumFromInt(-128)));
     try testing.expectError(error.InvalidEnumTag, intToEnum(E1, one));
     try testing.expectError(error.InvalidEnumTag, intToEnum(E3, 128));
     try testing.expectError(error.InvalidEnumTag, intToEnum(E3, -129));
@@ -943,7 +943,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa
 
     if (!enum_info.is_exhaustive) {
         if (std.math.cast(enum_info.tag_type, tag_int)) |tag| {
-            return @enumFromInt(EnumTag, tag);
+            return @as(EnumTag, @enumFromInt(tag));
         }
         return error.InvalidEnumTag;
     }
lib/std/multi_array_list.zig
@@ -78,7 +78,7 @@ pub fn MultiArrayList(comptime T: type) type {
                 const casted_ptr: [*]F = if (@sizeOf(F) == 0)
                     undefined
                 else
-                    @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr));
+                    @ptrCast(@alignCast(byte_ptr));
                 return casted_ptr[0..self.len];
             }
 
@@ -89,14 +89,14 @@ pub fn MultiArrayList(comptime T: type) type {
                     else => unreachable,
                 };
                 inline for (fields, 0..) |field_info, i| {
-                    self.items(@enumFromInt(Field, i))[index] = @field(e, field_info.name);
+                    self.items(@as(Field, @enumFromInt(i)))[index] = @field(e, field_info.name);
                 }
             }
 
             pub fn get(self: Slice, index: usize) T {
                 var result: Elem = undefined;
                 inline for (fields, 0..) |field_info, i| {
-                    @field(result, field_info.name) = self.items(@enumFromInt(Field, i))[index];
+                    @field(result, field_info.name) = self.items(@as(Field, @enumFromInt(i)))[index];
                 }
                 return switch (@typeInfo(T)) {
                     .Struct => result,
@@ -110,10 +110,9 @@ pub fn MultiArrayList(comptime T: type) type {
                     return .{};
                 }
                 const unaligned_ptr = self.ptrs[sizes.fields[0]];
-                const aligned_ptr = @alignCast(@alignOf(Elem), unaligned_ptr);
-                const casted_ptr = @ptrCast([*]align(@alignOf(Elem)) u8, aligned_ptr);
+                const aligned_ptr: [*]align(@alignOf(Elem)) u8 = @alignCast(unaligned_ptr);
                 return .{
-                    .bytes = casted_ptr,
+                    .bytes = aligned_ptr,
                     .len = self.len,
                     .capacity = self.capacity,
                 };
@@ -294,7 +293,7 @@ pub fn MultiArrayList(comptime T: type) type {
             };
             const slices = self.slice();
             inline for (fields, 0..) |field_info, field_index| {
-                const field_slice = slices.items(@enumFromInt(Field, field_index));
+                const field_slice = slices.items(@as(Field, @enumFromInt(field_index)));
                 var i: usize = self.len - 1;
                 while (i > index) : (i -= 1) {
                     field_slice[i] = field_slice[i - 1];
@@ -309,7 +308,7 @@ pub fn MultiArrayList(comptime T: type) type {
         pub fn swapRemove(self: *Self, index: usize) void {
             const slices = self.slice();
             inline for (fields, 0..) |_, i| {
-                const field_slice = slices.items(@enumFromInt(Field, i));
+                const field_slice = slices.items(@as(Field, @enumFromInt(i)));
                 field_slice[index] = field_slice[self.len - 1];
                 field_slice[self.len - 1] = undefined;
             }
@@ -321,7 +320,7 @@ pub fn MultiArrayList(comptime T: type) type {
         pub fn orderedRemove(self: *Self, index: usize) void {
             const slices = self.slice();
             inline for (fields, 0..) |_, field_index| {
-                const field_slice = slices.items(@enumFromInt(Field, field_index));
+                const field_slice = slices.items(@as(Field, @enumFromInt(field_index)));
                 var i = index;
                 while (i < self.len - 1) : (i += 1) {
                     field_slice[i] = field_slice[i + 1];
@@ -358,7 +357,7 @@ pub fn MultiArrayList(comptime T: type) type {
                 const self_slice = self.slice();
                 inline for (fields, 0..) |field_info, i| {
                     if (@sizeOf(field_info.type) != 0) {
-                        const field = @enumFromInt(Field, i);
+                        const field = @as(Field, @enumFromInt(i));
                         const dest_slice = self_slice.items(field)[new_len..];
                         // We use memset here for more efficient codegen in safety-checked,
                         // valgrind-enabled builds. Otherwise the valgrind client request
@@ -379,7 +378,7 @@ pub fn MultiArrayList(comptime T: type) type {
             const other_slice = other.slice();
             inline for (fields, 0..) |field_info, i| {
                 if (@sizeOf(field_info.type) != 0) {
-                    const field = @enumFromInt(Field, i);
+                    const field = @as(Field, @enumFromInt(i));
                     @memcpy(other_slice.items(field), self_slice.items(field));
                 }
             }
@@ -440,7 +439,7 @@ pub fn MultiArrayList(comptime T: type) type {
             const other_slice = other.slice();
             inline for (fields, 0..) |field_info, i| {
                 if (@sizeOf(field_info.type) != 0) {
-                    const field = @enumFromInt(Field, i);
+                    const field = @as(Field, @enumFromInt(i));
                     @memcpy(other_slice.items(field), self_slice.items(field));
                 }
             }
@@ -459,7 +458,7 @@ pub fn MultiArrayList(comptime T: type) type {
             const result_slice = result.slice();
             inline for (fields, 0..) |field_info, i| {
                 if (@sizeOf(field_info.type) != 0) {
-                    const field = @enumFromInt(Field, i);
+                    const field = @as(Field, @enumFromInt(i));
                     @memcpy(result_slice.items(field), self_slice.items(field));
                 }
             }
@@ -476,7 +475,7 @@ pub fn MultiArrayList(comptime T: type) type {
                 pub fn swap(sc: @This(), a_index: usize, b_index: usize) void {
                     inline for (fields, 0..) |field_info, i| {
                         if (@sizeOf(field_info.type) != 0) {
-                            const field = @enumFromInt(Field, i);
+                            const field = @as(Field, @enumFromInt(i));
                             const ptr = sc.slice.items(field);
                             mem.swap(field_info.type, &ptr[a_index], &ptr[b_index]);
                         }
@@ -592,9 +591,9 @@ test "basic usage" {
     var i: usize = 0;
     while (i < 6) : (i += 1) {
         try list.append(ally, .{
-            .a = @intCast(u32, 4 + i),
+            .a = @as(u32, @intCast(4 + i)),
             .b = "whatever",
-            .c = @intCast(u8, 'd' + i),
+            .c = @as(u8, @intCast('d' + i)),
         });
     }
 
@@ -791,7 +790,7 @@ test "union" {
 
     // Add 6 more things to force a capacity increase.
     for (0..6) |i| {
-        try list.append(ally, .{ .a = @intCast(u32, 4 + i) });
+        try list.append(ally, .{ .a = @as(u32, @intCast(4 + i)) });
     }
 
     try testing.expectEqualSlices(
lib/std/net.zig
@@ -137,8 +137,8 @@ pub const Address = extern union {
     /// on the address family.
     pub fn initPosix(addr: *align(4) const os.sockaddr) Address {
         switch (addr.family) {
-            os.AF.INET => return Address{ .in = Ip4Address{ .sa = @ptrCast(*const os.sockaddr.in, addr).* } },
-            os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @ptrCast(*const os.sockaddr.in6, addr).* } },
+            os.AF.INET => return Address{ .in = Ip4Address{ .sa = @as(*const os.sockaddr.in, @ptrCast(addr)).* } },
+            os.AF.INET6 => return Address{ .in6 = Ip6Address{ .sa = @as(*const os.sockaddr.in6, @ptrCast(addr)).* } },
             else => unreachable,
         }
     }
@@ -165,8 +165,8 @@ pub const Address = extern union {
     }
 
     pub fn eql(a: Address, b: Address) bool {
-        const a_bytes = @ptrCast([*]const u8, &a.any)[0..a.getOsSockLen()];
-        const b_bytes = @ptrCast([*]const u8, &b.any)[0..b.getOsSockLen()];
+        const a_bytes = @as([*]const u8, @ptrCast(&a.any))[0..a.getOsSockLen()];
+        const b_bytes = @as([*]const u8, @ptrCast(&b.any))[0..b.getOsSockLen()];
         return mem.eql(u8, a_bytes, b_bytes);
     }
 
@@ -187,7 +187,7 @@ pub const Address = extern union {
                 // provide the full buffer size (e.g. getsockname, getpeername, recvfrom, accept).
                 //
                 // To access the path, std.mem.sliceTo(&address.un.path, 0) should be used.
-                return @intCast(os.socklen_t, @sizeOf(os.sockaddr.un));
+                return @as(os.socklen_t, @intCast(@sizeOf(os.sockaddr.un)));
             },
 
             else => unreachable,
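
For the `std.mem.sliceTo` advice in the comment above, a hedged usage sketch
(assumes a target where unix sockets are available):

    const std = @import("std");

    test "recover the path from a unix-socket Address" {
        const addr = try std.net.Address.initUnix("/tmp/example.sock");
        // Stop at the first 0 byte rather than trusting a stored length.
        const path = std.mem.sliceTo(&addr.un.path, 0);
        try std.testing.expectEqualStrings("/tmp/example.sock", path);
    }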
@@ -260,7 +260,7 @@ pub const Ip4Address = extern struct {
         return Ip4Address{
             .sa = os.sockaddr.in{
                 .port = mem.nativeToBig(u16, port),
-                .addr = @ptrCast(*align(1) const u32, &addr).*,
+                .addr = @as(*align(1) const u32, @ptrCast(&addr)).*,
             },
         };
     }
@@ -285,7 +285,7 @@ pub const Ip4Address = extern struct {
     ) !void {
         if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
         _ = options;
-        const bytes = @ptrCast(*const [4]u8, &self.sa.addr);
+        const bytes = @as(*const [4]u8, @ptrCast(&self.sa.addr));
         try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{
             bytes[0],
             bytes[1],
@@ -354,9 +354,9 @@ pub const Ip6Address = extern struct {
                 if (index == 14) {
                     return error.InvalidEnd;
                 }
-                ip_slice[index] = @truncate(u8, x >> 8);
+                ip_slice[index] = @as(u8, @truncate(x >> 8));
                 index += 1;
-                ip_slice[index] = @truncate(u8, x);
+                ip_slice[index] = @as(u8, @truncate(x));
                 index += 1;
 
                 x = 0;
@@ -408,13 +408,13 @@ pub const Ip6Address = extern struct {
         }
 
         if (index == 14) {
-            ip_slice[14] = @truncate(u8, x >> 8);
-            ip_slice[15] = @truncate(u8, x);
+            ip_slice[14] = @as(u8, @truncate(x >> 8));
+            ip_slice[15] = @as(u8, @truncate(x));
             return result;
         } else {
-            ip_slice[index] = @truncate(u8, x >> 8);
+            ip_slice[index] = @as(u8, @truncate(x >> 8));
             index += 1;
-            ip_slice[index] = @truncate(u8, x);
+            ip_slice[index] = @as(u8, @truncate(x));
             index += 1;
             @memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]);
             return result;
@@ -473,9 +473,9 @@ pub const Ip6Address = extern struct {
                 if (index == 14) {
                     return error.InvalidEnd;
                 }
-                ip_slice[index] = @truncate(u8, x >> 8);
+                ip_slice[index] = @as(u8, @truncate(x >> 8));
                 index += 1;
-                ip_slice[index] = @truncate(u8, x);
+                ip_slice[index] = @as(u8, @truncate(x));
                 index += 1;
 
                 x = 0;
@@ -542,13 +542,13 @@ pub const Ip6Address = extern struct {
         result.sa.scope_id = resolved_scope_id;
 
         if (index == 14) {
-            ip_slice[14] = @truncate(u8, x >> 8);
-            ip_slice[15] = @truncate(u8, x);
+            ip_slice[14] = @as(u8, @truncate(x >> 8));
+            ip_slice[15] = @as(u8, @truncate(x));
             return result;
         } else {
-            ip_slice[index] = @truncate(u8, x >> 8);
+            ip_slice[index] = @as(u8, @truncate(x >> 8));
             index += 1;
-            ip_slice[index] = @truncate(u8, x);
+            ip_slice[index] = @as(u8, @truncate(x));
             index += 1;
             @memcpy(result.sa.addr[16 - index ..][0..index], ip_slice[0..index]);
             return result;
@@ -597,7 +597,7 @@ pub const Ip6Address = extern struct {
             });
             return;
         }
-        const big_endian_parts = @ptrCast(*align(1) const [8]u16, &self.sa.addr);
+        const big_endian_parts = @as(*align(1) const [8]u16, @ptrCast(&self.sa.addr));
         const native_endian_parts = switch (native_endian) {
             .Big => big_endian_parts.*,
             .Little => blk: {
@@ -668,7 +668,7 @@ fn if_nametoindex(name: []const u8) !u32 {
         // TODO investigate if this needs to be integrated with evented I/O.
         try os.ioctl_SIOCGIFINDEX(sockfd, &ifr);
 
-        return @bitCast(u32, ifr.ifru.ivalue);
+        return @as(u32, @bitCast(ifr.ifru.ivalue));
     }
 
     if (comptime builtin.target.os.tag.isDarwin()) {
@@ -682,7 +682,7 @@ fn if_nametoindex(name: []const u8) !u32 {
         const index = os.system.if_nametoindex(if_slice);
         if (index == 0)
             return error.InterfaceNotFound;
-        return @bitCast(u32, index);
+        return @as(u32, @bitCast(index));
     }
 
     @compileError("std.net.if_nametoindex unimplemented for this OS");
@@ -804,8 +804,8 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
         var first = true;
         while (true) {
             const rc = ws2_32.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res);
-            switch (@enumFromInt(os.windows.ws2_32.WinsockError, @intCast(u16, rc))) {
-                @enumFromInt(os.windows.ws2_32.WinsockError, 0) => break,
+            switch (@as(os.windows.ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(rc))))) {
+                @as(os.windows.ws2_32.WinsockError, @enumFromInt(0)) => break,
                 .WSATRY_AGAIN => return error.TemporaryNameServerFailure,
                 .WSANO_RECOVERY => return error.NameServerFailure,
                 .WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
@@ -841,7 +841,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
         var i: usize = 0;
         while (it) |info| : (it = info.next) {
             const addr = info.addr orelse continue;
-            result.addrs[i] = Address.initPosix(@alignCast(4, addr));
+            result.addrs[i] = Address.initPosix(@alignCast(addr));
 
             if (info.canonname) |n| {
                 if (result.canon_name == null) {
@@ -874,7 +874,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
         };
         var res: ?*os.addrinfo = null;
         switch (sys.getaddrinfo(name_c.ptr, port_c.ptr, &hints, &res)) {
-            @enumFromInt(sys.EAI, 0) => {},
+            @as(sys.EAI, @enumFromInt(0)) => {},
             .ADDRFAMILY => return error.HostLacksNetworkAddresses,
             .AGAIN => return error.TemporaryNameServerFailure,
             .BADFLAGS => unreachable, // Invalid hints
@@ -908,7 +908,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
         var i: usize = 0;
         while (it) |info| : (it = info.next) {
             const addr = info.addr orelse continue;
-            result.addrs[i] = Address.initPosix(@alignCast(4, addr));
+            result.addrs[i] = Address.initPosix(@alignCast(addr));
 
             if (info.canonname) |n| {
                 if (result.canon_name == null) {
@@ -1020,7 +1020,7 @@ fn linuxLookupName(
     for (addrs.items, 0..) |*addr, i| {
         var key: i32 = 0;
         var sa6: os.sockaddr.in6 = undefined;
-        @memset(@ptrCast([*]u8, &sa6)[0..@sizeOf(os.sockaddr.in6)], 0);
+        @memset(@as([*]u8, @ptrCast(&sa6))[0..@sizeOf(os.sockaddr.in6)], 0);
         var da6 = os.sockaddr.in6{
             .family = os.AF.INET6,
             .scope_id = addr.addr.in6.sa.scope_id,
@@ -1029,7 +1029,7 @@ fn linuxLookupName(
             .addr = [1]u8{0} ** 16,
         };
         var sa4: os.sockaddr.in = undefined;
-        @memset(@ptrCast([*]u8, &sa4)[0..@sizeOf(os.sockaddr.in)], 0);
+        @memset(@as([*]u8, @ptrCast(&sa4))[0..@sizeOf(os.sockaddr.in)], 0);
         var da4 = os.sockaddr.in{
             .family = os.AF.INET,
             .port = 65535,
@@ -1042,18 +1042,18 @@ fn linuxLookupName(
         var dalen: os.socklen_t = undefined;
         if (addr.addr.any.family == os.AF.INET6) {
             da6.addr = addr.addr.in6.sa.addr;
-            da = @ptrCast(*os.sockaddr, &da6);
+            da = @ptrCast(&da6);
             dalen = @sizeOf(os.sockaddr.in6);
-            sa = @ptrCast(*os.sockaddr, &sa6);
+            sa = @ptrCast(&sa6);
             salen = @sizeOf(os.sockaddr.in6);
         } else {
             sa6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*;
             da6.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*;
             mem.writeIntNative(u32, da6.addr[12..], addr.addr.in.sa.addr);
             da4.addr = addr.addr.in.sa.addr;
-            da = @ptrCast(*os.sockaddr, &da4);
+            da = @ptrCast(&da4);
             dalen = @sizeOf(os.sockaddr.in);
-            sa = @ptrCast(*os.sockaddr, &sa4);
+            sa = @ptrCast(&sa4);
             salen = @sizeOf(os.sockaddr.in);
         }
         const dpolicy = policyOf(da6.addr);
@@ -1070,7 +1070,7 @@ fn linuxLookupName(
             os.getsockname(fd, sa, &salen) catch break :syscalls;
             if (addr.addr.any.family == os.AF.INET) {
                 // TODO sa6.addr[12..16] should return *[4]u8, making this cast unnecessary.
-                mem.writeIntNative(u32, @ptrCast(*[4]u8, &sa6.addr[12]), sa4.addr);
+                mem.writeIntNative(u32, @as(*[4]u8, @ptrCast(&sa6.addr[12])), sa4.addr);
             }
             if (dscope == @as(i32, scopeOf(sa6.addr))) key |= DAS_MATCHINGSCOPE;
             if (dlabel == labelOf(sa6.addr)) key |= DAS_MATCHINGLABEL;
@@ -1079,7 +1079,7 @@ fn linuxLookupName(
         key |= dprec << DAS_PREC_SHIFT;
         key |= (15 - dscope) << DAS_SCOPE_SHIFT;
         key |= prefixlen << DAS_PREFIX_SHIFT;
-        key |= (MAXADDRS - @intCast(i32, i)) << DAS_ORDER_SHIFT;
+        key |= (MAXADDRS - @as(i32, @intCast(i))) << DAS_ORDER_SHIFT;
         addr.sortkey = key;
     }
     mem.sort(LookupAddr, addrs.items, {}, addrCmpLessThan);
@@ -1171,7 +1171,7 @@ fn prefixMatch(s: [16]u8, d: [16]u8) u8 {
     // address. However the definition of the source prefix length is
     // not clear and thus this limiting is not yet implemented.
     var i: u8 = 0;
-    while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @intCast(u3, i % 8))) == 0) : (i += 1) {}
+    while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @as(u3, @intCast(i % 8)))) == 0) : (i += 1) {}
     return i;
 }
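
A worked check of the loop above (hypothetical byte patterns): matching runs
most-significant bit first, so two addresses that first differ at global bit 9
share a 9-bit prefix.

    const std = @import("std");

    test "prefixMatch counts leading equal bits, MSB first" {
        const s = [2]u8{ 0b1010_1010, 0b1100_0000 } ++ [_]u8{0} ** 14;
        const d = [2]u8{ 0b1010_1010, 0b1000_0000 } ++ [_]u8{0} ** 14;
        var i: u8 = 0;
        while (i < 128 and ((s[i / 8] ^ d[i / 8]) & (@as(u8, 128) >> @as(u3, @intCast(i % 8)))) == 0) : (i += 1) {}
        try std.testing.expect(i == 9);
    }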
 
@@ -1577,7 +1577,7 @@ fn resMSendRc(
 
     // Get local address and open/bind a socket
     var sa: Address = undefined;
-    @memset(@ptrCast([*]u8, &sa)[0..@sizeOf(Address)], 0);
+    @memset(@as([*]u8, @ptrCast(&sa))[0..@sizeOf(Address)], 0);
     sa.any.family = family;
     try os.bind(fd, &sa.any, sl);
 
@@ -1588,13 +1588,13 @@ fn resMSendRc(
     }};
     const retry_interval = timeout / attempts;
     var next: u32 = 0;
-    var t2: u64 = @bitCast(u64, std.time.milliTimestamp());
+    var t2: u64 = @as(u64, @bitCast(std.time.milliTimestamp()));
     var t0 = t2;
     var t1 = t2 - retry_interval;
 
     var servfail_retry: usize = undefined;
 
-    outer: while (t2 - t0 < timeout) : (t2 = @bitCast(u64, std.time.milliTimestamp())) {
+    outer: while (t2 - t0 < timeout) : (t2 = @as(u64, @bitCast(std.time.milliTimestamp()))) {
         if (t2 - t1 >= retry_interval) {
             // Query all configured nameservers in parallel
             var i: usize = 0;
lib/std/os.zig
@@ -494,7 +494,7 @@ pub fn getrandom(buffer: []u8) GetRandomError!void {
             const res = if (use_c) blk: {
                 const rc = std.c.getrandom(buf.ptr, buf.len, 0);
                 break :blk .{
-                    .num_read = @bitCast(usize, rc),
+                    .num_read = @as(usize, @bitCast(rc)),
                     .err = std.c.getErrno(rc),
                 };
             } else blk: {
@@ -608,7 +608,7 @@ pub fn abort() noreturn {
         sigprocmask(SIG.UNBLOCK, &sigabrtmask, null);
 
         // Beyond this point should be unreachable.
-        @ptrFromInt(*allowzero volatile u8, 0).* = 0;
+        @as(*allowzero volatile u8, @ptrFromInt(0)).* = 0;
         raise(SIG.KILL) catch {};
         exit(127); // Pid 1 might not be signalled in some containers.
     }
@@ -678,10 +678,10 @@ pub fn exit(status: u8) noreturn {
         // exit() is only available if exitBootServices() has not been called yet.
         // This call to exit should not fail, so we don't care about its return value.
         if (uefi.system_table.boot_services) |bs| {
-            _ = bs.exit(uefi.handle, @enumFromInt(uefi.Status, status), 0, null);
+            _ = bs.exit(uefi.handle, @as(uefi.Status, @enumFromInt(status)), 0, null);
         }
         // If we can't exit, reboot the system instead.
-        uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @enumFromInt(uefi.Status, status), 0, null);
+        uefi.system_table.runtime_services.resetSystem(uefi.tables.ResetType.ResetCold, @as(uefi.Status, @enumFromInt(status)), 0, null);
     }
     system.exit(status);
 }
@@ -759,7 +759,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
     while (true) {
         const rc = system.read(fd, buf.ptr, adjusted_len);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .INVAL => unreachable,
             .FAULT => unreachable,
@@ -818,7 +818,7 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
         // TODO handle the case when iov_len is too large and get rid of this @intCast
         const rc = system.readv(fd, iov.ptr, iov_count);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .INVAL => unreachable,
             .FAULT => unreachable,
@@ -892,11 +892,11 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
 
     const pread_sym = if (lfs64_abi) system.pread64 else system.pread;
 
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     while (true) {
         const rc = pread_sym(fd, buf.ptr, adjusted_len, ioffset);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .INVAL => unreachable,
             .FAULT => unreachable,
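
The `@bitCast` exists because the libc prototype takes a signed `off_t` while,
as the comment says, the OS treats the value as unsigned; the cast preserves
the bit pattern. A quick round-trip sketch (hypothetical value):

    const std = @import("std");

    test "u64 offsets round-trip through i64 unchanged" {
        const offset: u64 = 0x8000_0000_0000_0000; // larger than maxInt(i64)
        const ioffset: i64 = @bitCast(offset);
        try std.testing.expect(@as(u64, @bitCast(ioffset)) == offset);
    }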
@@ -929,7 +929,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
     if (builtin.os.tag == .windows) {
         var io_status_block: windows.IO_STATUS_BLOCK = undefined;
         var eof_info = windows.FILE_END_OF_FILE_INFORMATION{
-            .EndOfFile = @bitCast(windows.LARGE_INTEGER, length),
+            .EndOfFile = @as(windows.LARGE_INTEGER, @bitCast(length)),
         };
 
         const rc = windows.ntdll.NtSetInformationFile(
@@ -965,7 +965,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
     while (true) {
         const ftruncate_sym = if (lfs64_abi) system.ftruncate64 else system.ftruncate;
 
-        const ilen = @bitCast(i64, length); // the OS treats this as unsigned
+        const ilen = @as(i64, @bitCast(length)); // the OS treats this as unsigned
         switch (errno(ftruncate_sym(fd, ilen))) {
             .SUCCESS => return,
             .INTR => continue,
@@ -1001,7 +1001,7 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
     if (have_pread_but_not_preadv) {
         // We could loop here, but proper usage of `preadv` must handle partial reads anyway.
         // So we simply read into the first vector only.
-        if (iov.len == 0) return @intCast(usize, 0);
+        if (iov.len == 0) return @as(usize, @intCast(0));
         const first = iov[0];
         return pread(fd, first.iov_base[0..first.iov_len], offset);
     }
@@ -1030,11 +1030,11 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
 
     const preadv_sym = if (lfs64_abi) system.preadv64 else system.preadv;
 
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     while (true) {
         const rc = preadv_sym(fd, iov.ptr, iov_count, ioffset);
         switch (errno(rc)) {
-            .SUCCESS => return @bitCast(usize, rc),
+            .SUCCESS => return @as(usize, @bitCast(rc)),
             .INTR => continue,
             .INVAL => unreachable,
             .FAULT => unreachable,
@@ -1143,7 +1143,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
     while (true) {
         const rc = system.write(fd, bytes.ptr, adjusted_len);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .INVAL => return error.InvalidArgument,
             .FAULT => unreachable,
@@ -1212,11 +1212,11 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
         }
     }
 
-    const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len);
+    const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len));
     while (true) {
         const rc = system.writev(fd, iov.ptr, iov_count);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .INVAL => return error.InvalidArgument,
             .FAULT => unreachable,
@@ -1304,11 +1304,11 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
 
     const pwrite_sym = if (lfs64_abi) system.pwrite64 else system.pwrite;
 
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     while (true) {
         const rc = pwrite_sym(fd, bytes.ptr, adjusted_len, ioffset);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .INVAL => return error.InvalidArgument,
             .FAULT => unreachable,
@@ -1390,12 +1390,12 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usiz
 
     const pwritev_sym = if (lfs64_abi) system.pwritev64 else system.pwritev;
 
-    const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @intCast(u31, iov.len);
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const iov_count = if (iov.len > IOV_MAX) IOV_MAX else @as(u31, @intCast(iov.len));
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     while (true) {
         const rc = pwritev_sym(fd, iov.ptr, iov_count, ioffset);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .INVAL => return error.InvalidArgument,
             .FAULT => unreachable,
@@ -1504,7 +1504,7 @@ pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t
     while (true) {
         const rc = open_sym(file_path, flags, perm);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(fd_t, rc),
+            .SUCCESS => return @as(fd_t, @intCast(rc)),
             .INTR => continue,
 
             .FAULT => unreachable,
@@ -1653,11 +1653,11 @@ fn openOptionsFromFlagsWasi(fd: fd_t, oflag: u32) OpenError!WasiOpenOptions {
     rights &= fsb_cur.fs_rights_inheriting;
 
     return WasiOpenOptions{
-        .oflags = @truncate(w.oflags_t, (oflag >> 12)) & 0xfff,
+        .oflags = @as(w.oflags_t, @truncate((oflag >> 12))) & 0xfff,
         .lookup_flags = if (oflag & O.NOFOLLOW == 0) w.LOOKUP_SYMLINK_FOLLOW else 0,
         .fs_rights_base = rights,
         .fs_rights_inheriting = fsb_cur.fs_rights_inheriting,
-        .fs_flags = @truncate(w.fdflags_t, oflag & 0xfff),
+        .fs_flags = @as(w.fdflags_t, @truncate(oflag & 0xfff)),
     };
 }
 
@@ -1717,7 +1717,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t)
     while (true) {
         const rc = openat_sym(dir_fd, file_path, flags, mode);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(fd_t, rc),
+            .SUCCESS => return @as(fd_t, @intCast(rc)),
             .INTR => continue,
 
             .FAULT => unreachable,
@@ -1765,7 +1765,7 @@ pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t)
 pub fn dup(old_fd: fd_t) !fd_t {
     const rc = system.dup(old_fd);
     return switch (errno(rc)) {
-        .SUCCESS => return @intCast(fd_t, rc),
+        .SUCCESS => return @as(fd_t, @intCast(rc)),
         .MFILE => error.ProcessFdQuotaExceeded,
         .BADF => unreachable, // invalid file descriptor
         else => |err| return unexpectedErrno(err),
@@ -2024,7 +2024,7 @@ pub fn getcwd(out_buffer: []u8) GetCwdError![]u8 {
 
     const err = if (builtin.link_libc) blk: {
         const c_err = if (std.c.getcwd(out_buffer.ptr, out_buffer.len)) |_| 0 else std.c._errno().*;
-        break :blk @enumFromInt(E, c_err);
+        break :blk @as(E, @enumFromInt(c_err));
     } else blk: {
         break :blk errno(system.getcwd(out_buffer.ptr, out_buffer.len));
     };
@@ -2661,12 +2661,12 @@ pub fn renameatW(
     const struct_len = @sizeOf(windows.FILE_RENAME_INFORMATION) - 1 + new_path_w.len * 2;
     if (struct_len > struct_buf_len) return error.NameTooLong;
 
-    const rename_info = @ptrCast(*windows.FILE_RENAME_INFORMATION, &rename_info_buf);
+    const rename_info = @as(*windows.FILE_RENAME_INFORMATION, @ptrCast(&rename_info_buf));
 
     rename_info.* = .{
         .ReplaceIfExists = ReplaceIfExists,
         .RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(new_path_w)) null else new_dir_fd,
-        .FileNameLength = @intCast(u32, new_path_w.len * 2), // already checked error.NameTooLong
+        .FileNameLength = @as(u32, @intCast(new_path_w.len * 2)), // already checked error.NameTooLong
         .FileName = undefined,
     };
     @memcpy(@as([*]u16, &rename_info.FileName)[0..new_path_w.len], new_path_w);
@@ -2677,7 +2677,7 @@ pub fn renameatW(
         src_fd,
         &io_status_block,
         rename_info,
-        @intCast(u32, struct_len), // already checked for error.NameTooLong
+        @as(u32, @intCast(struct_len)), // already checked for error.NameTooLong
         .FileRenameInformation,
     );
 
@@ -3049,7 +3049,7 @@ pub fn readlinkZ(file_path: [*:0]const u8, out_buffer: []u8) ReadLinkError![]u8
     }
     const rc = system.readlink(file_path, out_buffer.ptr, out_buffer.len);
     switch (errno(rc)) {
-        .SUCCESS => return out_buffer[0..@bitCast(usize, rc)],
+        .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))],
         .ACCES => return error.AccessDenied,
         .FAULT => unreachable,
         .INVAL => return error.NotLink,
@@ -3115,7 +3115,7 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read
     }
     const rc = system.readlinkat(dirfd, file_path, out_buffer.ptr, out_buffer.len);
     switch (errno(rc)) {
-        .SUCCESS => return out_buffer[0..@bitCast(usize, rc)],
+        .SUCCESS => return out_buffer[0..@as(usize, @bitCast(rc))],
         .ACCES => return error.AccessDenied,
         .FAULT => unreachable,
         .INVAL => return error.NotLink,
@@ -3227,7 +3227,7 @@ pub fn isatty(handle: fd_t) bool {
     if (builtin.os.tag == .linux) {
         while (true) {
             var wsz: linux.winsize = undefined;
-            const fd = @bitCast(usize, @as(isize, handle));
+            const fd = @as(usize, @bitCast(@as(isize, handle)));
             const rc = linux.syscall3(.ioctl, fd, linux.T.IOCGWINSZ, @intFromPtr(&wsz));
             switch (linux.getErrno(rc)) {
                 .SUCCESS => return true,
@@ -3271,14 +3271,14 @@ pub fn isCygwinPty(handle: fd_t) bool {
     var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = [_]u8{0} ** (name_bytes_offset + num_name_bytes);
 
     var io_status_block: windows.IO_STATUS_BLOCK = undefined;
-    const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @intCast(u32, name_info_bytes.len), .FileNameInformation);
+    const rc = windows.ntdll.NtQueryInformationFile(handle, &io_status_block, &name_info_bytes, @as(u32, @intCast(name_info_bytes.len)), .FileNameInformation);
     switch (rc) {
         .SUCCESS => {},
         .INVALID_PARAMETER => unreachable,
         else => return false,
     }
 
-    const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
+    const name_info = @as(*const windows.FILE_NAME_INFO, @ptrCast(&name_info_bytes[0]));
     const name_bytes = name_info_bytes[name_bytes_offset .. name_bytes_offset + @as(usize, name_info.FileNameLength)];
     const name_wide = mem.bytesAsSlice(u16, name_bytes);
     // Note: The name we get from NtQueryInformationFile will be prefixed with a '\', e.g. \msys-1888ae32e00d56aa-pty0-to-master
@@ -3325,9 +3325,9 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t
         else
             0;
         const rc = try windows.WSASocketW(
-            @bitCast(i32, domain),
-            @bitCast(i32, filtered_sock_type),
-            @bitCast(i32, protocol),
+            @as(i32, @bitCast(domain)),
+            @as(i32, @bitCast(filtered_sock_type)),
+            @as(i32, @bitCast(protocol)),
             null,
             0,
             flags,
@@ -3353,7 +3353,7 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t
     const rc = system.socket(domain, filtered_sock_type, protocol);
     switch (errno(rc)) {
         .SUCCESS => {
-            const fd = @intCast(fd_t, rc);
+            const fd = @as(fd_t, @intCast(rc));
             if (!have_sock_flags) {
                 try setSockFlags(fd, socket_type);
             }
@@ -3679,7 +3679,7 @@ pub fn accept(
         } else {
             switch (errno(rc)) {
                 .SUCCESS => {
-                    break @intCast(socket_t, rc);
+                    break @as(socket_t, @intCast(rc));
                 },
                 .INTR => continue,
                 .AGAIN => return error.WouldBlock,
@@ -3723,7 +3723,7 @@ pub const EpollCreateError = error{
 pub fn epoll_create1(flags: u32) EpollCreateError!i32 {
     const rc = system.epoll_create1(flags);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(i32, rc),
+        .SUCCESS => return @as(i32, @intCast(rc)),
         else => |err| return unexpectedErrno(err),
 
         .INVAL => unreachable,
@@ -3782,9 +3782,9 @@ pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*linux.epoll_event) EpollC
 pub fn epoll_wait(epfd: i32, events: []linux.epoll_event, timeout: i32) usize {
     while (true) {
         // TODO get rid of the @intCast
-        const rc = system.epoll_wait(epfd, events.ptr, @intCast(u32, events.len), timeout);
+        const rc = system.epoll_wait(epfd, events.ptr, @as(u32, @intCast(events.len)), timeout);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .BADF => unreachable,
             .FAULT => unreachable,
@@ -3803,7 +3803,7 @@ pub const EventFdError = error{
 pub fn eventfd(initval: u32, flags: u32) EventFdError!i32 {
     const rc = system.eventfd(initval, flags);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(i32, rc),
+        .SUCCESS => return @as(i32, @intCast(rc)),
         else => |err| return unexpectedErrno(err),
 
         .INVAL => unreachable, // invalid parameters
@@ -3937,7 +3937,7 @@ pub const ConnectError = error{
 /// return error.WouldBlock when EAGAIN or EINPROGRESS is received.
 pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) ConnectError!void {
     if (builtin.os.tag == .windows) {
-        const rc = windows.ws2_32.connect(sock, sock_addr, @intCast(i32, len));
+        const rc = windows.ws2_32.connect(sock, sock_addr, @as(i32, @intCast(len)));
         if (rc == 0) return;
         switch (windows.ws2_32.WSAGetLastError()) {
             .WSAEADDRINUSE => return error.AddressInUse,
@@ -3992,10 +3992,10 @@ pub fn connect(sock: socket_t, sock_addr: *const sockaddr, len: socklen_t) Conne
 pub fn getsockoptError(sockfd: fd_t) ConnectError!void {
     var err_code: i32 = undefined;
     var size: u32 = @sizeOf(u32);
-    const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @ptrCast([*]u8, &err_code), &size);
+    const rc = system.getsockopt(sockfd, SOL.SOCKET, SO.ERROR, @as([*]u8, @ptrCast(&err_code)), &size);
     assert(size == 4);
     switch (errno(rc)) {
-        .SUCCESS => switch (@enumFromInt(E, err_code)) {
+        .SUCCESS => switch (@as(E, @enumFromInt(err_code))) {
             .SUCCESS => return,
             .ACCES => return error.PermissionDenied,
             .PERM => return error.PermissionDenied,
@@ -4035,13 +4035,13 @@ pub const WaitPidResult = struct {
 pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult {
     const Status = if (builtin.link_libc) c_int else u32;
     var status: Status = undefined;
-    const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags;
+    const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags;
     while (true) {
         const rc = system.waitpid(pid, &status, coerced_flags);
         switch (errno(rc)) {
             .SUCCESS => return .{
-                .pid = @intCast(pid_t, rc),
-                .status = @bitCast(u32, status),
+                .pid = @as(pid_t, @intCast(rc)),
+                .status = @as(u32, @bitCast(status)),
             },
             .INTR => continue,
             .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
@@ -4054,13 +4054,13 @@ pub fn waitpid(pid: pid_t, flags: u32) WaitPidResult {
 pub fn wait4(pid: pid_t, flags: u32, ru: ?*rusage) WaitPidResult {
     const Status = if (builtin.link_libc) c_int else u32;
     var status: Status = undefined;
-    const coerced_flags = if (builtin.link_libc) @intCast(c_int, flags) else flags;
+    const coerced_flags = if (builtin.link_libc) @as(c_int, @intCast(flags)) else flags;
     while (true) {
         const rc = system.wait4(pid, &status, coerced_flags, ru);
         switch (errno(rc)) {
             .SUCCESS => return .{
-                .pid = @intCast(pid_t, rc),
-                .status = @bitCast(u32, status),
+                .pid = @as(pid_t, @intCast(rc)),
+                .status = @as(u32, @bitCast(status)),
             },
             .INTR => continue,
             .CHILD => unreachable, // The process specified does not exist. It would be a race condition to handle this error.
@@ -4182,7 +4182,7 @@ pub const KQueueError = error{
 pub fn kqueue() KQueueError!i32 {
     const rc = system.kqueue();
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(i32, rc),
+        .SUCCESS => return @as(i32, @intCast(rc)),
         .MFILE => return error.ProcessFdQuotaExceeded,
         .NFILE => return error.SystemFdQuotaExceeded,
         else => |err| return unexpectedErrno(err),
@@ -4223,7 +4223,7 @@ pub fn kevent(
             timeout,
         );
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .ACCES => return error.AccessDenied,
             .FAULT => unreachable,
             .BADF => unreachable, // Always a race condition.
@@ -4247,7 +4247,7 @@ pub const INotifyInitError = error{
 pub fn inotify_init1(flags: u32) INotifyInitError!i32 {
     const rc = system.inotify_init1(flags);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(i32, rc),
+        .SUCCESS => return @as(i32, @intCast(rc)),
         .INVAL => unreachable,
         .MFILE => return error.ProcessFdQuotaExceeded,
         .NFILE => return error.SystemFdQuotaExceeded,
@@ -4276,7 +4276,7 @@ pub fn inotify_add_watch(inotify_fd: i32, pathname: []const u8, mask: u32) INoti
 pub fn inotify_add_watchZ(inotify_fd: i32, pathname: [*:0]const u8, mask: u32) INotifyAddWatchError!i32 {
     const rc = system.inotify_add_watch(inotify_fd, pathname, mask);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(i32, rc),
+        .SUCCESS => return @as(i32, @intCast(rc)),
         .ACCES => return error.AccessDenied,
         .BADF => unreachable,
         .FAULT => unreachable,
@@ -4319,7 +4319,7 @@ pub const MProtectError = error{
 pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
     assert(mem.isAligned(memory.len, mem.page_size));
     if (builtin.os.tag == .windows) {
-        const win_prot: windows.DWORD = switch (@truncate(u3, protection)) {
+        const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
             0b000 => windows.PAGE_NOACCESS,
             0b001 => windows.PAGE_READONLY,
             0b010 => unreachable, // +w -r not allowed
@@ -4350,7 +4350,7 @@ pub const ForkError = error{SystemResources} || UnexpectedError;
 pub fn fork() ForkError!pid_t {
     const rc = system.fork();
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(pid_t, rc),
+        .SUCCESS => return @as(pid_t, @intCast(rc)),
         .AGAIN => return error.SystemResources,
         .NOMEM => return error.SystemResources,
         else => |err| return unexpectedErrno(err),
@@ -4391,14 +4391,14 @@ pub fn mmap(
 ) MMapError![]align(mem.page_size) u8 {
     const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap;
 
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     const rc = mmap_sym(ptr, length, prot, flags, fd, ioffset);
     const err = if (builtin.link_libc) blk: {
-        if (rc != std.c.MAP.FAILED) return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, rc))[0..length];
-        break :blk @enumFromInt(E, system._errno().*);
+        if (rc != std.c.MAP.FAILED) return @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
+        break :blk @as(E, @enumFromInt(system._errno().*));
     } else blk: {
         const err = errno(rc);
-        if (err == .SUCCESS) return @ptrFromInt([*]align(mem.page_size) u8, rc)[0..length];
+        if (err == .SUCCESS) return @as([*]align(mem.page_size) u8, @ptrFromInt(rc))[0..length];
         break :blk err;
     };
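     // Both branches converge on a single E value: libc signals failure via the
     // MAP_FAILED sentinel plus errno, while the raw syscall path encodes the
     // error in the return value itself.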
     switch (err) {
@@ -4781,7 +4781,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
     }
     if (builtin.os.tag == .wasi and !builtin.link_libc) {
         var new_offset: wasi.filesize_t = undefined;
-        switch (wasi.fd_seek(fd, @bitCast(wasi.filedelta_t, offset), .SET, &new_offset)) {
+        switch (wasi.fd_seek(fd, @as(wasi.filedelta_t, @bitCast(offset)), .SET, &new_offset)) {
             .SUCCESS => return,
             .BADF => unreachable, // always a race condition
             .INVAL => return error.Unseekable,
@@ -4795,7 +4795,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
 
     const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
 
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     switch (errno(lseek_sym(fd, ioffset, SEEK.SET))) {
         .SUCCESS => return,
         .BADF => unreachable, // always a race condition
@@ -4811,7 +4811,7 @@ pub fn lseek_SET(fd: fd_t, offset: u64) SeekError!void {
 pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
     if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
         var result: u64 = undefined;
-        switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.CUR))) {
+        switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.CUR))) {
             .SUCCESS => return,
             .BADF => unreachable, // always a race condition
             .INVAL => return error.Unseekable,
@@ -4839,7 +4839,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
     }
     const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
 
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     switch (errno(lseek_sym(fd, ioffset, SEEK.CUR))) {
         .SUCCESS => return,
         .BADF => unreachable, // always a race condition
@@ -4855,7 +4855,7 @@ pub fn lseek_CUR(fd: fd_t, offset: i64) SeekError!void {
 pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
     if (builtin.os.tag == .linux and !builtin.link_libc and @sizeOf(usize) == 4) {
         var result: u64 = undefined;
-        switch (errno(system.llseek(fd, @bitCast(u64, offset), &result, SEEK.END))) {
+        switch (errno(system.llseek(fd, @as(u64, @bitCast(offset)), &result, SEEK.END))) {
             .SUCCESS => return,
             .BADF => unreachable, // always a race condition
             .INVAL => return error.Unseekable,
@@ -4883,7 +4883,7 @@ pub fn lseek_END(fd: fd_t, offset: i64) SeekError!void {
     }
     const lseek_sym = if (lfs64_abi) system.lseek64 else system.lseek;
 
-    const ioffset = @bitCast(i64, offset); // the OS treats this as unsigned
+    const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned
     switch (errno(lseek_sym(fd, ioffset, SEEK.END))) {
         .SUCCESS => return,
         .BADF => unreachable, // always a race condition
@@ -4929,7 +4929,7 @@ pub fn lseek_CUR_get(fd: fd_t) SeekError!u64 {
 
     const rc = lseek_sym(fd, 0, SEEK.CUR);
     switch (errno(rc)) {
-        .SUCCESS => return @bitCast(u64, rc),
+        .SUCCESS => return @as(u64, @bitCast(rc)),
         .BADF => unreachable, // always a race condition
         .INVAL => return error.Unseekable,
         .OVERFLOW => return error.Unseekable,
@@ -4952,7 +4952,7 @@ pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) FcntlError!usize {
     while (true) {
         const rc = system.fcntl(fd, cmd, arg);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
             .INTR => continue,
             .AGAIN, .ACCES => return error.Locked,
             .BADF => unreachable,
@@ -5122,7 +5122,7 @@ pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[MAX_PATH_BYTES]u8) RealP
 
         return getFdPath(fd, out_buffer);
     }
-    const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@enumFromInt(E, std.c._errno().*)) {
+    const result_path = std.c.realpath(pathname, out_buffer) orelse switch (@as(E, @enumFromInt(std.c._errno().*))) {
         .SUCCESS => unreachable,
         .INVAL => unreachable,
         .BADF => unreachable,
@@ -5269,7 +5269,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
                 };
                 var i: usize = 0;
                 while (i < len) {
-                    const kf: *align(1) system.kinfo_file = @ptrCast(*align(1) system.kinfo_file, &buf[i]);
+                    const kf: *align(1) system.kinfo_file = @as(*align(1) system.kinfo_file, @ptrCast(&buf[i]));
                     if (kf.fd == fd) {
                         len = mem.indexOfScalar(u8, &kf.path, 0) orelse MAX_PATH_BYTES;
                         if (len == 0) return error.NameTooLong;
@@ -5277,7 +5277,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
                         @memcpy(result, kf.path[0..len]);
                         return result;
                     }
-                    i += @intCast(usize, kf.structsize);
+                    i += @as(usize, @intCast(kf.structsize));
                 }
                 return error.InvalidHandle;
             }
@@ -5357,22 +5357,22 @@ pub fn dl_iterate_phdr(
     if (builtin.link_libc) {
         switch (system.dl_iterate_phdr(struct {
             fn callbackC(info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int {
-                const context_ptr = @ptrCast(*const Context, @alignCast(@alignOf(*const Context), data));
+                const context_ptr: *const Context = @ptrCast(@alignCast(data));
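+                // The new cast builtins take their result type from the annotated
+                // declaration, so @alignCast needs no explicit alignment argument here.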
                 callback(info, size, context_ptr.*) catch |err| return @intFromError(err);
                 return 0;
             }
-        }.callbackC, @ptrFromInt(?*anyopaque, @intFromPtr(&context)))) {
+        }.callbackC, @as(?*anyopaque, @ptrFromInt(@intFromPtr(&context))))) {
             0 => return,
-            else => |err| return @errSetCast(Error, @errorFromInt(@intCast(u16, err))), // TODO don't hardcode u16
+            else => |err| return @as(Error, @errSetCast(@errorFromInt(@as(u16, @intCast(err))))), // TODO don't hardcode u16
         }
     }
 
     const elf_base = std.process.getBaseAddress();
-    const ehdr = @ptrFromInt(*elf.Ehdr, elf_base);
+    const ehdr = @as(*elf.Ehdr, @ptrFromInt(elf_base));
     // Make sure the base address points to an ELF image.
     assert(mem.eql(u8, ehdr.e_ident[0..4], elf.MAGIC));
     const n_phdr = ehdr.e_phnum;
-    const phdrs = (@ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff))[0..n_phdr];
+    const phdrs = (@as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff)))[0..n_phdr];
 
     var it = dl.linkmap_iterator(phdrs) catch unreachable;
 
@@ -5406,12 +5406,12 @@ pub fn dl_iterate_phdr(
         var dlpi_phnum: u16 = undefined;
 
         if (entry.l_addr != 0) {
-            const elf_header = @ptrFromInt(*elf.Ehdr, entry.l_addr);
-            dlpi_phdr = @ptrFromInt([*]elf.Phdr, entry.l_addr + elf_header.e_phoff);
+            const elf_header = @as(*elf.Ehdr, @ptrFromInt(entry.l_addr));
+            dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(entry.l_addr + elf_header.e_phoff));
             dlpi_phnum = elf_header.e_phnum;
         } else {
             // This is the running ELF image
-            dlpi_phdr = @ptrFromInt([*]elf.Phdr, elf_base + ehdr.e_phoff);
+            dlpi_phdr = @as([*]elf.Phdr, @ptrFromInt(elf_base + ehdr.e_phoff));
             dlpi_phnum = ehdr.e_phnum;
         }
 
@@ -5433,11 +5433,11 @@ pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError;
 pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
     if (builtin.os.tag == .wasi and !builtin.link_libc) {
         var ts: timestamp_t = undefined;
-        switch (system.clock_time_get(@bitCast(u32, clk_id), 1, &ts)) {
+        switch (system.clock_time_get(@as(u32, @bitCast(clk_id)), 1, &ts)) {
             .SUCCESS => {
                 tp.* = .{
-                    .tv_sec = @intCast(i64, ts / std.time.ns_per_s),
-                    .tv_nsec = @intCast(isize, ts % std.time.ns_per_s),
+                    .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)),
+                    .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)),
                 };
             },
             .INVAL => return error.UnsupportedClock,
@@ -5453,8 +5453,8 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
             const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
             const ft_per_s = std.time.ns_per_s / 100;
             tp.* = .{
-                .tv_sec = @intCast(i64, ft64 / ft_per_s) + std.time.epoch.windows,
-                .tv_nsec = @intCast(c_long, ft64 % ft_per_s) * 100,
+                .tv_sec = @as(i64, @intCast(ft64 / ft_per_s)) + std.time.epoch.windows,
+                .tv_nsec = @as(c_long, @intCast(ft64 % ft_per_s)) * 100,
             };
             return;
         } else {
@@ -5474,10 +5474,10 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
 pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void {
     if (builtin.os.tag == .wasi and !builtin.link_libc) {
         var ts: timestamp_t = undefined;
-        switch (system.clock_res_get(@bitCast(u32, clk_id), &ts)) {
+        switch (system.clock_res_get(@as(u32, @bitCast(clk_id)), &ts)) {
             .SUCCESS => res.* = .{
-                .tv_sec = @intCast(i64, ts / std.time.ns_per_s),
-                .tv_nsec = @intCast(isize, ts % std.time.ns_per_s),
+                .tv_sec = @as(i64, @intCast(ts / std.time.ns_per_s)),
+                .tv_nsec = @as(isize, @intCast(ts % std.time.ns_per_s)),
             },
             .INVAL => return error.UnsupportedClock,
             else => |err| return unexpectedErrno(err),
@@ -5747,7 +5747,7 @@ pub fn res_mkquery(
         // TODO determine the circumstances for this and whether or
         // not this should be an error.
         if (j - i - 1 > 62) unreachable;
-        q[i - 1] = @intCast(u8, j - i);
+        q[i - 1] = @as(u8, @intCast(j - i));
     }
     q[i + 1] = ty;
     q[i + 3] = class;
@@ -5756,10 +5756,10 @@ pub fn res_mkquery(
     var ts: timespec = undefined;
     clock_gettime(CLOCK.REALTIME, &ts) catch {};
     const UInt = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(ts.tv_nsec)));
-    const unsec = @bitCast(UInt, ts.tv_nsec);
-    const id = @truncate(u32, unsec + unsec / 65536);
-    q[0] = @truncate(u8, id / 256);
-    q[1] = @truncate(u8, id);
+    const unsec = @as(UInt, @bitCast(ts.tv_nsec));
+    const id = @as(u32, @truncate(unsec + unsec / 65536));
+    q[0] = @as(u8, @truncate(id / 256));
+    q[1] = @as(u8, @truncate(id));
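+    // The two ID bytes are stored big-endian: id / 256 yields the high byte and
+    // the final truncate keeps the low byte.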
 
     @memcpy(buf[0..n], q[0..n]);
     return n;
@@ -5865,11 +5865,11 @@ pub fn sendmsg(
                     else => |err| return windows.unexpectedWSAError(err),
                 }
             } else {
-                return @intCast(usize, rc);
+                return @as(usize, @intCast(rc));
             }
         } else {
             switch (errno(rc)) {
-                .SUCCESS => return @intCast(usize, rc),
+                .SUCCESS => return @as(usize, @intCast(rc)),
 
                 .ACCES => return error.AccessDenied,
                 .AGAIN => return error.WouldBlock,
@@ -5965,13 +5965,13 @@ pub fn sendto(
                 .WSANOTINITIALISED => unreachable, // A successful WSAStartup call must occur before using this function.
                 else => |err| return windows.unexpectedWSAError(err),
             },
-            else => |rc| return @intCast(usize, rc),
+            else => |rc| return @as(usize, @intCast(rc)),
         }
     }
     while (true) {
         const rc = system.sendto(sockfd, buf.ptr, buf.len, flags, dest_addr, addrlen);
         switch (errno(rc)) {
-            .SUCCESS => return @intCast(usize, rc),
+            .SUCCESS => return @as(usize, @intCast(rc)),
 
             .ACCES => return error.AccessDenied,
             .AGAIN => return error.WouldBlock,
@@ -6125,16 +6125,16 @@ pub fn sendfile(
             // Here we match BSD behavior, making a zero count value send as many bytes as possible.
             const adjusted_count_tmp = if (in_len == 0) max_count else @min(in_len, @as(size_t, max_count));
             // TODO we should not need this cast; improve return type of @min
-            const adjusted_count = @intCast(usize, adjusted_count_tmp);
+            const adjusted_count = @as(usize, @intCast(adjusted_count_tmp));
 
             const sendfile_sym = if (lfs64_abi) system.sendfile64 else system.sendfile;
 
             while (true) {
-                var offset: off_t = @bitCast(off_t, in_offset);
+                var offset: off_t = @as(off_t, @bitCast(in_offset));
                 const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count);
                 switch (errno(rc)) {
                     .SUCCESS => {
-                        const amt = @bitCast(usize, rc);
+                        const amt = @as(usize, @bitCast(rc));
                         total_written += amt;
                         if (in_len == 0 and amt == 0) {
                             // We have detected EOF from `in_fd`.
@@ -6209,9 +6209,9 @@ pub fn sendfile(
 
             while (true) {
                 var sbytes: off_t = undefined;
-                const offset = @bitCast(off_t, in_offset);
+                const offset = @as(off_t, @bitCast(in_offset));
                 const err = errno(system.sendfile(in_fd, out_fd, offset, adjusted_count, hdtr, &sbytes, flags));
-                const amt = @bitCast(usize, sbytes);
+                const amt = @as(usize, @bitCast(sbytes));
                 switch (err) {
                     .SUCCESS => return amt,
 
@@ -6286,13 +6286,13 @@ pub fn sendfile(
 
             const adjusted_count_temporary = @min(in_len, @as(u63, max_count));
             // TODO we should not need this int cast; improve the return type of `@min`
-            const adjusted_count = @intCast(u63, adjusted_count_temporary);
+            const adjusted_count = @as(u63, @intCast(adjusted_count_temporary));
 
             while (true) {
                 var sbytes: off_t = adjusted_count;
-                const signed_offset = @bitCast(i64, in_offset);
+                const signed_offset = @as(i64, @bitCast(in_offset));
                 const err = errno(system.sendfile(in_fd, out_fd, signed_offset, &sbytes, hdtr, flags));
-                const amt = @bitCast(usize, sbytes);
+                const amt = @as(usize, @bitCast(sbytes));
                 switch (err) {
                     .SUCCESS => return amt,
 
@@ -6342,7 +6342,7 @@ pub fn sendfile(
         // Here we match BSD behavior, making a zero count value send as many bytes as possible.
         const adjusted_count_tmp = if (in_len == 0) buf.len else @min(buf.len, in_len);
         // TODO we should not need this cast; improve return type of @min
-        const adjusted_count = @intCast(usize, adjusted_count_tmp);
+        const adjusted_count = @as(usize, @intCast(adjusted_count_tmp));
         const amt_read = try pread(in_fd, buf[0..adjusted_count], in_offset);
         if (amt_read == 0) {
             if (in_len == 0) {
@@ -6413,14 +6413,14 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
         std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok) and
         has_copy_file_range_syscall.load(.Monotonic)))
     {
-        var off_in_copy = @bitCast(i64, off_in);
-        var off_out_copy = @bitCast(i64, off_out);
+        var off_in_copy = @as(i64, @bitCast(off_in));
+        var off_out_copy = @as(i64, @bitCast(off_out));
 
         while (true) {
             const rc = system.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
             if (builtin.os.tag == .freebsd) {
                 switch (system.getErrno(rc)) {
-                    .SUCCESS => return @intCast(usize, rc),
+                    .SUCCESS => return @as(usize, @intCast(rc)),
                     .BADF => return error.FilesOpenedWithWrongFlags,
                     .FBIG => return error.FileTooBig,
                     .IO => return error.InputOutput,
@@ -6433,7 +6433,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
                 }
             } else { // assume linux
                 switch (system.getErrno(rc)) {
-                    .SUCCESS => return @intCast(usize, rc),
+                    .SUCCESS => return @as(usize, @intCast(rc)),
                     .BADF => return error.FilesOpenedWithWrongFlags,
                     .FBIG => return error.FileTooBig,
                     .IO => return error.InputOutput,
@@ -6486,11 +6486,11 @@ pub fn poll(fds: []pollfd, timeout: i32) PollError!usize {
                     else => |err| return windows.unexpectedWSAError(err),
                 }
             } else {
-                return @intCast(usize, rc);
+                return @as(usize, @intCast(rc));
             }
         } else {
             switch (errno(rc)) {
-                .SUCCESS => return @intCast(usize, rc),
+                .SUCCESS => return @as(usize, @intCast(rc)),
                 .FAULT => unreachable,
                 .INTR => continue,
                 .INVAL => unreachable,
@@ -6520,7 +6520,7 @@ pub fn ppoll(fds: []pollfd, timeout: ?*const timespec, mask: ?*const sigset_t) P
     const fds_count = math.cast(nfds_t, fds.len) orelse return error.SystemResources;
     const rc = system.ppoll(fds.ptr, fds_count, ts_ptr, mask);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(usize, rc),
+        .SUCCESS => return @as(usize, @intCast(rc)),
         .FAULT => unreachable,
         .INTR => return error.SignalInterrupt,
         .INVAL => unreachable,
@@ -6585,11 +6585,11 @@ pub fn recvfrom(
                     else => |err| return windows.unexpectedWSAError(err),
                 }
             } else {
-                return @intCast(usize, rc);
+                return @as(usize, @intCast(rc));
             }
         } else {
             switch (errno(rc)) {
-                .SUCCESS => return @intCast(usize, rc),
+                .SUCCESS => return @as(usize, @intCast(rc)),
                 .BADF => unreachable, // always a race condition
                 .FAULT => unreachable,
                 .INVAL => unreachable,
@@ -6681,7 +6681,7 @@ pub const SetSockOptError = error{
 /// Set a socket's options.
 pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSockOptError!void {
     if (builtin.os.tag == .windows) {
-        const rc = windows.ws2_32.setsockopt(fd, @intCast(i32, level), @intCast(i32, optname), opt.ptr, @intCast(i32, opt.len));
+        const rc = windows.ws2_32.setsockopt(fd, @as(i32, @intCast(level)), @as(i32, @intCast(optname)), opt.ptr, @as(i32, @intCast(opt.len)));
         if (rc == windows.ws2_32.SOCKET_ERROR) {
             switch (windows.ws2_32.WSAGetLastError()) {
                 .WSANOTINITIALISED => unreachable,
@@ -6694,7 +6694,7 @@ pub fn setsockopt(fd: socket_t, level: u32, optname: u32, opt: []const u8) SetSo
         }
         return;
     } else {
-        switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(socklen_t, opt.len)))) {
+        switch (errno(system.setsockopt(fd, level, optname, opt.ptr, @as(socklen_t, @intCast(opt.len))))) {
             .SUCCESS => {},
             .BADF => unreachable, // always a race condition
             .NOTSOCK => unreachable, // always a race condition
@@ -6731,7 +6731,7 @@ pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t {
             const getErrno = if (use_c) std.c.getErrno else linux.getErrno;
             const rc = sys.memfd_create(name, flags);
             switch (getErrno(rc)) {
-                .SUCCESS => return @intCast(fd_t, rc),
+                .SUCCESS => return @as(fd_t, @intCast(rc)),
                 .FAULT => unreachable, // name has invalid memory
                 .INVAL => unreachable, // name/flags are faulty
                 .NFILE => return error.SystemFdQuotaExceeded,
@@ -6881,7 +6881,7 @@ pub fn ioctl_SIOCGIFINDEX(fd: fd_t, ifr: *ifreq) IoCtl_SIOCGIFINDEX_Error!void {
 pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
     const rc = system.signalfd(fd, mask, flags);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(fd_t, rc),
+        .SUCCESS => return @as(fd_t, @intCast(rc)),
         .BADF, .INVAL => unreachable,
         .NFILE => return error.SystemFdQuotaExceeded,
         .NOMEM => return error.SystemResources,
@@ -6989,7 +6989,7 @@ pub fn prctl(option: PR, args: anytype) PrctlError!u31 {
 
     const rc = system.prctl(@intFromEnum(option), buf[0], buf[1], buf[2], buf[3]);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(u31, rc),
+        .SUCCESS => return @as(u31, @intCast(rc)),
         .ACCES => return error.AccessDenied,
         .BADF => return error.InvalidFileDescriptor,
         .FAULT => return error.InvalidAddress,
@@ -7170,7 +7170,7 @@ pub fn perf_event_open(
 ) PerfEventOpenError!fd_t {
     const rc = system.perf_event_open(attr, pid, cpu, group_fd, flags);
     switch (errno(rc)) {
-        .SUCCESS => return @intCast(fd_t, rc),
+        .SUCCESS => return @as(fd_t, @intCast(rc)),
         .@"2BIG" => return error.TooBig,
         .ACCES => return error.PermissionDenied,
         .BADF => unreachable, // group_fd file descriptor is not valid.
@@ -7205,7 +7205,7 @@ pub const TimerFdSetError = TimerFdGetError || error{Canceled};
 pub fn timerfd_create(clockid: i32, flags: u32) TimerFdCreateError!fd_t {
     var rc = linux.timerfd_create(clockid, flags);
     return switch (errno(rc)) {
-        .SUCCESS => @intCast(fd_t, rc),
+        .SUCCESS => @as(fd_t, @intCast(rc)),
         .INVAL => unreachable,
         .MFILE => return error.ProcessFdQuotaExceeded,
         .NFILE => return error.SystemFdQuotaExceeded,
@@ -7267,7 +7267,7 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, signal: usize) PtraceError!
         .macos, .ios, .tvos, .watchos => switch (errno(darwin.ptrace(
             math.cast(i32, request) orelse return error.Overflow,
             pid,
-            @ptrFromInt(?[*]u8, addr),
+            @as(?[*]u8, @ptrFromInt(addr)),
             math.cast(i32, signal) orelse return error.Overflow,
         ))) {
             .SUCCESS => {},
lib/std/packed_int_array.zig
@@ -73,25 +73,25 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
             const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
 
             //read bytes as container
-            const value_ptr = @ptrCast(*align(1) const Container, &bytes[start_byte]);
+            const value_ptr = @as(*align(1) const Container, @ptrCast(&bytes[start_byte]));
             var value = value_ptr.*;
 
             if (endian != native_endian) value = @byteSwap(value);
 
             switch (endian) {
                 .Big => {
-                    value <<= @intCast(Shift, head_keep_bits);
-                    value >>= @intCast(Shift, head_keep_bits);
-                    value >>= @intCast(Shift, tail_keep_bits);
+                    value <<= @as(Shift, @intCast(head_keep_bits));
+                    value >>= @as(Shift, @intCast(head_keep_bits));
+                    value >>= @as(Shift, @intCast(tail_keep_bits));
                 },
                 .Little => {
-                    value <<= @intCast(Shift, tail_keep_bits);
-                    value >>= @intCast(Shift, tail_keep_bits);
-                    value >>= @intCast(Shift, head_keep_bits);
+                    value <<= @as(Shift, @intCast(tail_keep_bits));
+                    value >>= @as(Shift, @intCast(tail_keep_bits));
+                    value >>= @as(Shift, @intCast(head_keep_bits));
                 },
             }
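             // In both arms, the paired left/right shift clears the unwanted high
             // bits and the remaining right shift drops the unwanted low bits,
             // leaving the target int in the low int_bits of the container.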
 
-            return @bitCast(Int, @truncate(UnInt, value));
+            return @as(Int, @bitCast(@as(UnInt, @truncate(value))));
         }
 
         /// Sets the integer at `index` to `val` within the packed data beginning
@@ -115,21 +115,21 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
             const head_keep_bits = bit_index - (start_byte * 8);
             const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
             const keep_shift = switch (endian) {
-                .Big => @intCast(Shift, tail_keep_bits),
-                .Little => @intCast(Shift, head_keep_bits),
+                .Big => @as(Shift, @intCast(tail_keep_bits)),
+                .Little => @as(Shift, @intCast(head_keep_bits)),
             };
 
             //position the bits where they need to be in the container
-            const value = @intCast(Container, @bitCast(UnInt, int)) << keep_shift;
+            const value = @as(Container, @intCast(@as(UnInt, @bitCast(int)))) << keep_shift;
 
             //read existing bytes
-            const target_ptr = @ptrCast(*align(1) Container, &bytes[start_byte]);
+            const target_ptr = @as(*align(1) Container, @ptrCast(&bytes[start_byte]));
             var target = target_ptr.*;
 
             if (endian != native_endian) target = @byteSwap(target);
 
             //zero the bits we want to replace in the existing bytes
-            const inv_mask = @intCast(Container, std.math.maxInt(UnInt)) << keep_shift;
+            const inv_mask = @as(Container, @intCast(std.math.maxInt(UnInt))) << keep_shift;
             const mask = ~inv_mask;
             target &= mask;
 
@@ -156,7 +156,7 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: Endian) type {
             if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0);
 
             var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length);
-            new_slice.bit_offset = @intCast(u3, (bit_index - (start_byte * 8)));
+            new_slice.bit_offset = @as(u3, @intCast((bit_index - (start_byte * 8))));
             return new_slice;
         }
 
@@ -398,7 +398,7 @@ test "PackedIntArray init" {
     const PackedArray = PackedIntArray(u3, 8);
     var packed_array = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 });
     var i = @as(usize, 0);
-    while (i < packed_array.len) : (i += 1) try testing.expectEqual(@intCast(u3, i), packed_array.get(i));
+    while (i < packed_array.len) : (i += 1) try testing.expectEqual(@as(u3, @intCast(i)), packed_array.get(i));
 }
 
 test "PackedIntArray initAllTo" {
@@ -469,7 +469,7 @@ test "PackedIntSlice of PackedInt(Array/Slice)" {
 
         var i = @as(usize, 0);
         while (i < packed_array.len) : (i += 1) {
-            packed_array.set(i, @intCast(Int, i % limit));
+            packed_array.set(i, @as(Int, @intCast(i % limit)));
         }
 
         //slice of array
lib/std/pdb.zig
@@ -573,7 +573,7 @@ pub const Pdb = struct {
             if (this_record_len % 4 != 0) {
                 const round_to_next_4 = (this_record_len | 0x3) + 1;
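                 // (len | 3) + 1 rounds up to the next multiple of 4; records are
                 // padded to 4-byte boundaries in the stream.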
                 const march_forward_bytes = round_to_next_4 - this_record_len;
-                try stream.seekBy(@intCast(isize, march_forward_bytes));
+                try stream.seekBy(@as(isize, @intCast(march_forward_bytes)));
                 this_record_len += march_forward_bytes;
             }
 
@@ -689,14 +689,14 @@ pub const Pdb = struct {
 
         var symbol_i: usize = 0;
         while (symbol_i != module.symbols.len) {
-            const prefix = @ptrCast(*align(1) RecordPrefix, &module.symbols[symbol_i]);
+            const prefix = @as(*align(1) RecordPrefix, @ptrCast(&module.symbols[symbol_i]));
             if (prefix.RecordLen < 2)
                 return null;
             switch (prefix.RecordKind) {
                 .S_LPROC32, .S_GPROC32 => {
-                    const proc_sym = @ptrCast(*align(1) ProcSym, &module.symbols[symbol_i + @sizeOf(RecordPrefix)]);
+                    const proc_sym = @as(*align(1) ProcSym, @ptrCast(&module.symbols[symbol_i + @sizeOf(RecordPrefix)]));
                     if (address >= proc_sym.CodeOffset and address < proc_sym.CodeOffset + proc_sym.CodeSize) {
-                        return mem.sliceTo(@ptrCast([*:0]u8, &proc_sym.Name[0]), 0);
+                        return mem.sliceTo(@as([*:0]u8, @ptrCast(&proc_sym.Name[0])), 0);
                     }
                 },
                 else => {},
@@ -715,7 +715,7 @@ pub const Pdb = struct {
         var skip_len: usize = undefined;
         const checksum_offset = module.checksum_offset orelse return error.MissingDebugInfo;
         while (sect_offset != subsect_info.len) : (sect_offset += skip_len) {
-            const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &subsect_info[sect_offset]);
+            const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&subsect_info[sect_offset]));
             skip_len = subsect_hdr.Length;
             sect_offset += @sizeOf(DebugSubsectionHeader);
 
@@ -723,7 +723,7 @@ pub const Pdb = struct {
                 .Lines => {
                     var line_index = sect_offset;
 
-                    const line_hdr = @ptrCast(*align(1) LineFragmentHeader, &subsect_info[line_index]);
+                    const line_hdr = @as(*align(1) LineFragmentHeader, @ptrCast(&subsect_info[line_index]));
                     if (line_hdr.RelocSegment == 0)
                         return error.MissingDebugInfo;
                     line_index += @sizeOf(LineFragmentHeader);
@@ -737,7 +737,7 @@ pub const Pdb = struct {
                         const subsection_end_index = sect_offset + subsect_hdr.Length;
 
                         while (line_index < subsection_end_index) {
-                            const block_hdr = @ptrCast(*align(1) LineBlockFragmentHeader, &subsect_info[line_index]);
+                            const block_hdr = @as(*align(1) LineBlockFragmentHeader, @ptrCast(&subsect_info[line_index]));
                             line_index += @sizeOf(LineBlockFragmentHeader);
                             const start_line_index = line_index;
 
@@ -749,7 +749,7 @@ pub const Pdb = struct {
                             // This is done with a simple linear search.
                             var line_i: u32 = 0;
                             while (line_i < block_hdr.NumLines) : (line_i += 1) {
-                                const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[line_index]);
+                                const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[line_index]));
                                 line_index += @sizeOf(LineNumberEntry);
 
                                 const vaddr_start = frag_vaddr_start + line_num_entry.Offset;
@@ -761,7 +761,7 @@ pub const Pdb = struct {
                             // line_i == 0 would mean that no matching LineNumberEntry was found.
                             if (line_i > 0) {
                                 const subsect_index = checksum_offset + block_hdr.NameIndex;
-                                const chksum_hdr = @ptrCast(*align(1) FileChecksumEntryHeader, &module.subsect_info[subsect_index]);
+                                const chksum_hdr = @as(*align(1) FileChecksumEntryHeader, @ptrCast(&module.subsect_info[subsect_index]));
                                 const strtab_offset = @sizeOf(PDBStringTableHeader) + chksum_hdr.FileNameOffset;
                                 try self.string_table.?.seekTo(strtab_offset);
                                 const source_file_name = try self.string_table.?.reader().readUntilDelimiterAlloc(self.allocator, 0, 1024);
@@ -771,13 +771,13 @@ pub const Pdb = struct {
                                 const column = if (has_column) blk: {
                                     const start_col_index = start_line_index + @sizeOf(LineNumberEntry) * block_hdr.NumLines;
                                     const col_index = start_col_index + @sizeOf(ColumnNumberEntry) * line_entry_idx;
-                                    const col_num_entry = @ptrCast(*align(1) ColumnNumberEntry, &subsect_info[col_index]);
+                                    const col_num_entry = @as(*align(1) ColumnNumberEntry, @ptrCast(&subsect_info[col_index]));
                                     break :blk col_num_entry.StartColumn;
                                 } else 0;
 
                                 const found_line_index = start_line_index + line_entry_idx * @sizeOf(LineNumberEntry);
-                                const line_num_entry = @ptrCast(*align(1) LineNumberEntry, &subsect_info[found_line_index]);
-                                const flags = @ptrCast(*LineNumberEntry.Flags, &line_num_entry.Flags);
+                                const line_num_entry = @as(*align(1) LineNumberEntry, @ptrCast(&subsect_info[found_line_index]));
+                                const flags = @as(*LineNumberEntry.Flags, @ptrCast(&line_num_entry.Flags));
 
                                 return debug.LineInfo{
                                     .file_name = source_file_name,
@@ -836,7 +836,7 @@ pub const Pdb = struct {
         var sect_offset: usize = 0;
         var skip_len: usize = undefined;
         while (sect_offset != mod.subsect_info.len) : (sect_offset += skip_len) {
-            const subsect_hdr = @ptrCast(*align(1) DebugSubsectionHeader, &mod.subsect_info[sect_offset]);
+            const subsect_hdr = @as(*align(1) DebugSubsectionHeader, @ptrCast(&mod.subsect_info[sect_offset]));
             skip_len = subsect_hdr.Length;
             sect_offset += @sizeOf(DebugSubsectionHeader);
 
@@ -1038,7 +1038,7 @@ const MsfStream = struct {
     }
 
     fn read(self: *MsfStream, buffer: []u8) !usize {
-        var block_id = @intCast(usize, self.pos / self.block_size);
+        var block_id = @as(usize, @intCast(self.pos / self.block_size));
         if (block_id >= self.blocks.len) return 0; // End of Stream
         var block = self.blocks[block_id];
         var offset = self.pos % self.block_size;
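         // MSF streams are chains of fixed-size blocks, so the linear position is
         // translated into a (block, offset-within-block) pair before reading.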
@@ -1069,7 +1069,7 @@ const MsfStream = struct {
     }
 
     pub fn seekBy(self: *MsfStream, len: i64) !void {
-        self.pos = @intCast(u64, @intCast(i64, self.pos) + len);
+        self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len));
         if (self.pos >= self.blocks.len * self.block_size)
             return error.EOF;
     }
lib/std/process.zig
@@ -68,7 +68,7 @@ pub const EnvMap = struct {
     pub const EnvNameHashContext = struct {
         fn upcase(c: u21) u21 {
             if (c <= std.math.maxInt(u16))
-                return std.os.windows.ntdll.RtlUpcaseUnicodeChar(@intCast(u16, c));
+                return std.os.windows.ntdll.RtlUpcaseUnicodeChar(@as(u16, @intCast(c)));
             return c;
         }
 
@@ -80,9 +80,9 @@ pub const EnvMap = struct {
                 while (it.nextCodepoint()) |cp| {
                     const cp_upper = upcase(cp);
                     h.update(&[_]u8{
-                        @intCast(u8, (cp_upper >> 16) & 0xff),
-                        @intCast(u8, (cp_upper >> 8) & 0xff),
-                        @intCast(u8, (cp_upper >> 0) & 0xff),
+                        @as(u8, @intCast((cp_upper >> 16) & 0xff)),
+                        @as(u8, @intCast((cp_upper >> 8) & 0xff)),
+                        @as(u8, @intCast((cp_upper >> 0) & 0xff)),
                     });
                 }
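                 // Hashing the upcased codepoints (three bytes each, high to low)
                 // makes lookups case-insensitive, matching Windows env-var names.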
                 return h.final();
@@ -872,8 +872,8 @@ pub fn argsFree(allocator: Allocator, args_alloc: []const [:0]u8) void {
     for (args_alloc) |arg| {
         total_bytes += @sizeOf([]u8) + arg.len + 1;
     }
-    const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes];
-    const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
+    const unaligned_allocated_buf = @as([*]const u8, @ptrCast(args_alloc.ptr))[0..total_bytes];
+    const aligned_allocated_buf: []align(@alignOf([]u8)) const u8 = @alignCast(unaligned_allocated_buf);
     return allocator.free(aligned_allocated_buf);
 }
 
@@ -1143,7 +1143,7 @@ pub fn execve(
         } else if (builtin.output_mode == .Exe) {
             // Then we have Zig start code and this works.
             // TODO type-safety for null-termination of `os.environ`.
-            break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr);
+            break :m @as([*:null]const ?[*:0]const u8, @ptrCast(os.environ.ptr));
         } else {
             // TODO come up with a solution for this.
             @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process");
@@ -1175,7 +1175,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize {
                 error.NameTooLong, error.UnknownName => unreachable,
                 else => return error.UnknownTotalSystemMemory,
             };
-            return @intCast(usize, physmem);
+            return @as(usize, @intCast(physmem));
         },
         .openbsd => {
             const mib: [2]c_int = [_]c_int{
@@ -1192,7 +1192,7 @@ pub fn totalSystemMemory() TotalSystemMemoryError!usize {
                 else => return error.UnknownTotalSystemMemory,
             };
             assert(physmem >= 0);
-            return @bitCast(usize, physmem);
+            return @as(usize, @bitCast(physmem));
         },
         .windows => {
             var sbi: std.os.windows.SYSTEM_BASIC_INFORMATION = undefined;
lib/std/Progress.zig
@@ -232,14 +232,14 @@ fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void {
             }
 
             var cursor_pos = windows.COORD{
-                .X = info.dwCursorPosition.X - @intCast(windows.SHORT, p.columns_written),
+                .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(p.columns_written)),
                 .Y = info.dwCursorPosition.Y,
             };
 
             if (cursor_pos.X < 0)
                 cursor_pos.X = 0;
 
-            const fill_chars = @intCast(windows.DWORD, info.dwSize.X - cursor_pos.X);
+            const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X));
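+            // fill_chars counts the console cells from the rewound cursor to the
+            // end of the row, i.e. the span the progress line may have written.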
 
             var written: windows.DWORD = undefined;
             if (windows.kernel32.FillConsoleOutputAttribute(
lib/std/rand.zig
@@ -41,8 +41,7 @@ pub const Random = struct {
         assert(@typeInfo(@typeInfo(Ptr).Pointer.child) == .Struct); // Must point to a struct
         const gen = struct {
             fn fill(ptr: *anyopaque, buf: []u8) void {
-                const alignment = @typeInfo(Ptr).Pointer.alignment;
-                const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+                const self: Ptr = @ptrCast(@alignCast(ptr));
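+                // Recovers the typed pointer behind the type-erased *anyopaque;
+                // destination type and alignment are inferred from `self`'s
+                // declared type rather than passed as cast arguments.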
                 fillFn(self, buf);
             }
         };
@@ -97,7 +96,7 @@ pub const Random = struct {
             r.uintLessThan(Index, values.len);
 
         const MinInt = MinArrayIndex(Index);
-        return values[@intCast(MinInt, index)];
+        return values[@as(MinInt, @intCast(index))];
     }
 
     /// Returns a random int `i` such that `minInt(T) <= i <= maxInt(T)`.
@@ -114,8 +113,8 @@ pub const Random = struct {
         // TODO: endian portability is pointless if the underlying prng isn't endian portable.
         // TODO: document the endian portability of this library.
         const byte_aligned_result = mem.readIntSliceLittle(ByteAlignedT, &rand_bytes);
-        const unsigned_result = @truncate(UnsignedT, byte_aligned_result);
-        return @bitCast(T, unsigned_result);
+        const unsigned_result = @as(UnsignedT, @truncate(byte_aligned_result));
+        return @as(T, @bitCast(unsigned_result));
     }
 
     /// Constant-time implementation of `uintLessThan`.
@@ -126,9 +125,9 @@ pub const Random = struct {
         comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
         assert(0 < less_than);
         if (bits <= 32) {
-            return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than));
+            return @as(T, @intCast(limitRangeBiased(u32, r.int(u32), less_than)));
         } else {
-            return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than));
+            return @as(T, @intCast(limitRangeBiased(u64, r.int(u64), less_than)));
         }
     }
 
@@ -156,7 +155,7 @@ pub const Random = struct {
         //   "Lemire's (with an extra tweak from me)"
         var x: Small = r.int(Small);
         var m: Large = @as(Large, x) * @as(Large, less_than);
-        var l: Small = @truncate(Small, m);
+        var l: Small = @as(Small, @truncate(m));
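+        // The high half of the product (m >> small_bits) is the candidate result;
+        // resampling whenever the low half l falls below -less_than mod less_than
+        // makes every candidate equally likely.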
         if (l < less_than) {
             var t: Small = -%less_than;
 
@@ -169,10 +168,10 @@ pub const Random = struct {
             while (l < t) {
                 x = r.int(Small);
                 m = @as(Large, x) * @as(Large, less_than);
-                l = @truncate(Small, m);
+                l = @as(Small, @truncate(m));
             }
         }
-        return @intCast(T, m >> small_bits);
+        return @as(T, @intCast(m >> small_bits));
     }
 
     /// Constant-time implementation of `uintAtMost`.
@@ -206,10 +205,10 @@ pub const Random = struct {
         if (info.signedness == .signed) {
             // Two's complement makes this math pretty easy.
             const UnsignedT = std.meta.Int(.unsigned, info.bits);
-            const lo = @bitCast(UnsignedT, at_least);
-            const hi = @bitCast(UnsignedT, less_than);
+            const lo = @as(UnsignedT, @bitCast(at_least));
+            const hi = @as(UnsignedT, @bitCast(less_than));
             const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
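             // Two's complement turns the signed range into modular arithmetic on
             // the unsigned bit patterns: hi -% lo is the exact range width, and the
             // wrapping add maps the offset back into [at_least, less_than).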
-            return @bitCast(T, result);
+            return @as(T, @bitCast(result));
         } else {
             // The signed implementation would work fine, but we can use stricter arithmetic operators here.
             return at_least + r.uintLessThanBiased(T, less_than - at_least);
@@ -225,10 +224,10 @@ pub const Random = struct {
         if (info.signedness == .signed) {
             // Two's complement makes this math pretty easy.
             const UnsignedT = std.meta.Int(.unsigned, info.bits);
-            const lo = @bitCast(UnsignedT, at_least);
-            const hi = @bitCast(UnsignedT, less_than);
+            const lo = @as(UnsignedT, @bitCast(at_least));
+            const hi = @as(UnsignedT, @bitCast(less_than));
             const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
-            return @bitCast(T, result);
+            return @as(T, @bitCast(result));
         } else {
             // The signed implementation would work fine, but we can use stricter arithmetic operators here.
             return at_least + r.uintLessThan(T, less_than - at_least);
@@ -243,10 +242,10 @@ pub const Random = struct {
         if (info.signedness == .signed) {
             // Two's complement makes this math pretty easy.
             const UnsignedT = std.meta.Int(.unsigned, info.bits);
-            const lo = @bitCast(UnsignedT, at_least);
-            const hi = @bitCast(UnsignedT, at_most);
+            const lo = @as(UnsignedT, @bitCast(at_least));
+            const hi = @as(UnsignedT, @bitCast(at_most));
             const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
-            return @bitCast(T, result);
+            return @as(T, @bitCast(result));
         } else {
             // The signed implementation would work fine, but we can use stricter arithmetic operators here.
             return at_least + r.uintAtMostBiased(T, at_most - at_least);
@@ -262,10 +261,10 @@ pub const Random = struct {
         if (info.signedness == .signed) {
             // Two's complement makes this math pretty easy.
             const UnsignedT = std.meta.Int(.unsigned, info.bits);
-            const lo = @bitCast(UnsignedT, at_least);
-            const hi = @bitCast(UnsignedT, at_most);
+            const lo = @as(UnsignedT, @bitCast(at_least));
+            const hi = @as(UnsignedT, @bitCast(at_most));
             const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
-            return @bitCast(T, result);
+            return @as(T, @bitCast(result));
         } else {
             // The signed implementation would work fine, but we can use stricter arithmetic operators here.
             return at_least + r.uintAtMost(T, at_most - at_least);
@@ -294,9 +293,9 @@ pub const Random = struct {
                         rand_lz += @clz(r.int(u32) | 0x7FF);
                     }
                 }
-                const mantissa = @truncate(u23, rand);
+                const mantissa = @as(u23, @truncate(rand));
                 const exponent = @as(u32, 126 - rand_lz) << 23;
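                 // Each leading zero halves the exponent's probability, giving the
                 // geometric distribution a uniform float in [0, 1) requires; biased
                 // exponent 126 corresponds to the IEEE-754 interval [0.5, 1).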
-                return @bitCast(f32, exponent | mantissa);
+                return @as(f32, @bitCast(exponent | mantissa));
             },
             f64 => {
                 // Use 52 random bits for the mantissa, and the rest for the exponent.
@@ -321,7 +320,7 @@ pub const Random = struct {
                 }
                 const mantissa = rand & 0xFFFFFFFFFFFFF;
                 const exponent = (1022 - rand_lz) << 52;
-                return @bitCast(f64, exponent | mantissa);
+                return @as(f64, @bitCast(exponent | mantissa));
             },
             else => @compileError("unknown floating point type"),
         }
@@ -333,7 +332,7 @@ pub const Random = struct {
     pub fn floatNorm(r: Random, comptime T: type) T {
         const value = ziggurat.next_f64(r, ziggurat.NormDist);
         switch (T) {
-            f32 => return @floatCast(f32, value),
+            f32 => return @as(f32, @floatCast(value)),
             f64 => return value,
             else => @compileError("unknown floating point type"),
         }
@@ -345,7 +344,7 @@ pub const Random = struct {
     pub fn floatExp(r: Random, comptime T: type) T {
         const value = ziggurat.next_f64(r, ziggurat.ExpDist);
         switch (T) {
-            f32 => return @floatCast(f32, value),
+            f32 => return @as(f32, @floatCast(value)),
             f64 => return value,
             else => @compileError("unknown floating point type"),
         }
@@ -379,10 +378,10 @@ pub const Random = struct {
         }
 
         // `i <= j < max <= maxInt(MinInt)`
-        const max = @intCast(MinInt, buf.len);
+        const max = @as(MinInt, @intCast(buf.len));
         var i: MinInt = 0;
         while (i < max - 1) : (i += 1) {
-            const j = @intCast(MinInt, r.intRangeLessThan(Index, i, max));
+            const j = @as(MinInt, @intCast(r.intRangeLessThan(Index, i, max)));
             mem.swap(T, &buf[i], &buf[j]);
         }
     }
@@ -445,7 +444,7 @@ pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
     //   http://www.pcg-random.org/posts/bounded-rands.html
     //   "Integer Multiplication (Biased)"
     var m: T2 = @as(T2, random_int) * @as(T2, less_than);
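     // The widened product scales [0, 2^bits) down to [0, less_than); e.g. with
     // T = u8, random_int = 200, less_than = 10: m = 2000 and m >> 8 == 7.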
-    return @intCast(T, m >> bits);
+    return @as(T, @intCast(m >> bits));
 }
 
 // Generator to extend 64-bit seed values into longer sequences.
lib/std/segmented_list.zig
@@ -107,7 +107,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
         }
 
         pub fn deinit(self: *Self, allocator: Allocator) void {
-            self.freeShelves(allocator, @intCast(ShelfIndex, self.dynamic_segments.len), 0);
+            self.freeShelves(allocator, @as(ShelfIndex, @intCast(self.dynamic_segments.len)), 0);
             allocator.free(self.dynamic_segments);
             self.* = undefined;
         }
@@ -171,7 +171,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
         /// TODO update this and related methods to match the conventions set by ArrayList
         pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
             if (prealloc_item_count != 0) {
-                if (new_capacity <= @as(usize, 1) << (prealloc_exp + @intCast(ShelfIndex, self.dynamic_segments.len))) {
+                if (new_capacity <= @as(usize, 1) << (prealloc_exp + @as(ShelfIndex, @intCast(self.dynamic_segments.len)))) {
                     return self.shrinkCapacity(allocator, new_capacity);
                 }
             }
@@ -181,7 +181,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
         /// Only grows capacity, or retains current capacity.
         pub fn growCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
             const new_cap_shelf_count = shelfCount(new_capacity);
-            const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
+            const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
             if (new_cap_shelf_count <= old_shelf_count) return;
 
             const new_dynamic_segments = try allocator.alloc([*]T, new_cap_shelf_count);
@@ -206,7 +206,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
         /// It may fail to reduce the capacity in which case the capacity will remain unchanged.
         pub fn shrinkCapacity(self: *Self, allocator: Allocator, new_capacity: usize) void {
             if (new_capacity <= prealloc_item_count) {
-                const len = @intCast(ShelfIndex, self.dynamic_segments.len);
+                const len = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
                 self.freeShelves(allocator, len, 0);
                 allocator.free(self.dynamic_segments);
                 self.dynamic_segments = &[_][*]T{};
@@ -214,7 +214,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
             }
 
             const new_cap_shelf_count = shelfCount(new_capacity);
-            const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
+            const old_shelf_count = @as(ShelfIndex, @intCast(self.dynamic_segments.len));
             assert(new_cap_shelf_count <= old_shelf_count);
             if (new_cap_shelf_count == old_shelf_count) return;
 
@@ -424,7 +424,7 @@ fn testSegmentedList(comptime prealloc: usize) !void {
     {
         var i: usize = 0;
         while (i < 100) : (i += 1) {
-            try list.append(testing.allocator, @intCast(i32, i + 1));
+            try list.append(testing.allocator, @as(i32, @intCast(i + 1)));
             try testing.expect(list.len == i + 1);
         }
     }
@@ -432,7 +432,7 @@ fn testSegmentedList(comptime prealloc: usize) !void {
     {
         var i: usize = 0;
         while (i < 100) : (i += 1) {
-            try testing.expect(list.at(i).* == @intCast(i32, i + 1));
+            try testing.expect(list.at(i).* == @as(i32, @intCast(i + 1)));
         }
     }
 
@@ -492,7 +492,7 @@ fn testSegmentedList(comptime prealloc: usize) !void {
         var i: i32 = 0;
         while (i < 100) : (i += 1) {
             try list.append(testing.allocator, i + 1);
-            control[@intCast(usize, i)] = i + 1;
+            control[@as(usize, @intCast(i))] = i + 1;
         }
 
         @memset(dest[0..], 0);
lib/std/simd.zig
@@ -93,8 +93,8 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) {
         var out: [len]T = undefined;
         for (&out, 0..) |*element, i| {
             element.* = switch (@typeInfo(T)) {
-                .Int => @intCast(T, i),
-                .Float => @floatFromInt(T, i),
+                .Int => @as(T, @intCast(i)),
+                .Float => @as(T, @floatFromInt(i)),
                 else => @compileError("Can't use type " ++ @typeName(T) ++ " in iota."),
             };
         }
@@ -107,7 +107,7 @@ pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) {
 pub fn repeat(comptime len: usize, vec: anytype) @Vector(len, std.meta.Child(@TypeOf(vec))) {
     const Child = std.meta.Child(@TypeOf(vec));
 
-    return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @intCast(i32, vectorLength(@TypeOf(vec)))));
+    return @shuffle(Child, vec, undefined, iota(i32, len) % @splat(len, @as(i32, @intCast(vectorLength(@TypeOf(vec))))));
 }
 
 /// Returns a vector containing all elements of the first vector at the lower indices followed by all elements of the second vector
@@ -139,8 +139,8 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le
     const a_vec_count = (1 + vecs_arr.len) >> 1;
     const b_vec_count = vecs_arr.len >> 1;
 
-    const a = interlace(@ptrCast(*const [a_vec_count]VecType, vecs_arr[0..a_vec_count]).*);
-    const b = interlace(@ptrCast(*const [b_vec_count]VecType, vecs_arr[a_vec_count..]).*);
+    const a = interlace(@as(*const [a_vec_count]VecType, @ptrCast(vecs_arr[0..a_vec_count])).*);
+    const b = interlace(@as(*const [b_vec_count]VecType, @ptrCast(vecs_arr[a_vec_count..])).*);
 
     const a_len = vectorLength(@TypeOf(a));
     const b_len = vectorLength(@TypeOf(b));
@@ -148,10 +148,10 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le
 
     const indices = comptime blk: {
         const count_up = iota(i32, len);
-        const cycle = @divFloor(count_up, @splat(len, @intCast(i32, vecs_arr.len)));
+        const cycle = @divFloor(count_up, @splat(len, @as(i32, @intCast(vecs_arr.len))));
         const select_mask = repeat(len, join(@splat(a_vec_count, true), @splat(b_vec_count, false)));
-        const a_indices = count_up - cycle * @splat(len, @intCast(i32, b_vec_count));
-        const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @intCast(i32, a_vec_count)), a_vec_count, 0);
+        const a_indices = count_up - cycle * @splat(len, @as(i32, @intCast(b_vec_count)));
+        const b_indices = shiftElementsRight(count_up - cycle * @splat(len, @as(i32, @intCast(a_vec_count))), a_vec_count, 0);
         break :blk @select(i32, select_mask, a_indices, ~b_indices);
     };
 
@@ -174,7 +174,7 @@ pub fn deinterlace(
 
     comptime var i: usize = 0; // for-loops don't work for this, apparently.
     inline while (i < out.len) : (i += 1) {
-        const indices = comptime iota(i32, vec_len) * @splat(vec_len, @intCast(i32, vec_count)) + @splat(vec_len, @intCast(i32, i));
+        const indices = comptime iota(i32, vec_len) * @splat(vec_len, @as(i32, @intCast(vec_count))) + @splat(vec_len, @as(i32, @intCast(i)));
         out[i] = @shuffle(Child, interlaced, undefined, indices);
     }
 
@@ -189,9 +189,9 @@ pub fn extract(
     const Child = std.meta.Child(@TypeOf(vec));
     const len = vectorLength(@TypeOf(vec));
 
-    std.debug.assert(@intCast(comptime_int, first) + @intCast(comptime_int, count) <= len);
+    std.debug.assert(@as(comptime_int, @intCast(first)) + @as(comptime_int, @intCast(count)) <= len);
 
-    return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @intCast(i32, first)));
+    return @shuffle(Child, vec, undefined, iota(i32, count) + @splat(count, @as(i32, @intCast(first))));
 }
 
 test "vector patterns" {
@@ -263,7 +263,7 @@ pub fn reverseOrder(vec: anytype) @TypeOf(vec) {
     const Child = std.meta.Child(@TypeOf(vec));
     const len = vectorLength(@TypeOf(vec));
 
-    return @shuffle(Child, vec, undefined, @splat(len, @intCast(i32, len) - 1) - iota(i32, len));
+    return @shuffle(Child, vec, undefined, @splat(len, @as(i32, @intCast(len)) - 1) - iota(i32, len));
 }
 
 test "vector shifting" {
lib/std/start.zig
@@ -190,7 +190,7 @@ fn exit2(code: usize) noreturn {
             else => @compileError("TODO"),
         },
         .windows => {
-            ExitProcess(@truncate(u32, code));
+            ExitProcess(@as(u32, @truncate(code)));
         },
         else => @compileError("TODO"),
     }
@@ -387,23 +387,23 @@ fn wWinMainCRTStartup() callconv(std.os.windows.WINAPI) noreturn {
     std.debug.maybeEnableSegfaultHandler();
 
     const result: std.os.windows.INT = initEventLoopAndCallWinMain();
-    std.os.windows.kernel32.ExitProcess(@bitCast(std.os.windows.UINT, result));
+    std.os.windows.kernel32.ExitProcess(@as(std.os.windows.UINT, @bitCast(result)));
 }
 
 fn posixCallMainAndExit() callconv(.C) noreturn {
     @setAlignStack(16);
 
     const argc = argc_argv_ptr[0];
-    const argv = @ptrCast([*][*:0]u8, argc_argv_ptr + 1);
+    const argv = @as([*][*:0]u8, @ptrCast(argc_argv_ptr + 1));
 
-    const envp_optional = @ptrCast([*:null]?[*:0]u8, @alignCast(@alignOf(usize), argv + argc + 1));
+    const envp_optional: [*:null]?[*:0]u8 = @ptrCast(@alignCast(argv + argc + 1));
     var envp_count: usize = 0;
     while (envp_optional[envp_count]) |_| : (envp_count += 1) {}
-    const envp = @ptrCast([*][*:0]u8, envp_optional)[0..envp_count];
+    const envp = @as([*][*:0]u8, @ptrCast(envp_optional))[0..envp_count];
 
     if (native_os == .linux) {
         // Find the beginning of the auxiliary vector
-        const auxv = @ptrCast([*]elf.Auxv, @alignCast(@alignOf(usize), envp.ptr + envp_count + 1));
+        const auxv: [*]elf.Auxv = @ptrCast(@alignCast(envp.ptr + envp_count + 1));
         std.os.linux.elf_aux_maybe = auxv;
 
         var at_hwcap: usize = 0;
@@ -419,7 +419,7 @@ fn posixCallMainAndExit() callconv(.C) noreturn {
                     else => continue,
                 }
             }
-            break :init @ptrFromInt([*]elf.Phdr, at_phdr)[0..at_phnum];
+            break :init @as([*]elf.Phdr, @ptrFromInt(at_phdr))[0..at_phnum];
         };
 
         // Apply the initial relocations as early as possible in the startup
@@ -495,20 +495,20 @@ fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
 fn main(c_argc: c_int, c_argv: [*][*:0]c_char, c_envp: [*:null]?[*:0]c_char) callconv(.C) c_int {
     var env_count: usize = 0;
     while (c_envp[env_count] != null) : (env_count += 1) {}
-    const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count];
+    const envp = @as([*][*:0]u8, @ptrCast(c_envp))[0..env_count];
 
     if (builtin.os.tag == .linux) {
         const at_phdr = std.c.getauxval(elf.AT_PHDR);
         const at_phnum = std.c.getauxval(elf.AT_PHNUM);
-        const phdrs = (@ptrFromInt([*]elf.Phdr, at_phdr))[0..at_phnum];
+        const phdrs = (@as([*]elf.Phdr, @ptrFromInt(at_phdr)))[0..at_phnum];
         expandStackSize(phdrs);
     }
 
-    return @call(.always_inline, callMainWithArgs, .{ @intCast(usize, c_argc), @ptrCast([*][*:0]u8, c_argv), envp });
+    return @call(.always_inline, callMainWithArgs, .{ @as(usize, @intCast(c_argc)), @as([*][*:0]u8, @ptrCast(c_argv)), envp });
 }
 
 fn mainWithoutEnv(c_argc: c_int, c_argv: [*][*:0]c_char) callconv(.C) c_int {
-    std.os.argv = @ptrCast([*][*:0]u8, c_argv)[0..@intCast(usize, c_argc)];
+    std.os.argv = @as([*][*:0]u8, @ptrCast(c_argv))[0..@as(usize, @intCast(c_argc))];
     return @call(.always_inline, callMain, .{});
 }
 
@@ -629,7 +629,7 @@ pub fn callMain() u8 {
 
 pub fn call_wWinMain() std.os.windows.INT {
     const MAIN_HINSTANCE = @typeInfo(@TypeOf(root.wWinMain)).Fn.params[0].type.?;
-    const hInstance = @ptrCast(MAIN_HINSTANCE, std.os.windows.kernel32.GetModuleHandleW(null).?);
+    const hInstance = @as(MAIN_HINSTANCE, @ptrCast(std.os.windows.kernel32.GetModuleHandleW(null).?));
     const lpCmdLine = std.os.windows.kernel32.GetCommandLineW();
 
     // There's no (documented) way to get the nCmdShow parameter, so we're
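
The `@alignCast` rewrites in this file take a different shape: the destination type moves onto the declaration, so that both `@ptrCast` and `@alignCast` infer it from the result location. A minimal sketch of the pattern, using a hypothetical function:

    const std = @import("std");

    // old syntax: @ptrCast(*u32, @alignCast(@alignOf(u32), raw))
    fn fromOpaque(raw: *anyopaque) *u32 {
        const ptr: *u32 = @ptrCast(@alignCast(raw));
        return ptr;
    }

    test "pointer type and alignment come from the result location" {
        var x: u32 = 7;
        try std.testing.expectEqual(@as(u32, 7), fromOpaque(&x).*);
    }
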
lib/std/start_windows_tls.zig
@@ -42,7 +42,7 @@ export const _tls_used linksection(".rdata$T") = IMAGE_TLS_DIRECTORY{
     .StartAddressOfRawData = &_tls_start,
     .EndAddressOfRawData = &_tls_end,
     .AddressOfIndex = &_tls_index,
-    .AddressOfCallBacks = @ptrCast(*anyopaque, &__xl_a),
+    .AddressOfCallBacks = @as(*anyopaque, @ptrCast(&__xl_a)),
     .SizeOfZeroFill = 0,
     .Characteristics = 0,
 };
lib/std/tar.zig
@@ -70,8 +70,8 @@ pub const Header = struct {
     }
 
     pub fn fileType(header: Header) FileType {
-        const result = @enumFromInt(FileType, header.bytes[156]);
-        return if (result == @enumFromInt(FileType, 0)) .normal else result;
+        const result = @as(FileType, @enumFromInt(header.bytes[156]));
+        return if (result == @as(FileType, @enumFromInt(0))) .normal else result;
     }
 
     fn str(header: Header, start: usize, end: usize) []const u8 {
@@ -117,7 +117,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi
         start += 512;
         const file_size = try header.fileSize();
         const rounded_file_size = std.mem.alignForward(u64, file_size, 512);
-        const pad_len = @intCast(usize, rounded_file_size - file_size);
+        const pad_len = @as(usize, @intCast(rounded_file_size - file_size));
         const unstripped_file_name = try header.fullFileName(&file_name_buffer);
         switch (header.fileType()) {
             .directory => {
@@ -146,14 +146,14 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi
                     }
                     // Ask for the rounded up file size + 512 for the next header.
                     // TODO: https://github.com/ziglang/zig/issues/14039
-                    const ask = @intCast(usize, @min(
+                    const ask = @as(usize, @intCast(@min(
                         buffer.len - end,
                         rounded_file_size + 512 - file_off -| (end - start),
-                    ));
+                    )));
                     end += try reader.readAtLeast(buffer[end..], ask);
                     if (end - start < ask) return error.UnexpectedEndOfStream;
                     // TODO: https://github.com/ziglang/zig/issues/14039
-                    const slice = buffer[start..@intCast(usize, @min(file_size - file_off + start, end))];
+                    const slice = buffer[start..@as(usize, @intCast(@min(file_size - file_off + start, end)))];
                     try file.writeAll(slice);
                     file_off += slice.len;
                     start += slice.len;
@@ -167,7 +167,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi
             },
             .global_extended_header, .extended_header => {
                 if (start + rounded_file_size > end) return error.TarHeadersTooBig;
-                start = @intCast(usize, start + rounded_file_size);
+                start = @as(usize, @intCast(start + rounded_file_size));
             },
             .hard_link => return error.TarUnsupportedFileType,
             .symbolic_link => return error.TarUnsupportedFileType,
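
`@enumFromInt` follows the same shape as the integer casts: the enum type moves into `@as` or a typed declaration, as in `fileType` above. A sketch with a hypothetical enum mirroring that usage:

    const std = @import("std");

    const Kind = enum(u8) { normal = '0', directory = '5', _ };

    test "enumFromInt takes its enum type from @as" {
        // old syntax: @enumFromInt(Kind, '5')
        const kind = @as(Kind, @enumFromInt('5'));
        try std.testing.expectEqual(Kind.directory, kind);
    }
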
lib/std/target.zig
@@ -711,14 +711,14 @@ pub const Target = struct {
 
                 pub fn isEnabled(set: Set, arch_feature_index: Index) bool {
                     const usize_index = arch_feature_index / @bitSizeOf(usize);
-                    const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize));
+                    const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize)));
                     return (set.ints[usize_index] & (@as(usize, 1) << bit_index)) != 0;
                 }
 
                 /// Adds the specified feature but not its dependencies.
                 pub fn addFeature(set: *Set, arch_feature_index: Index) void {
                     const usize_index = arch_feature_index / @bitSizeOf(usize);
-                    const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize));
+                    const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize)));
                     set.ints[usize_index] |= @as(usize, 1) << bit_index;
                 }
 
@@ -730,7 +730,7 @@ pub const Target = struct {
                 /// Removes the specified feature but not its dependents.
                 pub fn removeFeature(set: *Set, arch_feature_index: Index) void {
                     const usize_index = arch_feature_index / @bitSizeOf(usize);
-                    const bit_index = @intCast(ShiftInt, arch_feature_index % @bitSizeOf(usize));
+                    const bit_index = @as(ShiftInt, @intCast(arch_feature_index % @bitSizeOf(usize)));
                     set.ints[usize_index] &= ~(@as(usize, 1) << bit_index);
                 }
 
@@ -745,7 +745,7 @@ pub const Target = struct {
                     var old = set.ints;
                     while (true) {
                         for (all_features_list, 0..) |feature, index_usize| {
-                            const index = @intCast(Index, index_usize);
+                            const index = @as(Index, @intCast(index_usize));
                             if (set.isEnabled(index)) {
                                 set.addFeatureSet(feature.dependencies);
                             }
@@ -757,7 +757,7 @@ pub const Target = struct {
                 }
 
                 pub fn asBytes(set: *const Set) *const [byte_count]u8 {
-                    return @ptrCast(*const [byte_count]u8, &set.ints);
+                    return @as(*const [byte_count]u8, @ptrCast(&set.ints));
                 }
 
                 pub fn eql(set: Set, other_set: Set) bool {
@@ -1526,7 +1526,7 @@ pub const Target = struct {
         pub fn set(self: *DynamicLinker, dl_or_null: ?[]const u8) void {
             if (dl_or_null) |dl| {
                 @memcpy(self.buffer[0..dl.len], dl);
-                self.max_byte = @intCast(u8, dl.len - 1);
+                self.max_byte = @as(u8, @intCast(dl.len - 1));
             } else {
                 self.max_byte = null;
             }
@@ -1537,12 +1537,12 @@ pub const Target = struct {
         var result: DynamicLinker = .{};
         const S = struct {
             fn print(r: *DynamicLinker, comptime fmt: []const u8, args: anytype) DynamicLinker {
-                r.max_byte = @intCast(u8, (std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1);
+                r.max_byte = @as(u8, @intCast((std.fmt.bufPrint(&r.buffer, fmt, args) catch unreachable).len - 1));
                 return r.*;
             }
             fn copy(r: *DynamicLinker, s: []const u8) DynamicLinker {
                 @memcpy(r.buffer[0..s.len], s);
-                r.max_byte = @intCast(u8, s.len - 1);
+                r.max_byte = @as(u8, @intCast(s.len - 1));
                 return r.*;
             }
         };
@@ -1970,7 +1970,7 @@ pub const Target = struct {
                 16 => 2,
                 32 => 4,
                 64 => 8,
-                80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))),
+                80 => @as(u16, @intCast(mem.alignForward(usize, 10, c_type_alignment(t, .longdouble)))),
                 128 => 16,
                 else => unreachable,
             },
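
The `ShiftInt` casts in `Set` above narrow a `usize` bit position down to a log2-sized integer so it can serve as a shift amount; under the new syntax the declaration can carry the destination type. A sketch, assuming `std.math.Log2Int(usize)` as the shift type (it is `u6` for a 64-bit `usize`):

    const std = @import("std");

    test "narrowing a bit position to a shift-sized integer" {
        const ShiftInt = std.math.Log2Int(usize);
        const feature_index: usize = 70;
        // old syntax: @intCast(ShiftInt, feature_index % @bitSizeOf(usize))
        const bit_index: ShiftInt = @intCast(feature_index % @bitSizeOf(usize));
        try std.testing.expectEqual(@as(ShiftInt, 6), bit_index);
    }
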
lib/std/Thread.zig
@@ -66,7 +66,7 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
             if (self.getHandle() == std.c.pthread_self()) {
                 // Set the name of the calling thread (no thread id required).
                 const err = try os.prctl(.SET_NAME, .{@intFromPtr(name_with_terminator.ptr)});
-                switch (@enumFromInt(os.E, err)) {
+                switch (@as(os.E, @enumFromInt(err))) {
                     .SUCCESS => return,
                     else => |e| return os.unexpectedErrno(e),
                 }
@@ -176,7 +176,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
             if (self.getHandle() == std.c.pthread_self()) {
                 // Get the name of the calling thread (no thread id required).
                 const err = try os.prctl(.GET_NAME, .{@intFromPtr(buffer.ptr)});
-                switch (@enumFromInt(os.E, err)) {
+                switch (@as(os.E, @enumFromInt(err))) {
                     .SUCCESS => return std.mem.sliceTo(buffer, 0),
                     else => |e| return os.unexpectedErrno(e),
                 }
@@ -211,7 +211,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
                 null,
             )) {
                 .SUCCESS => {
-                    const string = @ptrCast(*const os.windows.UNICODE_STRING, &buf);
+                    const string = @as(*const os.windows.UNICODE_STRING, @ptrCast(&buf));
                     const len = try std.unicode.utf16leToUtf8(buffer, string.Buffer[0 .. string.Length / 2]);
                     return if (len > 0) buffer[0..len] else null;
                 },
@@ -510,7 +510,7 @@ const WindowsThreadImpl = struct {
             thread: ThreadCompletion,
 
             fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD {
-                const self = @ptrCast(*@This(), @alignCast(@alignOf(@This()), raw_ptr));
+                const self: *@This() = @ptrCast(@alignCast(raw_ptr));
                 defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
                     .running => {},
                     .completed => unreachable,
@@ -525,7 +525,7 @@ const WindowsThreadImpl = struct {
         const alloc_ptr = windows.kernel32.HeapAlloc(heap_handle, 0, alloc_bytes) orelse return error.OutOfMemory;
         errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0);
 
-        const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes];
+        const instance_bytes = @as([*]u8, @ptrCast(alloc_ptr))[0..alloc_bytes];
         var fba = std.heap.FixedBufferAllocator.init(instance_bytes);
         const instance = fba.allocator().create(Instance) catch unreachable;
         instance.* = .{
@@ -547,7 +547,7 @@ const WindowsThreadImpl = struct {
             null,
             stack_size,
             Instance.entryFn,
-            @ptrCast(*anyopaque, instance),
+            @as(*anyopaque, @ptrCast(instance)),
             0,
             null,
         ) orelse {
@@ -596,19 +596,19 @@ const PosixThreadImpl = struct {
                 return thread_id;
             },
             .dragonfly => {
-                return @bitCast(u32, c.lwp_gettid());
+                return @as(u32, @bitCast(c.lwp_gettid()));
             },
             .netbsd => {
-                return @bitCast(u32, c._lwp_self());
+                return @as(u32, @bitCast(c._lwp_self()));
             },
             .freebsd => {
-                return @bitCast(u32, c.pthread_getthreadid_np());
+                return @as(u32, @bitCast(c.pthread_getthreadid_np()));
             },
             .openbsd => {
-                return @bitCast(u32, c.getthrid());
+                return @as(u32, @bitCast(c.getthrid()));
             },
             .haiku => {
-                return @bitCast(u32, c.find_thread(null));
+                return @as(u32, @bitCast(c.find_thread(null)));
             },
             else => {
                 return @intFromPtr(c.pthread_self());
@@ -629,7 +629,7 @@ const PosixThreadImpl = struct {
                     error.NameTooLong, error.UnknownName => unreachable,
                     else => |e| return e,
                 };
-                return @intCast(usize, count);
+                return @as(usize, @intCast(count));
             },
             .solaris => {
                 // The "proper" way to get the cpu count would be to query
@@ -637,7 +637,7 @@ const PosixThreadImpl = struct {
                 // cpu.
                 const rc = c.sysconf(os._SC.NPROCESSORS_ONLN);
                 return switch (os.errno(rc)) {
-                    .SUCCESS => @intCast(usize, rc),
+                    .SUCCESS => @as(usize, @intCast(rc)),
                     else => |err| os.unexpectedErrno(err),
                 };
             },
@@ -645,7 +645,7 @@ const PosixThreadImpl = struct {
                 var system_info: os.system.system_info = undefined;
                 const rc = os.system.get_system_info(&system_info); // always returns B_OK
                 return switch (os.errno(rc)) {
-                    .SUCCESS => @intCast(usize, system_info.cpu_count),
+                    .SUCCESS => @as(usize, @intCast(system_info.cpu_count)),
                     else => |err| os.unexpectedErrno(err),
                 };
             },
@@ -657,7 +657,7 @@ const PosixThreadImpl = struct {
                     error.NameTooLong, error.UnknownName => unreachable,
                     else => |e| return e,
                 };
-                return @intCast(usize, count);
+                return @as(usize, @intCast(count));
             },
         }
     }
@@ -675,7 +675,7 @@ const PosixThreadImpl = struct {
                     return callFn(f, @as(Args, undefined));
                 }
 
-                const args_ptr = @ptrCast(*Args, @alignCast(@alignOf(Args), raw_arg));
+                const args_ptr: *Args = @ptrCast(@alignCast(raw_arg));
                 defer allocator.destroy(args_ptr);
                 return callFn(f, args_ptr.*);
             }
@@ -699,7 +699,7 @@ const PosixThreadImpl = struct {
             &handle,
             &attr,
             Instance.entryFn,
-            if (@sizeOf(Args) > 1) @ptrCast(*anyopaque, args_ptr) else undefined,
+            if (@sizeOf(Args) > 1) @as(*anyopaque, @ptrCast(args_ptr)) else undefined,
         )) {
             .SUCCESS => return Impl{ .handle = handle },
             .AGAIN => return error.SystemResources,
@@ -742,7 +742,7 @@ const LinuxThreadImpl = struct {
 
     fn getCurrentId() Id {
         return tls_thread_id orelse {
-            const tid = @bitCast(u32, linux.gettid());
+            const tid = @as(u32, @bitCast(linux.gettid()));
             tls_thread_id = tid;
             return tid;
         };
@@ -911,7 +911,7 @@ const LinuxThreadImpl = struct {
             thread: ThreadCompletion,
 
             fn entryFn(raw_arg: usize) callconv(.C) u8 {
-                const self = @ptrFromInt(*@This(), raw_arg);
+                const self = @as(*@This(), @ptrFromInt(raw_arg));
                 defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
                     .running => {},
                     .completed => unreachable,
@@ -969,7 +969,7 @@ const LinuxThreadImpl = struct {
 
         // map everything but the guard page as read/write
         os.mprotect(
-            @alignCast(page_size, mapped[guard_offset..]),
+            @alignCast(mapped[guard_offset..]),
             os.PROT.READ | os.PROT.WRITE,
         ) catch |err| switch (err) {
             error.AccessDenied => unreachable,
@@ -994,7 +994,7 @@ const LinuxThreadImpl = struct {
             };
         }
 
-        const instance = @ptrCast(*Instance, @alignCast(@alignOf(Instance), &mapped[instance_offset]));
+        const instance: *Instance = @ptrCast(@alignCast(&mapped[instance_offset]));
         instance.* = .{
             .fn_args = args,
             .thread = .{ .mapped = mapped },
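
The bare `@alignCast` in the `os.mprotect` call above shows the other new form: with nothing but the operand, the required alignment is inferred from the parameter type at the call site. A self-contained sketch of that inference, with a hypothetical function and buffer:

    const std = @import("std");

    fn sumAligned(buf: []align(16) const u8) usize {
        var total: usize = 0;
        for (buf) |b| total += b;
        return total;
    }

    test "alignment requirement inferred from the parameter" {
        const storage: [32]u8 align(16) = [_]u8{1} ** 32;
        const loose: []const u8 = &storage;
        // old syntax: sumAligned(@alignCast(16, loose))
        try std.testing.expectEqual(@as(usize, 32), sumAligned(@alignCast(loose)));
    }
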
lib/std/time.zig
@@ -70,7 +70,7 @@ pub fn timestamp() i64 {
 /// before the epoch.
 /// See `std.os.clock_gettime` for a POSIX timestamp.
 pub fn milliTimestamp() i64 {
-    return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_ms));
+    return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_ms)));
 }
 
 /// Get a calendar timestamp, in microseconds, relative to UTC 1970-01-01.
@@ -79,7 +79,7 @@ pub fn milliTimestamp() i64 {
 /// before the epoch.
 /// See `std.os.clock_gettime` for a POSIX timestamp.
 pub fn microTimestamp() i64 {
-    return @intCast(i64, @divFloor(nanoTimestamp(), ns_per_us));
+    return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_us)));
 }
 
 /// Get a calendar timestamp, in nanoseconds, relative to UTC 1970-01-01.
@@ -96,7 +96,7 @@ pub fn nanoTimestamp() i128 {
         var ft: os.windows.FILETIME = undefined;
         os.windows.kernel32.GetSystemTimeAsFileTime(&ft);
         const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-        return @as(i128, @bitCast(i64, ft64) + epoch_adj) * 100;
+        return @as(i128, @as(i64, @bitCast(ft64)) + epoch_adj) * 100;
     }
 
     if (builtin.os.tag == .wasi and !builtin.link_libc) {
@@ -239,9 +239,9 @@ pub const Instant = struct {
             }
 
             // Convert to ns using fixed point.
-            const scale = @as(u64, std.time.ns_per_s << 32) / @intCast(u32, qpf);
+            const scale = @as(u64, std.time.ns_per_s << 32) / @as(u32, @intCast(qpf));
             const result = (@as(u96, qpc) * scale) >> 32;
-            return @truncate(u64, result);
+            return @as(u64, @truncate(result));
         }
 
         // WASI timestamps are directly in nanoseconds
@@ -250,9 +250,9 @@ pub const Instant = struct {
         }
 
         // Convert timespec diff to ns
-        const seconds = @intCast(u64, self.timestamp.tv_sec - earlier.timestamp.tv_sec);
-        const elapsed = (seconds * ns_per_s) + @intCast(u32, self.timestamp.tv_nsec);
-        return elapsed - @intCast(u32, earlier.timestamp.tv_nsec);
+        const seconds = @as(u64, @intCast(self.timestamp.tv_sec - earlier.timestamp.tv_sec));
+        const elapsed = (seconds * ns_per_s) + @as(u32, @intCast(self.timestamp.tv_nsec));
+        return elapsed - @as(u32, @intCast(earlier.timestamp.tv_nsec));
     }
 };
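
`@truncate` migrates the same way for scalars: the result keeps only the low-order bits of the destination type, exactly as before. A sketch with hypothetical values:

    const std = @import("std");

    test "truncate keeps only the low-order bits" {
        const wide: u64 = 0xAAAA_BBBB_CCCC_DDDD;
        // old syntax: @truncate(u32, wide)
        const low = @as(u32, @truncate(wide));
        try std.testing.expectEqual(@as(u32, 0xCCCC_DDDD), low);
    }
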
 
lib/std/tz.zig
@@ -155,8 +155,8 @@ pub const Tz = struct {
             if (corr > std.math.maxInt(i16)) return error.Malformed; // Unreasonably large correction
 
             leapseconds[i] = .{
-                .occurrence = @intCast(i48, occur),
-                .correction = @intCast(i16, corr),
+                .occurrence = @as(i48, @intCast(occur)),
+                .correction = @as(i16, @intCast(corr)),
             };
         }
 
lib/std/unicode.zig
@@ -45,22 +45,22 @@ pub fn utf8Encode(c: u21, out: []u8) !u3 {
         // - Increasing the initial shift by 6 each time
         // - Each time after the first shorten the shifted
         //   value to a max of 0b111111 (63)
-        1 => out[0] = @intCast(u8, c), // Can just do 0 + codepoint for initial range
+        1 => out[0] = @as(u8, @intCast(c)), // Can just do 0 + codepoint for initial range
         2 => {
-            out[0] = @intCast(u8, 0b11000000 | (c >> 6));
-            out[1] = @intCast(u8, 0b10000000 | (c & 0b111111));
+            out[0] = @as(u8, @intCast(0b11000000 | (c >> 6)));
+            out[1] = @as(u8, @intCast(0b10000000 | (c & 0b111111)));
         },
         3 => {
             if (0xd800 <= c and c <= 0xdfff) return error.Utf8CannotEncodeSurrogateHalf;
-            out[0] = @intCast(u8, 0b11100000 | (c >> 12));
-            out[1] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111));
-            out[2] = @intCast(u8, 0b10000000 | (c & 0b111111));
+            out[0] = @as(u8, @intCast(0b11100000 | (c >> 12)));
+            out[1] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111)));
+            out[2] = @as(u8, @intCast(0b10000000 | (c & 0b111111)));
         },
         4 => {
-            out[0] = @intCast(u8, 0b11110000 | (c >> 18));
-            out[1] = @intCast(u8, 0b10000000 | ((c >> 12) & 0b111111));
-            out[2] = @intCast(u8, 0b10000000 | ((c >> 6) & 0b111111));
-            out[3] = @intCast(u8, 0b10000000 | (c & 0b111111));
+            out[0] = @as(u8, @intCast(0b11110000 | (c >> 18)));
+            out[1] = @as(u8, @intCast(0b10000000 | ((c >> 12) & 0b111111)));
+            out[2] = @as(u8, @intCast(0b10000000 | ((c >> 6) & 0b111111)));
+            out[3] = @as(u8, @intCast(0b10000000 | (c & 0b111111)));
         },
         else => unreachable,
     }
@@ -695,11 +695,11 @@ pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u1
     var it = view.iterator();
     while (it.nextCodepoint()) |codepoint| {
         if (codepoint < 0x10000) {
-            const short = @intCast(u16, codepoint);
+            const short = @as(u16, @intCast(codepoint));
             try result.append(mem.nativeToLittle(u16, short));
         } else {
-            const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
-            const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
+            const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800;
+            const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00;
             var out: [2]u16 = undefined;
             out[0] = mem.nativeToLittle(u16, high);
             out[1] = mem.nativeToLittle(u16, low);
@@ -720,12 +720,12 @@ pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize {
         const next_src_i = src_i + n;
         const codepoint = utf8Decode(utf8[src_i..next_src_i]) catch return error.InvalidUtf8;
         if (codepoint < 0x10000) {
-            const short = @intCast(u16, codepoint);
+            const short = @as(u16, @intCast(codepoint));
             utf16le[dest_i] = mem.nativeToLittle(u16, short);
             dest_i += 1;
         } else {
-            const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
-            const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
+            const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800;
+            const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00;
             utf16le[dest_i] = mem.nativeToLittle(u16, high);
             utf16le[dest_i + 1] = mem.nativeToLittle(u16, low);
             dest_i += 2;
lib/std/valgrind.zig
@@ -94,7 +94,7 @@ pub fn IsTool(base: [2]u8, code: usize) bool {
 }
 
 fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
-    return doClientRequest(default, @intCast(usize, @intFromEnum(request)), a1, a2, a3, a4, a5);
+    return doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
 }
 
 fn doClientRequestStmt(request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
lib/std/zig.zig
@@ -36,7 +36,7 @@ pub fn hashSrc(src: []const u8) SrcHash {
 }
 
 pub fn srcHashEql(a: SrcHash, b: SrcHash) bool {
-    return @bitCast(u128, a) == @bitCast(u128, b);
+    return @as(u128, @bitCast(a)) == @as(u128, @bitCast(b));
 }
 
 pub fn hashName(parent_hash: SrcHash, sep: []const u8, name: []const u8) SrcHash {
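
`@bitCast` gains the `@as` wrapper like the rest; the bit-level reinterpretation itself is unchanged. A sketch using a well-known bit pattern:

    const std = @import("std");

    test "bitCast reinterprets bits; only the spelling changed" {
        const bits: u32 = 0x3F80_0000; // IEEE 754 single-precision 1.0
        // old syntax: @bitCast(f32, bits)
        const one = @as(f32, @bitCast(bits));
        try std.testing.expectEqual(@as(f32, 1.0), one);
    }
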
lib/ssp.zig
@@ -46,7 +46,7 @@ export var __stack_chk_guard: usize = blk: {
     var buf = [1]u8{0} ** @sizeOf(usize);
     buf[@sizeOf(usize) - 1] = 255;
     buf[@sizeOf(usize) - 2] = '\n';
-    break :blk @bitCast(usize, buf);
+    break :blk @as(usize, @bitCast(buf));
 };
 
 export fn __strcpy_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [*:0]u8 {
lib/test_runner.zig
@@ -70,12 +70,12 @@ fn mainServer() !void {
                 defer std.testing.allocator.free(expected_panic_msgs);
 
                 for (test_fns, names, async_frame_sizes, expected_panic_msgs) |test_fn, *name, *async_frame_size, *expected_panic_msg| {
-                    name.* = @intCast(u32, string_bytes.items.len);
+                    name.* = @as(u32, @intCast(string_bytes.items.len));
                     try string_bytes.ensureUnusedCapacity(std.testing.allocator, test_fn.name.len + 1);
                     string_bytes.appendSliceAssumeCapacity(test_fn.name);
                     string_bytes.appendAssumeCapacity(0);
 
-                    async_frame_size.* = @intCast(u32, test_fn.async_frame_size orelse 0);
+                    async_frame_size.* = @as(u32, @intCast(test_fn.async_frame_size orelse 0));
                     expected_panic_msg.* = 0;
                 }
 
@@ -163,7 +163,7 @@ fn mainTerminal() void {
                     std.heap.page_allocator.free(async_frame_buffer);
                     async_frame_buffer = std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size) catch @panic("out of memory");
                 }
-                const casted_fn = @ptrCast(fn () callconv(.Async) anyerror!void, test_fn.func);
+                const casted_fn = @as(fn () callconv(.Async) anyerror!void, @ptrCast(test_fn.func));
                 break :blk await @asyncCall(async_frame_buffer, {}, casted_fn, .{});
             },
             .blocking => {
src/arch/aarch64/bits.zig
@@ -80,34 +80,34 @@ pub const Register = enum(u8) {
 
     pub fn id(self: Register) u6 {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.x0)),
-            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.w0)),
+            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
+            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
 
             @intFromEnum(Register.sp) => 32,
             @intFromEnum(Register.wsp) => 32,
 
-            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.q0) + 33),
-            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.d0) + 33),
-            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.s0) + 33),
-            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.h0) + 33),
-            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u6, @intFromEnum(self) - @intFromEnum(Register.b0) + 33),
+            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0) + 33)),
+            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0) + 33)),
+            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0) + 33)),
+            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0) + 33)),
+            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0) + 33)),
             else => unreachable,
         };
     }
 
     pub fn enc(self: Register) u5 {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.x0)),
-            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.w0)),
+            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
+            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
 
             @intFromEnum(Register.sp) => 31,
             @intFromEnum(Register.wsp) => 31,
 
-            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.q0)),
-            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.d0)),
-            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.s0)),
-            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.h0)),
-            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @intCast(u5, @intFromEnum(self) - @intFromEnum(Register.b0)),
+            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0))),
+            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0))),
+            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0))),
+            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0))),
+            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0))),
             else => unreachable,
         };
     }
@@ -133,13 +133,13 @@ pub const Register = enum(u8) {
     /// Convert from a general-purpose register to its 64 bit alias.
     pub fn toX(self: Register) Register {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt(
+            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0)),
             ),
-            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt(
+            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0)),
             ),
             else => unreachable,
         };
@@ -148,13 +148,13 @@ pub const Register = enum(u8) {
     /// Convert from a general-purpose register to its 32 bit alias.
     pub fn toW(self: Register) Register {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @enumFromInt(
+            @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0)),
             ),
-            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @enumFromInt(
+            @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0)),
             ),
             else => unreachable,
         };
@@ -163,25 +163,25 @@ pub const Register = enum(u8) {
     /// Convert from a floating-point register to its 128 bit alias.
     pub fn toQ(self: Register) Register {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0)),
             ),
-            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0)),
             ),
-            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0)),
             ),
-            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0)),
             ),
-            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0)),
             ),
             else => unreachable,
         };
@@ -190,25 +190,25 @@ pub const Register = enum(u8) {
     /// Convert from a floating-point register to its 64 bit alias.
     pub fn toD(self: Register) Register {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0)),
             ),
-            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0)),
             ),
-            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0)),
             ),
-            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0)),
             ),
-            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0)),
             ),
             else => unreachable,
         };
@@ -217,25 +217,25 @@ pub const Register = enum(u8) {
     /// Convert from a floating-point register to its 32 bit alias.
     pub fn toS(self: Register) Register {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0)),
             ),
-            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0)),
             ),
-            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0)),
             ),
-            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0)),
             ),
-            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0)),
             ),
             else => unreachable,
         };
@@ -244,25 +244,25 @@ pub const Register = enum(u8) {
     /// Convert from a floating-point register to its 16 bit alias.
     pub fn toH(self: Register) Register {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0)),
             ),
-            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0)),
             ),
-            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0)),
             ),
-            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0)),
             ),
-            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0)),
             ),
             else => unreachable,
         };
@@ -271,25 +271,25 @@ pub const Register = enum(u8) {
     /// Convert from a floating-point register to its 8 bit alias.
     pub fn toB(self: Register) Register {
         return switch (@intFromEnum(self)) {
-            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @enumFromInt(
+            @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0)),
             ),
-            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @enumFromInt(
+            @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0)),
             ),
-            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @enumFromInt(
+            @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0)),
             ),
-            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @enumFromInt(
+            @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0)),
             ),
-            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @enumFromInt(
+            @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
                 Register,
-                @intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0),
+                @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0)),
             ),
             else => unreachable,
         };
@@ -612,27 +612,27 @@ pub const Instruction = union(enum) {
 
     pub fn toU32(self: Instruction) u32 {
         return switch (self) {
-            .move_wide_immediate => |v| @bitCast(u32, v),
-            .pc_relative_address => |v| @bitCast(u32, v),
-            .load_store_register => |v| @bitCast(u32, v),
-            .load_store_register_pair => |v| @bitCast(u32, v),
-            .load_literal => |v| @bitCast(u32, v),
-            .exception_generation => |v| @bitCast(u32, v),
-            .unconditional_branch_register => |v| @bitCast(u32, v),
-            .unconditional_branch_immediate => |v| @bitCast(u32, v),
-            .no_operation => |v| @bitCast(u32, v),
-            .logical_shifted_register => |v| @bitCast(u32, v),
-            .add_subtract_immediate => |v| @bitCast(u32, v),
-            .logical_immediate => |v| @bitCast(u32, v),
-            .bitfield => |v| @bitCast(u32, v),
-            .add_subtract_shifted_register => |v| @bitCast(u32, v),
-            .add_subtract_extended_register => |v| @bitCast(u32, v),
+            .move_wide_immediate => |v| @as(u32, @bitCast(v)),
+            .pc_relative_address => |v| @as(u32, @bitCast(v)),
+            .load_store_register => |v| @as(u32, @bitCast(v)),
+            .load_store_register_pair => |v| @as(u32, @bitCast(v)),
+            .load_literal => |v| @as(u32, @bitCast(v)),
+            .exception_generation => |v| @as(u32, @bitCast(v)),
+            .unconditional_branch_register => |v| @as(u32, @bitCast(v)),
+            .unconditional_branch_immediate => |v| @as(u32, @bitCast(v)),
+            .no_operation => |v| @as(u32, @bitCast(v)),
+            .logical_shifted_register => |v| @as(u32, @bitCast(v)),
+            .add_subtract_immediate => |v| @as(u32, @bitCast(v)),
+            .logical_immediate => |v| @as(u32, @bitCast(v)),
+            .bitfield => |v| @as(u32, @bitCast(v)),
+            .add_subtract_shifted_register => |v| @as(u32, @bitCast(v)),
+            .add_subtract_extended_register => |v| @as(u32, @bitCast(v)),
             // TODO once packed structs work, this can be refactored
             .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
             .compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
             .conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
-            .data_processing_3_source => |v| @bitCast(u32, v),
-            .data_processing_2_source => |v| @bitCast(u32, v),
+            .data_processing_3_source => |v| @as(u32, @bitCast(v)),
+            .data_processing_2_source => |v| @as(u32, @bitCast(v)),
         };
     }
 
@@ -650,7 +650,7 @@ pub const Instruction = union(enum) {
             .move_wide_immediate = .{
                 .rd = rd.enc(),
                 .imm16 = imm16,
-                .hw = @intCast(u2, shift / 16),
+                .hw = @as(u2, @intCast(shift / 16)),
                 .opc = opc,
                 .sf = switch (rd.size()) {
                     32 => 0,
@@ -663,12 +663,12 @@ pub const Instruction = union(enum) {
 
     fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction {
         assert(rd.size() == 64);
-        const imm21_u = @bitCast(u21, imm21);
+        const imm21_u = @as(u21, @bitCast(imm21));
         return Instruction{
             .pc_relative_address = .{
                 .rd = rd.enc(),
-                .immlo = @truncate(u2, imm21_u),
-                .immhi = @truncate(u19, imm21_u >> 2),
+                .immlo = @as(u2, @truncate(imm21_u)),
+                .immhi = @as(u19, @truncate(imm21_u >> 2)),
                 .op = op,
             },
         };
@@ -704,15 +704,15 @@ pub const Instruction = union(enum) {
         pub fn toU12(self: LoadStoreOffset) u12 {
             return switch (self) {
                 .immediate => |imm_type| switch (imm_type) {
-                    .post_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 1,
-                    .pre_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 3,
+                    .post_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 1,
+                    .pre_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 3,
                     .unsigned => |v| v,
                 },
                 .register => |r| switch (r.shift) {
-                    .uxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 16 + 2050,
-                    .lsl => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 24 + 2050,
-                    .sxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 48 + 2050,
-                    .sxtx => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 56 + 2050,
+                    .uxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 16 + 2050,
+                    .lsl => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 24 + 2050,
+                    .sxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 48 + 2050,
+                    .sxtx => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 56 + 2050,
                 },
             };
         }
@@ -894,7 +894,7 @@ pub const Instruction = union(enum) {
         switch (rt1.size()) {
             32 => {
                 assert(-256 <= offset and offset <= 252);
-                const imm7 = @truncate(u7, @bitCast(u9, offset >> 2));
+                const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 2))));
                 return Instruction{
                     .load_store_register_pair = .{
                         .rt1 = rt1.enc(),
@@ -909,7 +909,7 @@ pub const Instruction = union(enum) {
             },
             64 => {
                 assert(-512 <= offset and offset <= 504);
-                const imm7 = @truncate(u7, @bitCast(u9, offset >> 3));
+                const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 3))));
                 return Instruction{
                     .load_store_register_pair = .{
                         .rt1 = rt1.enc(),
@@ -982,7 +982,7 @@ pub const Instruction = union(enum) {
     ) Instruction {
         return Instruction{
             .unconditional_branch_immediate = .{
-                .imm26 = @bitCast(u26, @intCast(i26, offset >> 2)),
+                .imm26 = @as(u26, @bitCast(@as(i26, @intCast(offset >> 2)))),
                 .op = op,
             },
         };
@@ -1188,7 +1188,7 @@ pub const Instruction = union(enum) {
             .conditional_branch = .{
                 .cond = @intFromEnum(cond),
                 .o0 = o0,
-                .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)),
+                .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
                 .o1 = o1,
             },
         };
@@ -1204,7 +1204,7 @@ pub const Instruction = union(enum) {
         return Instruction{
             .compare_and_branch = .{
                 .rt = rt.enc(),
-                .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)),
+                .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
                 .op = op,
                 .sf = switch (rt.size()) {
                     32 => 0b0,
@@ -1609,12 +1609,12 @@ pub const Instruction = union(enum) {
     }
 
     pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
-        const imms = @intCast(u6, rd.size() - 1);
+        const imms = @as(u6, @intCast(rd.size() - 1));
         return sbfm(rd, rn, shift, imms);
     }
 
     pub fn sbfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
-        return sbfm(rd, rn, lsb, @intCast(u6, lsb + width - 1));
+        return sbfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
     }
 
     pub fn sxtb(rd: Register, rn: Register) Instruction {
@@ -1631,17 +1631,17 @@ pub const Instruction = union(enum) {
     }
 
     pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction {
-        const size = @intCast(u6, rd.size() - 1);
+        const size = @as(u6, @intCast(rd.size() - 1));
         return ubfm(rd, rn, size - shift + 1, size - shift);
     }
 
     pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
-        const imms = @intCast(u6, rd.size() - 1);
+        const imms = @as(u6, @intCast(rd.size() - 1));
         return ubfm(rd, rn, shift, imms);
     }
 
     pub fn ubfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
-        return ubfm(rd, rn, lsb, @intCast(u6, lsb + width - 1));
+        return ubfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
     }
 
     pub fn uxtb(rd: Register, rn: Register) Instruction {
src/arch/aarch64/CodeGen.zig
@@ -187,8 +187,8 @@ const DbgInfoReloc = struct {
                     .stack_argument_offset,
                     => |offset| blk: {
                         const adjusted_offset = switch (reloc.mcv) {
-                            .stack_offset => -@intCast(i32, offset),
-                            .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset),
+                            .stack_offset => -@as(i32, @intCast(offset)),
+                            .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
                             else => unreachable,
                         };
                         break :blk .{ .stack = .{
@@ -224,8 +224,8 @@ const DbgInfoReloc = struct {
                         const adjusted_offset = switch (reloc.mcv) {
                             .ptr_stack_offset,
                             .stack_offset,
-                            => -@intCast(i32, offset),
-                            .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset),
+                            => -@as(i32, @intCast(offset)),
+                            .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
                             else => unreachable,
                         };
                         break :blk .{
@@ -440,7 +440,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
 
     try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
 
-    const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+    const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
     self.mir_instructions.appendAssumeCapacity(inst);
     return result_index;
 }
@@ -460,11 +460,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
 
 pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, self.mir_extra.items.len);
+    const result = @as(u32, @intCast(self.mir_extra.items.len));
     inline for (fields) |field| {
         self.mir_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
-            i32 => @bitCast(u32, @field(extra, field.name)),
+            i32 => @as(u32, @bitCast(@field(extra, field.name))),
             else => @compileError("bad field type"),
         });
     }
@@ -524,7 +524,7 @@ fn gen(self: *Self) !void {
 
                     const ty = self.typeOfIndex(inst);
 
-                    const abi_size = @intCast(u32, ty.abiSize(mod));
+                    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
                     const abi_align = ty.abiAlignment(mod);
                     const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                     try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -547,7 +547,7 @@ fn gen(self: *Self) !void {
         self.saved_regs_stack_space = 16;
         inline for (callee_preserved_regs) |reg| {
             if (self.register_manager.isRegAllocated(reg)) {
-                saved_regs |= @as(u32, 1) << @intCast(u5, reg.id());
+                saved_regs |= @as(u32, 1) << @as(u5, @intCast(reg.id()));
                 self.saved_regs_stack_space += 8;
             }
         }
@@ -597,14 +597,14 @@ fn gen(self: *Self) !void {
         for (self.exitlude_jump_relocs.items) |jmp_reloc| {
             self.mir_instructions.set(jmp_reloc, .{
                 .tag = .b,
-                .data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
+                .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) },
             });
         }
 
         // add sp, sp, #stack_size
         _ = try self.addInst(.{
             .tag = .add_immediate,
-            .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @intCast(u12, stack_size) } },
+            .data = .{ .rr_imm12_sh = .{ .rd = .sp, .rn = .sp, .imm12 = @as(u12, @intCast(stack_size)) } },
         });
 
         // <load other registers>
@@ -948,15 +948,15 @@ fn finishAirBookkeeping(self: *Self) void {
 fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
     var tomb_bits = self.liveness.getTombBits(inst);
     for (operands) |op| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @intFromEnum(op);
         if (op_int < Air.ref_start_index) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+        const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
         self.processDeath(op_index);
     }
-    const is_used = @truncate(u1, tomb_bits) == 0;
+    const is_used = @as(u1, @truncate(tomb_bits)) == 0;
     if (is_used) {
         log.debug("%{d} => {}", .{ inst, result });
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1232,7 +1232,7 @@ fn truncRegister(
                     .rd = dest_reg,
                     .rn = operand_reg,
                     .lsb = 0,
-                    .width = @intCast(u6, int_bits),
+                    .width = @as(u6, @intCast(int_bits)),
                 } },
             });
         },
@@ -1877,7 +1877,7 @@ fn binOpImmediate(
         => .{ .rr_imm12_sh = .{
             .rd = dest_reg,
             .rn = lhs_reg,
-            .imm12 = @intCast(u12, rhs_immediate),
+            .imm12 = @as(u12, @intCast(rhs_immediate)),
         } },
         .lsl_immediate,
         .asr_immediate,
@@ -1885,7 +1885,7 @@ fn binOpImmediate(
         => .{ .rr_shift = .{
             .rd = dest_reg,
             .rn = lhs_reg,
-            .shift = @intCast(u6, rhs_immediate),
+            .shift = @as(u6, @intCast(rhs_immediate)),
         } },
         else => unreachable,
     };
@@ -2526,9 +2526,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
         const tuple_align = tuple_ty.abiAlignment(mod);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -2654,9 +2654,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
         const tuple_align = tuple_ty.abiAlignment(mod);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -2777,7 +2777,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                 } },
                             });
 
-                            const shift: u6 = @intCast(u6, @as(u7, 64) - @intCast(u7, int_info.bits));
+                            const shift: u6 = @as(u6, @intCast(@as(u7, 64) - @as(u7, @intCast(int_info.bits))));
                             if (shift > 0) {
                                 // lsl dest_high, dest, #shift
                                 _ = try self.addInst(.{
@@ -2837,7 +2837,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                     .data = .{ .rr_shift = .{
                                         .rd = dest_high_reg,
                                         .rn = dest_reg,
-                                        .shift = @intCast(u6, int_info.bits),
+                                        .shift = @as(u6, @intCast(int_info.bits)),
                                     } },
                                 });
 
@@ -2878,9 +2878,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
         const tuple_align = tuple_ty.abiAlignment(mod);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
@@ -2917,7 +2917,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                             .data = .{ .rr_shift = .{
                                 .rd = dest_reg,
                                 .rn = lhs_reg,
-                                .shift = @intCast(u6, imm),
+                                .shift = @as(u6, @intCast(imm)),
                             } },
                         });
 
@@ -2932,7 +2932,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                             .data = .{ .rr_shift = .{
                                 .rd = reconstructed_reg,
                                 .rn = dest_reg,
-                                .shift = @intCast(u6, imm),
+                                .shift = @as(u6, @intCast(imm)),
                             } },
                         });
                     } else {
@@ -3072,7 +3072,7 @@ fn errUnionErr(
         return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
+    const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3094,7 +3094,7 @@ fn errUnionErr(
             );
 
             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
+            const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8;
 
             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
@@ -3103,8 +3103,8 @@ fn errUnionErr(
                         // Set both registers to the X variant to get the full width
                         .rd = dest_reg.toX(),
                         .rn = operand_reg.toX(),
-                        .lsb = @intCast(u6, err_bit_offset),
-                        .width = @intCast(u7, err_bit_size),
+                        .lsb = @as(u6, @intCast(err_bit_offset)),
+                        .width = @as(u7, @intCast(err_bit_size)),
                     },
                 },
             });
@@ -3152,7 +3152,7 @@ fn errUnionPayload(
         return MCValue.none;
     }
 
-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3174,7 +3174,7 @@ fn errUnionPayload(
             );
 
             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
+            const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8;
 
             _ = try self.addInst(.{
                 .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
@@ -3183,8 +3183,8 @@ fn errUnionPayload(
                         // Set both registers to the X variant to get the full width
                         .rd = dest_reg.toX(),
                         .rn = operand_reg.toX(),
-                        .lsb = @intCast(u5, payload_bit_offset),
-                        .width = @intCast(u6, payload_bit_size),
+                        .lsb = @as(u5, @intCast(payload_bit_offset)),
+                        .width = @as(u6, @intCast(payload_bit_size)),
                     },
                 },
             });
@@ -3283,9 +3283,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
             break :result MCValue{ .register = reg };
         }
 
-        const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod));
+        const optional_abi_size = @as(u32, @intCast(optional_ty.abiSize(mod)));
         const optional_abi_align = optional_ty.abiAlignment(mod);
-        const offset = @intCast(u32, payload_ty.abiSize(mod));
+        const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
 
         const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
         try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3308,13 +3308,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
         const operand = try self.resolveInst(ty_op.operand);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
         const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
         const payload_off = errUnionPayloadOffset(payload_ty, mod);
         const err_off = errUnionErrorOffset(payload_ty, mod);
-        try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
-        try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
+        try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
+        try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
 
         break :result MCValue{ .stack_offset = stack_offset };
     };
@@ -3332,13 +3332,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
         const operand = try self.resolveInst(ty_op.operand);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
         const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
         const payload_off = errUnionPayloadOffset(payload_ty, mod);
         const err_off = errUnionErrorOffset(payload_ty, mod);
-        try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
-        try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
+        try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
+        try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
 
         break :result MCValue{ .stack_offset = stack_offset };
     };
@@ -3454,7 +3454,7 @@ fn ptrElemVal(
 ) !MCValue {
     const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.childType(mod);
-    const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+    const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
 
     // TODO optimize for elem_sizes of 1, 2, 4, 8
     switch (elem_size) {
@@ -3716,7 +3716,7 @@ fn genInlineMemcpy(
     _ = try self.addInst(.{
         .tag = .b_cond,
         .data = .{ .inst_cond = .{
-            .inst = @intCast(u32, self.mir_instructions.len + 5),
+            .inst = @as(u32, @intCast(self.mir_instructions.len + 5)),
             .cond = .ge,
         } },
     });
@@ -3754,7 +3754,7 @@ fn genInlineMemcpy(
     // b loop
     _ = try self.addInst(.{
         .tag = .b,
-        .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) },
+        .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) },
     });
 
     // end:
@@ -3824,7 +3824,7 @@ fn genInlineMemsetCode(
     _ = try self.addInst(.{
         .tag = .b_cond,
         .data = .{ .inst_cond = .{
-            .inst = @intCast(u32, self.mir_instructions.len + 4),
+            .inst = @as(u32, @intCast(self.mir_instructions.len + 4)),
             .cond = .ge,
         } },
     });
@@ -3852,7 +3852,7 @@ fn genInlineMemsetCode(
     // b loop
     _ = try self.addInst(.{
         .tag = .b,
-        .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) },
+        .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) },
     });
 
     // end:
@@ -4002,7 +4002,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
                                     } },
                                 });
                             },
-                            .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @intCast(u32, addr) }),
+                            .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
                             .linker_load => |load_struct| {
                                 const tag: Mir.Inst.Tag = switch (load_struct.type) {
                                     .got => .load_memory_ptr_got,
@@ -4092,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.typeOf(operand);
         const struct_ty = ptr_ty.childType(mod);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4117,7 +4117,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
         const struct_field_ty = struct_ty.structFieldType(index, mod);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -4169,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
         const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod)));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4243,7 +4243,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
     const ty = self.typeOf(callee);
     const mod = self.bin_file.options.module.?;
 
@@ -4269,8 +4269,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (info.return_value == .stack_offset) {
         log.debug("airCall: return by reference", .{});
         const ret_ty = fn_ty.fnReturnType(mod);
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
+        const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
+        const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -4314,7 +4314,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                 const atom = elf_file.getAtom(atom_index);
                 _ = try atom.getOrCreateOffsetTableEntry(elf_file);
-                const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
+                const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
                 try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr });
             } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                 const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
@@ -4473,7 +4473,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = Air.refToIndex(un_op).?;
             if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+                const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
                 const abi_align = ret_ty.abiAlignment(mod);
 
                 const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4554,7 +4554,7 @@ fn cmp(
                 .tag = .cmp_immediate,
                 .data = .{ .r_imm12_sh = .{
                     .rn = lhs_reg,
-                    .imm12 = @intCast(u12, rhs_immediate.?),
+                    .imm12 = @as(u12, @intCast(rhs_immediate.?)),
                 } },
             });
         } else {
@@ -4696,7 +4696,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @intFromEnum(pl_op.operand);
         if (op_int >= Air.ref_start_index) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+            const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
             self.processDeath(op_index);
         }
     }
@@ -4833,7 +4833,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
             break :blk .{ .ty = operand_ty, .bind = operand_bind };
 
-        const offset = @intCast(u32, payload_ty.abiSize(mod));
+        const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
         const operand_mcv = try operand_bind.resolveToMcv(self);
         const new_mcv: MCValue = switch (operand_mcv) {
             .register => |source_reg| new: {
@@ -4841,7 +4841,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
                 const raw_reg = try self.register_manager.allocReg(null, gp);
                 const dest_reg = raw_reg.toX();
 
-                const shift = @intCast(u6, offset * 8);
+                const shift = @as(u6, @intCast(offset * 8));
                 if (shift == 0) {
                     try self.genSetReg(payload_ty, dest_reg, operand_mcv);
                 } else {
@@ -5026,7 +5026,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const loop = self.air.extraData(Air.Block, ty_pl.payload);
     const body = self.air.extra[loop.end..][0..loop.data.body_len];
-    const start_index = @intCast(u32, self.mir_instructions.len);
+    const start_index = @as(u32, @intCast(self.mir_instructions.len));
 
     try self.genBody(body);
     try self.jump(start_index);
@@ -5091,7 +5091,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     var case_i: u32 = 0;
     while (case_i < switch_br.data.cases_len) : (case_i += 1) {
         const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-        const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+        const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
         assert(items.len > 0);
         const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
         extra_index = case.end + items.len + case_body.len;
@@ -5209,9 +5209,9 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
 fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
     const tag = self.mir_instructions.items(.tag)[inst];
     switch (tag) {
-        .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
-        .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
-        .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+        .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
+        .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
+        .b => self.mir_instructions.items(.data)[inst].inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
         else => unreachable,
     }
 }
@@ -5262,12 +5262,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
 fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Asm, ty_pl.payload);
-    const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
-    const clobbers_len = @truncate(u31, extra.data.flags);
+    const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+    const clobbers_len = @as(u31, @truncate(extra.data.flags));
     var extra_i: usize = extra.end;
-    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+    const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
     extra_i += outputs.len;
-    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+    const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
     extra_i += inputs.len;
 
     const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -5401,7 +5401,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5460,7 +5460,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
             const overflow_bit_ty = ty.structFieldType(1, mod);
-            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
+            const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
             const raw_cond_reg = try self.register_manager.allocReg(null, gp);
             const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
 
@@ -5589,7 +5589,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                 .tag = .ldr_ptr_stack,
                 .data = .{ .load_store_stack = .{
                     .rt = reg,
-                    .offset = @intCast(u32, off),
+                    .offset = @as(u32, @intCast(off)),
                 } },
             });
         },
@@ -5605,13 +5605,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
         .immediate => |x| {
             _ = try self.addInst(.{
                 .tag = .movz,
-                .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x) } },
+                .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x)) } },
             });
 
             if (x & 0x0000_0000_ffff_0000 != 0) {
                 _ = try self.addInst(.{
                     .tag = .movk,
-                    .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 16), .hw = 1 } },
+                    .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 16)), .hw = 1 } },
                 });
             }
 
@@ -5619,13 +5619,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                 if (x & 0x0000_ffff_0000_0000 != 0) {
                     _ = try self.addInst(.{
                         .tag = .movk,
-                        .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 32), .hw = 2 } },
+                        .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 32)), .hw = 2 } },
                     });
                 }
                 if (x & 0xffff_0000_0000_0000 != 0) {
                     _ = try self.addInst(.{
                         .tag = .movk,
-                        .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 48), .hw = 3 } },
+                        .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @as(u16, @truncate(x >> 48)), .hw = 3 } },
                     });
                 }
             }
@@ -5696,7 +5696,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .tag = tag,
                         .data = .{ .load_store_stack = .{
                             .rt = reg,
-                            .offset = @intCast(u32, off),
+                            .offset = @as(u32, @intCast(off)),
                         } },
                     });
                 },
@@ -5720,7 +5720,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .tag = tag,
                         .data = .{ .load_store_stack = .{
                             .rt = reg,
-                            .offset = @intCast(u32, off),
+                            .offset = @as(u32, @intCast(off)),
                         } },
                     });
                 },
@@ -5733,7 +5733,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 
 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
@@ -5840,7 +5840,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                             } },
                         });
                     },
-                    .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+                    .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
                     .linker_load => |load_struct| {
                         const tag: Mir.Inst.Tag = switch (load_struct.type) {
                             .got => .load_memory_ptr_got,
@@ -5937,7 +5937,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType(mod);
-        const array_len = @intCast(u32, array_ty.arrayLen(mod));
+        const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
 
         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -6058,7 +6058,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
     const result: MCValue = res: {
         if (self.liveness.isUnused(inst)) break :res MCValue.dead;
         return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});
@@ -6105,7 +6105,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
         const error_union_ty = self.typeOf(pl_op.operand);
-        const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
         const error_union_align = error_union_ty.abiAlignment(mod);
 
         // The error union will die in the body. However, we need the
@@ -6247,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+                const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
                 if (ret_ty_size == 0) {
                     assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
@@ -6259,7 +6259,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             }
 
             for (fn_info.param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
                 if (param_size == 0) {
                     result.args[i] = .{ .none = {} };
                     continue;
@@ -6305,7 +6305,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+                const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
                 if (ret_ty_size == 0) {
                     assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
@@ -6325,7 +6325,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
             for (fn_info.param_types, 0..) |ty, i| {
                 if (ty.toType().abiSize(mod) > 0) {
-                    const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                    const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
                     const param_alignment = ty.toType().abiAlignment(mod);
 
                     stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
src/arch/aarch64/Emit.zig
@@ -81,7 +81,7 @@ pub fn emitMir(
 
     // Emit machine code
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         switch (tag) {
             .add_immediate => try emit.mirAddSubtractImmediate(inst),
             .adds_immediate => try emit.mirAddSubtractImmediate(inst),
@@ -324,7 +324,7 @@ fn lowerBranches(emit: *Emit) !void {
     // TODO optimization opportunity: do this in codegen while
     // generating MIR
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         if (isBranch(tag)) {
             const target_inst = emit.branchTarget(inst);
 
@@ -369,7 +369,7 @@ fn lowerBranches(emit: *Emit) !void {
         var current_code_offset: usize = 0;
 
         for (mir_tags, 0..) |tag, index| {
-            const inst = @intCast(u32, index);
+            const inst = @as(u32, @intCast(index));
 
             // If this instruction contained in the code offset
             // mapping (when it is a target of a branch or if it is a
@@ -384,7 +384,7 @@ fn lowerBranches(emit: *Emit) !void {
                 const target_inst = emit.branchTarget(inst);
                 if (target_inst < inst) {
                     const target_offset = emit.code_offset_mapping.get(target_inst).?;
-                    const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset);
+                    const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset));
                     const branch_type = emit.branch_types.getPtr(inst).?;
                     const optimal_branch_type = try emit.optimalBranchType(tag, offset);
                     if (branch_type.* != optimal_branch_type) {
@@ -403,7 +403,7 @@ fn lowerBranches(emit: *Emit) !void {
                 for (origin_list.items) |forward_branch_inst| {
                     const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
                     const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
-                    const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset);
+                    const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset));
                     const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
                     const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
                     if (branch_type.* != optimal_branch_type) {
@@ -434,7 +434,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
 }
 
 fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
-    const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
+    const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
     const delta_pc: usize = self.code.items.len - self.prev_di_pc;
     switch (self.debug_output) {
         .dwarf => |dw| {
@@ -451,13 +451,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
             // increasing the line number
             try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
             if (d_pc_p9 > 0) {
                 // minus one because if its the last one, we want to leave space to change the line which is one quanta
-                try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
                 if (dbg_out.pcop_change_index.*) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
                 // we don't need to do anything, because adding the quant does it for us
             } else unreachable;
@@ -548,13 +548,13 @@ fn mirConditionalBranchImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const inst_cond = emit.mir.instructions.items(.data)[inst].inst_cond;
 
-    const offset = @intCast(i64, emit.code_offset_mapping.get(inst_cond.inst).?) - @intCast(i64, emit.code.items.len);
+    const offset = @as(i64, @intCast(emit.code_offset_mapping.get(inst_cond.inst).?)) - @as(i64, @intCast(emit.code.items.len));
     const branch_type = emit.branch_types.get(inst).?;
     log.debug("mirConditionalBranchImmediate: {} offset={}", .{ inst, offset });
 
     switch (branch_type) {
         .b_cond => switch (tag) {
-            .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @intCast(i21, offset))),
+            .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @as(i21, @intCast(offset)))),
             else => unreachable,
         },
         else => unreachable,
@@ -572,14 +572,14 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
         emit.mir.instructions.items(.tag)[target_inst],
     });
 
-    const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len);
+    const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len));
     const branch_type = emit.branch_types.get(inst).?;
     log.debug("mirBranch: {} offset={}", .{ inst, offset });
 
     switch (branch_type) {
         .unconditional_branch_immediate => switch (tag) {
-            .b => try emit.writeInstruction(Instruction.b(@intCast(i28, offset))),
-            .bl => try emit.writeInstruction(Instruction.bl(@intCast(i28, offset))),
+            .b => try emit.writeInstruction(Instruction.b(@as(i28, @intCast(offset)))),
+            .bl => try emit.writeInstruction(Instruction.bl(@as(i28, @intCast(offset)))),
             else => unreachable,
         },
         else => unreachable,
@@ -590,13 +590,13 @@ fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const r_inst = emit.mir.instructions.items(.data)[inst].r_inst;
 
-    const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) - @intCast(i64, emit.code.items.len);
+    const offset = @as(i64, @intCast(emit.code_offset_mapping.get(r_inst.inst).?)) - @as(i64, @intCast(emit.code.items.len));
     const branch_type = emit.branch_types.get(inst).?;
     log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset });
 
     switch (branch_type) {
         .cbz => switch (tag) {
-            .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))),
+            .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @as(i21, @intCast(offset)))),
             else => unreachable,
         },
         else => unreachable,
@@ -662,7 +662,7 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
     const relocation = emit.mir.instructions.items(.data)[inst].relocation;
 
     const offset = blk: {
-        const offset = @intCast(u32, emit.code.items.len);
+        const offset = @as(u32, @intCast(emit.code.items.len));
         // bl
         try emit.writeInstruction(Instruction.bl(0));
         break :blk offset;
@@ -837,11 +837,11 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const payload = emit.mir.instructions.items(.data)[inst].payload;
     const data = emit.mir.extraData(Mir.LoadMemoryPie, payload).data;
-    const reg = @enumFromInt(Register, data.register);
+    const reg = @as(Register, @enumFromInt(data.register));
 
     // PC-relative displacement to the entry in memory.
     // adrp
-    const offset = @intCast(u32, emit.code.items.len);
+    const offset = @as(u32, @intCast(emit.code.items.len));
     try emit.writeInstruction(Instruction.adrp(reg.toX(), 0));
 
     switch (tag) {
@@ -1220,7 +1220,7 @@ fn mirNop(emit: *Emit) !void {
 }
 
 fn regListIsSet(reg_list: u32, reg: Register) bool {
-    return reg_list & @as(u32, 1) << @intCast(u5, reg.id()) != 0;
+    return reg_list & @as(u32, 1) << @as(u5, @intCast(reg.id())) != 0;
 }
 
 fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
@@ -1245,7 +1245,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
             var count: u6 = 0;
             var other_reg: ?Register = null;
             while (i > 0) : (i -= 1) {
-                const reg = @enumFromInt(Register, i - 1);
+                const reg = @as(Register, @enumFromInt(i - 1));
                 if (regListIsSet(reg_list, reg)) {
                     if (count == 0 and odd_number_of_regs) {
                         try emit.writeInstruction(Instruction.ldr(
@@ -1274,7 +1274,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
             var count: u6 = 0;
             var other_reg: ?Register = null;
             while (i < 32) : (i += 1) {
-                const reg = @enumFromInt(Register, i);
+                const reg = @as(Register, @enumFromInt(i));
                 if (regListIsSet(reg_list, reg)) {
                     if (count == number_of_regs - 1 and odd_number_of_regs) {
                         try emit.writeInstruction(Instruction.str(
src/arch/aarch64/Mir.zig
@@ -507,7 +507,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => mir.extra[i],
-            i32 => @bitCast(i32, mir.extra[i]),
+            i32 => @as(i32, @bitCast(mir.extra[i])),
             else => @compileError("bad field type"),
         };
         i += 1;
src/arch/arm/abi.zig
@@ -13,7 +13,7 @@ pub const Class = union(enum) {
     i64_array: u8,
 
     fn arrSize(total_size: u64, arr_size: u64) Class {
-        const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size);
+        const count = @as(u8, @intCast(std.mem.alignForward(u64, total_size, arr_size) / arr_size));
         if (arr_size == 32) {
             return .{ .i32_array = count };
         } else {
src/arch/arm/bits.zig
@@ -159,7 +159,7 @@ pub const Register = enum(u5) {
     /// Returns the unique 4-bit ID of this register which is used in
     /// the machine code
     pub fn id(self: Register) u4 {
-        return @truncate(u4, @intFromEnum(self));
+        return @as(u4, @truncate(@intFromEnum(self)));
     }
 
     pub fn dwarfLocOp(self: Register) u8 {
@@ -399,8 +399,8 @@ pub const Instruction = union(enum) {
 
             pub fn toU8(self: Shift) u8 {
                 return switch (self) {
-                    .register => |v| @bitCast(u8, v),
-                    .immediate => |v| @bitCast(u8, v),
+                    .register => |v| @as(u8, @bitCast(v)),
+                    .immediate => |v| @as(u8, @bitCast(v)),
                 };
             }
 
@@ -425,8 +425,8 @@ pub const Instruction = union(enum) {
 
         pub fn toU12(self: Operand) u12 {
             return switch (self) {
-                .register => |v| @bitCast(u12, v),
-                .immediate => |v| @bitCast(u12, v),
+                .register => |v| @as(u12, @bitCast(v)),
+                .immediate => |v| @as(u12, @bitCast(v)),
             };
         }
 
@@ -463,8 +463,8 @@ pub const Instruction = union(enum) {
                 if (x & mask == x) {
                     break Operand{
                         .immediate = .{
-                            .imm = @intCast(u8, std.math.rotl(u32, x, 2 * i)),
-                            .rotate = @intCast(u4, i),
+                            .imm = @as(u8, @intCast(std.math.rotl(u32, x, 2 * i))),
+                            .rotate = @as(u4, @intCast(i)),
                         },
                     };
                 }
@@ -522,7 +522,7 @@ pub const Instruction = union(enum) {
 
         pub fn toU12(self: Offset) u12 {
             return switch (self) {
-                .register => |v| @bitCast(u12, v),
+                .register => |v| @as(u12, @bitCast(v)),
                 .immediate => |v| v,
             };
         }
@@ -604,20 +604,20 @@ pub const Instruction = union(enum) {
 
     pub fn toU32(self: Instruction) u32 {
         return switch (self) {
-            .data_processing => |v| @bitCast(u32, v),
-            .multiply => |v| @bitCast(u32, v),
-            .multiply_long => |v| @bitCast(u32, v),
-            .signed_multiply_halfwords => |v| @bitCast(u32, v),
-            .integer_saturating_arithmetic => |v| @bitCast(u32, v),
-            .bit_field_extract => |v| @bitCast(u32, v),
-            .single_data_transfer => |v| @bitCast(u32, v),
-            .extra_load_store => |v| @bitCast(u32, v),
-            .block_data_transfer => |v| @bitCast(u32, v),
-            .branch => |v| @bitCast(u32, v),
-            .branch_exchange => |v| @bitCast(u32, v),
-            .supervisor_call => |v| @bitCast(u32, v),
+            .data_processing => |v| @as(u32, @bitCast(v)),
+            .multiply => |v| @as(u32, @bitCast(v)),
+            .multiply_long => |v| @as(u32, @bitCast(v)),
+            .signed_multiply_halfwords => |v| @as(u32, @bitCast(v)),
+            .integer_saturating_arithmetic => |v| @as(u32, @bitCast(v)),
+            .bit_field_extract => |v| @as(u32, @bitCast(v)),
+            .single_data_transfer => |v| @as(u32, @bitCast(v)),
+            .extra_load_store => |v| @as(u32, @bitCast(v)),
+            .block_data_transfer => |v| @as(u32, @bitCast(v)),
+            .branch => |v| @as(u32, @bitCast(v)),
+            .branch_exchange => |v| @as(u32, @bitCast(v)),
+            .supervisor_call => |v| @as(u32, @bitCast(v)),
             .undefined_instruction => |v| v.imm32,
-            .breakpoint => |v| @intCast(u32, v.imm4) | (@intCast(u32, v.fixed_1) << 4) | (@intCast(u32, v.imm12) << 8) | (@intCast(u32, v.fixed_2_and_cond) << 20),
+            .breakpoint => |v| @as(u32, @intCast(v.imm4)) | (@as(u32, @intCast(v.fixed_1)) << 4) | (@as(u32, @intCast(v.imm12)) << 8) | (@as(u32, @intCast(v.fixed_2_and_cond)) << 20),
         };
     }
 
@@ -656,9 +656,9 @@ pub const Instruction = union(enum) {
                 .i = 1,
                 .opcode = if (top) 0b1010 else 0b1000,
                 .s = 0,
-                .rn = @truncate(u4, imm >> 12),
+                .rn = @as(u4, @truncate(imm >> 12)),
                 .rd = rd.id(),
-                .op2 = @truncate(u12, imm),
+                .op2 = @as(u12, @truncate(imm)),
             },
         };
     }
@@ -760,7 +760,7 @@ pub const Instruction = union(enum) {
                 .rn = rn.id(),
                 .lsb = lsb,
                 .rd = rd.id(),
-                .widthm1 = @intCast(u5, width - 1),
+                .widthm1 = @as(u5, @intCast(width - 1)),
                 .unsigned = unsigned,
                 .cond = @intFromEnum(cond),
             },
@@ -810,11 +810,11 @@ pub const Instruction = union(enum) {
         offset: ExtraLoadStoreOffset,
     ) Instruction {
         const imm4l: u4 = switch (offset) {
-            .immediate => |imm| @truncate(u4, imm),
+            .immediate => |imm| @as(u4, @truncate(imm)),
             .register => |reg| reg,
         };
         const imm4h: u4 = switch (offset) {
-            .immediate => |imm| @truncate(u4, imm >> 4),
+            .immediate => |imm| @as(u4, @truncate(imm >> 4)),
             .register => 0b0000,
         };
 
@@ -853,7 +853,7 @@ pub const Instruction = union(enum) {
     ) Instruction {
         return Instruction{
             .block_data_transfer = .{
-                .register_list = @bitCast(u16, reg_list),
+                .register_list = @as(u16, @bitCast(reg_list)),
                 .rn = rn.id(),
                 .load_store = load_store,
                 .write_back = @intFromBool(write_back),
@@ -870,7 +870,7 @@ pub const Instruction = union(enum) {
             .branch = .{
                 .cond = @intFromEnum(cond),
                 .link = link,
-                .offset = @bitCast(u24, @intCast(i24, offset >> 2)),
+                .offset = @as(u24, @bitCast(@as(i24, @intCast(offset >> 2)))),
             },
         };
     }
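The nested cast in the branch encoder above reads inside-out; the same expression as a hypothetical free function, assuming a byte offset that is a multiple of 4 and fits in 26 bits:

    fn encodeBranchOffset(byte_offset: i32) u24 {
        // checked narrowing to i24 first, then a bit-level reinterpret to u24
        return @as(u24, @bitCast(@as(i24, @intCast(byte_offset >> 2))));
    }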
@@ -904,8 +904,8 @@ pub const Instruction = union(enum) {
     fn breakpoint(imm: u16) Instruction {
         return Instruction{
             .breakpoint = .{
-                .imm12 = @truncate(u12, imm >> 4),
-                .imm4 = @truncate(u4, imm),
+                .imm12 = @as(u12, @truncate(imm >> 4)),
+                .imm4 = @as(u4, @truncate(imm)),
             },
         };
     }
@@ -1319,7 +1319,7 @@ pub const Instruction = union(enum) {
                 const reg = @as(Register, arg);
                 register_list |= @as(u16, 1) << reg.id();
             }
-            return ldm(cond, .sp, true, @bitCast(RegisterList, register_list));
+            return ldm(cond, .sp, true, @as(RegisterList, @bitCast(register_list)));
         }
     }
 
@@ -1343,7 +1343,7 @@ pub const Instruction = union(enum) {
                 const reg = @as(Register, arg);
                 register_list |= @as(u16, 1) << reg.id();
             }
-            return stmdb(cond, .sp, true, @bitCast(RegisterList, register_list));
+            return stmdb(cond, .sp, true, @as(RegisterList, @bitCast(register_list)));
         }
     }
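The push/pop helpers above reinterpret an accumulated u16 bit set as a RegisterList via @bitCast; a simplified sketch with an invented field layout:

    const RegList = packed struct(u16) {
        r0: bool = false,
        r1: bool = false,
        other: u13 = 0,
        lr: bool = false,
    };

    fn fromBits(bits: u16) RegList {
        return @as(RegList, @bitCast(bits));
    }

    fn toBits(list: RegList) u16 {
        return @as(u16, @bitCast(list));
    }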
 
src/arch/arm/CodeGen.zig
@@ -266,8 +266,8 @@ const DbgInfoReloc = struct {
                     .stack_argument_offset,
                     => blk: {
                         const adjusted_stack_offset = switch (reloc.mcv) {
-                            .stack_offset => |offset| -@intCast(i32, offset),
-                            .stack_argument_offset => |offset| @intCast(i32, function.saved_regs_stack_space + offset),
+                            .stack_offset => |offset| -@as(i32, @intCast(offset)),
+                            .stack_argument_offset => |offset| @as(i32, @intCast(function.saved_regs_stack_space + offset)),
                             else => unreachable,
                         };
                         break :blk .{ .stack = .{
@@ -303,8 +303,8 @@ const DbgInfoReloc = struct {
                         const adjusted_offset = switch (reloc.mcv) {
                             .ptr_stack_offset,
                             .stack_offset,
-                            => -@intCast(i32, offset),
-                            .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset),
+                            => -@as(i32, @intCast(offset)),
+                            .stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
                             else => unreachable,
                         };
                         break :blk .{ .stack = .{
@@ -446,7 +446,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
 
     try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
 
-    const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+    const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
     self.mir_instructions.appendAssumeCapacity(inst);
     return result_index;
 }
@@ -466,11 +466,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
 
 pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, self.mir_extra.items.len);
+    const result = @as(u32, @intCast(self.mir_extra.items.len));
     inline for (fields) |field| {
         self.mir_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
-            i32 => @bitCast(u32, @field(extra, field.name)),
+            i32 => @as(u32, @bitCast(@field(extra, field.name))),
             else => @compileError("bad field type"),
         });
     }
@@ -522,7 +522,7 @@ fn gen(self: *Self) !void {
 
                     const ty = self.typeOfIndex(inst);
 
-                    const abi_size = @intCast(u32, ty.abiSize(mod));
+                    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
                     const abi_align = ty.abiAlignment(mod);
                     const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                     try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -588,7 +588,7 @@ fn gen(self: *Self) !void {
         for (self.exitlude_jump_relocs.items) |jmp_reloc| {
             self.mir_instructions.set(jmp_reloc, .{
                 .tag = .b,
-                .data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
+                .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len)) },
             });
         }
 
@@ -934,15 +934,15 @@ fn finishAirBookkeeping(self: *Self) void {
 fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
     var tomb_bits = self.liveness.getTombBits(inst);
     for (operands) |op| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @intFromEnum(op);
         if (op_int < Air.ref_start_index) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+        const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
         self.processDeath(op_index);
     }
-    const is_used = @truncate(u1, tomb_bits) == 0;
+    const is_used = @as(u1, @truncate(tomb_bits)) == 0;
     if (is_used) {
         log.debug("%{d} => {}", .{ inst, result });
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
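finishAir peels one tomb bit per operand, so the new @truncate form appears once per iteration; a self-contained sketch of the same walk (names hypothetical):

    fn countDyingOperands(liveness_bits: u32, operand_count: u5) u32 {
        var tomb_bits = liveness_bits;
        var deaths: u32 = 0;
        var i: u5 = 0;
        while (i < operand_count) : (i += 1) {
            // the lowest bit marks whether the current operand dies here
            if (@as(u1, @truncate(tomb_bits)) != 0) deaths += 1;
            tomb_bits >>= 1;
        }
        return deaths;
    }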
@@ -1201,7 +1201,7 @@ fn truncRegister(
             .rd = dest_reg,
             .rn = operand_reg,
             .lsb = 0,
-            .width = @intCast(u6, int_bits),
+            .width = @as(u6, @intCast(int_bits)),
         } },
     });
 }
@@ -1591,9 +1591,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
         const tuple_align = tuple_ty.abiAlignment(mod);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -1704,9 +1704,9 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
         const tuple_align = tuple_ty.abiAlignment(mod);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -1866,9 +1866,9 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.typeOf(extra.rhs);
 
         const tuple_ty = self.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_size = @as(u32, @intCast(tuple_ty.abiSize(mod)));
         const tuple_align = tuple_ty.abiAlignment(mod);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
+        const overflow_bit_offset = @as(u32, @intCast(tuple_ty.structFieldOffset(1, mod)));
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
@@ -1915,7 +1915,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                             .data = .{ .rr_shift = .{
                                 .rd = dest_reg,
                                 .rm = lhs_reg,
-                                .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)),
+                                .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))),
                             } },
                         });
 
@@ -1927,7 +1927,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                             .data = .{ .rr_shift = .{
                                 .rd = reconstructed_reg,
                                 .rm = dest_reg,
-                                .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)),
+                                .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_mcv.immediate))),
                             } },
                         });
                     } else {
@@ -2020,7 +2020,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const optional_ty = self.typeOfIndex(inst);
-        const abi_size = @intCast(u32, optional_ty.abiSize(mod));
+        const abi_size = @as(u32, @intCast(optional_ty.abiSize(mod)));
 
         // Optional with a zero-bit payload type is just a boolean true
         if (abi_size == 1) {
@@ -2049,7 +2049,7 @@ fn errUnionErr(
         return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
+    const err_offset = @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod)));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -2071,15 +2071,15 @@ fn errUnionErr(
             );
 
             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
+            const err_bit_size = @as(u32, @intCast(err_ty.abiSize(mod))) * 8;
 
             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
                 .data = .{ .rr_lsb_width = .{
                     .rd = dest_reg,
                     .rn = operand_reg,
-                    .lsb = @intCast(u5, err_bit_offset),
-                    .width = @intCast(u6, err_bit_size),
+                    .lsb = @as(u5, @intCast(err_bit_offset)),
+                    .width = @as(u6, @intCast(err_bit_size)),
                 } },
             });
 
@@ -2126,7 +2126,7 @@ fn errUnionPayload(
         return MCValue.none;
     }
 
-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -2148,15 +2148,15 @@ fn errUnionPayload(
             );
 
             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
+            const payload_bit_size = @as(u32, @intCast(payload_ty.abiSize(mod))) * 8;
 
             _ = try self.addInst(.{
                 .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{ .rr_lsb_width = .{
                     .rd = dest_reg,
                     .rn = operand_reg,
-                    .lsb = @intCast(u5, payload_bit_offset),
-                    .width = @intCast(u6, payload_bit_size),
+                    .lsb = @as(u5, @intCast(payload_bit_offset)),
+                    .width = @as(u6, @intCast(payload_bit_size)),
                 } },
             });
 
@@ -2235,13 +2235,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
         const operand = try self.resolveInst(ty_op.operand);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
         const abi_align = error_union_ty.abiAlignment(mod);
-        const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
+        const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst)));
         const payload_off = errUnionPayloadOffset(payload_ty, mod);
         const err_off = errUnionErrorOffset(payload_ty, mod);
-        try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
-        try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
+        try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), operand);
+        try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), .{ .immediate = 0 });
 
         break :result MCValue{ .stack_offset = stack_offset };
     };
@@ -2259,13 +2259,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
         const operand = try self.resolveInst(ty_op.operand);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
         const abi_align = error_union_ty.abiAlignment(mod);
-        const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
+        const stack_offset = @as(u32, @intCast(try self.allocMem(abi_size, abi_align, inst)));
         const payload_off = errUnionPayloadOffset(payload_ty, mod);
         const err_off = errUnionErrorOffset(payload_ty, mod);
-        try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
-        try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
+        try self.genSetStack(error_ty, stack_offset - @as(u32, @intCast(err_off)), operand);
+        try self.genSetStack(payload_ty, stack_offset - @as(u32, @intCast(payload_off)), .undef);
 
         break :result MCValue{ .stack_offset = stack_offset };
     };
@@ -2369,7 +2369,7 @@ fn ptrElemVal(
 ) !MCValue {
     const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.childType(mod);
-    const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+    const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
 
     switch (elem_size) {
         1, 4 => {
@@ -2480,7 +2480,7 @@ fn arrayElemVal(
         => {
             const ptr_to_mcv = switch (mcv) {
                 .stack_offset => |off| MCValue{ .ptr_stack_offset = off },
-                .memory => |addr| MCValue{ .immediate = @intCast(u32, addr) },
+                .memory => |addr| MCValue{ .immediate = @as(u32, @intCast(addr)) },
                 .stack_argument_offset => |off| blk: {
                     const reg = try self.register_manager.allocReg(null, gp);
 
@@ -2654,7 +2654,7 @@ fn reuseOperand(
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.childType(mod);
-    const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+    const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
 
     switch (ptr) {
         .none => unreachable,
@@ -2759,7 +2759,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const elem_size = @intCast(u32, value_ty.abiSize(mod));
+    const elem_size = @as(u32, @intCast(value_ty.abiSize(mod)));
 
     switch (ptr) {
         .none => unreachable,
@@ -2814,7 +2814,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
                                 // sub src_reg, fp, #off
                                 try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
                             },
-                            .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+                            .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
                             .stack_argument_offset => |off| {
                                 _ = try self.addInst(.{
                                     .tag = .ldr_ptr_stack_argument,
@@ -2882,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.typeOf(operand);
         const struct_ty = ptr_ty.childType(mod);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -2906,7 +2906,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
         const struct_field_ty = struct_ty.structFieldType(index, mod);
 
         switch (mcv) {
@@ -2970,15 +2970,15 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                 );
 
                 const field_bit_offset = struct_field_offset * 8;
-                const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8;
+                const field_bit_size = @as(u32, @intCast(struct_field_ty.abiSize(mod))) * 8;
 
                 _ = try self.addInst(.{
                     .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                     .data = .{ .rr_lsb_width = .{
                         .rd = dest_reg,
                         .rn = operand_reg,
-                        .lsb = @intCast(u5, field_bit_offset),
-                        .width = @intCast(u6, field_bit_size),
+                        .lsb = @as(u5, @intCast(field_bit_offset)),
+                        .width = @as(u6, @intCast(field_bit_size)),
                     } },
                 });
 
@@ -3003,7 +3003,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
             return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
         }
 
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(extra.field_index, mod)));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -3364,7 +3364,7 @@ fn binOpImmediate(
         => .{ .rr_shift = .{
             .rd = dest_reg,
             .rm = lhs_reg,
-            .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_immediate)),
+            .shift_amount = Instruction.ShiftAmount.imm(@as(u5, @intCast(rhs_immediate))),
         } },
         else => unreachable,
     };
@@ -3895,7 +3895,7 @@ fn ptrArithmetic(
                 .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
                 else => ptr_ty.childType(mod),
             };
-            const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+            const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
 
             const base_tag: Air.Inst.Tag = switch (tag) {
                 .ptr_add => .add,
@@ -4022,7 +4022,7 @@ fn genInlineMemcpy(
     _ = try self.addInst(.{
         .tag = .b,
         .cond = .ge,
-        .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 5) },
+        .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 5)) },
     });
 
     // ldrb tmp, [src, count]
@@ -4058,7 +4058,7 @@ fn genInlineMemcpy(
     // b loop
     _ = try self.addInst(.{
         .tag = .b,
-        .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) },
+        .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 5)) },
     });
 
     // end:
@@ -4126,7 +4126,7 @@ fn genInlineMemsetCode(
     _ = try self.addInst(.{
         .tag = .b,
         .cond = .ge,
-        .data = .{ .inst = @intCast(u32, self.mir_instructions.len + 4) },
+        .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len + 4)) },
     });
 
     // strb val, [src, count]
@@ -4152,7 +4152,7 @@ fn genInlineMemsetCode(
     // b loop
     _ = try self.addInst(.{
         .tag = .b,
-        .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 4) },
+        .data = .{ .inst = @as(u32, @intCast(self.mir_instructions.len - 4)) },
     });
 
     // end:
@@ -4216,7 +4216,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
     const ty = self.typeOf(callee);
     const mod = self.bin_file.options.module.?;
 
@@ -4248,8 +4248,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
         log.debug("airCall: return by reference", .{});
         const ret_ty = fn_ty.fnReturnType(mod);
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
+        const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
+        const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ptr_ty = try mod.singleMutPtrType(ret_ty);
@@ -4294,7 +4294,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                 const atom = elf_file.getAtom(atom_index);
                 _ = try atom.getOrCreateOffsetTableEntry(elf_file);
-                const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
+                const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
                 try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr });
             } else if (self.bin_file.cast(link.File.MachO)) |_| {
                 unreachable; // unsupported architecture for MachO
@@ -4425,7 +4425,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = Air.refToIndex(un_op).?;
             if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+                const abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
                 const abi_align = ret_ty.abiAlignment(mod);
 
                 const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4651,7 +4651,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @intFromEnum(pl_op.operand);
         if (op_int >= Air.ref_start_index) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+            const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
             self.processDeath(op_index);
         }
     }
@@ -4956,7 +4956,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const loop = self.air.extraData(Air.Block, ty_pl.payload);
     const body = self.air.extra[loop.end..][0..loop.data.body_len];
-    const start_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+    const start_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
 
     try self.genBody(body);
     try self.jump(start_index);
@@ -5021,7 +5021,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     var case_i: u32 = 0;
     while (case_i < switch_br.data.cases_len) : (case_i += 1) {
         const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-        const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+        const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
         assert(items.len > 0);
         const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
         extra_index = case.end + items.len + case_body.len;
@@ -5139,7 +5139,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
 fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
     const tag = self.mir_instructions.items(.tag)[inst];
     switch (tag) {
-        .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Air.Inst.Index, self.mir_instructions.len),
+        .b => self.mir_instructions.items(.data)[inst].inst = @as(Air.Inst.Index, @intCast(self.mir_instructions.len)),
         else => unreachable,
     }
 }
@@ -5188,12 +5188,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
 fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Asm, ty_pl.payload);
-    const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
-    const clobbers_len = @truncate(u31, extra.data.flags);
+    const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+    const clobbers_len = @as(u31, @truncate(extra.data.flags));
     var extra_i: usize = extra.end;
-    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+    const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
     extra_i += outputs.len;
-    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+    const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
     extra_i += inputs.len;
 
     const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -5323,7 +5323,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5376,7 +5376,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 },
                 2 => {
                     const offset = if (stack_offset <= math.maxInt(u8)) blk: {
-                        break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
+                        break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset)));
                     } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));
 
                     _ = try self.addInst(.{
@@ -5404,7 +5404,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
 
             const overflow_bit_ty = ty.structFieldType(1, mod);
-            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
+            const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
             const cond_reg = try self.register_manager.allocReg(null, gp);
 
             // C flag: movcs reg, #1
@@ -5457,7 +5457,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                         // sub src_reg, fp, #off
                         try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
                     },
-                    .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+                    .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
                     .stack_argument_offset => |off| {
                         _ = try self.addInst(.{
                             .tag = .ldr_ptr_stack_argument,
@@ -5554,7 +5554,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .tag = .movw,
                         .data = .{ .r_imm16 = .{
                             .rd = reg,
-                            .imm16 = @intCast(u16, x),
+                            .imm16 = @as(u16, @intCast(x)),
                         } },
                     });
                 } else {
@@ -5562,7 +5562,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .tag = .mov,
                         .data = .{ .r_op_mov = .{
                             .rd = reg,
-                            .op = Instruction.Operand.imm(@truncate(u8, x), 0),
+                            .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0),
                         } },
                     });
                     _ = try self.addInst(.{
@@ -5570,7 +5570,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .data = .{ .rr_op = .{
                             .rd = reg,
                             .rn = reg,
-                            .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12),
+                            .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12),
                         } },
                     });
                 }
@@ -5585,14 +5585,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .tag = .movw,
                         .data = .{ .r_imm16 = .{
                             .rd = reg,
-                            .imm16 = @truncate(u16, x),
+                            .imm16 = @as(u16, @truncate(x)),
                         } },
                     });
                     _ = try self.addInst(.{
                         .tag = .movt,
                         .data = .{ .r_imm16 = .{
                             .rd = reg,
-                            .imm16 = @truncate(u16, x >> 16),
+                            .imm16 = @as(u16, @truncate(x >> 16)),
                         } },
                     });
                 } else {
@@ -5605,7 +5605,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .tag = .mov,
                         .data = .{ .r_op_mov = .{
                             .rd = reg,
-                            .op = Instruction.Operand.imm(@truncate(u8, x), 0),
+                            .op = Instruction.Operand.imm(@as(u8, @truncate(x)), 0),
                         } },
                     });
                     _ = try self.addInst(.{
@@ -5613,7 +5613,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .data = .{ .rr_op = .{
                             .rd = reg,
                             .rn = reg,
-                            .op = Instruction.Operand.imm(@truncate(u8, x >> 8), 12),
+                            .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 8)), 12),
                         } },
                     });
                     _ = try self.addInst(.{
@@ -5621,7 +5621,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .data = .{ .rr_op = .{
                             .rd = reg,
                             .rn = reg,
-                            .op = Instruction.Operand.imm(@truncate(u8, x >> 16), 8),
+                            .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 16)), 8),
                         } },
                     });
                     _ = try self.addInst(.{
@@ -5629,7 +5629,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .data = .{ .rr_op = .{
                             .rd = reg,
                             .rn = reg,
-                            .op = Instruction.Operand.imm(@truncate(u8, x >> 24), 4),
+                            .op = Instruction.Operand.imm(@as(u8, @truncate(x >> 24)), 4),
                         } },
                     });
                 }
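Both paths above decompose a 32-bit immediate with @truncate: the v7 path into two 16-bit halves for movw/movt, the fallback into four rotated bytes for mov/orr. A sketch of the half-word split (helper name hypothetical):

    fn movwMovtHalves(x: u32) struct { low: u16, high: u16 } {
        return .{
            .low = @as(u16, @truncate(x)), // movw loads bits 0..15
            .high = @as(u16, @truncate(x >> 16)), // movt loads bits 16..31
        };
    }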
@@ -5654,12 +5654,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
         .memory => |addr| {
             // The value is in memory at a hard-coded address.
             // If the type is a pointer, it means the pointer address is at this memory location.
-            try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) });
+            try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @intCast(addr)) });
             try self.genLdrRegister(reg, reg, ty);
         },
         .stack_offset => |off| {
             // TODO: maybe addressing from sp instead of fp
-            const abi_size = @intCast(u32, ty.abiSize(mod));
+            const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
 
             const tag: Mir.Inst.Tag = switch (abi_size) {
                 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
@@ -5677,7 +5677,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 
             if (extra_offset) {
                 const offset = if (off <= math.maxInt(u8)) blk: {
-                    break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off));
+                    break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(off)));
                 } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }));
 
                 _ = try self.addInst(.{
@@ -5693,7 +5693,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                 });
             } else {
                 const offset = if (off <= math.maxInt(u12)) blk: {
-                    break :blk Instruction.Offset.imm(@intCast(u12, off));
+                    break :blk Instruction.Offset.imm(@as(u12, @intCast(off)));
                 } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none);
 
                 _ = try self.addInst(.{
@@ -5732,7 +5732,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 
 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
@@ -5771,7 +5771,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                 },
                 2 => {
                     const offset = if (stack_offset <= math.maxInt(u8)) blk: {
-                        break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
+                        break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(stack_offset)));
                     } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));
 
                     _ = try self.addInst(.{
@@ -5814,7 +5814,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                         // sub src_reg, fp, #off
                         try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
                     },
-                    .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }),
+                    .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @as(u32, @intCast(addr)) }),
                     .stack_argument_offset => |off| {
                         _ = try self.addInst(.{
                             .tag = .ldr_ptr_stack_argument,
@@ -5893,7 +5893,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType(mod);
-        const array_len = @intCast(u32, array_ty.arrayLen(mod));
+        const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
 
         const stack_offset = try self.allocMem(8, 8, inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -6010,7 +6010,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
     const result: MCValue = res: {
         if (self.liveness.isUnused(inst)) break :res MCValue.dead;
         return self.fail("TODO implement airAggregateInit for arm", .{});
@@ -6058,7 +6058,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
         const error_union_ty = self.typeOf(pl_op.operand);
         const mod = self.bin_file.options.module.?;
-        const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const error_union_size = @as(u32, @intCast(error_union_ty.abiSize(mod)));
         const error_union_align = error_union_ty.abiAlignment(mod);
 
         // The error union will die in the body. However, we need the
@@ -6141,7 +6141,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
             .none => .none,
             .undef => .undef,
             .load_got, .load_direct, .load_tlv => unreachable, // TODO
-            .immediate => |imm| .{ .immediate = @truncate(u32, imm) },
+            .immediate => |imm| .{ .immediate = @as(u32, @truncate(imm)) },
             .memory => |addr| .{ .memory = addr },
         },
         .fail => |msg| {
@@ -6198,7 +6198,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+                const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
                 // TODO handle cases where multiple registers are used
                 if (ret_ty_size <= 4) {
                     result.return_value = .{ .register = c_abi_int_return_regs[0] };
@@ -6216,7 +6216,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                 if (ty.toType().abiAlignment(mod) == 8)
                     ncrn = std.mem.alignForward(usize, ncrn, 2);
 
-                const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
                 if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
                     if (param_size <= 4) {
                         result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6245,7 +6245,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+                const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
                 if (ret_ty_size == 0) {
                     assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
@@ -6264,7 +6264,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
             for (fn_info.param_types, 0..) |ty, i| {
                 if (ty.toType().abiSize(mod) > 0) {
-                    const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                    const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
                     const param_alignment = ty.toType().abiAlignment(mod);
 
                     stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
src/arch/arm/Emit.zig
@@ -78,7 +78,7 @@ pub fn emitMir(
 
     // Emit machine code
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         switch (tag) {
             .add => try emit.mirDataProcessing(inst),
             .adds => try emit.mirDataProcessing(inst),
@@ -241,7 +241,7 @@ fn lowerBranches(emit: *Emit) !void {
     // TODO optimization opportunity: do this in codegen while
     // generating MIR
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         if (isBranch(tag)) {
             const target_inst = emit.branchTarget(inst);
 
@@ -286,7 +286,7 @@ fn lowerBranches(emit: *Emit) !void {
         var current_code_offset: usize = 0;
 
         for (mir_tags, 0..) |tag, index| {
-            const inst = @intCast(u32, index);
+            const inst = @as(u32, @intCast(index));
 
            // If this instruction is contained in the code offset
             // mapping (when it is a target of a branch or if it is a
@@ -301,7 +301,7 @@ fn lowerBranches(emit: *Emit) !void {
                 const target_inst = emit.branchTarget(inst);
                 if (target_inst < inst) {
                     const target_offset = emit.code_offset_mapping.get(target_inst).?;
-                    const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset + 8);
+                    const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset + 8));
                     const branch_type = emit.branch_types.getPtr(inst).?;
                     const optimal_branch_type = try emit.optimalBranchType(tag, offset);
                     if (branch_type.* != optimal_branch_type) {
@@ -320,7 +320,7 @@ fn lowerBranches(emit: *Emit) !void {
                 for (origin_list.items) |forward_branch_inst| {
                     const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
                     const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
-                    const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset + 8);
+                    const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset + 8));
                     const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
                     const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
                     if (branch_type.* != optimal_branch_type) {
@@ -351,7 +351,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
 }
 
 fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
-    const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
+    const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
     const delta_pc: usize = self.code.items.len - self.prev_di_pc;
     switch (self.debug_output) {
         .dwarf => |dw| {
@@ -368,13 +368,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
             // increasing the line number
             try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
             if (d_pc_p9 > 0) {
                // minus one because if it's the last one, we want to leave space to change the line, which is one quantum
-                try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
                 if (dbg_out.pcop_change_index.*) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
                 // we don't need to do anything, because adding the quant does it for us
             } else unreachable;
@@ -448,13 +448,13 @@ fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void {
                 const scratch: Register = .r4;
 
                 if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) {
-                    try emit.writeInstruction(Instruction.movw(cond, scratch, @truncate(u16, imm32)));
-                    try emit.writeInstruction(Instruction.movt(cond, scratch, @truncate(u16, imm32 >> 16)));
+                    try emit.writeInstruction(Instruction.movw(cond, scratch, @as(u16, @truncate(imm32))));
+                    try emit.writeInstruction(Instruction.movt(cond, scratch, @as(u16, @truncate(imm32 >> 16))));
                 } else {
-                    try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@truncate(u8, imm32), 0)));
-                    try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 8), 12)));
-                    try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 16), 8)));
-                    try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 24), 4)));
+                    try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32)), 0)));
+                    try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 8)), 12)));
+                    try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 16)), 8)));
+                    try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 24)), 4)));
                 }
 
                 break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none);
@@ -484,12 +484,12 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
     const cond = emit.mir.instructions.items(.cond)[inst];
     const target_inst = emit.mir.instructions.items(.data)[inst].inst;
 
-    const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len + 8);
+    const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len + 8));
     const branch_type = emit.branch_types.get(inst).?;
 
     switch (branch_type) {
         .b => switch (tag) {
-            .b => try emit.writeInstruction(Instruction.b(cond, @intCast(i26, offset))),
+            .b => try emit.writeInstruction(Instruction.b(cond, @as(i26, @intCast(offset)))),
             else => unreachable,
         },
     }
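The offset computed above widens both code offsets to i64 before subtracting, so a backward branch comes out negative without unsigned underflow; roughly (helper name hypothetical):

    fn branchOffset(target_offset: usize, source_offset: usize) i64 {
        // +8 because the ARM PC reads two instructions ahead of the branch
        return @as(i64, @intCast(target_offset)) - @as(i64, @intCast(source_offset + 8));
    }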
@@ -585,7 +585,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
         .ldrb_stack_argument,
         => {
             const offset = if (raw_offset <= math.maxInt(u12)) blk: {
-                break :blk Instruction.Offset.imm(@intCast(u12, raw_offset));
+                break :blk Instruction.Offset.imm(@as(u12, @intCast(raw_offset)));
             } else return emit.fail("TODO mirLoadStack larger offsets", .{});
 
             switch (tag) {
@@ -599,7 +599,7 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
         .ldrsh_stack_argument,
         => {
             const offset = if (raw_offset <= math.maxInt(u8)) blk: {
-                break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, raw_offset));
+                break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(raw_offset)));
             } else return emit.fail("TODO mirLoadStack larger offsets", .{});
 
             switch (tag) {
src/arch/arm/Mir.zig
@@ -287,7 +287,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => mir.extra[i],
-            i32 => @bitCast(i32, mir.extra[i]),
+            i32 => @as(i32, @bitCast(mir.extra[i])),
             else => @compileError("bad field type"),
         };
         i += 1;
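extraData reads mixed u32/i32 payloads out of a flat []u32, recovering signed fields with @bitCast; the round trip in isolation (helper names hypothetical):

    fn storeI32(x: i32) u32 {
        return @as(u32, @bitCast(x));
    }

    fn loadI32(word: u32) i32 {
        return @as(i32, @bitCast(word));
    }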
src/arch/riscv64/bits.zig
@@ -56,12 +56,12 @@ pub const Instruction = union(enum) {
     // TODO: once packed structs work we can remove this monstrosity.
     pub fn toU32(self: Instruction) u32 {
         return switch (self) {
-            .R => |v| @bitCast(u32, v),
-            .I => |v| @bitCast(u32, v),
-            .S => |v| @bitCast(u32, v),
-            .B => |v| @intCast(u32, v.opcode) + (@intCast(u32, v.imm11) << 7) + (@intCast(u32, v.imm1_4) << 8) + (@intCast(u32, v.funct3) << 12) + (@intCast(u32, v.rs1) << 15) + (@intCast(u32, v.rs2) << 20) + (@intCast(u32, v.imm5_10) << 25) + (@intCast(u32, v.imm12) << 31),
-            .U => |v| @bitCast(u32, v),
-            .J => |v| @bitCast(u32, v),
+            .R => |v| @as(u32, @bitCast(v)),
+            .I => |v| @as(u32, @bitCast(v)),
+            .S => |v| @as(u32, @bitCast(v)),
+            .B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31),
+            .U => |v| @as(u32, @bitCast(v)),
+            .J => |v| @as(u32, @bitCast(v)),
         };
     }
 
@@ -80,7 +80,7 @@ pub const Instruction = union(enum) {
 
     // RISC-V is all signed all the time -- convert immediates to unsigned for processing
     fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction {
-        const umm = @bitCast(u12, imm);
+        const umm = @as(u12, @bitCast(imm));
 
         return Instruction{
             .I = .{
@@ -94,7 +94,7 @@ pub const Instruction = union(enum) {
     }
 
     fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction {
-        const umm = @bitCast(u12, imm);
+        const umm = @as(u12, @bitCast(imm));
 
         return Instruction{
             .S = .{
@@ -102,8 +102,8 @@ pub const Instruction = union(enum) {
                 .funct3 = fn3,
                 .rs1 = r1.id(),
                 .rs2 = r2.id(),
-                .imm0_4 = @truncate(u5, umm),
-                .imm5_11 = @truncate(u7, umm >> 5),
+                .imm0_4 = @as(u5, @truncate(umm)),
+                .imm5_11 = @as(u7, @truncate(umm >> 5)),
             },
         };
     }
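The S-type encoder above first reinterprets the signed immediate as unsigned, then slices it into the two split fields; the same steps as a hypothetical standalone helper:

    fn splitSTypeImm(imm: i12) struct { imm0_4: u5, imm5_11: u7 } {
        const umm = @as(u12, @bitCast(imm)); // keep the bits, drop the signedness
        return .{
            .imm0_4 = @as(u5, @truncate(umm)),
            .imm5_11 = @as(u7, @truncate(umm >> 5)),
        };
    }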
@@ -111,7 +111,7 @@ pub const Instruction = union(enum) {
     // Use significance value rather than bit value, same for J-type
     // -- less burden on callsite, bonus semantic checking
     fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction {
-        const umm = @bitCast(u13, imm);
+        const umm = @as(u13, @bitCast(imm));
         assert(umm % 2 == 0); // misaligned branch target
 
         return Instruction{
@@ -120,17 +120,17 @@ pub const Instruction = union(enum) {
                 .funct3 = fn3,
                 .rs1 = r1.id(),
                 .rs2 = r2.id(),
-                .imm1_4 = @truncate(u4, umm >> 1),
-                .imm5_10 = @truncate(u6, umm >> 5),
-                .imm11 = @truncate(u1, umm >> 11),
-                .imm12 = @truncate(u1, umm >> 12),
+                .imm1_4 = @as(u4, @truncate(umm >> 1)),
+                .imm5_10 = @as(u6, @truncate(umm >> 5)),
+                .imm11 = @as(u1, @truncate(umm >> 11)),
+                .imm12 = @as(u1, @truncate(umm >> 12)),
             },
         };
     }
 
     // We have to extract the 20 bits anyway -- let's not make it more painful
     fn uType(op: u7, rd: Register, imm: i20) Instruction {
-        const umm = @bitCast(u20, imm);
+        const umm = @as(u20, @bitCast(imm));
 
         return Instruction{
             .U = .{
@@ -142,17 +142,17 @@ pub const Instruction = union(enum) {
     }
 
     fn jType(op: u7, rd: Register, imm: i21) Instruction {
-        const umm = @bitCast(u21, imm);
+        const umm = @as(u21, @bitCast(imm));
         assert(umm % 2 == 0); // misaligned jump target
 
         return Instruction{
             .J = .{
                 .opcode = op,
                 .rd = rd.id(),
-                .imm1_10 = @truncate(u10, umm >> 1),
-                .imm11 = @truncate(u1, umm >> 11),
-                .imm12_19 = @truncate(u8, umm >> 12),
-                .imm20 = @truncate(u1, umm >> 20),
+                .imm1_10 = @as(u10, @truncate(umm >> 1)),
+                .imm11 = @as(u1, @truncate(umm >> 11)),
+                .imm12_19 = @as(u8, @truncate(umm >> 12)),
+                .imm20 = @as(u1, @truncate(umm >> 20)),
             },
         };
     }
@@ -258,7 +258,7 @@ pub const Instruction = union(enum) {
     }
 
     pub fn sltiu(rd: Register, r1: Register, imm: u12) Instruction {
-        return iType(0b0010011, 0b011, rd, r1, @bitCast(i12, imm));
+        return iType(0b0010011, 0b011, rd, r1, @as(i12, @bitCast(imm)));
     }
 
     // Arithmetic/Logical, Register-Immediate (32-bit)
@@ -407,7 +407,7 @@ pub const Register = enum(u6) {
    /// Returns the unique 5-bit ID of this register which is used in
     /// the machine code
     pub fn id(self: Register) u5 {
-        return @truncate(u5, @intFromEnum(self));
+        return @as(u5, @truncate(@intFromEnum(self)));
     }
 
     pub fn dwarfLocOp(reg: Register) u8 {
src/arch/riscv64/CodeGen.zig
@@ -323,7 +323,7 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
 
     try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
 
-    const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+    const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
     self.mir_instructions.appendAssumeCapacity(inst);
     return result_index;
 }
@@ -336,11 +336,11 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
 
 pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, self.mir_extra.items.len);
+    const result = @as(u32, @intCast(self.mir_extra.items.len));
     inline for (fields) |field| {
         self.mir_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
-            i32 => @bitCast(u32, @field(extra, field.name)),
+            i32 => @as(u32, @bitCast(@field(extra, field.name))),
             else => @compileError("bad field type"),
         });
     }
@@ -752,15 +752,15 @@ fn finishAirBookkeeping(self: *Self) void {
 fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
     var tomb_bits = self.liveness.getTombBits(inst);
     for (operands) |op| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @intFromEnum(op);
         if (op_int < Air.ref_start_index) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+        const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
         self.processDeath(op_index);
     }
-    const is_used = @truncate(u1, tomb_bits) == 0;
+    const is_used = @as(u1, @truncate(tomb_bits)) == 0;
     if (is_used) {
         log.debug("%{d} => {}", .{ inst, result });
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
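
A minimal sketch of the tomb-bit scan used by finishAir above, with a hypothetical 4-bit mask (three operand bits plus the result bit):

const std = @import("std");

test "tomb-bit scan peels one liveness bit per operand" {
    var tomb_bits: u4 = 0b0101; // hypothetical: operands 0 and 2 die
    var deaths: u32 = 0;
    for (0..3) |_| {
        const dies = @as(u1, @truncate(tomb_bits)) != 0;
        tomb_bits >>= 1;
        if (dies) deaths += 1;
    }
    // The remaining bit says whether the result itself is unused.
    const is_used = @as(u1, @truncate(tomb_bits)) == 0;
    try std.testing.expectEqual(@as(u32, 2), deaths);
    try std.testing.expect(is_used);
}
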
@@ -1709,7 +1709,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const fn_ty = self.typeOf(pl_op.operand);
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
 
     var info = try self.resolveCallingConventionValues(fn_ty);
     defer info.deinit(self);
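
The @ptrCast above reinterprets a slice of raw u32 words as typed instruction references; a standalone sketch with a hypothetical Ref enum standing in for Air.Inst.Ref:

const std = @import("std");

const Ref = enum(u32) { none = 0, _ };

test "slice ptrCast reinterprets u32 words as refs" {
    var raw = [_]u32{ 0, 7 };
    const words: []const u32 = &raw;
    const refs = @as([]const Ref, @ptrCast(words));
    try std.testing.expectEqual(@as(u32, 7), @intFromEnum(refs[1]));
}
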
@@ -1747,7 +1747,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                 const atom = elf_file.getAtom(atom_index);
                 _ = try atom.getOrCreateOffsetTableEntry(elf_file);
-                const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
+                const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
                 try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
                 _ = try self.addInst(.{
                     .tag = .jalr,
@@ -2139,12 +2139,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
 fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Asm, ty_pl.payload);
-    const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
-    const clobbers_len = @truncate(u31, extra.data.flags);
+    const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+    const clobbers_len = @as(u31, @truncate(extra.data.flags));
     var extra_i: usize = extra.end;
-    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+    const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
     extra_i += outputs.len;
-    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+    const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
     extra_i += inputs.len;
 
     const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -2289,20 +2289,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
         },
         .immediate => |unsigned_x| {
-            const x = @bitCast(i64, unsigned_x);
+            const x = @as(i64, @bitCast(unsigned_x));
             if (math.minInt(i12) <= x and x <= math.maxInt(i12)) {
                 _ = try self.addInst(.{
                     .tag = .addi,
                     .data = .{ .i_type = .{
                         .rd = reg,
                         .rs1 = .zero,
-                        .imm12 = @intCast(i12, x),
+                        .imm12 = @as(i12, @intCast(x)),
                     } },
                 });
             } else if (math.minInt(i32) <= x and x <= math.maxInt(i32)) {
-                const lo12 = @truncate(i12, x);
+                const lo12 = @as(i12, @truncate(x));
                 const carry: i32 = if (lo12 < 0) 1 else 0;
-                const hi20 = @truncate(i20, (x >> 12) +% carry);
+                const hi20 = @as(i20, @truncate((x >> 12) +% carry));
 
                 // TODO: add test case for 32-bit immediate
                 _ = try self.addInst(.{
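
A sketch of the lo12/hi20 split above: addi sign-extends its 12-bit immediate, so when the low half comes out negative the upper half is bumped by one to compensate. splitImm32 is a hypothetical helper for illustration:

const std = @import("std");

fn splitImm32(x: i32) struct { hi20: i20, lo12: i12 } {
    const lo12 = @as(i12, @truncate(x));
    const carry: i32 = if (lo12 < 0) 1 else 0;
    const hi20 = @as(i20, @truncate((x >> 12) +% carry));
    return .{ .hi20 = hi20, .lo12 = lo12 };
}

test "lui/addi split survives a negative low half" {
    const parts = splitImm32(0x800);
    try std.testing.expectEqual(@as(i12, -2048), parts.lo12);
    try std.testing.expectEqual(@as(i20, 1), parts.hi20);
    // Reassemble: (hi20 << 12) plus the sign-extended lo12 gives x back.
    try std.testing.expectEqual(@as(i32, 0x800), (@as(i32, parts.hi20) << 12) + parts.lo12);
}
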
@@ -2501,7 +2501,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
     const result: MCValue = res: {
         if (self.liveness.isUnused(inst)) break :res MCValue.dead;
         return self.fail("TODO implement airAggregateInit for riscv64", .{});
@@ -2653,7 +2653,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
 
             for (fn_info.param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
                 if (param_size <= 8) {
                     if (next_register < argument_registers.len) {
                         result.args[i] = .{ .register = argument_registers[next_register] };
@@ -2690,7 +2690,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     } else switch (cc) {
         .Naked => unreachable,
         .Unspecified, .C => {
-            const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+            const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
             if (ret_ty_size <= 8) {
                 result.return_value = .{ .register = .a0 };
             } else if (ret_ty_size <= 16) {
src/arch/riscv64/Emit.zig
@@ -39,7 +39,7 @@ pub fn emitMir(
 
     // Emit machine code
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         switch (tag) {
             .add => try emit.mirRType(inst),
             .sub => try emit.mirRType(inst),
@@ -85,7 +85,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
 }
 
 fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
-    const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
+    const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
     const delta_pc: usize = self.code.items.len - self.prev_di_pc;
     switch (self.debug_output) {
         .dwarf => |dw| {
@@ -102,13 +102,13 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
             // increasing the line number
             try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
             if (d_pc_p9 > 0) {
                 // minus one because if it's the last one, we want to leave space to change the line, which is one quantum
-                try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant);
+                try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, quant) + 128)) - quant);
                 if (dbg_out.pcop_change_index.*) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
                 // we don't need to do anything, because adding the quant does it for us
             } else unreachable;
src/arch/riscv64/Mir.zig
@@ -135,7 +135,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => mir.extra[i],
-            i32 => @bitCast(i32, mir.extra[i]),
+            i32 => @as(i32, @bitCast(mir.extra[i])),
             else => @compileError("bad field type"),
         };
         i += 1;
src/arch/sparc64/bits.zig
@@ -16,7 +16,7 @@ pub const Register = enum(u6) {
     // zig fmt: on
 
     pub fn id(self: Register) u5 {
-        return @truncate(u5, @intFromEnum(self));
+        return @as(u5, @truncate(@intFromEnum(self)));
     }
 
     pub fn enc(self: Register) u5 {
@@ -96,9 +96,9 @@ pub const FloatingPointRegister = enum(u7) {
 
     pub fn id(self: FloatingPointRegister) u6 {
         return switch (self.size()) {
-            32 => @truncate(u6, @intFromEnum(self)),
-            64 => @truncate(u6, (@intFromEnum(self) - 32) * 2),
-            128 => @truncate(u6, (@intFromEnum(self) - 64) * 4),
+            32 => @as(u6, @truncate(@intFromEnum(self))),
+            64 => @as(u6, @truncate((@intFromEnum(self) - 32) * 2)),
+            128 => @as(u6, @truncate((@intFromEnum(self) - 64) * 4)),
             else => unreachable,
         };
     }
@@ -109,7 +109,7 @@ pub const FloatingPointRegister = enum(u7) {
         // (See section 5.1.4.1 of SPARCv9 ISA specification)
 
         const reg_id = self.id();
-        return @truncate(u5, reg_id | (reg_id >> 5));
+        return @as(u5, @truncate(reg_id | (reg_id >> 5)));
     }
 
     /// Returns the bit-width of the register.
@@ -752,13 +752,13 @@ pub const Instruction = union(enum) {
     // See section 6.2 of the SPARCv9 ISA manual.
 
     fn format1(disp: i32) Instruction {
-        const udisp = @bitCast(u32, disp);
+        const udisp = @as(u32, @bitCast(disp));
 
         // In SPARC, branch targets need to be aligned to 4 bytes.
         assert(udisp % 4 == 0);
 
         // Discard the last two bits since those are implicitly zero.
-        const udisp_truncated = @truncate(u30, udisp >> 2);
+        const udisp_truncated = @as(u30, @truncate(udisp >> 2));
         return Instruction{
             .format_1 = .{
                 .disp30 = udisp_truncated,
@@ -777,13 +777,13 @@ pub const Instruction = union(enum) {
     }
 
     fn format2b(op2: u3, cond: Condition, annul: bool, disp: i24) Instruction {
-        const udisp = @bitCast(u24, disp);
+        const udisp = @as(u24, @bitCast(disp));
 
         // In SPARC, branch targets need to be aligned to 4 bytes.
         assert(udisp % 4 == 0);
 
         // Discard the last two bits since those are implicitly zero.
-        const udisp_truncated = @truncate(u22, udisp >> 2);
+        const udisp_truncated = @as(u22, @truncate(udisp >> 2));
         return Instruction{
             .format_2b = .{
                 .a = @intFromBool(annul),
@@ -795,16 +795,16 @@ pub const Instruction = union(enum) {
     }
 
     fn format2c(op2: u3, cond: Condition, annul: bool, pt: bool, ccr: CCR, disp: i21) Instruction {
-        const udisp = @bitCast(u21, disp);
+        const udisp = @as(u21, @bitCast(disp));
 
         // In SPARC, branch targets need to be aligned to 4 bytes.
         assert(udisp % 4 == 0);
 
         // Discard the last two bits since those are implicitly zero.
-        const udisp_truncated = @truncate(u19, udisp >> 2);
+        const udisp_truncated = @as(u19, @truncate(udisp >> 2));
 
-        const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
-        const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+        const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+        const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
         return Instruction{
             .format_2c = .{
                 .a = @intFromBool(annul),
@@ -819,16 +819,16 @@ pub const Instruction = union(enum) {
     }
 
     fn format2d(op2: u3, rcond: RCondition, annul: bool, pt: bool, rs1: Register, disp: i18) Instruction {
-        const udisp = @bitCast(u18, disp);
+        const udisp = @as(u18, @bitCast(disp));
 
         // In SPARC, branch targets need to be aligned to 4 bytes.
         assert(udisp % 4 == 0);
 
         // Discard the last two bits since those are implicitly zero,
         // and split the result into low and high parts.
-        const udisp_truncated = @truncate(u16, udisp >> 2);
-        const udisp_hi = @truncate(u2, (udisp_truncated & 0b1100_0000_0000_0000) >> 14);
-        const udisp_lo = @truncate(u14, udisp_truncated & 0b0011_1111_1111_1111);
+        const udisp_truncated = @as(u16, @truncate(udisp >> 2));
+        const udisp_hi = @as(u2, @truncate((udisp_truncated & 0b1100_0000_0000_0000) >> 14));
+        const udisp_lo = @as(u14, @truncate(udisp_truncated & 0b0011_1111_1111_1111));
         return Instruction{
             .format_2d = .{
                 .a = @intFromBool(annul),
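
The format2* helpers above all follow the same displacement recipe: bitcast the signed offset, assert 4-byte alignment, drop the two implicitly zero bits. A sketch mirroring the i21 case, with encodeDisp19 a hypothetical name:

const std = @import("std");

fn encodeDisp19(disp: i21) u19 {
    const udisp = @as(u21, @bitCast(disp));
    std.debug.assert(udisp % 4 == 0); // branch targets are 4-byte aligned
    // The low two bits are implicitly zero, so only 19 bits are stored.
    return @as(u19, @truncate(udisp >> 2));
}

test "a backwards displacement keeps its sign bits" {
    try std.testing.expectEqual(@as(u19, 0x7fffe), encodeDisp19(-8));
}
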
@@ -860,7 +860,7 @@ pub const Instruction = union(enum) {
                 .rd = rd.enc(),
                 .op3 = op3,
                 .rs1 = rs1.enc(),
-                .simm13 = @bitCast(u13, imm),
+                .simm13 = @as(u13, @bitCast(imm)),
             },
         };
     }
@@ -880,7 +880,7 @@ pub const Instruction = union(enum) {
                 .op = op,
                 .op3 = op3,
                 .rs1 = rs1.enc(),
-                .simm13 = @bitCast(u13, imm),
+                .simm13 = @as(u13, @bitCast(imm)),
             },
         };
     }
@@ -904,7 +904,7 @@ pub const Instruction = union(enum) {
                 .op3 = op3,
                 .rs1 = rs1.enc(),
                 .rcond = @intFromEnum(rcond),
-                .simm10 = @bitCast(u10, imm),
+                .simm10 = @as(u10, @bitCast(imm)),
             },
         };
     }
@@ -922,8 +922,8 @@ pub const Instruction = union(enum) {
     fn format3h(cmask: MemCompletionConstraint, mmask: MemOrderingConstraint) Instruction {
         return Instruction{
             .format_3h = .{
-                .cmask = @bitCast(u3, cmask),
-                .mmask = @bitCast(u4, mmask),
+                .cmask = @as(u3, @bitCast(cmask)),
+                .mmask = @as(u4, @bitCast(mmask)),
             },
         };
     }
@@ -995,8 +995,8 @@ pub const Instruction = union(enum) {
         };
     }
     fn format3o(op: u2, op3: u6, opf: u9, ccr: CCR, rs1: Register, rs2: Register) Instruction {
-        const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
-        const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+        const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+        const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
         return Instruction{
             .format_3o = .{
                 .op = op,
@@ -1051,8 +1051,8 @@ pub const Instruction = union(enum) {
     }
 
     fn format4a(op3: u6, ccr: CCR, rs1: Register, rs2: Register, rd: Register) Instruction {
-        const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
-        const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+        const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+        const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
         return Instruction{
             .format_4a = .{
                 .rd = rd.enc(),
@@ -1066,8 +1066,8 @@ pub const Instruction = union(enum) {
     }
 
     fn format4b(op3: u6, ccr: CCR, rs1: Register, imm: i11, rd: Register) Instruction {
-        const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
-        const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+        const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+        const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
         return Instruction{
             .format_4b = .{
                 .rd = rd.enc(),
@@ -1075,15 +1075,15 @@ pub const Instruction = union(enum) {
                 .rs1 = rs1.enc(),
                 .cc1 = ccr_cc1,
                 .cc0 = ccr_cc0,
-                .simm11 = @bitCast(u11, imm),
+                .simm11 = @as(u11, @bitCast(imm)),
             },
         };
     }
 
     fn format4c(op3: u6, cond: Condition, ccr: CCR, rs2: Register, rd: Register) Instruction {
-        const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2);
-        const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
-        const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+        const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2));
+        const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+        const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
         return Instruction{
             .format_4c = .{
                 .rd = rd.enc(),
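
The repeated ccr_cc* lines above peel single condition-code bits out of the CCR enum value; a tiny check with a hypothetical 3-bit value:

const std = @import("std");

test "ccr value splits into one-bit cc fields" {
    const ccr_value: u3 = 0b110; // hypothetical @intFromEnum(ccr) result
    try std.testing.expectEqual(@as(u1, 1), @as(u1, @truncate(ccr_value >> 2)));
    try std.testing.expectEqual(@as(u1, 1), @as(u1, @truncate(ccr_value >> 1)));
    try std.testing.expectEqual(@as(u1, 0), @as(u1, @truncate(ccr_value)));
}
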
@@ -1098,9 +1098,9 @@ pub const Instruction = union(enum) {
     }
 
     fn format4d(op3: u6, cond: Condition, ccr: CCR, imm: i11, rd: Register) Instruction {
-        const ccr_cc2 = @truncate(u1, @intFromEnum(ccr) >> 2);
-        const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
-        const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+        const ccr_cc2 = @as(u1, @truncate(@intFromEnum(ccr) >> 2));
+        const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+        const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
         return Instruction{
             .format_4d = .{
                 .rd = rd.enc(),
@@ -1109,14 +1109,14 @@ pub const Instruction = union(enum) {
                 .cond = cond.enc(),
                 .cc1 = ccr_cc1,
                 .cc0 = ccr_cc0,
-                .simm11 = @bitCast(u11, imm),
+                .simm11 = @as(u11, @bitCast(imm)),
             },
         };
     }
 
     fn format4e(op3: u6, ccr: CCR, rs1: Register, rd: Register, sw_trap: u7) Instruction {
-        const ccr_cc1 = @truncate(u1, @intFromEnum(ccr) >> 1);
-        const ccr_cc0 = @truncate(u1, @intFromEnum(ccr));
+        const ccr_cc1 = @as(u1, @truncate(@intFromEnum(ccr) >> 1));
+        const ccr_cc0 = @as(u1, @truncate(@intFromEnum(ccr)));
         return Instruction{
             .format_4e = .{
                 .rd = rd.enc(),
@@ -1468,8 +1468,8 @@ pub const Instruction = union(enum) {
     pub fn trap(comptime s2: type, cond: ICondition, ccr: CCR, rs1: Register, rs2: s2) Instruction {
         // Tcc instructions abuse the rd field to store the conditionals.
         return switch (s2) {
-            Register => format4a(0b11_1010, ccr, rs1, rs2, @enumFromInt(Register, @intFromEnum(cond))),
-            u7 => format4e(0b11_1010, ccr, rs1, @enumFromInt(Register, @intFromEnum(cond)), rs2),
+            Register => format4a(0b11_1010, ccr, rs1, rs2, @as(Register, @enumFromInt(@intFromEnum(cond)))),
+            u7 => format4e(0b11_1010, ccr, rs1, @as(Register, @enumFromInt(@intFromEnum(cond))), rs2),
             else => unreachable,
         };
     }
src/arch/sparc64/CodeGen.zig
@@ -415,7 +415,7 @@ fn gen(self: *Self) !void {
                     .branch_predict_int = .{
                         .ccr = .xcc,
                         .cond = .al,
-                        .inst = @intCast(u32, self.mir_instructions.len),
+                        .inst = @as(u32, @intCast(self.mir_instructions.len)),
                     },
                 },
             });
@@ -840,7 +840,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
     const result: MCValue = res: {
         if (self.liveness.isUnused(inst)) break :res MCValue.dead;
         return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});
@@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType(mod);
-        const array_len = @intCast(u32, array_ty.arrayLen(mod));
+        const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
 
         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -893,11 +893,11 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Asm, ty_pl.payload);
     const is_volatile = (extra.data.flags & 0x80000000) != 0;
-    const clobbers_len = @truncate(u31, extra.data.flags);
+    const clobbers_len = @as(u31, @truncate(extra.data.flags));
     var extra_i: usize = extra.end;
-    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.outputs_len]);
+    const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.outputs_len]));
     extra_i += outputs.len;
-    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.inputs_len]);
+    const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.inputs_len]));
     extra_i += inputs.len;
 
     const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -1237,13 +1237,13 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
                 switch (operand) {
                     .immediate => |imm| {
                         const swapped = switch (int_info.bits) {
-                            16 => @byteSwap(@intCast(u16, imm)),
-                            24 => @byteSwap(@intCast(u24, imm)),
-                            32 => @byteSwap(@intCast(u32, imm)),
-                            40 => @byteSwap(@intCast(u40, imm)),
-                            48 => @byteSwap(@intCast(u48, imm)),
-                            56 => @byteSwap(@intCast(u56, imm)),
-                            64 => @byteSwap(@intCast(u64, imm)),
+                            16 => @byteSwap(@as(u16, @intCast(imm))),
+                            24 => @byteSwap(@as(u24, @intCast(imm))),
+                            32 => @byteSwap(@as(u32, @intCast(imm))),
+                            40 => @byteSwap(@as(u40, @intCast(imm))),
+                            48 => @byteSwap(@as(u48, @intCast(imm))),
+                            56 => @byteSwap(@as(u56, @intCast(imm))),
+                            64 => @byteSwap(@as(u64, @intCast(imm))),
                             else => return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}),
                         };
                         break :result .{ .immediate = swapped };
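
The branch above narrows the stored u64 immediate to the integer's real width before swapping; a sketch with hypothetical values:

const std = @import("std");

test "byteSwap of a narrowed immediate" {
    const imm: u64 = 0x1234; // a 16-bit value carried around as u64
    try std.testing.expectEqual(@as(u16, 0x3412), @byteSwap(@as(u16, @intCast(imm))));
}
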
@@ -1295,7 +1295,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len]));
     const ty = self.typeOf(callee);
     const mod = self.bin_file.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -1348,7 +1348,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                     const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                     const atom = elf_file.getAtom(atom_index);
                     _ = try atom.getOrCreateOffsetTableEntry(elf_file);
-                    break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
+                    break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file)));
                 } else unreachable;
 
                 try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr });
@@ -1515,7 +1515,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @intFromEnum(pl_op.operand);
         if (op_int >= Air.ref_start_index) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+            const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
             self.processDeath(op_index);
         }
     }
@@ -1851,7 +1851,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const loop = self.air.extraData(Air.Block, ty_pl.payload);
     const body = self.air.extra[loop.end .. loop.end + loop.data.body_len];
-    const start = @intCast(u32, self.mir_instructions.len);
+    const start = @as(u32, @intCast(self.mir_instructions.len));
 
     try self.genBody(body);
     try self.jump(start);
@@ -2574,7 +2574,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
         const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.typeOf(operand);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -2772,7 +2772,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
     const gpa = self.gpa;
     try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
-    const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+    const result_index = @as(Air.Inst.Index, @intCast(self.mir_instructions.len));
     self.mir_instructions.appendAssumeCapacity(inst);
     return result_index;
 }
@@ -3207,7 +3207,7 @@ fn binOpImmediate(
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) },
             },
         },
         .sll,
@@ -3218,7 +3218,7 @@ fn binOpImmediate(
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u5, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u5, @intCast(rhs.immediate)) },
             },
         },
         .sllx,
@@ -3229,14 +3229,14 @@ fn binOpImmediate(
                 .is_imm = true,
                 .rd = dest_reg,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u6, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u6, @intCast(rhs.immediate)) },
             },
         },
         .cmp => .{
             .arithmetic_2op = .{
                 .is_imm = true,
                 .rs1 = lhs_reg,
-                .rs2_or_imm = .{ .imm = @intCast(u12, rhs.immediate) },
+                .rs2_or_imm = .{ .imm = @as(u12, @intCast(rhs.immediate)) },
             },
         },
         else => unreachable,
@@ -3535,7 +3535,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
         return MCValue.none;
     }
 
-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+    const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
     switch (error_union_mcv) {
         .register => return self.fail("TODO errUnionPayload for registers", .{}),
         .stack_offset => |off| {
@@ -3565,15 +3565,15 @@ fn finishAirBookkeeping(self: *Self) void {
 fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
     var tomb_bits = self.liveness.getTombBits(inst);
     for (operands) |op| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @intFromEnum(op);
         if (op_int < Air.ref_start_index) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
+        const op_index = @as(Air.Inst.Index, @intCast(op_int - Air.ref_start_index));
         self.processDeath(op_index);
     }
-    const is_used = @truncate(u1, tomb_bits) == 0;
+    const is_used = @as(u1, @truncate(tomb_bits)) == 0;
     if (is_used) {
         log.debug("%{d} => {}", .{ inst, result });
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -3663,7 +3663,7 @@ fn genInlineMemcpy(
         .data = .{ .branch_predict_reg = .{
             .cond = .ne_zero,
             .rs1 = len,
-            .inst = @intCast(u32, self.mir_instructions.len - 2),
+            .inst = @as(u32, @intCast(self.mir_instructions.len - 2)),
         } },
     });
 
@@ -3838,7 +3838,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                         .arithmetic_2op = .{
                             .is_imm = true,
                             .rs1 = reg,
-                            .rs2_or_imm = .{ .imm = @truncate(u12, x) },
+                            .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) },
                         },
                     },
                 });
@@ -3848,7 +3848,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                     .data = .{
                         .sethi = .{
                             .rd = reg,
-                            .imm = @truncate(u22, x >> 10),
+                            .imm = @as(u22, @truncate(x >> 10)),
                         },
                     },
                 });
@@ -3860,12 +3860,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                             .is_imm = true,
                             .rd = reg,
                             .rs1 = reg,
-                            .rs2_or_imm = .{ .imm = @truncate(u10, x) },
+                            .rs2_or_imm = .{ .imm = @as(u10, @truncate(x)) },
                         },
                     },
                 });
             } else if (x <= math.maxInt(u44)) {
-                try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 12) });
+                try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 12)) });
 
                 _ = try self.addInst(.{
                     .tag = .sllx,
@@ -3886,7 +3886,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                             .is_imm = true,
                             .rd = reg,
                             .rs1 = reg,
-                            .rs2_or_imm = .{ .imm = @truncate(u12, x) },
+                            .rs2_or_imm = .{ .imm = @as(u12, @truncate(x)) },
                         },
                     },
                 });
@@ -3894,8 +3894,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                 // Need to allocate a temporary register to load 64-bit immediates.
                 const tmp_reg = try self.register_manager.allocReg(null, gp);
 
-                try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) });
-                try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) });
+                try self.genSetReg(ty, tmp_reg, .{ .immediate = @as(u32, @truncate(x)) });
+                try self.genSetReg(ty, reg, .{ .immediate = @as(u32, @truncate(x >> 32)) });
 
                 _ = try self.addInst(.{
                     .tag = .sllx,
@@ -3994,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
             const overflow_bit_ty = ty.structFieldType(1, mod);
-            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
+            const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, mod)));
             const cond_reg = try self.register_manager.allocReg(null, gp);
 
             // TODO handle floating point CCRs
@@ -4412,8 +4412,8 @@ fn parseRegName(name: []const u8) ?Register {
 fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
     const tag = self.mir_instructions.items(.tag)[inst];
     switch (tag) {
-        .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
-        .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+        .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
+        .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len)),
         else => unreachable,
     }
 }
@@ -4490,7 +4490,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
             };
 
             for (fn_info.param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
                 if (param_size <= 8) {
                     if (next_register < argument_registers.len) {
                         result.args[i] = .{ .register = argument_registers[next_register] };
@@ -4522,7 +4522,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
             } else if (!ret_ty.hasRuntimeBits(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
+                const ret_ty_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
                 // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
                 if (ret_ty_size <= 8) {
                     result.return_value = switch (role) {
@@ -4721,7 +4721,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.typeOf(operand);
         const struct_ty = ptr_ty.childType(mod);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, mod)));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4816,7 +4816,7 @@ fn truncRegister(
                         .is_imm = true,
                         .rd = dest_reg,
                         .rs1 = operand_reg,
-                        .rs2_or_imm = .{ .imm = @intCast(u6, 64 - int_bits) },
+                        .rs2_or_imm = .{ .imm = @as(u6, @intCast(64 - int_bits)) },
                     },
                 },
             });
@@ -4830,7 +4830,7 @@ fn truncRegister(
                         .is_imm = true,
                         .rd = dest_reg,
                         .rs1 = dest_reg,
-                        .rs2_or_imm = .{ .imm = @intCast(u6, int_bits) },
+                        .rs2_or_imm = .{ .imm = @as(u6, @intCast(int_bits)) },
                     },
                 },
             });
src/arch/sparc64/Emit.zig
@@ -70,7 +70,7 @@ pub fn emitMir(
 
     // Emit machine code
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         switch (tag) {
             .dbg_line => try emit.mirDbgLine(inst),
             .dbg_prologue_end => try emit.mirDebugPrologueEnd(),
@@ -294,7 +294,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
         .bpcc => switch (tag) {
             .bpcc => {
                 const branch_predict_int = emit.mir.instructions.items(.data)[inst].branch_predict_int;
-                const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_int.inst).?) - @intCast(i64, emit.code.items.len);
+                const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_int.inst).?)) - @as(i64, @intCast(emit.code.items.len));
                 log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });
 
                 try emit.writeInstruction(
@@ -303,7 +303,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
                         branch_predict_int.annul,
                         branch_predict_int.pt,
                         branch_predict_int.ccr,
-                        @intCast(i21, offset),
+                        @as(i21, @intCast(offset)),
                     ),
                 );
             },
@@ -312,7 +312,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
         .bpr => switch (tag) {
             .bpr => {
                 const branch_predict_reg = emit.mir.instructions.items(.data)[inst].branch_predict_reg;
-                const offset = @intCast(i64, emit.code_offset_mapping.get(branch_predict_reg.inst).?) - @intCast(i64, emit.code.items.len);
+                const offset = @as(i64, @intCast(emit.code_offset_mapping.get(branch_predict_reg.inst).?)) - @as(i64, @intCast(emit.code.items.len));
                 log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });
 
                 try emit.writeInstruction(
@@ -321,7 +321,7 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
                         branch_predict_reg.annul,
                         branch_predict_reg.pt,
                         branch_predict_reg.rs1,
-                        @intCast(i18, offset),
+                        @as(i18, @intCast(offset)),
                     ),
                 );
             },
@@ -437,9 +437,9 @@ fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void {
     if (data.is_imm) {
         const imm = data.rs2_or_imm.imm;
         switch (tag) {
-            .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @truncate(u5, imm), rd)),
-            .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @truncate(u5, imm), rd)),
-            .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @truncate(u5, imm), rd)),
+            .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @as(u5, @truncate(imm)), rd)),
+            .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @as(u5, @truncate(imm)), rd)),
+            .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @as(u5, @truncate(imm)), rd)),
             .sllx => try emit.writeInstruction(Instruction.sllx(u6, rs1, imm, rd)),
             .srlx => try emit.writeInstruction(Instruction.srlx(u6, rs1, imm, rd)),
             .srax => try emit.writeInstruction(Instruction.srax(u6, rs1, imm, rd)),
@@ -495,7 +495,7 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
 }
 
 fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
-    const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
+    const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
     const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
     switch (emit.debug_output) {
         .dwarf => |dbg_out| {
@@ -547,7 +547,7 @@ fn lowerBranches(emit: *Emit) !void {
     // TODO optimization opportunity: do this in codegen while
     // generating MIR
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         if (isBranch(tag)) {
             const target_inst = emit.branchTarget(inst);
 
@@ -592,7 +592,7 @@ fn lowerBranches(emit: *Emit) !void {
         var current_code_offset: usize = 0;
 
         for (mir_tags, 0..) |tag, index| {
-            const inst = @intCast(u32, index);
+            const inst = @as(u32, @intCast(index));
 
             // If this instruction is contained in the code offset
             // mapping (when it is a target of a branch or if it is a
@@ -607,7 +607,7 @@ fn lowerBranches(emit: *Emit) !void {
                 const target_inst = emit.branchTarget(inst);
                 if (target_inst < inst) {
                     const target_offset = emit.code_offset_mapping.get(target_inst).?;
-                    const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset);
+                    const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset));
                     const branch_type = emit.branch_types.getPtr(inst).?;
                     const optimal_branch_type = try emit.optimalBranchType(tag, offset);
                     if (branch_type.* != optimal_branch_type) {
@@ -626,7 +626,7 @@ fn lowerBranches(emit: *Emit) !void {
                 for (origin_list.items) |forward_branch_inst| {
                     const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
                     const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
-                    const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset);
+                    const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset));
                     const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
                     const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
                     if (branch_type.* != optimal_branch_type) {
src/arch/sparc64/Mir.zig
@@ -379,7 +379,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => mir.extra[i],
-            i32 => @bitCast(i32, mir.extra[i]),
+            i32 => @as(i32, @bitCast(mir.extra[i])),
             else => @compileError("bad field type"),
         };
         i += 1;
src/arch/wasm/CodeGen.zig
@@ -120,7 +120,7 @@ const WValue = union(enum) {
         if (local_value < reserved + 2) return; // reserved locals may never be re-used. Also accounts for 2 stack locals.
 
         const index = local_value - reserved;
-        const valtype = @enumFromInt(wasm.Valtype, gen.locals.items[index]);
+        const valtype = @as(wasm.Valtype, @enumFromInt(gen.locals.items[index]));
         switch (valtype) {
             .i32 => gen.free_locals_i32.append(gen.gpa, local_value) catch return, // It's ok for any of these to fail; a new local can be allocated instead
             .i64 => gen.free_locals_i64.append(gen.gpa, local_value) catch return,
@@ -817,7 +817,7 @@ fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []c
     assert(operands.len <= Liveness.bpi - 1);
     var tomb_bits = func.liveness.getTombBits(inst);
     for (operands) |operand| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         processDeath(func, operand);
@@ -910,7 +910,7 @@ fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
 }
 
 fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void {
-    const extra_index = @intCast(u32, func.mir_extra.items.len);
+    const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
     try func.mir_extra.append(func.gpa, @intFromEnum(opcode));
     try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
 }
@@ -934,11 +934,11 @@ fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void {
 /// Accepts the index into the list of 128-bit immediates
 fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void {
     const simd_values = func.simd_immediates.items[index];
-    const extra_index = @intCast(u32, func.mir_extra.items.len);
+    const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
     // tag + 128bit value
     try func.mir_extra.ensureUnusedCapacity(func.gpa, 5);
     func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const));
-    func.mir_extra.appendSliceAssumeCapacity(@alignCast(4, mem.bytesAsSlice(u32, &simd_values)));
+    func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values)));
     try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
 }
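
The @alignCast in addImm128 above is one of the cases the commit message says could not be rewritten mechanically: the target alignment is no longer an argument but is inferred from the result type. A standalone sketch of that inference:

const std = @import("std");

test "alignCast now takes its alignment from the destination type" {
    var bytes: [8]u8 align(4) = .{ 1, 0, 0, 0, 2, 0, 0, 0 };
    const raw: []u8 = &bytes; // type-level alignment drops to 1
    // Old syntax: @alignCast(4, ...). New: the annotation supplies it.
    const words: []align(4) const u32 = @alignCast(std.mem.bytesAsSlice(u32, raw));
    try std.testing.expectEqual(@as(usize, 2), words.len);
}
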
 
@@ -979,7 +979,7 @@ fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
 /// Returns the index into `mir_extra`
 fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, func.mir_extra.items.len);
+    const result = @as(u32, @intCast(func.mir_extra.items.len));
     inline for (fields) |field| {
         func.mir_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
@@ -1020,7 +1020,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
         },
         .Union => switch (ty.containerLayout(mod)) {
             .Packed => {
-                const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
+                const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory");
                 return typeToValtype(int_ty, mod);
             },
             else => wasm.Valtype.i32,
@@ -1050,7 +1050,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
         .dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?)
         .none, .stack => {}, // no-op
         .local => |idx| try func.addLabel(.local_get, idx.value),
-        .imm32 => |val| try func.addImm32(@bitCast(i32, val)),
+        .imm32 => |val| try func.addImm32(@as(i32, @bitCast(val))),
         .imm64 => |val| try func.addImm64(val),
         .imm128 => |val| try func.addImm128(val),
         .float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
@@ -1264,7 +1264,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
     // In case we have a return value but the last instruction is a noreturn (such as a while loop),
     // we emit an unreachable instruction to tell the stack validator that part will never be reached.
     if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
-        const inst = @intCast(u32, func.air.instructions.len - 1);
+        const inst = @as(u32, @intCast(func.air.instructions.len - 1));
         const last_inst_ty = func.typeOfIndex(inst);
         if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) {
             try func.addTag(.@"unreachable");
@@ -1287,11 +1287,11 @@ fn genFunc(func: *CodeGen) InnerError!void {
         try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
         // get the total stack size
         const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
-        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } });
         // subtract it from the current stack pointer
         try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
         // Get the negative stack alignment
-        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, func.stack_alignment) * -1 } });
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } });
         // Bitwise-AND the value so the new stack pointer is aligned to the ABI alignment
         try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
         // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
@@ -1432,7 +1432,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
             if (value != .imm32 and value != .imm64) {
                 const opcode = buildOpcode(.{
                     .op = .load,
-                    .width = @intCast(u8, abi_size),
+                    .width = @as(u8, @intCast(abi_size)),
                     .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
                     .valtype1 = typeToValtype(scalar_type, mod),
                 });
@@ -1468,7 +1468,7 @@ fn lowerToStack(func: *CodeGen, value: WValue) !void {
             if (offset.value > 0) {
                 switch (func.arch()) {
                     .wasm32 => {
-                        try func.addImm32(@bitCast(i32, offset.value));
+                        try func.addImm32(@as(i32, @bitCast(offset.value)));
                         try func.addTag(.i32_add);
                     },
                     .wasm64 => {
@@ -1815,7 +1815,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en
     if (offset + ptr_value.offset() > 0) {
         switch (func.arch()) {
             .wasm32 => {
-                try func.addImm32(@bitCast(i32, @intCast(u32, offset + ptr_value.offset())));
+                try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(offset + ptr_value.offset())))));
                 try func.addTag(.i32_add);
             },
             .wasm64 => {
@@ -2111,7 +2111,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.emitWValue(operand);
                 const opcode = buildOpcode(.{
                     .op = .load,
-                    .width = @intCast(u8, scalar_type.abiSize(mod) * 8),
+                    .width = @as(u8, @intCast(scalar_type.abiSize(mod) * 8)),
                     .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
                     .valtype1 = typeToValtype(scalar_type, mod),
                 });
@@ -2180,7 +2180,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{});
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const extra = func.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]));
     const ty = func.typeOf(pl_op.operand);
 
     const mod = func.bin_file.base.options.module.?;
@@ -2319,15 +2319,15 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
             return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
         }
 
-        var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1);
-        mask <<= @intCast(u6, ptr_info.packed_offset.bit_offset);
+        var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(mod)))) - 1));
+        mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset));
         mask ^= ~@as(u64, 0);
         const shift_val = if (ptr_info.packed_offset.host_size <= 4)
             WValue{ .imm32 = ptr_info.packed_offset.bit_offset }
         else
             WValue{ .imm64 = ptr_info.packed_offset.bit_offset };
         const mask_val = if (ptr_info.packed_offset.host_size <= 4)
-            WValue{ .imm32 = @truncate(u32, mask) }
+            WValue{ .imm32 = @as(u32, @truncate(mask)) }
         else
             WValue{ .imm64 = mask };
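
A worked sketch of the mask built above for packed-field stores, with hypothetical width and offset: after the xor with all-ones, the mask has zeros exactly over the field being replaced:

const std = @import("std");

test "packed-store mask clears exactly the stored field" {
    const bit_size: u7 = 3; // hypothetical field width
    const bit_offset: u6 = 4; // hypothetical offset in the host integer
    var mask = @as(u64, @intCast((@as(u65, 1) << bit_size) - 1));
    mask <<= bit_offset;
    mask ^= ~@as(u64, 0);
    try std.testing.expectEqual(~@as(u64, 0b111_0000), mask);
}
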
 
@@ -2357,7 +2357,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
                 return func.store(lhs, rhs, Type.anyerror, 0);
             }
 
-            const len = @intCast(u32, abi_size);
+            const len = @as(u32, @intCast(abi_size));
             return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .Optional => {
@@ -2372,23 +2372,23 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
                 return func.store(lhs, rhs, Type.anyerror, 0);
             }
 
-            const len = @intCast(u32, abi_size);
+            const len = @as(u32, @intCast(abi_size));
             return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .Struct, .Array, .Union => if (isByRef(ty, mod)) {
-            const len = @intCast(u32, abi_size);
+            const len = @as(u32, @intCast(abi_size));
             return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
             .unrolled => {
-                const len = @intCast(u32, abi_size);
+                const len = @as(u32, @intCast(abi_size));
                 return func.memcpy(lhs, rhs, .{ .imm32 = len });
             },
             .direct => {
                 try func.emitWValue(lhs);
                 try func.lowerToStack(rhs);
                 // TODO: Add helper functions for simd opcodes
-                const extra_index = @intCast(u32, func.mir_extra.items.len);
+                const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
                 // stored as := opcode, offset, alignment (opcode::memarg)
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     std.wasm.simdOpcode(.v128_store),
@@ -2423,7 +2423,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
             try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
             return;
         } else if (abi_size > 16) {
-            try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) });
+            try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(mod))) });
         },
         else => if (abi_size > 8) {
             return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
@@ -2440,7 +2440,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
     const valtype = typeToValtype(ty, mod);
     const opcode = buildOpcode(.{
         .valtype1 = valtype,
-        .width = @intCast(u8, abi_size * 8),
+        .width = @as(u8, @intCast(abi_size * 8)),
         .op = .store,
     });
 
@@ -2501,7 +2501,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
 
     if (ty.zigTypeTag(mod) == .Vector) {
         // TODO: Add helper functions for simd opcodes
-        const extra_index = @intCast(u32, func.mir_extra.items.len);
+        const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
         // stored as := opcode, offset, alignment (opcode::memarg)
         try func.mir_extra.appendSlice(func.gpa, &[_]u32{
             std.wasm.simdOpcode(.v128_load),
@@ -2512,7 +2512,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
         return WValue{ .stack = {} };
     }
 
-    const abi_size = @intCast(u8, ty.abiSize(mod));
+    const abi_size = @as(u8, @intCast(ty.abiSize(mod)));
     const opcode = buildOpcode(.{
         .valtype1 = typeToValtype(ty, mod),
         .width = abi_size * 8,
@@ -2589,10 +2589,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     // For big integers we can ignore this as we will call into compiler-rt which handles this.
     const result = switch (op) {
         .shr, .shl => res: {
-            const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
+            const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse {
                 return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
             };
-            const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
+            const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?;
             const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
                 const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
                 break :blk try tmp.toLocal(func, lhs_ty);
@@ -2868,10 +2868,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     // For big integers we can ignore this as we will call into compiler-rt which handles this.
     const result = switch (op) {
         .shr, .shl => res: {
-            const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
+            const lhs_wasm_bits = toWasmBits(@as(u16, @intCast(lhs_ty.bitSize(mod)))) orelse {
                 return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
             };
-            const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
+            const rhs_wasm_bits = toWasmBits(@as(u16, @intCast(rhs_ty.bitSize(mod)))).?;
             const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
                 const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
                 break :blk try tmp.toLocal(func, lhs_ty);
@@ -2902,7 +2902,7 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
 fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
     assert(ty.abiSize(mod) <= 16);
-    const bitsize = @intCast(u16, ty.bitSize(mod));
+    const bitsize = @as(u16, @intCast(ty.bitSize(mod)));
     const wasm_bits = toWasmBits(bitsize) orelse {
         return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
     };
@@ -2916,7 +2916,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
         const result_ptr = try func.allocStack(ty);
         try func.emitWValue(result_ptr);
         try func.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset());
-        const result = (@as(u64, 1) << @intCast(u6, 64 - (wasm_bits - bitsize))) - 1;
+        const result = (@as(u64, 1) << @as(u6, @intCast(64 - (wasm_bits - bitsize)))) - 1;
         try func.emitWValue(result_ptr);
         _ = try func.load(operand, Type.u64, 0);
         try func.addImm64(result);
@@ -2925,10 +2925,10 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
         return result_ptr;
     }
 
-    const result = (@as(u64, 1) << @intCast(u6, bitsize)) - 1;
+    const result = (@as(u64, 1) << @as(u6, @intCast(bitsize))) - 1;
     try func.emitWValue(operand);
     if (bitsize <= 32) {
-        try func.addImm32(@bitCast(i32, @intCast(u32, result)));
+        try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(result)))));
         try func.addTag(.i32_and);
     } else if (bitsize <= 64) {
         try func.addImm64(result);
@@ -2957,15 +2957,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
             const index = elem.index;
             const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod);
             const elem_offset = index * elem_type.abiSize(mod);
-            return func.lowerParentPtr(elem.base.toValue(), @intCast(u32, elem_offset + offset));
+            return func.lowerParentPtr(elem.base.toValue(), @as(u32, @intCast(elem_offset + offset)));
         },
         .field => |field| {
             const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
 
             const field_offset = switch (parent_ty.zigTypeTag(mod)) {
                 .Struct => switch (parent_ty.containerLayout(mod)) {
-                    .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod),
-                    else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod),
+                    .Packed => parent_ty.packedStructFieldByteOffset(@as(usize, @intCast(field.index)), mod),
+                    else => parent_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod),
                 },
                 .Union => switch (parent_ty.containerLayout(mod)) {
                     .Packed => 0,
@@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
                         if (layout.payload_align > layout.tag_align) break :blk 0;
 
                         // tag is stored first so calculate offset from where payload starts
-                        break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align));
+                        break :blk @as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align)));
                     },
                 },
                 .Pointer => switch (parent_ty.ptrSize(mod)) {
@@ -2988,7 +2988,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
                 },
                 else => unreachable,
             };
-            return func.lowerParentPtr(field.base.toValue(), @intCast(u32, offset + field_offset));
+            return func.lowerParentPtr(field.base.toValue(), @as(u32, @intCast(offset + field_offset)));
         },
     }
 }
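
Worked example of the recursion above (values assumed): for element index 3 of an array of `u64`, `elem_offset = 3 * 8 = 24`, which is added to the accumulated `offset` and narrowed to `u32` for the recursive call.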
@@ -3045,11 +3045,11 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
     comptime assert(@typeInfo(T).Int.signedness == .signed);
     assert(bits <= 64);
     const WantedT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
-    if (value >= 0) return @bitCast(WantedT, value);
-    const max_value = @intCast(u64, (@as(u65, 1) << bits) - 1);
-    const flipped = @intCast(T, (~-@as(i65, value)) + 1);
-    const result = @bitCast(WantedT, flipped) & max_value;
-    return @intCast(WantedT, result);
+    if (value >= 0) return @as(WantedT, @bitCast(value));
+    const max_value = @as(u64, @intCast((@as(u65, 1) << bits) - 1));
+    const flipped = @as(T, @intCast((~-@as(i65, value)) + 1));
+    const result = @as(WantedT, @bitCast(flipped)) & max_value;
+    return @as(WantedT, @intCast(result));
 }
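
A quick sanity check of the arithmetic above (a sketch, assuming it runs in this file so `toTwosComplement` and `std` are in scope): `-1` narrowed to 8 bits masks to `0xff`, and `-128` to `0x80`.

    test "toTwosComplement (illustrative)" {
        try std.testing.expectEqual(@as(u32, 0xff), toTwosComplement(@as(i32, -1), 8));
        try std.testing.expectEqual(@as(u32, 0x80), toTwosComplement(@as(i32, -128), 8));
    }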
 
 fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
@@ -3150,18 +3150,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             const int_info = ty.intInfo(mod);
             switch (int_info.signedness) {
                 .signed => switch (int_info.bits) {
-                    0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement(
+                    0...32 => return WValue{ .imm32 = @as(u32, @intCast(toTwosComplement(
                         val.toSignedInt(mod),
-                        @intCast(u6, int_info.bits),
-                    )) },
+                        @as(u6, @intCast(int_info.bits)),
+                    ))) },
                     33...64 => return WValue{ .imm64 = toTwosComplement(
                         val.toSignedInt(mod),
-                        @intCast(u7, int_info.bits),
+                        @as(u7, @intCast(int_info.bits)),
                     ) },
                     else => unreachable,
                 },
                 .unsigned => switch (int_info.bits) {
-                    0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
+                    0...32 => return WValue{ .imm32 = @as(u32, @intCast(val.toUnsignedInt(mod))) },
                     33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) },
                     else => unreachable,
                 },
@@ -3198,7 +3198,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType());
         },
         .float => |float| switch (float.storage) {
-            .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) },
+            .f16 => |f16_val| return WValue{ .imm32 = @as(u16, @bitCast(f16_val)) },
             .f32 => |f32_val| return WValue{ .float32 = f32_val },
             .f64 => |f64_val| return WValue{ .float64 = f64_val },
             else => unreachable,
@@ -3254,7 +3254,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
 /// Stores the value as a 128-bit immediate by appending it to the
 /// SIMD immediates list and returning the index into that list as a `WValue`.
 fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
-    const index = @intCast(u32, func.simd_immediates.items.len);
+    const index = @as(u32, @intCast(func.simd_immediates.items.len));
     try func.simd_immediates.append(func.gpa, value);
     return WValue{ .imm128 = index };
 }
@@ -3270,8 +3270,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
         },
         .Float => switch (ty.floatBits(func.target)) {
             16 => return WValue{ .imm32 = 0xaaaaaaaa },
-            32 => return WValue{ .float32 = @bitCast(f32, @as(u32, 0xaaaaaaaa)) },
-            64 => return WValue{ .float64 = @bitCast(f64, @as(u64, 0xaaaaaaaaaaaaaaaa)) },
+            32 => return WValue{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
+            64 => return WValue{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
             else => unreachable,
         },
         .Pointer => switch (func.arch()) {
@@ -3312,13 +3312,13 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
             .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
             .int => |int| intStorageAsI32(int.storage, mod),
             .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod),
-            .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)),
+            .err => |err| @as(i32, @bitCast(@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))),
             else => unreachable,
         },
     }
 
     return switch (ty.zigTypeTag(mod)) {
-        .ErrorSet => @bitCast(i32, val.getErrorInt(mod)),
+        .ErrorSet => @as(i32, @bitCast(val.getErrorInt(mod))),
         else => unreachable, // Programmer called this function for an illegal type
     };
 }
@@ -3329,11 +3329,11 @@ fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32
 
 fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
     return switch (storage) {
-        .i64 => |x| @intCast(i32, x),
-        .u64 => |x| @bitCast(i32, @intCast(u32, x)),
+        .i64 => |x| @as(i32, @intCast(x)),
+        .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
         .big_int => unreachable,
-        .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)),
-        .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))),
+        .lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))),
+        .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))),
     };
 }
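
The nested casts above are the new spelling of narrow-then-reinterpret: `@intCast` checks that the `u64` fits in 32 bits, and `@bitCast` then reinterprets those bits as signed, something a single cast cannot express. A standalone sketch:

    test "u64 to i32 via @intCast + @bitCast (illustrative)" {
        const x: u64 = 0xffff_ffff;
        const narrowed = @as(u32, @intCast(x)); // asserts the value fits in 32 bits
        const signed = @as(i32, @bitCast(narrowed)); // same bits, now signed: -1
        try @import("std").testing.expectEqual(@as(i32, -1), signed);
    }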
 
@@ -3421,7 +3421,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     try func.branches.ensureUnusedCapacity(func.gpa, 2);
     {
         func.branches.appendAssumeCapacity(.{});
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
+        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len)));
         defer {
             var else_stack = func.branches.pop();
             else_stack.deinit(func.gpa);
@@ -3433,7 +3433,7 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // Outer block that matches the condition
     {
         func.branches.appendAssumeCapacity(.{});
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
+        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len)));
         defer {
             var then_stack = func.branches.pop();
             then_stack.deinit(func.gpa);
@@ -3715,7 +3715,7 @@ fn structFieldPtr(
     }
     switch (struct_ptr) {
         .stack_offset => |stack_offset| {
-            return WValue{ .stack_offset = .{ .value = stack_offset.value + @intCast(u32, offset), .references = 1 } };
+            return WValue{ .stack_offset = .{ .value = stack_offset.value + @as(u32, @intCast(offset)), .references = 1 } };
         },
         else => return func.buildPointerOffset(struct_ptr, offset, .new),
     }
@@ -3755,7 +3755,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     try func.binOp(operand, const_wvalue, backing_ty, .shr);
 
                 if (field_ty.zigTypeTag(mod) == .Float) {
-                    const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+                    const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
                     const truncated = try func.trunc(shifted_value, int_type, backing_ty);
                     const bitcasted = try func.bitcast(field_ty, int_type, truncated);
                     break :result try bitcasted.toLocal(func, field_ty);
@@ -3764,7 +3764,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     // we can simply reuse the operand.
                     break :result func.reuseOperand(struct_field.struct_operand, operand);
                 } else if (field_ty.isPtrAtRuntime(mod)) {
-                    const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+                    const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
                     const truncated = try func.trunc(shifted_value, int_type, backing_ty);
                     break :result try truncated.toLocal(func, field_ty);
                 }
@@ -3783,14 +3783,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     }
                 }
 
-                const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod)));
+                const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(mod))));
                 if (field_ty.zigTypeTag(mod) == .Float) {
-                    const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+                    const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
                     const truncated = try func.trunc(operand, int_type, union_int_type);
                     const bitcasted = try func.bitcast(field_ty, int_type, truncated);
                     break :result try bitcasted.toLocal(func, field_ty);
                 } else if (field_ty.isPtrAtRuntime(mod)) {
-                    const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
+                    const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(mod))));
                     const truncated = try func.trunc(operand, int_type, union_int_type);
                     break :result try truncated.toLocal(func, field_ty);
                 }
@@ -3847,7 +3847,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var highest_maybe: ?i32 = null;
     while (case_i < switch_br.data.cases_len) : (case_i += 1) {
         const case = func.air.extraData(Air.SwitchBr.Case, extra_index);
-        const items = @ptrCast([]const Air.Inst.Ref, func.air.extra[case.end..][0..case.data.items_len]);
+        const items = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[case.end..][0..case.data.items_len]));
         const case_body = func.air.extra[case.end + items.len ..][0..case.data.body_len];
         extra_index = case.end + items.len + case_body.len;
         const values = try func.gpa.alloc(CaseValue, items.len);
@@ -3904,7 +3904,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
 
         // Account for the default branch, so always add '1'
-        const depth = @intCast(u32, highest - lowest + @intFromBool(has_else_body)) + 1;
+        const depth = @as(u32, @intCast(highest - lowest + @intFromBool(has_else_body))) + 1;
         const jump_table: Mir.JumpTable = .{ .length = depth };
         const table_extra_index = try func.addExtra(jump_table);
         try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
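
Concretely (example values): with `lowest = 3`, `highest = 7`, and an else body present, `depth = (7 - 3) + 1 + 1 = 6`, the trailing `+ 1` being the default branch the comment above accounts for.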
@@ -3915,7 +3915,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             const idx = blk: {
                 for (case_list.items, 0..) |case, idx| {
                     for (case.values) |case_value| {
-                        if (case_value.integer == value) break :blk @intCast(u32, idx);
+                        if (case_value.integer == value) break :blk @as(u32, @intCast(idx));
                     }
                 }
                 // error sets are almost always sparse so we use the default case
@@ -4018,7 +4018,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
         try func.emitWValue(operand);
         if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             try func.addMemArg(.i32_load16_u, .{
-                .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)),
+                .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
                 .alignment = Type.anyerror.abiAlignment(mod),
             });
         }
@@ -4051,7 +4051,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
             break :result WValue{ .none = {} };
         }
 
-        const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+        const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod)));
         if (op_is_ptr or isByRef(payload_ty, mod)) {
             break :result try func.buildPointerOffset(operand, pl_offset, .new);
         }
@@ -4080,7 +4080,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod)));
+        const error_val = try func.load(operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))));
         break :result try error_val.toLocal(func, Type.anyerror);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
@@ -4100,13 +4100,13 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
         }
 
         const err_union = try func.allocStack(err_ty);
-        const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
+        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new);
         try func.store(payload_ptr, operand, pl_ty, 0);
 
         // ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
         try func.emitWValue(err_union);
         try func.addImm32(0);
-        const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
+        const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
         try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
         break :result err_union;
     };
@@ -4128,11 +4128,11 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
         const err_union = try func.allocStack(err_ty);
         // store error value
-        try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod)));
+        try func.store(err_union, operand, Type.anyerror, @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))));
 
         // write 'undefined' to the payload
-        const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
-        const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod));
+        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod))), .new);
+        const len = @as(u32, @intCast(err_ty.errorUnionPayload(mod).abiSize(mod)));
         try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
 
         break :result err_union;
@@ -4154,8 +4154,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         return func.fail("todo Wasm intcast for bitsize > 128", .{});
     }
 
-    const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?;
-    const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
+    const op_bits = toWasmBits(@as(u16, @intCast(operand_ty.bitSize(mod)))).?;
+    const wanted_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
     const result = if (op_bits == wanted_bits)
         func.reuseOperand(ty_op.operand, operand)
     else
@@ -4170,8 +4170,8 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// NOTE: May leave the result on the top of the stack.
 fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
-    const given_bitsize = @intCast(u16, given.bitSize(mod));
-    const wanted_bitsize = @intCast(u16, wanted.bitSize(mod));
+    const given_bitsize = @as(u16, @intCast(given.bitSize(mod)));
+    const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(mod)));
     assert(given_bitsize <= 128);
     assert(wanted_bitsize <= 128);
 
@@ -4396,7 +4396,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // calculate index into slice
     try func.emitWValue(index);
-    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
     try func.addTag(.i32_mul);
     try func.addTag(.i32_add);
 
@@ -4426,7 +4426,7 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // calculate index into slice
     try func.emitWValue(index);
-    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
     try func.addTag(.i32_mul);
     try func.addTag(.i32_add);
 
@@ -4466,13 +4466,13 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// NOTE: Resulting value is left on the stack.
 fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
-    const given_bits = @intCast(u16, given_ty.bitSize(mod));
+    const given_bits = @as(u16, @intCast(given_ty.bitSize(mod)));
     if (toWasmBits(given_bits) == null) {
         return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
     }
 
     var result = try func.intcast(operand, given_ty, wanted_ty);
-    const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod));
+    const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(mod)));
     const wasm_bits = toWasmBits(wanted_bits).?;
     if (wasm_bits != wanted_bits) {
         result = try func.wrapOperand(result, wanted_ty);
@@ -4505,7 +4505,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     }
 
     // store the length of the array in the slice
-    const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) };
+    const len = WValue{ .imm32 = @as(u32, @intCast(array_ty.arrayLen(mod))) };
     try func.store(slice_local, len, Type.usize, func.ptrSize());
 
     func.finishAir(inst, slice_local, &.{ty_op.operand});
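
A slice here is laid out as pointer followed by length, so the length is written `func.ptrSize()` bytes in: 4 on wasm32 (assumed), 8 on wasm64.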
@@ -4545,7 +4545,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // calculate index into slice
     try func.emitWValue(index);
-    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
     try func.addTag(.i32_mul);
     try func.addTag(.i32_add);
 
@@ -4584,7 +4584,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // calculate index into ptr
     try func.emitWValue(index);
-    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
     try func.addTag(.i32_mul);
     try func.addTag(.i32_add);
 
@@ -4612,7 +4612,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 
     try func.lowerToStack(ptr);
     try func.emitWValue(offset);
-    try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod))));
+    try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(pointee_ty.abiSize(mod))))));
     try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
     try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
 
@@ -4635,7 +4635,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const value = try func.resolveInst(bin_op.rhs);
     const len = switch (ptr_ty.ptrSize(mod)) {
         .Slice => try func.sliceLen(ptr),
-        .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }),
+        .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(mod).arrayLen(mod))) }),
         .C, .Many => unreachable,
     };
 
@@ -4656,7 +4656,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
 /// we implement it manually.
 fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
-    const abi_size = @intCast(u32, elem_ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
 
     // When bulk_memory is enabled, we lower it to wasm's memset instruction.
     // If not, we lower it ourselves.
@@ -4756,7 +4756,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     if (isByRef(array_ty, mod)) {
         try func.lowerToStack(array);
         try func.emitWValue(index);
-        try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+        try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
         try func.addTag(.i32_mul);
         try func.addTag(.i32_add);
     } else {
@@ -4772,11 +4772,11 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     else => unreachable,
                 };
 
-                var operands = [_]u32{ std.wasm.simdOpcode(opcode), @intCast(u8, lane) };
+                var operands = [_]u32{ std.wasm.simdOpcode(opcode), @as(u8, @intCast(lane)) };
 
                 try func.emitWValue(array);
 
-                const extra_index = @intCast(u32, func.mir_extra.items.len);
+                const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
                 try func.mir_extra.appendSlice(func.gpa, &operands);
                 try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
 
@@ -4789,7 +4789,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 // Is a non-unrolled vector (v128)
                 try func.lowerToStack(stack_vec);
                 try func.emitWValue(index);
-                try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+                try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
                 try func.addTag(.i32_mul);
                 try func.addTag(.i32_add);
             },
@@ -4886,7 +4886,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 const result = try func.allocLocal(ty);
                 try func.emitWValue(operand);
                 // TODO: Add helper functions for simd opcodes
-                const extra_index = @intCast(u32, func.mir_extra.items.len);
+                const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
                 // stores as := opcode, offset, alignment (opcode::memarg)
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     opcode,
@@ -4907,7 +4907,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 };
                 const result = try func.allocLocal(ty);
                 try func.emitWValue(operand);
-                const extra_index = @intCast(u32, func.mir_extra.items.len);
+                const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
                 try func.mir_extra.append(func.gpa, opcode);
                 try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
                 try func.addLabel(.local_set, result.local.value);
@@ -4917,13 +4917,13 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
     }
     const elem_size = elem_ty.bitSize(mod);
-    const vector_len = @intCast(usize, ty.vectorLen(mod));
+    const vector_len = @as(usize, @intCast(ty.vectorLen(mod)));
     if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
         return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
     }
 
     const result = try func.allocStack(ty);
-    const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod));
+    const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
     var index: usize = 0;
     var offset: u32 = 0;
     while (index < vector_len) : (index += 1) {
@@ -4966,11 +4966,11 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             try func.emitWValue(result);
 
             const loaded = if (value >= 0)
-                try func.load(a, child_ty, @intCast(u32, @intCast(i64, elem_size) * value))
+                try func.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value)))
             else
-                try func.load(b, child_ty, @intCast(u32, @intCast(i64, elem_size) * ~value));
+                try func.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value)));
 
-            try func.store(.stack, loaded, child_ty, result.stack_offset.value + @intCast(u32, elem_size) * @intCast(u32, index));
+            try func.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index)));
         }
 
         return func.finishAir(inst, result, &.{ extra.a, extra.b });
@@ -4980,22 +4980,22 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         } ++ [1]u32{undefined} ** 4;
 
         var lanes = std.mem.asBytes(operands[1..]);
-        for (0..@intCast(usize, mask_len)) |index| {
+        for (0..@as(usize, @intCast(mask_len))) |index| {
             const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
             const base_index = if (mask_elem >= 0)
-                @intCast(u8, @intCast(i64, elem_size) * mask_elem)
+                @as(u8, @intCast(@as(i64, @intCast(elem_size)) * mask_elem))
             else
-                16 + @intCast(u8, @intCast(i64, elem_size) * ~mask_elem);
+                16 + @as(u8, @intCast(@as(i64, @intCast(elem_size)) * ~mask_elem));
 
-            for (0..@intCast(usize, elem_size)) |byte_offset| {
-                lanes[index * @intCast(usize, elem_size) + byte_offset] = base_index + @intCast(u8, byte_offset);
+            for (0..@as(usize, @intCast(elem_size))) |byte_offset| {
+                lanes[index * @as(usize, @intCast(elem_size)) + byte_offset] = base_index + @as(u8, @intCast(byte_offset));
             }
         }
 
         try func.emitWValue(a);
         try func.emitWValue(b);
 
-        const extra_index = @intCast(u32, func.mir_extra.items.len);
+        const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
         try func.mir_extra.appendSlice(func.gpa, &operands);
         try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
 
@@ -5015,15 +5015,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const result_ty = func.typeOfIndex(inst);
-    const len = @intCast(usize, result_ty.arrayLen(mod));
-    const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
+    const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len]));
 
     const result: WValue = result_value: {
         switch (result_ty.zigTypeTag(mod)) {
             .Array => {
                 const result = try func.allocStack(result_ty);
                 const elem_ty = result_ty.childType(mod);
-                const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+                const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
                 const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
                     break :blk try func.lowerConstant(sent, elem_ty);
                 } else null;
@@ -5087,7 +5087,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                             WValue{ .imm64 = current_bit };
 
                         const value = try func.resolveInst(elem);
-                        const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+                        const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
                         const int_ty = try mod.intType(.unsigned, value_bit_size);
 
                         // load our current result on stack so we can perform all transformations
@@ -5113,7 +5113,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                         if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
 
                         const elem_ty = result_ty.structFieldType(elem_index, mod);
-                        const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+                        const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
                         const value = try func.resolveInst(elem);
                         try func.store(offset, value, elem_ty, 0);
 
@@ -5174,7 +5174,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
                     try func.store(payload_ptr, payload, field.ty, 0);
                 } else {
-                    try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size));
+                    try func.store(result_ptr, payload, field.ty, @as(u32, @intCast(layout.tag_size)));
                 }
 
                 if (layout.tag_size > 0) {
@@ -5187,21 +5187,21 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                         result_ptr,
                         tag_int,
                         union_obj.tag_ty,
-                        @intCast(u32, layout.payload_size),
+                        @as(u32, @intCast(layout.payload_size)),
                     );
                 }
             }
             break :result result_ptr;
         } else {
             const operand = try func.resolveInst(extra.init);
-            const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
+            const union_int_type = try mod.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(mod))));
             if (field.ty.zigTypeTag(mod) == .Float) {
-                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
+                const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod))));
                 const bitcasted = try func.bitcast(field.ty, int_type, operand);
                 const casted = try func.trunc(bitcasted, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
             } else if (field.ty.isPtrAtRuntime(mod)) {
-                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
+                const int_type = try mod.intType(.unsigned, @as(u16, @intCast(field.ty.bitSize(mod))));
                 const casted = try func.intcast(operand, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
             }
@@ -5334,7 +5334,7 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // when the tag alignment is smaller than the payload, the field will be stored
     // after the payload.
     const offset = if (layout.tag_align < layout.payload_align) blk: {
-        break :blk @intCast(u32, layout.payload_size);
+        break :blk @as(u32, @intCast(layout.payload_size));
     } else @as(u32, 0);
     try func.store(union_ptr, new_tag, tag_ty, offset);
     func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -5353,7 +5353,7 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // when the tag alignment is smaller than the payload, the field will be stored
     // after the payload.
     const offset = if (layout.tag_align < layout.payload_align) blk: {
-        break :blk @intCast(u32, layout.payload_size);
+        break :blk @as(u32, @intCast(layout.payload_size));
     } else @as(u32, 0);
     const tag = try func.load(operand, tag_ty, offset);
     const result = try tag.toLocal(func, tag_ty);
@@ -5458,7 +5458,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
-        @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
+        @as(u32, @intCast(errUnionErrorOffset(payload_ty, mod))),
     );
 
     const result = result: {
@@ -5466,7 +5466,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
+        break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, mod))), .new);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -5483,7 +5483,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const result = if (field_offset != 0) result: {
         const base = try func.buildPointerOffset(field_ptr, 0, .new);
         try func.addLabel(.local_get, base.local.value);
-        try func.addImm32(@bitCast(i32, @intCast(u32, field_offset)));
+        try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(field_offset)))));
         try func.addTag(.i32_sub);
         try func.addLabel(.local_set, base.local.value);
         break :result base;
@@ -5514,14 +5514,14 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             const slice_len = try func.sliceLen(dst);
             if (ptr_elem_ty.abiSize(mod) != 1) {
                 try func.emitWValue(slice_len);
-                try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
+                try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(mod))) });
                 try func.addTag(.i32_mul);
                 try func.addLabel(.local_set, slice_len.local.value);
             }
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)),
+            .imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod))),
         }),
         .C, .Many => unreachable,
     };
@@ -5611,7 +5611,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     try func.emitWValue(operand);
     switch (func.arch()) {
         .wasm32 => {
-            try func.addImm32(@bitCast(i32, @intCast(u32, abi_size)));
+            try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(abi_size)))));
             try func.addTag(.i32_mul);
             try func.addTag(.i32_add);
         },
@@ -5708,7 +5708,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
 
     const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(mod));
+    const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
     try func.store(result_ptr, overflow_local, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
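
Layout note for the result above (example values): for an `i32` lhs the wrapped sum occupies the first 4 bytes and the `u1` overflow flag is stored right after it, at `offset = lhs_ty.abiSize(mod) = 4`.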
@@ -5830,7 +5830,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(mod));
+    const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
     try func.store(result_ptr, overflow_local, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6005,7 +6005,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, bin_op_local, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(mod));
+    const offset = @as(u32, @intCast(lhs_ty.abiSize(mod)));
     try func.store(result_ptr, overflow_bit, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -6149,7 +6149,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     switch (wasm_bits) {
         32 => {
             if (wasm_bits != int_info.bits) {
-                const val: u32 = @as(u32, 1) << @intCast(u5, int_info.bits);
+                const val: u32 = @as(u32, 1) << @as(u5, @intCast(int_info.bits));
                 // leave value on the stack
                 _ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or");
             } else try func.emitWValue(operand);
@@ -6157,7 +6157,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         },
         64 => {
             if (wasm_bits != int_info.bits) {
-                const val: u64 = @as(u64, 1) << @intCast(u6, int_info.bits);
+                const val: u64 = @as(u64, 1) << @as(u6, @intCast(int_info.bits));
                 // leave value on the stack
                 _ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or");
             } else try func.emitWValue(operand);
@@ -6172,7 +6172,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             try func.addTag(.i64_ctz);
             _ = try func.load(operand, Type.u64, 8);
             if (wasm_bits != int_info.bits) {
-                try func.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64));
+                try func.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64)));
                 try func.addTag(.i64_or);
             }
             try func.addTag(.i64_ctz);
@@ -6275,7 +6275,7 @@ fn lowerTry(
         // check if the error tag is set for the error union.
         try func.emitWValue(err_union);
         if (pl_has_bits) {
-            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
+            const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
             try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
                 .alignment = Type.anyerror.abiAlignment(mod),
@@ -6300,7 +6300,7 @@ fn lowerTry(
         return WValue{ .none = {} };
     }
 
-    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+    const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
     if (isByRef(pl_ty, mod)) {
         return buildPointerOffset(func, err_union, pl_offset, .new);
     }
@@ -6590,9 +6590,9 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty);
     defer bin_result.free(func);
     if (wasm_bits != int_info.bits and op == .add) {
-        const val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits)) - 1);
+        const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1));
         const imm_val = switch (wasm_bits) {
-            32 => WValue{ .imm32 = @intCast(u32, val) },
+            32 => WValue{ .imm32 = @as(u32, @intCast(val)) },
             64 => WValue{ .imm64 = val },
             else => unreachable,
         };
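
For example (values assumed): an 8-bit saturating add lowered on a 32-bit wasm value computes `val = (1 << 8) - 1 = 255`, making `imm_val` the `.{ .imm32 = 255 }` ceiling that the wrapped sum is clamped to.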
@@ -6603,7 +6603,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     } else {
         switch (wasm_bits) {
             32 => try func.addImm32(if (op == .add) @as(i32, -1) else 0),
-            64 => try func.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0),
+            64 => try func.addImm64(if (op == .add) @as(u64, @bitCast(@as(i64, -1))) else 0),
             else => unreachable,
         }
         try func.emitWValue(bin_result);
@@ -6629,16 +6629,16 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,
         break :rhs try (try func.signAbsValue(rhs_operand, ty)).toLocal(func, ty);
     } else rhs_operand;
 
-    const max_val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits - 1)) - 1);
-    const min_val: i64 = (-@intCast(i64, @intCast(u63, max_val))) - 1;
+    const max_val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits - 1))) - 1));
+    const min_val: i64 = (-@as(i64, @intCast(@as(u63, @intCast(max_val))))) - 1;
     const max_wvalue = switch (wasm_bits) {
-        32 => WValue{ .imm32 = @truncate(u32, max_val) },
+        32 => WValue{ .imm32 = @as(u32, @truncate(max_val)) },
         64 => WValue{ .imm64 = max_val },
         else => unreachable,
     };
     const min_wvalue = switch (wasm_bits) {
-        32 => WValue{ .imm32 = @bitCast(u32, @truncate(i32, min_val)) },
-        64 => WValue{ .imm64 = @bitCast(u64, min_val) },
+        32 => WValue{ .imm32 = @as(u32, @bitCast(@as(i32, @truncate(min_val)))) },
+        64 => WValue{ .imm64 = @as(u64, @bitCast(min_val)) },
         else => unreachable,
     };
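
With `int_info.bits = 8` (example values), `max_val = (1 << 7) - 1 = 127` and `min_val = -127 - 1 = -128`. Note the two different narrowings above: `@truncate` discards high bits unconditionally, while `@intCast` asserts the value fits. A sketch of the distinction:

    test "@truncate vs @intCast (illustrative)" {
        const wide: u64 = 0x1_0000_00ff;
        const low = @as(u32, @truncate(wide)); // keeps only the low 32 bits: 0xff
        try @import("std").testing.expectEqual(@as(u32, 0xff), low);
        // @as(u32, @intCast(wide)) is a checked narrowing instead,
        // and would fail here because the value does not fit in 32 bits.
    }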
 
@@ -6715,11 +6715,11 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             },
             64 => blk: {
                 if (!is_signed) {
-                    try func.addImm64(@bitCast(u64, @as(i64, -1)));
+                    try func.addImm64(@as(u64, @bitCast(@as(i64, -1))));
                     break :blk;
                 }
-                try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
-                try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
+                try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64)))));
+                try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64)))));
                 _ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
                 try func.addTag(.select);
             },
@@ -6759,12 +6759,12 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             },
             64 => blk: {
                 if (!is_signed) {
-                    try func.addImm64(@bitCast(u64, @as(i64, -1)));
+                    try func.addImm64(@as(u64, @bitCast(@as(i64, -1))));
                     break :blk;
                 }
 
-                try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
-                try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
+                try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64)))));
+                try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64)))));
                 _ = try func.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
                 try func.addTag(.select);
             },
@@ -6894,7 +6894,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
     // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse.
     // generate an if-else chain for each tag value as well as constant.
     for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| {
-        const field_index = @intCast(u32, field_index_usize);
+        const field_index = @as(u32, @intCast(field_index_usize));
         const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
         // for each tag name, create an unnamed const,
         // and then get a pointer to its value.
@@ -6953,7 +6953,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
             try writer.writeByte(std.wasm.opcode(.i32_const));
             try relocs.append(.{
                 .relocation_type = .R_WASM_MEMORY_ADDR_LEB,
-                .offset = @intCast(u32, body_list.items.len),
+                .offset = @as(u32, @intCast(body_list.items.len)),
                 .index = tag_sym_index,
             });
             try writer.writeAll(&[_]u8{0} ** 5); // will be relocated
@@ -6965,7 +6965,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
 
             // store length
             try writer.writeByte(std.wasm.opcode(.i32_const));
-            try leb.writeULEB128(writer, @intCast(u32, tag_name.len));
+            try leb.writeULEB128(writer, @as(u32, @intCast(tag_name.len)));
             try writer.writeByte(std.wasm.opcode(.i32_store));
             try leb.writeULEB128(writer, encoded_alignment);
             try leb.writeULEB128(writer, @as(u32, 4));
@@ -6974,7 +6974,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
             try writer.writeByte(std.wasm.opcode(.i64_const));
             try relocs.append(.{
                 .relocation_type = .R_WASM_MEMORY_ADDR_LEB64,
-                .offset = @intCast(u32, body_list.items.len),
+                .offset = @as(u32, @intCast(body_list.items.len)),
                 .index = tag_sym_index,
             });
             try writer.writeAll(&[_]u8{0} ** 10); // will be relocated
@@ -6986,7 +6986,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
 
             // store length
             try writer.writeByte(std.wasm.opcode(.i64_const));
-            try leb.writeULEB128(writer, @intCast(u64, tag_name.len));
+            try leb.writeULEB128(writer, @as(u64, @intCast(tag_name.len)));
             try writer.writeByte(std.wasm.opcode(.i64_store));
             try leb.writeULEB128(writer, encoded_alignment);
             try leb.writeULEB128(writer, @as(u32, 8));
@@ -7026,7 +7026,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var lowest: ?u32 = null;
     var highest: ?u32 = null;
     for (names) |name| {
-        const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
+        const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?));
         if (lowest) |*l| {
             if (err_int < l.*) {
                 l.* = err_int;
@@ -7054,11 +7054,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // lower operand to determine jump table target
     try func.emitWValue(operand);
-    try func.addImm32(@intCast(i32, lowest.?));
+    try func.addImm32(@as(i32, @intCast(lowest.?)));
     try func.addTag(.i32_sub);
 
     // Account for the default branch, so always add '1'
-    const depth = @intCast(u32, highest.? - lowest.? + 1);
+    const depth = @as(u32, @intCast(highest.? - lowest.? + 1));
     const jump_table: Mir.JumpTable = .{ .length = depth };
     const table_extra_index = try func.addExtra(jump_table);
     try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
@@ -7155,7 +7155,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.addTag(.i32_and);
         const and_result = try WValue.toLocal(.stack, func, Type.bool);
         const result_ptr = try func.allocStack(result_ty);
-        try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod)));
+        try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(mod))));
         try func.store(result_ptr, ptr_val, ty, 0);
         break :val result_ptr;
     } else val: {
@@ -7221,13 +7221,13 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.emitWValue(ptr);
                 try func.emitWValue(value);
                 if (op == .Nand) {
-                    const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
+                    const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
 
                     const and_res = try func.binOp(value, operand, ty, .@"and");
                     if (wasm_bits == 32)
                         try func.addImm32(-1)
                     else if (wasm_bits == 64)
-                        try func.addImm64(@bitCast(u64, @as(i64, -1)))
+                        try func.addImm64(@as(u64, @bitCast(@as(i64, -1))))
                     else
                         return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
                     _ = try func.binOp(and_res, .stack, ty, .xor);
@@ -7352,14 +7352,14 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.store(.stack, .stack, ty, ptr.offset());
             },
             .Nand => {
-                const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
+                const wasm_bits = toWasmBits(@as(u16, @intCast(ty.bitSize(mod)))).?;
 
                 try func.emitWValue(ptr);
                 const and_res = try func.binOp(result, operand, ty, .@"and");
                 if (wasm_bits == 32)
                     try func.addImm32(-1)
                 else if (wasm_bits == 64)
-                    try func.addImm64(@bitCast(u64, @as(i64, -1)))
+                    try func.addImm64(@as(u64, @bitCast(@as(i64, -1))))
                 else
                     return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
                 _ = try func.binOp(and_res, .stack, ty, .xor);
src/arch/wasm/Emit.zig
@@ -45,7 +45,7 @@ pub fn emitMir(emit: *Emit) InnerError!void {
     try emit.emitLocals();
 
     for (mir_tags, 0..) |tag, index| {
-        const inst = @intCast(u32, index);
+        const inst = @as(u32, @intCast(index));
         switch (tag) {
             // block instructions
             .block => try emit.emitBlock(tag, inst),
@@ -247,7 +247,7 @@ pub fn emitMir(emit: *Emit) InnerError!void {
 }
 
 fn offset(self: Emit) u32 {
-    return @intCast(u32, self.code.items.len);
+    return @as(u32, @intCast(self.code.items.len));
 }
 
 fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@@ -260,7 +260,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
 
 fn emitLocals(emit: *Emit) !void {
     const writer = emit.code.writer();
-    try leb128.writeULEB128(writer, @intCast(u32, emit.locals.len));
+    try leb128.writeULEB128(writer, @as(u32, @intCast(emit.locals.len)));
     // emit the actual locals amount
     for (emit.locals) |local| {
         try leb128.writeULEB128(writer, @as(u32, 1));
@@ -324,13 +324,13 @@ fn emitImm64(emit: *Emit, inst: Mir.Inst.Index) !void {
     const extra_index = emit.mir.instructions.items(.data)[inst].payload;
     const value = emit.mir.extraData(Mir.Imm64, extra_index);
     try emit.code.append(std.wasm.opcode(.i64_const));
-    try leb128.writeILEB128(emit.code.writer(), @bitCast(i64, value.data.toU64()));
+    try leb128.writeILEB128(emit.code.writer(), @as(i64, @bitCast(value.data.toU64())));
 }
 
 fn emitFloat32(emit: *Emit, inst: Mir.Inst.Index) !void {
     const value: f32 = emit.mir.instructions.items(.data)[inst].float32;
     try emit.code.append(std.wasm.opcode(.f32_const));
-    try emit.code.writer().writeIntLittle(u32, @bitCast(u32, value));
+    try emit.code.writer().writeIntLittle(u32, @as(u32, @bitCast(value)));
 }
 
 fn emitFloat64(emit: *Emit, inst: Mir.Inst.Index) !void {
@@ -425,7 +425,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
             .offset = mem_offset,
             .index = mem.pointer,
             .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
-            .addend = @intCast(i32, mem.offset),
+            .addend = @as(i32, @intCast(mem.offset)),
         });
     }
 }
@@ -436,7 +436,7 @@ fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void {
     const writer = emit.code.writer();
     try emit.code.append(std.wasm.opcode(.misc_prefix));
     try leb128.writeULEB128(writer, opcode);
-    switch (@enumFromInt(std.wasm.MiscOpcode, opcode)) {
+    switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
         // bulk-memory opcodes
         .data_drop => {
             const segment = emit.mir.extra[extra_index + 1];
@@ -475,7 +475,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
     const writer = emit.code.writer();
     try emit.code.append(std.wasm.opcode(.simd_prefix));
     try leb128.writeULEB128(writer, opcode);
-    switch (@enumFromInt(std.wasm.SimdOpcode, opcode)) {
+    switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
         .v128_store,
         .v128_load,
         .v128_load8_splat,
@@ -507,7 +507,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
         .f64x2_extract_lane,
         .f64x2_replace_lane,
         => {
-            try writer.writeByte(@intCast(u8, emit.mir.extra[extra_index + 1]));
+            try writer.writeByte(@as(u8, @intCast(emit.mir.extra[extra_index + 1])));
         },
         .i8x16_splat,
         .i16x8_splat,
@@ -526,7 +526,7 @@ fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void {
     const writer = emit.code.writer();
     try emit.code.append(std.wasm.opcode(.atomics_prefix));
     try leb128.writeULEB128(writer, opcode);
-    switch (@enumFromInt(std.wasm.AtomicsOpcode, opcode)) {
+    switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
         .i32_atomic_load,
         .i64_atomic_load,
         .i32_atomic_load8_u,
@@ -623,7 +623,7 @@ fn emitDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
 fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
     if (emit.dbg_output != .dwarf) return;
 
-    const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
+    const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
     const delta_pc = emit.offset() - emit.prev_di_offset;
     // TODO: This must emit a relocation to calculate the offset relative
     // to the code section start.
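
`dbgAdvancePCAndLine` widens both unsigned line counters to `i32` before subtracting, since the delta may be negative. A minimal sketch of the pattern (standalone test with assumed values, not repo code):

    const std = @import("std");

    test "signed delta between unsigned counters" {
        const line: u32 = 10;
        const prev_di_line: u32 = 14;
        const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(prev_di_line));
        try std.testing.expectEqual(@as(i32, -4), delta_line);
    }
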
src/arch/wasm/Mir.zig
@@ -544,12 +544,12 @@ pub const Inst = struct {
 
         /// From a given wasm opcode, returns a MIR tag.
         pub fn fromOpcode(opcode: std.wasm.Opcode) Tag {
-            return @enumFromInt(Tag, @intFromEnum(opcode)); // Given `Opcode` is not present as a tag for MIR yet
+            return @as(Tag, @enumFromInt(@intFromEnum(opcode))); // Given `Opcode` is not present as a tag for MIR yet
         }
 
         /// Returns a wasm opcode from a given MIR tag.
         pub fn toOpcode(self: Tag) std.wasm.Opcode {
-            return @enumFromInt(std.wasm.Opcode, @intFromEnum(self));
+            return @as(std.wasm.Opcode, @enumFromInt(@intFromEnum(self)));
         }
     };
 
@@ -621,8 +621,8 @@ pub const Imm64 = struct {
 
     pub fn fromU64(imm: u64) Imm64 {
         return .{
-            .msb = @truncate(u32, imm >> 32),
-            .lsb = @truncate(u32, imm),
+            .msb = @as(u32, @truncate(imm >> 32)),
+            .lsb = @as(u32, @truncate(imm)),
         };
     }
 
@@ -639,15 +639,15 @@ pub const Float64 = struct {
     lsb: u32,
 
     pub fn fromFloat64(float: f64) Float64 {
-        const tmp = @bitCast(u64, float);
+        const tmp = @as(u64, @bitCast(float));
         return .{
-            .msb = @truncate(u32, tmp >> 32),
-            .lsb = @truncate(u32, tmp),
+            .msb = @as(u32, @truncate(tmp >> 32)),
+            .lsb = @as(u32, @truncate(tmp)),
         };
     }
 
     pub fn toF64(self: Float64) f64 {
-        @bitCast(f64, self.toU64());
+        return @as(f64, @bitCast(self.toU64()));
     }
 
     pub fn toU64(self: Float64) u64 {
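
`Imm64` and `Float64` both store a 64-bit value as two `u32` halves via `@truncate`. A minimal round-trip sketch of that split and rejoin (standalone test, not code from this commit):

    const std = @import("std");

    test "split a u64 into msb/lsb words and rejoin" {
        const tmp = @as(u64, @bitCast(@as(f64, 1.5)));
        const msb = @as(u32, @truncate(tmp >> 32));
        const lsb = @as(u32, @truncate(tmp));
        const rejoined = (@as(u64, msb) << 32) | lsb;
        try std.testing.expectEqual(@as(f64, 1.5), @as(f64, @bitCast(rejoined)));
    }
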
src/arch/x86_64/abi.zig
@@ -278,7 +278,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
                         // "Otherwise class SSE is used."
                         result[result_i] = .sse;
                     }
-                    byte_i += @intCast(usize, field_size);
+                    byte_i += @as(usize, @intCast(field_size));
                     if (byte_i == 8) {
                         byte_i = 0;
                         result_i += 1;
@@ -293,7 +293,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
                     result_i += field_class.len;
                     // If there are any bytes leftover, we have to try to combine
                     // the next field with them.
-                    byte_i = @intCast(usize, field_size % 8);
+                    byte_i = @as(usize, @intCast(field_size % 8));
                     if (byte_i != 0) result_i -= 1;
                 }
             }
src/arch/x86_64/bits.zig
@@ -232,7 +232,7 @@ pub const Register = enum(u7) {
             else => unreachable,
             // zig fmt: on
         };
-        return @intCast(u6, @intFromEnum(reg) - base);
+        return @as(u6, @intCast(@intFromEnum(reg) - base));
     }
 
     pub fn bitSize(reg: Register) u64 {
@@ -291,11 +291,11 @@ pub const Register = enum(u7) {
             else => unreachable,
             // zig fmt: on
         };
-        return @truncate(u4, @intFromEnum(reg) - base);
+        return @as(u4, @truncate(@intFromEnum(reg) - base));
     }
 
     pub fn lowEnc(reg: Register) u3 {
-        return @truncate(u3, reg.enc());
+        return @as(u3, @truncate(reg.enc()));
     }
 
     pub fn toBitSize(reg: Register, bit_size: u64) Register {
@@ -325,19 +325,19 @@ pub const Register = enum(u7) {
     }
 
     pub fn to64(reg: Register) Register {
-        return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax));
+        return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)));
     }
 
     pub fn to32(reg: Register) Register {
-        return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax));
+        return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)));
     }
 
     pub fn to16(reg: Register) Register {
-        return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax));
+        return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)));
     }
 
     pub fn to8(reg: Register) Register {
-        return @enumFromInt(Register, @intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al));
+        return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)));
     }
 
     fn sseBase(reg: Register) u7 {
@@ -350,11 +350,11 @@ pub const Register = enum(u7) {
     }
 
     pub fn to256(reg: Register) Register {
-        return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0));
+        return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0)));
     }
 
     pub fn to128(reg: Register) Register {
-        return @enumFromInt(Register, @intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0));
+        return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0)));
     }
 
     /// DWARF register encoding
@@ -363,7 +363,7 @@ pub const Register = enum(u7) {
             .general_purpose => if (reg.isExtended())
                 reg.enc()
             else
-                @truncate(u3, @as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3),
+                @as(u3, @truncate(@as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3)),
             .sse => 17 + @as(u6, reg.enc()),
             .x87 => 33 + @as(u6, reg.enc()),
             .mmx => 41 + @as(u6, reg.enc()),
@@ -610,15 +610,15 @@ pub const Immediate = union(enum) {
     pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 {
         return switch (imm) {
             .signed => |x| switch (bit_size) {
-                1, 8 => @bitCast(u8, @intCast(i8, x)),
-                16 => @bitCast(u16, @intCast(i16, x)),
-                32, 64 => @bitCast(u32, x),
+                1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))),
+                16 => @as(u16, @bitCast(@as(i16, @intCast(x)))),
+                32, 64 => @as(u32, @bitCast(x)),
                 else => unreachable,
             },
             .unsigned => |x| switch (bit_size) {
-                1, 8 => @intCast(u8, x),
-                16 => @intCast(u16, x),
-                32 => @intCast(u32, x),
+                1, 8 => @as(u8, @intCast(x)),
+                16 => @as(u16, @intCast(x)),
+                32 => @as(u32, @intCast(x)),
                 64 => x,
                 else => unreachable,
             },
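
`Immediate.asUnsigned` narrows a signed immediate with a range-checked `@intCast` before reinterpreting the bits with `@bitCast`. A minimal sketch of the 8-bit case (standalone test with an assumed input value):

    const std = @import("std");

    test "range-checked narrow, then bit reinterpretation" {
        const x: i64 = -1;
        // @intCast asserts -1 fits in i8; @bitCast then reinterprets it as 0xFF.
        const u = @as(u8, @bitCast(@as(i8, @intCast(x))));
        try std.testing.expectEqual(@as(u8, 0xFF), u);
    }
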
src/arch/x86_64/CodeGen.zig
@@ -329,7 +329,7 @@ pub const MCValue = union(enum) {
             .load_frame,
             .reserved_frame,
             => unreachable, // not offsettable
-            .immediate => |imm| .{ .immediate = @bitCast(u64, @bitCast(i64, imm) +% off) },
+            .immediate => |imm| .{ .immediate = @as(u64, @bitCast(@as(i64, @bitCast(imm)) +% off)) },
             .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
             .register_offset => |reg_off| .{
                 .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off },
@@ -360,7 +360,7 @@ pub const MCValue = union(enum) {
             .lea_frame,
             .reserved_frame,
             => unreachable,
-            .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr|
+            .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
                 Memory.sib(ptr_size, .{ .base = .{ .reg = .ds }, .disp = small_addr })
             else
                 Memory.moffs(.ds, addr),
@@ -606,7 +606,7 @@ const FrameAlloc = struct {
     fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc {
         assert(math.isPowerOfTwo(alloc_abi.alignment));
         return .{
-            .abi_size = @intCast(u31, alloc_abi.size),
+            .abi_size = @as(u31, @intCast(alloc_abi.size)),
             .abi_align = math.log2_int(u32, alloc_abi.alignment),
             .ref_count = 0,
         };
@@ -694,7 +694,7 @@ pub fn generate(
         FrameAlloc.init(.{
             .size = 0,
             .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack|
-                @intCast(u32, set_align_stack.alignment.toByteUnitsOptional().?)
+                @as(u32, @intCast(set_align_stack.alignment.toByteUnitsOptional().?))
             else
                 1,
         }),
@@ -979,7 +979,7 @@ fn fmtTracking(self: *Self) std.fmt.Formatter(formatTracking) {
 fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
     const gpa = self.gpa;
     try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
-    const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+    const result_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
     self.mir_instructions.appendAssumeCapacity(inst);
     if (inst.tag != .pseudo or switch (inst.ops) {
         else => true,
@@ -1000,11 +1000,11 @@ fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
 
 fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, self.mir_extra.items.len);
+    const result = @as(u32, @intCast(self.mir_extra.items.len));
     inline for (fields) |field| {
         self.mir_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
-            i32 => @bitCast(u32, @field(extra, field.name)),
+            i32 => @as(u32, @bitCast(@field(extra, field.name))),
             else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
         });
     }
@@ -1214,8 +1214,8 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.FixedTag, imm: Immediate) !void {
         .data = .{ .i = .{
             .fixes = tag[0],
             .i = switch (imm) {
-                .signed => |s| @bitCast(u32, s),
-                .unsigned => |u| @intCast(u32, u),
+                .signed => |s| @as(u32, @bitCast(s)),
+                .unsigned => |u| @as(u32, @intCast(u)),
             },
         } },
     });
@@ -1246,8 +1246,8 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, imm:
                 .fixes = tag[0],
                 .r1 = reg,
                 .i = switch (imm) {
-                    .signed => |s| @bitCast(u32, s),
-                    .unsigned => |u| @intCast(u32, u),
+                    .signed => |s| @as(u32, @bitCast(s)),
+                    .unsigned => |u| @as(u32, @intCast(u)),
                 },
             } },
             .ri64 => .{ .rx = .{
@@ -1316,7 +1316,7 @@ fn asmRegisterRegisterRegisterImmediate(
             .r1 = reg1,
             .r2 = reg2,
             .r3 = reg3,
-            .i = @intCast(u8, imm.unsigned),
+            .i = @as(u8, @intCast(imm.unsigned)),
         } },
     });
 }
@@ -1339,8 +1339,8 @@ fn asmRegisterRegisterImmediate(
             .r1 = reg1,
             .r2 = reg2,
             .i = switch (imm) {
-                .signed => |s| @bitCast(u32, s),
-                .unsigned => |u| @intCast(u32, u),
+                .signed => |s| @as(u32, @bitCast(s)),
+                .unsigned => |u| @as(u32, @intCast(u)),
             },
         } },
     });
@@ -1429,7 +1429,7 @@ fn asmRegisterMemoryImmediate(
         .data = .{ .rix = .{
             .fixes = tag[0],
             .r1 = reg,
-            .i = @intCast(u8, imm.unsigned),
+            .i = @as(u8, @intCast(imm.unsigned)),
             .payload = switch (m) {
                 .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                 .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1458,7 +1458,7 @@ fn asmRegisterRegisterMemoryImmediate(
             .fixes = tag[0],
             .r1 = reg1,
             .r2 = reg2,
-            .i = @intCast(u8, imm.unsigned),
+            .i = @as(u8, @intCast(imm.unsigned)),
             .payload = switch (m) {
                 .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                 .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1490,8 +1490,8 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Regist
 
 fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void {
     const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) {
-        .signed => |s| @bitCast(u32, s),
-        .unsigned => |u| @intCast(u32, u),
+        .signed => |s| @as(u32, @bitCast(s)),
+        .unsigned => |u| @as(u32, @intCast(u)),
     } });
     assert(payload + 1 == switch (m) {
         .sib => try self.addExtra(Mir.MemorySib.encode(m)),
@@ -1562,7 +1562,7 @@ fn asmMemoryRegisterImmediate(
         .data = .{ .rix = .{
             .fixes = tag[0],
             .r1 = reg,
-            .i = @intCast(u8, imm.unsigned),
+            .i = @as(u8, @intCast(imm.unsigned)),
             .payload = switch (m) {
                 .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                 .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1617,7 +1617,7 @@ fn gen(self: *Self) InnerError!void {
         // Eliding the reloc will cause a miscompilation in this case.
         for (self.exitlude_jump_relocs.items) |jmp_reloc| {
             self.mir_instructions.items(.data)[jmp_reloc].inst.inst =
-                @intCast(u32, self.mir_instructions.len);
+                @as(u32, @intCast(self.mir_instructions.len));
         }
 
         try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
@@ -1739,7 +1739,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
     for (body) |inst| {
         if (builtin.mode == .Debug) {
-            const mir_inst = @intCast(Mir.Inst.Index, self.mir_instructions.len);
+            const mir_inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
             try self.mir_to_air_map.put(self.gpa, mir_inst, inst);
         }
 
@@ -2032,7 +2032,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
 
             var data_off: i32 = 0;
             for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, index_usize| {
-                const index = @intCast(u32, index_usize);
+                const index = @as(u32, @intCast(index_usize));
                 const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]);
                 const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
                 const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val });
@@ -2050,7 +2050,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
                 exitlude_jump_reloc.* = try self.asmJmpReloc(undefined);
                 try self.performReloc(skip_reloc);
 
-                data_off += @intCast(i32, tag_name.len + 1);
+                data_off += @as(i32, @intCast(tag_name.len + 1));
             }
 
             try self.airTrap();
@@ -2126,7 +2126,7 @@ fn finishAirResult(self: *Self, inst: Air.Inst.Index, result: MCValue) void {
 fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
     var tomb_bits = self.liveness.getTombBits(inst);
     for (operands) |op| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         self.processDeath(Air.refToIndexAllowNone(op) orelse continue);
@@ -2167,7 +2167,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     const frame_offset = self.frame_locs.items(.disp);
 
     for (stack_frame_order, FrameIndex.named_count..) |*frame_order, frame_index|
-        frame_order.* = @enumFromInt(FrameIndex, frame_index);
+        frame_order.* = @as(FrameIndex, @enumFromInt(frame_index));
     {
         const SortContext = struct {
             frame_align: @TypeOf(frame_align),
@@ -2195,7 +2195,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
         }
     }
 
-    var rbp_offset = @intCast(i32, save_reg_list.count() * 8);
+    var rbp_offset = @as(i32, @intCast(save_reg_list.count() * 8));
     self.setFrameLoc(.base_ptr, .rbp, &rbp_offset, false);
     self.setFrameLoc(.ret_addr, .rbp, &rbp_offset, false);
     self.setFrameLoc(.args_frame, .rbp, &rbp_offset, false);
@@ -2210,22 +2210,22 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align);
     rsp_offset -= stack_frame_align_offset;
     frame_size[@intFromEnum(FrameIndex.call_frame)] =
-        @intCast(u31, rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]);
+        @as(u31, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]));
 
     return .{
         .stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0),
-        .stack_adjust = @intCast(u32, rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]),
+        .stack_adjust = @as(u32, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)])),
         .save_reg_list = save_reg_list,
     };
 }
 
 fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 {
     const alloc_align = @as(u32, 1) << self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
-    return @min(alloc_align, @bitCast(u32, frame_addr.off) & (alloc_align - 1));
+    return @min(alloc_align, @as(u32, @bitCast(frame_addr.off)) & (alloc_align - 1));
 }
 
 fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 {
-    return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @intCast(u31, frame_addr.off);
+    return self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_size - @as(u31, @intCast(frame_addr.off));
 }
 
 fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
@@ -2245,7 +2245,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
         _ = self.free_frame_indices.swapRemoveAt(free_i);
         return frame_index;
     }
-    const frame_index = @enumFromInt(FrameIndex, self.frame_allocs.len);
+    const frame_index = @as(FrameIndex, @enumFromInt(self.frame_allocs.len));
     try self.frame_allocs.append(self.gpa, alloc);
     return frame_index;
 }
@@ -2321,7 +2321,7 @@ const State = struct {
 
 fn initRetroactiveState(self: *Self) State {
     var state: State = undefined;
-    state.inst_tracking_len = @intCast(u32, self.inst_tracking.count());
+    state.inst_tracking_len = @as(u32, @intCast(self.inst_tracking.count()));
     state.scope_generation = self.scope_generation;
     return state;
 }
@@ -2393,7 +2393,7 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt
             }
             {
                 const reg = RegisterManager.regAtTrackedIndex(
-                    @intCast(RegisterManager.RegisterBitSet.ShiftInt, index),
+                    @as(RegisterManager.RegisterBitSet.ShiftInt, @intCast(index)),
                 );
                 self.register_manager.freeReg(reg);
                 self.register_manager.getRegAssumeFree(reg, target_maybe_inst);
@@ -2628,7 +2628,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
 
         const dst_ty = self.typeOfIndex(inst);
         const dst_int_info = dst_ty.intInfo(mod);
-        const abi_size = @intCast(u32, dst_ty.abiSize(mod));
+        const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
 
         const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
         const extend = switch (src_int_info.signedness) {
@@ -2706,9 +2706,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     const dst_ty = self.typeOfIndex(inst);
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+    const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
     const src_ty = self.typeOf(ty_op.operand);
-    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+    const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
 
     const result = result: {
         const src_mcv = try self.resolveInst(ty_op.operand);
@@ -2753,13 +2753,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
             });
 
             const elem_ty = src_ty.childType(mod);
-            const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits));
+            const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - dst_info.bits)));
 
             const splat_ty = try mod.vectorType(.{
-                .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
+                .len = @as(u32, @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits))),
                 .child = elem_ty.ip_index,
             });
-            const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod));
+            const splat_abi_size = @as(u32, @intCast(splat_ty.abiSize(mod)));
 
             const splat_val = try mod.intern(.{ .aggregate = .{
                 .ty = splat_ty.ip_index,
@@ -2834,7 +2834,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(i32, ptr_ty.abiSize(mod)),
+        @as(i32, @intCast(ptr_ty.abiSize(mod))),
         len_ty,
         len,
     );
@@ -2875,7 +2875,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
                 const src_val = air_data[inst].interned.toValue();
                 var space: Value.BigIntSpace = undefined;
                 const src_int = src_val.toBigInt(&space, mod);
-                return @intCast(u16, src_int.bitCountTwosComp()) +
+                return @as(u16, @intCast(src_int.bitCountTwosComp())) +
                     @intFromBool(src_int.positive and dst_info.signedness == .signed);
             },
             .intcast => {
@@ -2964,7 +2964,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
         try self.genSetReg(limit_reg, ty, dst_mcv);
         try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
-            .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+            .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
         });
         if (reg_extra_bits > 0) {
             const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
@@ -2983,7 +2983,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
         break :cc .o;
     } else cc: {
         try self.genSetReg(limit_reg, ty, .{
-            .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)),
+            .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - ty.bitSize(mod))),
         });
 
         try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
@@ -2994,7 +2994,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
         break :cc .c;
     };
 
-    const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
+    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
     try self.asmCmovccRegisterRegister(
         registerAlias(dst_reg, cmov_abi_size),
         registerAlias(limit_reg, cmov_abi_size),
@@ -3043,7 +3043,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
         try self.genSetReg(limit_reg, ty, dst_mcv);
         try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
-            .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+            .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
         });
         if (reg_extra_bits > 0) {
             const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
@@ -3066,7 +3066,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
         break :cc .c;
     };
 
-    const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
+    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
     try self.asmCmovccRegisterRegister(
         registerAlias(dst_reg, cmov_abi_size),
         registerAlias(limit_reg, cmov_abi_size),
@@ -3114,18 +3114,18 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
         try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
-            .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
+            .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
         });
         break :cc .o;
     } else cc: {
         try self.genSetReg(limit_reg, ty, .{
-            .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits),
+            .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - reg_bits)),
         });
         break :cc .c;
     };
 
     const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
-    const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
+    const cmov_abi_size = @max(@as(u32, @intCast(ty.abiSize(mod))), 2);
     try self.asmCmovccRegisterRegister(
         registerAlias(dst_mcv.register, cmov_abi_size),
         registerAlias(limit_reg, cmov_abi_size),
@@ -3172,13 +3172,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+                        @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
                         Type.u1,
                         .{ .eflags = cc },
                     );
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
+                        @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
                         ty,
                         partial_mcv,
                     );
@@ -3245,13 +3245,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+                        @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
                         tuple_ty.structFieldType(1, mod),
                         .{ .eflags = cc },
                     );
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
+                        @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
                         tuple_ty.structFieldType(0, mod),
                         partial_mcv,
                     );
@@ -3319,7 +3319,7 @@ fn genSetFrameTruncatedOverflowCompare(
         );
     }
 
-    const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod));
+    const payload_off = @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod)));
     if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv);
     try self.genSetMem(
         .{ .frame = frame_index },
@@ -3329,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare(
     );
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+        @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
         tuple_ty.structFieldType(1, mod),
         if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
     );
@@ -3386,13 +3386,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     if (dst_info.bits >= lhs_active_bits + rhs_active_bits) {
                         try self.genSetMem(
                             .{ .frame = frame_index },
-                            @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))),
                             tuple_ty.structFieldType(0, mod),
                             partial_mcv,
                         );
                         try self.genSetMem(
                             .{ .frame = frame_index },
-                            @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
+                            @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
                             tuple_ty.structFieldType(1, mod),
                             .{ .immediate = 0 }, // cc being set is impossible
                         );
@@ -3416,7 +3416,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 /// Quotient is saved in .rax and remainder in .rdx.
 fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     if (abi_size > 8) {
         return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
     }
@@ -3456,7 +3456,7 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
 /// Clobbers .rax and .rdx registers.
 fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     const int_info = ty.intInfo(mod);
     const dividend: Register = switch (lhs) {
         .register => |reg| reg,
@@ -3595,7 +3595,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
 
         const pl_ty = dst_ty.childType(mod);
-        const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
+        const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod)));
         try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 });
         break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
     };
@@ -3628,7 +3628,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
 
                 const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand);
                 if (err_off > 0) {
-                    const shift = @intCast(u6, err_off * 8);
+                    const shift = @as(u6, @intCast(err_off * 8));
                     try self.genShiftBinOpMir(
                         .{ ._r, .sh },
                         err_union_ty,
@@ -3642,7 +3642,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
             },
             .load_frame => |frame_addr| break :result .{ .load_frame = .{
                 .index = frame_addr.index,
-                .off = frame_addr.off + @intCast(i32, err_off),
+                .off = frame_addr.off + @as(i32, @intCast(err_off)),
             } },
             else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}),
         }
@@ -3674,7 +3674,7 @@ fn genUnwrapErrorUnionPayloadMir(
         switch (err_union) {
             .load_frame => |frame_addr| break :result .{ .load_frame = .{
                 .index = frame_addr.index,
-                .off = frame_addr.off + @intCast(i32, payload_off),
+                .off = frame_addr.off + @as(i32, @intCast(payload_off)),
             } },
             .register => |reg| {
                 // TODO reuse operand
@@ -3686,7 +3686,7 @@ fn genUnwrapErrorUnionPayloadMir(
                 else
                     .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) };
                 if (payload_off > 0) {
-                    const shift = @intCast(u6, payload_off * 8);
+                    const shift = @as(u6, @intCast(payload_off * 8));
                     try self.genShiftBinOpMir(
                         .{ ._r, .sh },
                         err_union_ty,
@@ -3727,8 +3727,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const eu_ty = src_ty.childType(mod);
     const pl_ty = eu_ty.errorUnionPayload(mod);
     const err_ty = eu_ty.errorUnionSet(mod);
-    const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
-    const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
+    const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
+    const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod)));
     try self.asmRegisterMemory(
         .{ ._, .mov },
         registerAlias(dst_reg, err_abi_size),
@@ -3766,8 +3766,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
 
     const eu_ty = src_ty.childType(mod);
     const pl_ty = eu_ty.errorUnionPayload(mod);
-    const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+    const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+    const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -3793,8 +3793,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const eu_ty = src_ty.childType(mod);
         const pl_ty = eu_ty.errorUnionPayload(mod);
         const err_ty = eu_ty.errorUnionSet(mod);
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
-        const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
+        const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
+        const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod)));
         try self.asmMemoryImmediate(
             .{ ._, .mov },
             Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{
@@ -3814,8 +3814,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
-        const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+        const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+        const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
         try self.asmRegisterMemory(
             .{ ._, .lea },
             registerAlias(dst_reg, dst_abi_size),
@@ -3864,14 +3864,14 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         try self.genCopy(pl_ty, opt_mcv, pl_mcv);
 
         if (!same_repr) {
-            const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
+            const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod)));
             switch (opt_mcv) {
                 else => unreachable,
 
                 .register => |opt_reg| try self.asmRegisterImmediate(
                     .{ ._s, .bt },
                     opt_reg,
-                    Immediate.u(@intCast(u6, pl_abi_size * 8)),
+                    Immediate.u(@as(u6, @intCast(pl_abi_size * 8))),
                 ),
 
                 .load_frame => |frame_addr| try self.asmMemoryImmediate(
@@ -3903,8 +3903,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
         if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 };
 
         const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+        const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+        const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
         break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3925,8 +3925,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
         if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand);
 
         const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+        const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod)));
+        const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod)));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef);
         const operand = try self.resolveInst(ty_op.operand);
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand);
@@ -3988,7 +3988,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+    const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -4165,7 +4165,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     // additional `mov` is needed at the end to get the actual value
 
     const elem_ty = ptr_ty.elemType2(mod);
-    const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
+    const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
     const index_ty = self.typeOf(bin_op.rhs);
     const index_mcv = try self.resolveInst(bin_op.rhs);
     const index_lock = switch (index_mcv) {
@@ -4305,7 +4305,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
             .load_frame => |frame_addr| {
                 if (tag_abi_size <= 8) {
                     const off: i32 = if (layout.tag_align < layout.payload_align)
-                        @intCast(i32, layout.payload_size)
+                        @as(i32, @intCast(layout.payload_size))
                     else
                         0;
                     break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{
@@ -4317,13 +4317,13 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
             },
             .register => {
                 const shift: u6 = if (layout.tag_align < layout.payload_align)
-                    @intCast(u6, layout.payload_size * 8)
+                    @as(u6, @intCast(layout.payload_size * 8))
                 else
                     0;
                 const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
                 try self.genShiftBinOpMir(.{ ._r, .sh }, Type.usize, result, .{ .immediate = shift });
                 break :blk MCValue{
-                    .register = registerAlias(result.register, @intCast(u32, layout.tag_size)),
+                    .register = registerAlias(result.register, @as(u32, @intCast(layout.tag_size))),
                 };
             },
             else => return self.fail("TODO implement get_union_tag for {}", .{operand}),
@@ -4420,7 +4420,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg });
             } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv);
 
-            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
+            const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
             try self.asmCmovccRegisterRegister(
                 registerAlias(dst_reg, cmov_abi_size),
                 registerAlias(imm_reg, cmov_abi_size),
@@ -4430,7 +4430,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
             try self.genBinOpMir(.{ ._, .xor }, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 });
         } else {
             const imm_reg = try self.copyToTmpRegister(dst_ty, .{
-                .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - self.regBitSize(dst_ty)),
+                .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - self.regBitSize(dst_ty))),
             });
             const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg);
             defer self.register_manager.unlockReg(imm_lock);
@@ -4447,7 +4447,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .register = wide_reg },
             );
 
-            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
+            const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
             try self.asmCmovccRegisterRegister(
                 registerAlias(imm_reg, cmov_abi_size),
                 registerAlias(dst_reg, cmov_abi_size),
@@ -4501,8 +4501,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
                         .{ ._, .@"or" },
                         wide_ty,
                         tmp_mcv,
-                        .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - extra_bits)) <<
-                            @intCast(u6, src_bits) },
+                        .{ .immediate = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - extra_bits))) <<
+                            @as(u6, @intCast(src_bits)) },
                     );
                     break :masked tmp_mcv;
                 } else mat_src_mcv;
@@ -4519,7 +4519,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
                         .{ ._, .@"or" },
                         Type.u64,
                         dst_mcv,
-                        .{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(u6, src_bits - 64) },
+                        .{ .immediate = @as(u64, math.maxInt(u64)) << @as(u6, @intCast(src_bits - 64)) },
                     );
                     break :masked dst_mcv;
                 } else mat_src_mcv.address().offset(8).deref();
@@ -4547,7 +4547,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
             try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg });
         } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);
 
-        const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
+        const cmov_abi_size = @max(@as(u32, @intCast(dst_ty.abiSize(mod))), 2);
         try self.asmCmovccRegisterRegister(
             registerAlias(dst_reg, cmov_abi_size),
             registerAlias(width_reg, cmov_abi_size),
@@ -4563,7 +4563,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
         const src_ty = self.typeOf(ty_op.operand);
-        const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+        const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
         const src_mcv = try self.resolveInst(ty_op.operand);
 
         if (self.hasFeature(.popcnt)) {
@@ -4588,7 +4588,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
             break :result dst_mcv;
         }
 
-        const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
+        const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8));
         const imm_0_1 = Immediate.u(mask / 0b1_1);
         const imm_00_11 = Immediate.u(mask / 0b01_01);
         const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
@@ -4754,7 +4754,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     const src_ty = self.typeOf(ty_op.operand);
-    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+    const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
     const src_mcv = try self.resolveInst(ty_op.operand);
 
     const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
@@ -4774,7 +4774,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
         else
             undefined;
 
-        const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - src_abi_size * 8);
+        const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8));
         const imm_0000_1111 = Immediate.u(mask / 0b0001_0001);
         const imm_00_11 = Immediate.u(mask / 0b01_01);
         const imm_0_1 = Immediate.u(mask / 0b1_1);
@@ -5017,7 +5017,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
     })) |tag| tag else return self.fail("TODO implement genRound for {}", .{
         ty.fmt(self.bin_file.options.module.?),
     });
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     const dst_alias = registerAlias(dst_reg, abi_size);
     switch (mir_tag[0]) {
         .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
@@ -5057,7 +5057,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const ty = self.typeOf(un_op);
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
 
     const src_mcv = try self.resolveInst(un_op);
     const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5123,7 +5123,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                                 .{ .v_ps, .cvtph2 },
                                 wide_reg,
                                 src_mcv.mem(Memory.PtrSize.fromSize(
-                                    @intCast(u32, @divExact(wide_reg.bitSize(), 16)),
+                                    @as(u32, @intCast(@divExact(wide_reg.bitSize(), 16))),
                                 )),
                             ) else try self.asmRegisterRegister(
                                 .{ .v_ps, .cvtph2 },
@@ -5255,10 +5255,10 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
     const ptr_info = ptr_ty.ptrInfo(mod);
 
     const val_ty = ptr_info.child.toType();
-    const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
+    const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod)));
     const limb_abi_size: u32 = @min(val_abi_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
-    const val_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size);
+    const val_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size));
     const val_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits;
     const val_extra_bits = self.regExtraBits(val_ty);
 
@@ -5404,7 +5404,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
     const limb_abi_bits = limb_abi_size * 8;
 
     const src_bit_size = src_ty.bitSize(mod);
-    const src_byte_off = @intCast(i32, ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size);
+    const src_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size));
     const src_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits;
 
     const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
@@ -5421,13 +5421,13 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
             .disp = src_byte_off + limb_i * limb_abi_bits,
         });
 
-        const part_mask = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - part_bit_size)) <<
-            @intCast(u6, part_bit_off);
+        const part_mask = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - part_bit_size))) <<
+            @as(u6, @intCast(part_bit_off));
         const part_mask_not = part_mask ^
-            (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_abi_bits));
+            (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_abi_bits)));
         if (limb_abi_size <= 4) {
             try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.u(part_mask_not));
-        } else if (math.cast(i32, @bitCast(i64, part_mask_not))) |small| {
+        } else if (math.cast(i32, @as(i64, @bitCast(part_mask_not)))) |small| {
             try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.s(small));
         } else {
             const part_mask_reg = try self.register_manager.allocReg(null, gp);
@@ -5542,14 +5542,14 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
     const ptr_field_ty = self.typeOfIndex(inst);
     const ptr_container_ty = self.typeOf(operand);
     const container_ty = ptr_container_ty.childType(mod);
-    const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) {
+    const field_offset = @as(i32, @intCast(switch (container_ty.containerLayout(mod)) {
         .Auto, .Extern => container_ty.structFieldOffset(index, mod),
         .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
             ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0)
             container_ty.packedStructFieldByteOffset(index, mod)
         else
             0,
-    });
+    }));
 
     const src_mcv = try self.resolveInst(operand);
     const dst_mcv = if (switch (src_mcv) {
@@ -5577,7 +5577,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 
         const src_mcv = try self.resolveInst(operand);
         const field_off = switch (container_ty.containerLayout(mod)) {
-            .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8),
+            .Auto, .Extern => @as(u32, @intCast(container_ty.structFieldOffset(index, mod) * 8)),
             .Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
                 struct_obj.packedFieldBitOffset(mod, index)
             else
@@ -5588,7 +5588,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
             .load_frame => |frame_addr| {
                 if (field_off % 8 == 0) {
                     const off_mcv =
-                        src_mcv.address().offset(@intCast(i32, @divExact(field_off, 8))).deref();
+                        src_mcv.address().offset(@as(i32, @intCast(@divExact(field_off, 8)))).deref();
                     if (self.reuseOperand(inst, operand, 0, src_mcv)) break :result off_mcv;
 
                     const dst_mcv = try self.allocRegOrMem(inst, true);
@@ -5596,10 +5596,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                     break :result dst_mcv;
                 }
 
-                const field_abi_size = @intCast(u32, field_ty.abiSize(mod));
+                const field_abi_size = @as(u32, @intCast(field_ty.abiSize(mod)));
                 const limb_abi_size: u32 = @min(field_abi_size, 8);
                 const limb_abi_bits = limb_abi_size * 8;
-                const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size);
+                const field_byte_off = @as(i32, @intCast(field_off / limb_abi_bits * limb_abi_size));
                 const field_bit_off = field_off % limb_abi_bits;
 
                 if (field_abi_size > 8) {
@@ -5643,7 +5643,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                         tmp_reg,
                         Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{
                             .base = .{ .frame = frame_addr.index },
-                            .disp = frame_addr.off + field_byte_off + @intCast(i32, limb_abi_size),
+                            .disp = frame_addr.off + field_byte_off + @as(i32, @intCast(limb_abi_size)),
                         }),
                     );
                     try self.asmRegisterRegisterImmediate(
@@ -5724,7 +5724,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
 
     const inst_ty = self.typeOfIndex(inst);
     const parent_ty = inst_ty.childType(mod);
-    const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));
+    const field_offset = @as(i32, @intCast(parent_ty.structFieldOffset(extra.field_index, mod)));
 
     const src_mcv = try self.resolveInst(extra.field_ptr);
     const dst_mcv = if (src_mcv.isRegisterOffset() and
@@ -5773,14 +5773,14 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
 
     switch (tag) {
         .not => {
-            const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8));
+            const limb_abi_size = @as(u16, @intCast(@min(src_ty.abiSize(mod), 8)));
             const int_info = if (src_ty.ip_index == .bool_type)
                 std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 }
             else
                 src_ty.intInfo(mod);
             var byte_off: i32 = 0;
             while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
-                const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8));
+                const limb_bits = @as(u16, @intCast(@min(int_info.bits - byte_off * 8, limb_abi_size * 8)));
                 const limb_ty = try mod.intType(int_info.signedness, limb_bits);
                 const limb_mcv = switch (byte_off) {
                     0 => dst_mcv,
@@ -5788,7 +5788,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
                 };
 
                 if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) {
-                    const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits);
+                    const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_bits));
                     try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask });
                 } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv);
             }
@@ -5801,7 +5801,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
 
 fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, dst_ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
     if (abi_size > 8) return self.fail("TODO implement {} for {}", .{
         mir_tag,
         dst_ty.fmt(self.bin_file.options.module.?),
@@ -5863,7 +5863,7 @@ fn genShiftBinOpMir(
         break :rhs .{ .register = .rcx };
     };
 
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     if (abi_size <= 8) {
         switch (lhs_mcv) {
             .register => |lhs_reg| switch (rhs_mcv) {
@@ -5886,7 +5886,7 @@ fn genShiftBinOpMir(
                 const lhs_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (lhs_mcv) {
                     .memory => |addr| .{
                         .base = .{ .reg = .ds },
-                        .disp = math.cast(i32, @bitCast(i64, addr)) orelse
+                        .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
                             return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{
                             @tagName(lhs_mcv),
                             @tagName(rhs_mcv),
@@ -6151,8 +6151,8 @@ fn genMulDivBinOp(
     if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) {
         return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()});
     }
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
-    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
+    const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
+    const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
     if (switch (tag) {
         else => unreachable,
         .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
@@ -6326,7 +6326,7 @@ fn genBinOp(
     const mod = self.bin_file.options.module.?;
     const lhs_ty = self.typeOf(lhs_air);
     const rhs_ty = self.typeOf(rhs_air);
-    const abi_size = @intCast(u32, lhs_ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(lhs_ty.abiSize(mod)));
 
     const maybe_mask_reg = switch (air_tag) {
         else => null,
@@ -6481,7 +6481,7 @@ fn genBinOp(
                     .lea_tlv,
                     .lea_frame,
                     => true,
-                    .memory => |addr| math.cast(i32, @bitCast(i64, addr)) == null,
+                    .memory => |addr| math.cast(i32, @as(i64, @bitCast(addr))) == null,
                     else => false,
                 }) .{ .register = try self.copyToTmpRegister(rhs_ty, src_mcv) } else src_mcv;
                 const mat_mcv_lock = switch (mat_src_mcv) {
@@ -6506,7 +6506,7 @@ fn genBinOp(
                     },
                 };
 
-                const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2);
+                const cmov_abi_size = @max(@as(u32, @intCast(lhs_ty.abiSize(mod))), 2);
                 const tmp_reg = switch (dst_mcv) {
                     .register => |reg| reg,
                     else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
@@ -6541,7 +6541,7 @@ fn genBinOp(
                         Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), switch (mat_src_mcv) {
                             .memory => |addr| .{
                                 .base = .{ .reg = .ds },
-                                .disp = @intCast(i32, @bitCast(i64, addr)),
+                                .disp = @as(i32, @intCast(@as(i64, @bitCast(addr)))),
                             },
                             .indirect => |reg_off| .{
                                 .base = .{ .reg = reg_off.reg },
@@ -7429,7 +7429,7 @@ fn genBinOpMir(
     src_mcv: MCValue,
 ) !void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     switch (dst_mcv) {
         .none,
         .unreach,
@@ -7465,28 +7465,28 @@ fn genBinOpMir(
                     8 => try self.asmRegisterImmediate(
                         mir_tag,
                         dst_alias,
-                        if (math.cast(i8, @bitCast(i64, imm))) |small|
+                        if (math.cast(i8, @as(i64, @bitCast(imm)))) |small|
                             Immediate.s(small)
                         else
-                            Immediate.u(@intCast(u8, imm)),
+                            Immediate.u(@as(u8, @intCast(imm))),
                     ),
                     16 => try self.asmRegisterImmediate(
                         mir_tag,
                         dst_alias,
-                        if (math.cast(i16, @bitCast(i64, imm))) |small|
+                        if (math.cast(i16, @as(i64, @bitCast(imm)))) |small|
                             Immediate.s(small)
                         else
-                            Immediate.u(@intCast(u16, imm)),
+                            Immediate.u(@as(u16, @intCast(imm))),
                     ),
                     32 => try self.asmRegisterImmediate(
                         mir_tag,
                         dst_alias,
-                        if (math.cast(i32, @bitCast(i64, imm))) |small|
+                        if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
                             Immediate.s(small)
                         else
-                            Immediate.u(@intCast(u32, imm)),
+                            Immediate.u(@as(u32, @intCast(imm))),
                     ),
-                    64 => if (math.cast(i32, @bitCast(i64, imm))) |small|
+                    64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
                         try self.asmRegisterImmediate(mir_tag, dst_alias, Immediate.s(small))
                     else
                         try self.asmRegisterRegister(mir_tag, dst_alias, registerAlias(
@@ -7602,8 +7602,8 @@ fn genBinOpMir(
                 => null,
                 .memory, .load_got, .load_direct, .load_tlv => src: {
                     switch (src_mcv) {
-                        .memory => |addr| if (math.cast(i32, @bitCast(i64, addr)) != null and
-                            math.cast(i32, @bitCast(i64, addr) + abi_size - limb_abi_size) != null)
+                        .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr))) != null and
+                            math.cast(i32, @as(i64, @bitCast(addr)) + abi_size - limb_abi_size) != null)
                             break :src null,
                         .load_got, .load_direct, .load_tlv => {},
                         else => unreachable,
@@ -7680,7 +7680,7 @@ fn genBinOpMir(
                         const imm = switch (off) {
                             0 => src_imm,
                             else => switch (ty_signedness) {
-                                .signed => @bitCast(u64, @bitCast(i64, src_imm) >> 63),
+                                .signed => @as(u64, @bitCast(@as(i64, @bitCast(src_imm)) >> 63)),
                                 .unsigned => 0,
                             },
                         };
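
The .signed arm above computes the upper limb of a sign-extended immediate: reinterpreting the u64 as i64 and arithmetic-shifting by 63 smears the sign bit across all 64 bits. A minimal standalone sketch of the rewritten idiom (not part of the commit; the value is illustrative):

    const std = @import("std");

    test "high limb sign extension" {
        const src_imm: u64 = 0x8000_0000_0000_0000; // negative as an i64
        // Old: @bitCast(u64, @bitCast(i64, src_imm) >> 63)
        const high = @as(u64, @bitCast(@as(i64, @bitCast(src_imm)) >> 63));
        try std.testing.expectEqual(@as(u64, 0xffff_ffff_ffff_ffff), high);
    }
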
@@ -7688,28 +7688,28 @@ fn genBinOpMir(
                             8 => try self.asmMemoryImmediate(
                                 mir_limb_tag,
                                 dst_limb_mem,
-                                if (math.cast(i8, @bitCast(i64, imm))) |small|
+                                if (math.cast(i8, @as(i64, @bitCast(imm)))) |small|
                                     Immediate.s(small)
                                 else
-                                    Immediate.u(@intCast(u8, imm)),
+                                    Immediate.u(@as(u8, @intCast(imm))),
                             ),
                             16 => try self.asmMemoryImmediate(
                                 mir_limb_tag,
                                 dst_limb_mem,
-                                if (math.cast(i16, @bitCast(i64, imm))) |small|
+                                if (math.cast(i16, @as(i64, @bitCast(imm)))) |small|
                                     Immediate.s(small)
                                 else
-                                    Immediate.u(@intCast(u16, imm)),
+                                    Immediate.u(@as(u16, @intCast(imm))),
                             ),
                             32 => try self.asmMemoryImmediate(
                                 mir_limb_tag,
                                 dst_limb_mem,
-                                if (math.cast(i32, @bitCast(i64, imm))) |small|
+                                if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
                                     Immediate.s(small)
                                 else
-                                    Immediate.u(@intCast(u32, imm)),
+                                    Immediate.u(@as(u32, @intCast(imm))),
                             ),
-                            64 => if (math.cast(i32, @bitCast(i64, imm))) |small|
+                            64 => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small|
                                 try self.asmMemoryImmediate(
                                     mir_limb_tag,
                                     dst_limb_mem,
@@ -7753,7 +7753,7 @@ fn genBinOpMir(
                                 0 => src_mcv,
                                 else => .{ .immediate = 0 },
                             },
-                            .memory => |addr| .{ .memory = @bitCast(u64, @bitCast(i64, addr) + off) },
+                            .memory => |addr| .{ .memory = @as(u64, @bitCast(@as(i64, @bitCast(addr)) + off)) },
                             .indirect => |reg_off| .{ .indirect = .{
                                 .reg = reg_off.reg,
                                 .off = reg_off.off + off,
@@ -7780,7 +7780,7 @@ fn genBinOpMir(
 /// Does not support byte-size operands.
 fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, dst_ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
     switch (dst_mcv) {
         .none,
         .unreach,
@@ -7847,7 +7847,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
                     Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) {
                         .memory => |addr| .{
                             .base = .{ .reg = .ds },
-                            .disp = math.cast(i32, @bitCast(i64, addr)) orelse
+                            .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
                                 return self.asmRegisterRegister(
                                 .{ .i_, .mul },
                                 dst_alias,
@@ -8014,7 +8014,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
     const ty = self.typeOf(callee);
 
     const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 const got_addr = atom.getOffsetTableAddress(elf_file);
                 try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{
                     .base = .{ .reg = .ds },
-                    .disp = @intCast(i32, got_addr),
+                    .disp = @as(i32, @intCast(got_addr)),
                 }));
             } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
                 const atom = try coff_file.getOrCreateAtomForDecl(owner_decl);
@@ -8124,7 +8124,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 const atom = p9.getAtom(atom_index);
                 try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{
                     .base = .{ .reg = .ds },
-                    .disp = @intCast(i32, atom.getOffsetTableAddress(p9)),
+                    .disp = @as(i32, @intCast(atom.getOffsetTableAddress(p9))),
                 }));
             } else unreachable;
         } else if (func_value.getExternFunc(mod)) |extern_func| {
@@ -8244,7 +8244,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const result = MCValue{
         .eflags = switch (ty.zigTypeTag(mod)) {
             else => result: {
-                const abi_size = @intCast(u16, ty.abiSize(mod));
+                const abi_size = @as(u16, @intCast(ty.abiSize(mod)));
                 const may_flip: enum {
                     may_flip,
                     must_flip,
@@ -8441,7 +8441,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
     self.eflags_inst = inst;
 
     const op_ty = self.typeOf(un_op);
-    const op_abi_size = @intCast(u32, op_ty.abiSize(mod));
+    const op_abi_size = @as(u32, @intCast(op_ty.abiSize(mod)));
     const op_mcv = try self.resolveInst(un_op);
     const dst_reg = switch (op_mcv) {
         .register => |reg| reg,
@@ -8650,7 +8650,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
     const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
         .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
     else
-        .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
+        .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool };
 
     switch (opt_mcv) {
         .none,
@@ -8670,18 +8670,18 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 
         .register => |opt_reg| {
             if (some_info.off == 0) {
-                const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+                const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
                 const alias_reg = registerAlias(opt_reg, some_abi_size);
                 assert(some_abi_size * 8 == alias_reg.bitSize());
                 try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
                 return .{ .eflags = .z };
             }
             assert(some_info.ty.ip_index == .bool_type);
-            const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod));
+            const opt_abi_size = @as(u32, @intCast(opt_ty.abiSize(mod)));
             try self.asmRegisterImmediate(
                 .{ ._, .bt },
                 registerAlias(opt_reg, opt_abi_size),
-                Immediate.u(@intCast(u6, some_info.off * 8)),
+                Immediate.u(@as(u6, @intCast(some_info.off * 8))),
             );
             return .{ .eflags = .nc };
         },
@@ -8696,7 +8696,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
             defer self.register_manager.unlockReg(addr_reg_lock);
 
             try self.genSetReg(addr_reg, Type.usize, opt_mcv.address());
-            const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+            const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
             try self.asmMemoryImmediate(
                 .{ ._, .cmp },
                 Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
@@ -8709,7 +8709,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
         },
 
         .indirect, .load_frame => {
-            const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+            const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
             try self.asmMemoryImmediate(
                 .{ ._, .cmp },
                 Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) {
@@ -8741,7 +8741,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
     const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
         .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
     else
-        .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
+        .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool };
 
     const ptr_reg = switch (ptr_mcv) {
         .register => |reg| reg,
@@ -8750,7 +8750,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
     const ptr_lock = self.register_manager.lockReg(ptr_reg);
     defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
+    const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod)));
     try self.asmMemoryImmediate(
         .{ ._, .cmp },
         Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
@@ -8783,7 +8783,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
 
             const tmp_reg = try self.copyToTmpRegister(ty, operand);
             if (err_off > 0) {
-                const shift = @intCast(u6, err_off * 8);
+                const shift = @as(u6, @intCast(err_off * 8));
                 try self.genShiftBinOpMir(
                     .{ ._r, .sh },
                     ty,
@@ -8805,7 +8805,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
             Type.anyerror,
             .{ .load_frame = .{
                 .index = frame_addr.index,
-                .off = frame_addr.off + @intCast(i32, err_off),
+                .off = frame_addr.off + @as(i32, @intCast(err_off)),
             } },
             .{ .immediate = 0 },
         ),
@@ -8943,7 +8943,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const loop = self.air.extraData(Air.Block, ty_pl.payload);
     const body = self.air.extra[loop.end..][0..loop.data.body_len];
-    const jmp_target = @intCast(u32, self.mir_instructions.len);
+    const jmp_target = @as(u32, @intCast(self.mir_instructions.len));
 
     self.scope_generation += 1;
     const state = try self.saveState();
@@ -9015,9 +9015,9 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
 
     while (case_i < switch_br.data.cases_len) : (case_i += 1) {
         const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-        const items = @ptrCast(
+        const items = @as(
             []const Air.Inst.Ref,
-            self.air.extra[case.end..][0..case.data.items_len],
+            @ptrCast(self.air.extra[case.end..][0..case.data.items_len]),
         );
         const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
         extra_index = case.end + items.len + case_body.len;
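
The @ptrCast hunks here reinterpret a []const u32 slice of Air extra data as a slice of enum(u32) references; only the element type changes, not the memory. A minimal sketch under that assumption (Ref is a hypothetical stand-in for Air.Inst.Ref):

    const std = @import("std");

    const Ref = enum(u32) { none = 0, first = 1, _ };

    test "slice ptrCast rewrite" {
        const extra = [_]u32{ 1, 0 };
        const words: []const u32 = extra[0..];
        // Old: @ptrCast([]const Ref, words)
        const refs = @as([]const Ref, @ptrCast(words));
        try std.testing.expectEqual(Ref.first, refs[0]);
    }
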
@@ -9066,7 +9066,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
-    const next_inst = @intCast(u32, self.mir_instructions.len);
+    const next_inst = @as(u32, @intCast(self.mir_instructions.len));
     switch (self.mir_instructions.items(.tag)[reloc]) {
         .j, .jmp => {},
         .pseudo => switch (self.mir_instructions.items(.ops)[reloc]) {
@@ -9141,11 +9141,11 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Asm, ty_pl.payload);
-    const clobbers_len = @truncate(u31, extra.data.flags);
+    const clobbers_len = @as(u31, @truncate(extra.data.flags));
     var extra_i: usize = extra.end;
-    const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+    const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
     extra_i += outputs.len;
-    const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+    const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
     extra_i += inputs.len;
 
     var result: MCValue = .none;
@@ -9281,7 +9281,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
                 if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| {
                     if (mnem_size) |size| {
                         const max = @as(u64, math.maxInt(u64)) >>
-                            @intCast(u6, 64 - (size.bitSize() - 1));
+                            @as(u6, @intCast(64 - (size.bitSize() - 1)));
                         if ((if (s < 0) ~s else s) > max)
                             return self.fail("Invalid immediate size: '{s}'", .{op_str});
                     }
@@ -9289,7 +9289,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
                 } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| {
                     if (mnem_size) |size| {
                         const max = @as(u64, math.maxInt(u64)) >>
-                            @intCast(u6, 64 - size.bitSize());
+                            @as(u6, @intCast(64 - size.bitSize()));
                         if (u > max)
                             return self.fail("Invalid immediate size: '{s}'", .{op_str});
                     }
@@ -9618,7 +9618,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
         .indirect => |reg_off| try self.genSetMem(.{ .reg = reg_off.reg }, reg_off.off, ty, src_mcv),
         .memory, .load_direct, .load_got, .load_tlv => {
             switch (dst_mcv) {
-                .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr|
+                .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
                     return self.genSetMem(.{ .reg = .ds }, small_addr, ty, src_mcv),
                 .load_direct, .load_got, .load_tlv => {},
                 else => unreachable,
@@ -9641,7 +9641,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
 
 fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     if (abi_size * 8 > dst_reg.bitSize())
         return self.fail("genSetReg called with a value larger than dst_reg", .{});
     switch (src_mcv) {
@@ -9662,11 +9662,11 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
             } else if (abi_size > 4 and math.cast(u32, imm) != null) {
                 // 32-bit moves zero-extend to 64-bit.
                 try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to32(), Immediate.u(imm));
-            } else if (abi_size <= 4 and @bitCast(i64, imm) < 0) {
+            } else if (abi_size <= 4 and @as(i64, @bitCast(imm)) < 0) {
                 try self.asmRegisterImmediate(
                     .{ ._, .mov },
                     registerAlias(dst_reg, abi_size),
-                    Immediate.s(@intCast(i32, @bitCast(i64, imm))),
+                    Immediate.s(@as(i32, @intCast(@as(i64, @bitCast(imm))))),
                 );
             } else {
                 try self.asmRegisterImmediate(
@@ -9806,7 +9806,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
         },
         .memory, .load_direct, .load_got, .load_tlv => {
             switch (src_mcv) {
-                .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| {
+                .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| {
                     const dst_alias = registerAlias(dst_reg, abi_size);
                     const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
                         .base = .{ .reg = .ds },
@@ -9814,7 +9814,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
                     });
                     switch (try self.moveStrategy(ty, mem.isAlignedGeneric(
                         u32,
-                        @bitCast(u32, small_addr),
+                        @as(u32, @bitCast(small_addr)),
                         ty.abiAlignment(mod),
                     ))) {
                         .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem),
@@ -9928,9 +9928,9 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
 
 fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     const dst_ptr_mcv: MCValue = switch (base) {
-        .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) },
+        .none => .{ .immediate = @as(u64, @bitCast(@as(i64, disp))) },
         .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
         .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
     };
@@ -9941,9 +9941,9 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
         .immediate => |imm| switch (abi_size) {
             1, 2, 4 => {
                 const immediate = if (ty.isSignedInt(mod))
-                    Immediate.s(@truncate(i32, @bitCast(i64, imm)))
+                    Immediate.s(@as(i32, @truncate(@as(i64, @bitCast(imm)))))
                 else
-                    Immediate.u(@intCast(u32, imm));
+                    Immediate.u(@as(u32, @intCast(imm)));
                 try self.asmMemoryImmediate(
                     .{ ._, .mov },
                     Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }),
@@ -9951,7 +9951,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
                 );
             },
             3, 5...7 => unreachable,
-            else => if (math.cast(i32, @bitCast(i64, imm))) |small| {
+            else => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| {
                 try self.asmMemoryImmediate(
                     .{ ._, .mov },
                     Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }),
@@ -9963,14 +9963,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
                     .{ ._, .mov },
                     Memory.sib(.dword, .{ .base = base, .disp = disp + offset }),
                     if (ty.isSignedInt(mod))
-                        Immediate.s(@truncate(
+                        Immediate.s(@as(
                             i32,
-                            @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63),
+                            @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)),
                         ))
                     else
-                        Immediate.u(@truncate(
+                        Immediate.u(@as(
                             u32,
-                            if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0,
+                            @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0),
                         )),
                 );
             },
@@ -9985,13 +9985,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
             switch (try self.moveStrategy(ty, switch (base) {
                 .none => mem.isAlignedGeneric(
                     u32,
-                    @bitCast(u32, disp),
+                    @as(u32, @bitCast(disp)),
                     ty.abiAlignment(mod),
                 ),
                 .reg => |reg| switch (reg) {
                     .es, .cs, .ss, .ds => mem.isAlignedGeneric(
                         u32,
-                        @bitCast(u32, disp),
+                        @as(u32, @bitCast(disp)),
                         ty.abiAlignment(mod),
                     ),
                     else => false,
@@ -10012,13 +10012,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
         .register_overflow => |ro| {
             try self.genSetMem(
                 base,
-                disp + @intCast(i32, ty.structFieldOffset(0, mod)),
+                disp + @as(i32, @intCast(ty.structFieldOffset(0, mod))),
                 ty.structFieldType(0, mod),
                 .{ .register = ro.reg },
             );
             try self.genSetMem(
                 base,
-                disp + @intCast(i32, ty.structFieldOffset(1, mod)),
+                disp + @as(i32, @intCast(ty.structFieldOffset(1, mod))),
                 ty.structFieldType(1, mod),
                 .{ .eflags = ro.eflags },
             );
@@ -10077,7 +10077,7 @@ fn genLazySymbolRef(
         _ = try atom.getOrCreateOffsetTableEntry(elf_file);
         const got_addr = atom.getOffsetTableAddress(elf_file);
         const got_mem =
-            Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) });
+            Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) });
         switch (tag) {
             .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem),
             .call => try self.asmMemory(.{ ._, .call }, got_mem),
@@ -10099,7 +10099,7 @@ fn genLazySymbolRef(
         _ = atom.getOrCreateOffsetTableEntry(p9_file);
         const got_addr = atom.getOffsetTableAddress(p9_file);
         const got_mem =
-            Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) });
+            Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) });
         switch (tag) {
             .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem),
             .call => try self.asmMemory(.{ ._, .call }, got_mem),
@@ -10195,8 +10195,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
             if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
         if (dst_signedness == src_signedness) break :result dst_mcv;
 
-        const abi_size = @intCast(u16, dst_ty.abiSize(mod));
-        const bit_size = @intCast(u16, dst_ty.bitSize(mod));
+        const abi_size = @as(u16, @intCast(dst_ty.abiSize(mod)));
+        const bit_size = @as(u16, @intCast(dst_ty.bitSize(mod)));
         if (abi_size * 8 <= bit_size) break :result dst_mcv;
 
         const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable;
@@ -10237,7 +10237,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(i32, ptr_ty.abiSize(mod)),
+        @as(i32, @intCast(ptr_ty.abiSize(mod))),
         Type.usize,
         .{ .immediate = array_len },
     );
@@ -10251,7 +10251,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     const src_ty = self.typeOf(ty_op.operand);
-    const src_bits = @intCast(u32, src_ty.bitSize(mod));
+    const src_bits = @as(u32, @intCast(src_ty.bitSize(mod)));
     const src_signedness =
         if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
     const dst_ty = self.typeOfIndex(inst);
@@ -10306,7 +10306,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
 
     const src_ty = self.typeOf(ty_op.operand);
     const dst_ty = self.typeOfIndex(inst);
-    const dst_bits = @intCast(u32, dst_ty.bitSize(mod));
+    const dst_bits = @as(u32, @intCast(dst_ty.bitSize(mod)));
     const dst_signedness =
         if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
 
@@ -10359,7 +10359,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
 
     const ptr_ty = self.typeOf(extra.ptr);
     const val_ty = self.typeOf(extra.expected_value);
-    const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
+    const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod)));
 
     try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
     const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
@@ -10461,7 +10461,7 @@ fn atomicOp(
     };
     defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
+    const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod)));
     const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
     const ptr_mem = switch (ptr_mcv) {
         .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size),
@@ -10539,7 +10539,7 @@ fn atomicOp(
             defer self.register_manager.unlockReg(tmp_lock);
 
             try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(.rax, val_abi_size), ptr_mem);
-            const loop = @intCast(u32, self.mir_instructions.len);
+            const loop = @as(u32, @intCast(self.mir_instructions.len));
             if (rmw_op != std.builtin.AtomicRmwOp.Xchg) {
                 try self.genSetReg(tmp_reg, val_ty, .{ .register = .rax });
             }
@@ -10613,7 +10613,7 @@ fn atomicOp(
                 .scale_index = ptr_mem.scaleIndex(),
                 .disp = ptr_mem.sib.disp + 8,
             }));
-            const loop = @intCast(u32, self.mir_instructions.len);
+            const loop = @as(u32, @intCast(self.mir_instructions.len));
             const val_mem_mcv: MCValue = switch (val_mcv) {
                 .memory, .indirect, .load_frame => val_mcv,
                 else => .{ .indirect = .{
@@ -10769,7 +10769,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     };
     defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod));
+    const elem_abi_size = @as(u31, @intCast(elem_ty.abiSize(mod)));
 
     if (elem_abi_size == 1) {
         const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
@@ -11249,9 +11249,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const result_ty = self.typeOfIndex(inst);
-    const len = @intCast(usize, result_ty.arrayLen(mod));
+    const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
     const result: MCValue = result: {
         switch (result_ty.zigTypeTag(mod)) {
             .Struct => {
@@ -11268,17 +11268,17 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                         if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
 
                         const elem_ty = result_ty.structFieldType(elem_i, mod);
-                        const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod));
+                        const elem_bit_size = @as(u32, @intCast(elem_ty.bitSize(mod)));
                         if (elem_bit_size > 64) {
                             return self.fail(
                                 "TODO airAggregateInit implement packed structs with large fields",
                                 .{},
                             );
                         }
-                        const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
+                        const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
                         const elem_abi_bits = elem_abi_size * 8;
                         const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i);
-                        const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size);
+                        const elem_byte_off = @as(i32, @intCast(elem_off / elem_abi_bits * elem_abi_size));
                         const elem_bit_off = elem_off % elem_abi_bits;
                         const elem_mcv = try self.resolveInst(elem);
                         const mat_elem_mcv = switch (elem_mcv) {
@@ -11330,7 +11330,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                                 elem_ty,
                                 .{ .load_frame = .{
                                     .index = frame_index,
-                                    .off = elem_byte_off + @intCast(i32, elem_abi_size),
+                                    .off = elem_byte_off + @as(i32, @intCast(elem_abi_size)),
                                 } },
                                 .{ .register = reg },
                             );
@@ -11340,7 +11340,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                     if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
 
                     const elem_ty = result_ty.structFieldType(elem_i, mod);
-                    const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod));
+                    const elem_off = @as(i32, @intCast(result_ty.structFieldOffset(elem_i, mod)));
                     const elem_mcv = try self.resolveInst(elem);
                     const mat_elem_mcv = switch (elem_mcv) {
                         .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
@@ -11354,7 +11354,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                 const frame_index =
                     try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
                 const elem_ty = result_ty.childType(mod);
-                const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+                const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
 
                 for (elements, 0..) |elem, elem_i| {
                     const elem_mcv = try self.resolveInst(elem);
@@ -11362,12 +11362,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
                         .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
                         else => elem_mcv,
                     };
-                    const elem_off = @intCast(i32, elem_size * elem_i);
+                    const elem_off = @as(i32, @intCast(elem_size * elem_i));
                     try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv);
                 }
                 if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem(
                     .{ .frame = frame_index },
-                    @intCast(i32, elem_size * elements.len),
+                    @as(i32, @intCast(elem_size * elements.len)),
                     elem_ty,
                     try self.genTypedValue(.{ .ty = elem_ty, .val = sentinel }),
                 );
@@ -11416,7 +11416,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
         const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
         const tag_int = tag_int_val.toUnsignedInt(mod);
         const tag_off = if (layout.tag_align < layout.payload_align)
-            @intCast(i32, layout.payload_size)
+            @as(i32, @intCast(layout.payload_size))
         else
             0;
         try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int });
@@ -11424,7 +11424,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
         const pl_off = if (layout.tag_align < layout.payload_align)
             0
         else
-            @intCast(i32, layout.tag_size);
+            @as(i32, @intCast(layout.tag_size));
         try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv);
 
         break :result dst_mcv;
@@ -11454,7 +11454,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
     var order = [1]u2{0} ** 3;
     var unused = std.StaticBitSet(3).initFull();
     for (ops, &mcvs, &locks, 0..) |op, *mcv, *lock, op_i| {
-        const op_index = @intCast(u2, op_i);
+        const op_index = @as(u2, @intCast(op_i));
         mcv.* = try self.resolveInst(op);
         if (unused.isSet(0) and mcv.isRegister() and self.reuseOperand(inst, op, op_index, mcv.*)) {
             order[op_index] = 1;
@@ -11470,7 +11470,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
     }
     for (&order, &mcvs, &locks) |*mop_index, *mcv, *lock| {
         if (mop_index.* != 0) continue;
-        mop_index.* = 1 + @intCast(u2, unused.toggleFirstSet().?);
+        mop_index.* = 1 + @as(u2, @intCast(unused.toggleFirstSet().?));
         if (mop_index.* > 1 and mcv.isRegister()) continue;
         const reg = try self.copyToTmpRegister(ty, mcv.*);
         mcv.* = .{ .register = reg };
@@ -11570,7 +11570,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
     var mops: [3]MCValue = undefined;
     for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv;
 
-    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const abi_size = @as(u32, @intCast(ty.abiSize(mod)));
     const mop1_reg = registerAlias(mops[0].getReg().?, abi_size);
     const mop2_reg = registerAlias(mops[1].getReg().?, abi_size);
     if (mops[2].isRegister()) try self.asmRegisterRegisterRegister(
@@ -11723,7 +11723,7 @@ fn resolveCallingConventionValues(
             switch (self.target.os.tag) {
                 .windows => {
                     // Align the stack to 16 bytes before allocating shadow stack space (if any).
-                    result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod));
+                    result.stack_byte_count += @as(u31, @intCast(4 * Type.usize.abiSize(mod)));
                 },
                 else => {},
             }
@@ -11746,7 +11746,7 @@ fn resolveCallingConventionValues(
                 result.return_value = switch (classes[0]) {
                     .integer => InstTracking.init(.{ .register = registerAlias(
                         ret_reg,
-                        @intCast(u32, ret_ty.abiSize(mod)),
+                        @as(u32, @intCast(ret_ty.abiSize(mod))),
                     ) }),
                     .float, .sse => InstTracking.init(.{ .register = .xmm0 }),
                     .memory => ret: {
@@ -11782,17 +11782,17 @@ fn resolveCallingConventionValues(
                     },
                     .float, .sse => switch (self.target.os.tag) {
                         .windows => if (param_reg_i < 4) {
-                            arg.* = .{ .register = @enumFromInt(
+                            arg.* = .{ .register = @as(
                                 Register,
-                                @intFromEnum(Register.xmm0) + param_reg_i,
+                                @enumFromInt(@intFromEnum(Register.xmm0) + param_reg_i),
                             ) };
                             param_reg_i += 1;
                             continue;
                         },
                         else => if (param_sse_reg_i < 8) {
-                            arg.* = .{ .register = @enumFromInt(
+                            arg.* = .{ .register = @as(
                                 Register,
-                                @intFromEnum(Register.xmm0) + param_sse_reg_i,
+                                @enumFromInt(@intFromEnum(Register.xmm0) + param_sse_reg_i),
                             ) };
                             param_sse_reg_i += 1;
                             continue;
@@ -11804,8 +11804,8 @@ fn resolveCallingConventionValues(
                     }),
                 }
 
-                const param_size = @intCast(u31, ty.abiSize(mod));
-                const param_align = @intCast(u31, ty.abiAlignment(mod));
+                const param_size = @as(u31, @intCast(ty.abiSize(mod)));
+                const param_align = @as(u31, @intCast(ty.abiAlignment(mod)));
                 result.stack_byte_count =
                     mem.alignForward(u31, result.stack_byte_count, param_align);
                 arg.* = .{ .load_frame = .{
@@ -11825,7 +11825,7 @@ fn resolveCallingConventionValues(
                 result.return_value = InstTracking.init(.none);
             } else {
                 const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0];
-                const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod));
+                const ret_ty_size = @as(u31, @intCast(ret_ty.abiSize(mod)));
                 if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) {
                     const aliased_reg = registerAlias(ret_reg, ret_ty_size);
                     result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none };
@@ -11844,8 +11844,8 @@ fn resolveCallingConventionValues(
                     arg.* = .none;
                     continue;
                 }
-                const param_size = @intCast(u31, ty.abiSize(mod));
-                const param_align = @intCast(u31, ty.abiAlignment(mod));
+                const param_size = @as(u31, @intCast(ty.abiSize(mod)));
+                const param_align = @as(u31, @intCast(ty.abiAlignment(mod)));
                 result.stack_byte_count =
                     mem.alignForward(u31, result.stack_byte_count, param_align);
                 arg.* = .{ .load_frame = .{
@@ -11932,12 +11932,12 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
     const mod = self.bin_file.options.module.?;
     const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
         .signedness = .unsigned,
-        .bits = @intCast(u16, ty.bitSize(mod)),
+        .bits = @as(u16, @intCast(ty.bitSize(mod))),
     };
     const max_reg_bit_width = Register.rax.bitSize();
     switch (int_info.signedness) {
         .signed => {
-            const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
+            const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits));
             try self.genShiftBinOpMir(
                 .{ ._l, .sa },
                 Type.isize,
@@ -11952,7 +11952,7 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
             );
         },
         .unsigned => {
-            const shift = @intCast(u6, max_reg_bit_width - int_info.bits);
+            const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits));
             const mask = (~@as(u64, 0)) >> shift;
             if (int_info.bits <= 32) {
                 try self.genBinOpMir(
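
Each hunk above is the same mechanical rewrite: the cast builtins now take a single operand and infer their result type from context, so the old explicit result type is preserved by wrapping the call in @as. A minimal standalone sketch of the pattern (not part of the commit; names and values are illustrative):

    const std = @import("std");

    test "cast rewrite pattern" {
        const abi_size: u64 = 300;
        // Old two-argument form: @intCast(u32, abi_size)
        // New form: @intCast infers its result type, and @as pins it down.
        const narrow = @as(u32, @intCast(abi_size));
        try std.testing.expectEqual(@as(u32, 300), narrow);
    }
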
src/arch/x86_64/Emit.zig
@@ -19,18 +19,18 @@ pub const Error = Lower.Error || error{
 
 pub fn emitMir(emit: *Emit) Error!void {
     for (0..emit.lower.mir.instructions.len) |mir_i| {
-        const mir_index = @intCast(Mir.Inst.Index, mir_i);
+        const mir_index = @as(Mir.Inst.Index, @intCast(mir_i));
         try emit.code_offset_mapping.putNoClobber(
             emit.lower.allocator,
             mir_index,
-            @intCast(u32, emit.code.items.len),
+            @as(u32, @intCast(emit.code.items.len)),
         );
         const lowered = try emit.lower.lowerMir(mir_index);
         var lowered_relocs = lowered.relocs;
         for (lowered.insts, 0..) |lowered_inst, lowered_index| {
-            const start_offset = @intCast(u32, emit.code.items.len);
+            const start_offset = @as(u32, @intCast(emit.code.items.len));
             try lowered_inst.encode(emit.code.writer(), .{});
-            const end_offset = @intCast(u32, emit.code.items.len);
+            const end_offset = @as(u32, @intCast(emit.code.items.len));
             while (lowered_relocs.len > 0 and
                 lowered_relocs[0].lowered_inst_index == lowered_index) : ({
                 lowered_relocs = lowered_relocs[1..];
@@ -39,7 +39,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     .source = start_offset,
                     .target = target,
                     .offset = end_offset - 4,
-                    .length = @intCast(u5, end_offset - start_offset),
+                    .length = @as(u5, @intCast(end_offset - start_offset)),
                 }),
                 .linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
                     // Add relocation to the decl.
@@ -89,7 +89,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                             else => unreachable,
                         },
                         .target = .{ .sym_index = symbol.sym_index, .file = null },
-                        .offset = @intCast(u32, end_offset - 4),
+                        .offset = @as(u32, @intCast(end_offset - 4)),
                         .addend = 0,
                         .pcrel = true,
                         .length = 2,
@@ -113,7 +113,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                             .linker_import => coff_file.getGlobalByIndex(symbol.sym_index),
                             else => unreachable,
                         },
-                        .offset = @intCast(u32, end_offset - 4),
+                        .offset = @as(u32, @intCast(end_offset - 4)),
                         .addend = 0,
                         .pcrel = true,
                         .length = 2,
@@ -122,7 +122,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     const atom_index = symbol.atom_index;
                     try p9_file.addReloc(atom_index, .{ // TODO we may need to add a .type field to the relocs if they are .linker_got instead of just .linker_direct
                         .target = symbol.sym_index, // we set sym_index to just be the atom index
-                        .offset = @intCast(u32, end_offset - 4),
+                        .offset = @as(u32, @intCast(end_offset - 4)),
                         .addend = 0,
                         .pcrel = true,
                     });
@@ -209,13 +209,13 @@ fn fixupRelocs(emit: *Emit) Error!void {
     for (emit.relocs.items) |reloc| {
         const target = emit.code_offset_mapping.get(reloc.target) orelse
             return emit.fail("JMP/CALL relocation target not found!", .{});
-        const disp = @intCast(i32, @intCast(i64, target) - @intCast(i64, reloc.source + reloc.length));
+        const disp = @as(i32, @intCast(@as(i64, @intCast(target)) - @as(i64, @intCast(reloc.source + reloc.length))));
         mem.writeIntLittle(i32, emit.code.items[reloc.offset..][0..4], disp);
     }
 }
 
 fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
-    const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
+    const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
     const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
     log.debug("  (advance pc={d} and line={d})", .{ delta_line, delta_pc });
     switch (emit.debug_output) {
@@ -233,22 +233,22 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
             // increasing the line number
             try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
             // increasing the pc
-            const d_pc_p9 = @intCast(i64, delta_pc) - quant;
+            const d_pc_p9 = @as(i64, @intCast(delta_pc)) - quant;
             if (d_pc_p9 > 0) {
                 // minus one because if it's the last one, we want to leave space to change the line, which is one quantum
                 var diff = @divExact(d_pc_p9, quant) - quant;
                 while (diff > 0) {
                     if (diff < 64) {
-                        try dbg_out.dbg_line.append(@intCast(u8, diff + 128));
+                        try dbg_out.dbg_line.append(@as(u8, @intCast(diff + 128)));
                         diff = 0;
                     } else {
-                        try dbg_out.dbg_line.append(@intCast(u8, 64 + 128));
+                        try dbg_out.dbg_line.append(@as(u8, @intCast(64 + 128)));
                         diff -= 64;
                     }
                 }
                 if (dbg_out.pcop_change_index.*) |pci|
                     dbg_out.dbg_line.items[pci] += 1;
-                dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1);
+                dbg_out.pcop_change_index.* = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
             } else if (d_pc_p9 == 0) {
                 // we don't need to do anything, because adding the quant does it for us
             } else unreachable;
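
The fixupRelocs hunk above shows how nested casts migrate: each inner builtin gets its own @as wrapper, composed inside-out. A standalone sketch of the displacement computation with made-up offsets (not the real relocation data):

    const std = @import("std");

    test "nested cast rewrite" {
        const target: u32 = 0x1000;
        const source: u32 = 0x1100;
        // Old: @intCast(i32, @intCast(i64, target) - @intCast(i64, source))
        const disp = @as(i32, @intCast(@as(i64, @intCast(target)) -
            @as(i64, @intCast(source))));
        try std.testing.expectEqual(@as(i32, -0x100), disp);
    }
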
src/arch/x86_64/encoder.zig
@@ -471,7 +471,7 @@ pub const Instruction = struct {
                             } else {
                                 try encoder.sib_baseDisp8(dst);
                             }
-                            try encoder.disp8(@truncate(i8, sib.disp));
+                            try encoder.disp8(@as(i8, @truncate(sib.disp)));
                         } else {
                             try encoder.modRm_SIBDisp32(src);
                             if (mem.scaleIndex()) |si| {
@@ -487,7 +487,7 @@ pub const Instruction = struct {
                             try encoder.modRm_indirectDisp0(src, dst);
                         } else if (math.cast(i8, sib.disp)) |_| {
                             try encoder.modRm_indirectDisp8(src, dst);
-                            try encoder.disp8(@truncate(i8, sib.disp));
+                            try encoder.disp8(@as(i8, @truncate(sib.disp)));
                         } else {
                             try encoder.modRm_indirectDisp32(src, dst);
                             try encoder.disp32(sib.disp);
@@ -509,9 +509,9 @@ pub const Instruction = struct {
     fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void {
         const raw = imm.asUnsigned(kind.immBitSize());
         switch (kind.immBitSize()) {
-            8 => try encoder.imm8(@intCast(u8, raw)),
-            16 => try encoder.imm16(@intCast(u16, raw)),
-            32 => try encoder.imm32(@intCast(u32, raw)),
+            8 => try encoder.imm8(@as(u8, @intCast(raw))),
+            16 => try encoder.imm16(@as(u16, @intCast(raw))),
+            32 => try encoder.imm32(@as(u32, @intCast(raw))),
             64 => try encoder.imm64(raw),
             else => unreachable,
         }
@@ -581,7 +581,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
 
         /// Encodes legacy prefixes
         pub fn legacyPrefixes(self: Self, prefixes: LegacyPrefixes) !void {
-            if (@bitCast(u16, prefixes) != 0) {
+            if (@as(u16, @bitCast(prefixes)) != 0) {
                 // Hopefully this path isn't taken very often, so we'll do it the slow way for now
 
                 // LOCK
@@ -891,7 +891,7 @@ fn Encoder(comptime T: type, comptime opts: Options) type {
         ///
         /// It is sign-extended to 64 bits by the cpu.
         pub fn disp8(self: Self, disp: i8) !void {
-            try self.writer.writeByte(@bitCast(u8, disp));
+            try self.writer.writeByte(@as(u8, @bitCast(disp)));
         }
 
         /// Encode a 32 bit displacement
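
The encoder.zig hunks lean on two casts: @bitCast to read a packed struct through its backing integer, and @truncate to drop high bits of a displacement. A minimal sketch; Prefixes is a hypothetical stand-in for the real LegacyPrefixes layout:

    const std = @import("std");

    const Prefixes = packed struct(u16) {
        lock: bool = false,
        _padding: u15 = 0,
    };

    test "bitCast and truncate rewrites" {
        const prefixes = Prefixes{ .lock = true };
        // Old: @bitCast(u16, prefixes) != 0
        try std.testing.expect(@as(u16, @bitCast(prefixes)) != 0);

        const disp: i32 = -4; // fits in 8 bits, so truncation is lossless
        // Old: @truncate(i8, disp)
        try std.testing.expectEqual(@as(i8, -4), @as(i8, @truncate(disp)));
    }
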
src/arch/x86_64/Encoding.zig
@@ -85,7 +85,7 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct {
     rex: Rex,
 }, modrm_ext: ?u3) ?Encoding {
     for (mnemonic_to_encodings_map, 0..) |encs, mnemonic_int| for (encs) |data| {
-        const enc = Encoding{ .mnemonic = @enumFromInt(Mnemonic, mnemonic_int), .data = data };
+        const enc = Encoding{ .mnemonic = @as(Mnemonic, @enumFromInt(mnemonic_int)), .data = data };
         if (modrm_ext) |ext| if (ext != data.modrm_ext) continue;
         if (!std.mem.eql(u8, opc, enc.opcode())) continue;
         if (prefixes.rex.w) {
@@ -763,7 +763,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
 
     var cwriter = std.io.countingWriter(std.io.null_writer);
     inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM.
-    return @intCast(usize, cwriter.bytes_written);
+    return @as(usize, @intCast(cwriter.bytes_written));
 }
 
 const mnemonic_to_encodings_map = init: {
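
@enumFromInt follows the same pattern: the destination enum is no longer passed as an argument, so the automated rewrite wraps the call in @as. A sketch under assumed names (this Mnemonic is a stand-in, not the real x86_64 table):

    const Mnemonic = enum(u32) { mov, add, sub };

    fn mnemonicFromInt(i: u32) Mnemonic {
        // Exhaustive enum: an out-of-range `i` is safety-checked and
        // panics in safe build modes.
        return @as(Mnemonic, @enumFromInt(i));
    }
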
src/arch/x86_64/Lower.zig
@@ -188,7 +188,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
             .pseudo_probe_align_ri_s => {
                 try lower.emit(.none, .@"test", &.{
                     .{ .reg = inst.data.ri.r1 },
-                    .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) },
+                    .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
                 });
                 try lower.emit(.none, .jz, &.{
                     .{ .imm = lower.reloc(.{ .inst = index + 1 }) },
@@ -213,7 +213,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
             },
             .pseudo_probe_adjust_unrolled_ri_s => {
                 var offset = page_size;
-                while (offset < @bitCast(i32, inst.data.ri.i)) : (offset += page_size) {
+                while (offset < @as(i32, @bitCast(inst.data.ri.i))) : (offset += page_size) {
                     try lower.emit(.none, .@"test", &.{
                         .{ .mem = Memory.sib(.dword, .{
                             .base = .{ .reg = inst.data.ri.r1 },
@@ -224,14 +224,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
                 }
                 try lower.emit(.none, .sub, &.{
                     .{ .reg = inst.data.ri.r1 },
-                    .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) },
+                    .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
                 });
                 assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts);
             },
             .pseudo_probe_adjust_setup_rri_s => {
                 try lower.emit(.none, .mov, &.{
                     .{ .reg = inst.data.rri.r2.to32() },
-                    .{ .imm = Immediate.s(@bitCast(i32, inst.data.rri.i)) },
+                    .{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.rri.i))) },
                 });
                 try lower.emit(.none, .sub, &.{
                     .{ .reg = inst.data.rri.r1 },
@@ -289,7 +289,7 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
         .i_s,
         .mi_sib_s,
         .mi_rip_s,
-        => Immediate.s(@bitCast(i32, i)),
+        => Immediate.s(@as(i32, @bitCast(i))),
 
         .rrri,
         .rri_u,
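
The immediates above keep a signed 32-bit value in a u32 field; recovering it is a bit reinterpretation rather than a range-checked conversion, hence @bitCast and not @intCast. A sketch with a hypothetical parameter:

    // `raw` holds the two's-complement bits of a signed immediate.
    fn signedImm(raw: u32) i32 {
        return @as(i32, @bitCast(raw)); // 0xFFFF_FFFF becomes -1
    }
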
src/arch/x86_64/Mir.zig
@@ -989,7 +989,7 @@ pub const RegisterList = struct {
 
     fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
         for (registers, 0..) |cpreg, i| {
-            if (reg.id() == cpreg.id()) return @intCast(u32, i);
+            if (reg.id() == cpreg.id()) return @as(u32, @intCast(i));
         }
         unreachable; // register not in input register list!
     }
@@ -1009,7 +1009,7 @@ pub const RegisterList = struct {
     }
 
     pub fn count(self: Self) u32 {
-        return @intCast(u32, self.bitset.count());
+        return @as(u32, @intCast(self.bitset.count()));
     }
 };
 
@@ -1023,15 +1023,15 @@ pub const Imm64 = struct {
 
     pub fn encode(v: u64) Imm64 {
         return .{
-            .msb = @truncate(u32, v >> 32),
-            .lsb = @truncate(u32, v),
+            .msb = @as(u32, @truncate(v >> 32)),
+            .lsb = @as(u32, @truncate(v)),
         };
     }
 
     pub fn decode(imm: Imm64) u64 {
         var res: u64 = 0;
-        res |= (@intCast(u64, imm.msb) << 32);
-        res |= @intCast(u64, imm.lsb);
+        res |= (@as(u64, @intCast(imm.msb)) << 32);
+        res |= @as(u64, @intCast(imm.lsb));
         return res;
     }
 };
@@ -1070,18 +1070,18 @@ pub const MemorySib = struct {
     }
 
     pub fn decode(msib: MemorySib) Memory {
-        const scale = @truncate(u4, msib.scale_index);
+        const scale = @as(u4, @truncate(msib.scale_index));
         assert(scale == 0 or std.math.isPowerOfTwo(scale));
         return .{ .sib = .{
-            .ptr_size = @enumFromInt(Memory.PtrSize, msib.ptr_size),
-            .base = switch (@enumFromInt(Memory.Base.Tag, msib.base_tag)) {
+            .ptr_size = @as(Memory.PtrSize, @enumFromInt(msib.ptr_size)),
+            .base = switch (@as(Memory.Base.Tag, @enumFromInt(msib.base_tag))) {
                 .none => .none,
-                .reg => .{ .reg = @enumFromInt(Register, msib.base) },
-                .frame => .{ .frame = @enumFromInt(bits.FrameIndex, msib.base) },
+                .reg => .{ .reg = @as(Register, @enumFromInt(msib.base)) },
+                .frame => .{ .frame = @as(bits.FrameIndex, @enumFromInt(msib.base)) },
             },
             .scale_index = .{
                 .scale = scale,
-                .index = if (scale > 0) @enumFromInt(Register, msib.scale_index >> 4) else undefined,
+                .index = if (scale > 0) @as(Register, @enumFromInt(msib.scale_index >> 4)) else undefined,
             },
             .disp = msib.disp,
         } };
@@ -1103,7 +1103,7 @@ pub const MemoryRip = struct {
 
     pub fn decode(mrip: MemoryRip) Memory {
         return .{ .rip = .{
-            .ptr_size = @enumFromInt(Memory.PtrSize, mrip.ptr_size),
+            .ptr_size = @as(Memory.PtrSize, @enumFromInt(mrip.ptr_size)),
             .disp = mrip.disp,
         } };
     }
@@ -1120,14 +1120,14 @@ pub const MemoryMoffs = struct {
     pub fn encode(seg: Register, offset: u64) MemoryMoffs {
         return .{
             .seg = @intFromEnum(seg),
-            .msb = @truncate(u32, offset >> 32),
-            .lsb = @truncate(u32, offset >> 0),
+            .msb = @as(u32, @truncate(offset >> 32)),
+            .lsb = @as(u32, @truncate(offset >> 0)),
         };
     }
 
     pub fn decode(moffs: MemoryMoffs) Memory {
         return .{ .moffs = .{
-            .seg = @enumFromInt(Register, moffs.seg),
+            .seg = @as(Register, @enumFromInt(moffs.seg)),
             .offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0,
         } };
     }
@@ -1147,7 +1147,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end:
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => mir.extra[i],
-            i32 => @bitCast(i32, mir.extra[i]),
+            i32 => @as(i32, @bitCast(mir.extra[i])),
             else => @compileError("bad field type"),
         };
         i += 1;
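
Imm64.encode/decode above is the halves pattern: @truncate deliberately discards the high bits when splitting, while widening back needs only @as, because u32 coerces losslessly to u64. A round-trip sketch with hypothetical names:

    fn split(v: u64) struct { msb: u32, lsb: u32 } {
        return .{
            .msb = @as(u32, @truncate(v >> 32)),
            .lsb = @as(u32, @truncate(v)),
        };
    }

    fn join(msb: u32, lsb: u32) u64 {
        return @as(u64, msb) << 32 | @as(u64, lsb);
    }
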
src/codegen/c/type.zig
@@ -138,7 +138,7 @@ pub const CType = extern union {
 
         pub fn toIndex(self: Tag) Index {
             assert(!self.hasPayload());
-            return @intCast(Index, @intFromEnum(self));
+            return @as(Index, @intCast(@intFromEnum(self)));
         }
 
         pub fn Type(comptime self: Tag) type {
@@ -330,7 +330,7 @@ pub const CType = extern union {
                 store: *const Set,
 
                 pub fn hash(self: @This(), cty: CType) Map.Hash {
-                    return @truncate(Map.Hash, cty.hash(self.store.*));
+                    return @as(Map.Hash, @truncate(cty.hash(self.store.*)));
                 }
                 pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool {
                     return lhs.eql(rhs);
@@ -340,7 +340,7 @@ pub const CType = extern union {
             map: Map = .{},
 
             pub fn indexToCType(self: Set, index: Index) CType {
-                if (index < Tag.no_payload_count) return initTag(@enumFromInt(Tag, index));
+                if (index < Tag.no_payload_count) return initTag(@as(Tag, @enumFromInt(index)));
                 return self.map.keys()[index - Tag.no_payload_count];
             }
 
@@ -362,7 +362,7 @@ pub const CType = extern union {
                 return if (self.map.getIndexAdapted(
                     ty,
                     TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert },
-                )) |idx| @intCast(Index, Tag.no_payload_count + idx) else null;
+                )) |idx| @as(Index, @intCast(Tag.no_payload_count + idx)) else null;
             }
         };
 
@@ -376,7 +376,7 @@ pub const CType = extern union {
 
             pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index {
                 const t = cty.tag();
-                if (@intFromEnum(t) < Tag.no_payload_count) return @intCast(Index, @intFromEnum(t));
+                if (@intFromEnum(t) < Tag.no_payload_count) return @as(Index, @intCast(@intFromEnum(t)));
 
                 const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set });
                 if (!gop.found_existing) gop.key_ptr.* = cty;
@@ -386,7 +386,7 @@ pub const CType = extern union {
                     assert(cty.eql(key.*));
                     assert(cty.hash(self.set) == key.hash(self.set));
                 }
-                return @intCast(Index, Tag.no_payload_count + gop.index);
+                return @as(Index, @intCast(Tag.no_payload_count + gop.index));
             }
 
             pub fn typeToIndex(
@@ -424,7 +424,7 @@ pub const CType = extern union {
                     assert(adapter.eql(ty, cty.*));
                     assert(adapter.hash(ty) == cty.hash(self.set));
                 }
-                return @intCast(Index, Tag.no_payload_count + gop.index);
+                return @as(Index, @intCast(Tag.no_payload_count + gop.index));
             }
         };
 
@@ -1388,7 +1388,7 @@ pub const CType = extern union {
                                 .len = @divExact(abi_size, abi_align),
                                 .elem_type = tagFromIntInfo(.{
                                     .signedness = .unsigned,
-                                    .bits = @intCast(u16, abi_align * 8),
+                                    .bits = @as(u16, @intCast(abi_align * 8)),
                                 }).toIndex(),
                             } } };
                             self.value = .{ .cty = initPayload(&self.storage.seq) };
@@ -1492,7 +1492,7 @@ pub const CType = extern union {
                     if (mod.typeToStruct(ty)) |struct_obj| {
                         try self.initType(struct_obj.backing_int_ty, kind, lookup);
                     } else {
-                        const bits = @intCast(u16, ty.bitSize(mod));
+                        const bits = @as(u16, @intCast(ty.bitSize(mod)));
                         const int_ty = try mod.intType(.unsigned, bits);
                         try self.initType(int_ty, kind, lookup);
                     }
@@ -2299,7 +2299,7 @@ pub const CType = extern union {
         }
 
         pub fn hash(self: @This(), ty: Type) u32 {
-            return @truncate(u32, self.to64().hash(ty));
+            return @as(u32, @truncate(self.to64().hash(ty)));
         }
     };
 };
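
The 32-bit hash adapters in this file all narrow a 64-bit Wyhash result the same way; @truncate keeps the low bits, which is what a 32-bit hash-map context wants. A self-contained sketch:

    const std = @import("std");

    fn hash32(bytes: []const u8) u32 {
        const h = std.hash.Wyhash.hash(0, bytes);
        return @as(u32, @truncate(h)); // keep the low 32 bits
    }
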
src/codegen/llvm/bindings.zig
@@ -8,7 +8,7 @@ pub const Bool = enum(c_int) {
     _,
 
     pub fn fromBool(b: bool) Bool {
-        return @enumFromInt(Bool, @intFromBool(b));
+        return @as(Bool, @enumFromInt(@intFromBool(b)));
     }
 
     pub fn toBool(b: Bool) bool {
src/codegen/spirv/Assembler.zig
@@ -293,7 +293,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
                     return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits});
                 },
             }
-            break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(u16, bits) } });
+            break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @as(u16, @intCast(bits)) } });
         },
         .OpTypeVector => try self.spv.resolve(.{ .vector_type = .{
             .component_type = try self.resolveTypeRef(operands[1].ref_id),
@@ -306,7 +306,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
         },
         .OpTypePointer => try self.spv.ptrType(
             try self.resolveTypeRef(operands[2].ref_id),
-            @enumFromInt(spec.StorageClass, operands[1].value),
+            @as(spec.StorageClass, @enumFromInt(operands[1].value)),
         ),
         .OpTypeFunction => blk: {
             const param_operands = operands[2..];
@@ -340,7 +340,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
         else => switch (self.inst.opcode) {
             .OpEntryPoint => unreachable,
             .OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes,
-            .OpVariable => switch (@enumFromInt(spec.StorageClass, operands[2].value)) {
+            .OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) {
                 .Function => &self.func.prologue,
                 else => {
                     // This is currently disabled because global variables are required to be
@@ -391,7 +391,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
     }
 
     const actual_word_count = section.instructions.items.len - first_word;
-    section.instructions.items[first_word] |= @as(u32, @intCast(u16, actual_word_count)) << 16 | @intFromEnum(self.inst.opcode);
+    section.instructions.items[first_word] |= @as(u32, @as(u16, @intCast(actual_word_count))) << 16 | @intFromEnum(self.inst.opcode);
 
     if (maybe_result_id) |result| {
         return AsmValue{ .value = result };
@@ -458,7 +458,7 @@ fn parseInstruction(self: *Assembler) !void {
         if (!entry.found_existing) {
             entry.value_ptr.* = .just_declared;
         }
-        break :blk @intCast(AsmValue.Ref, entry.index);
+        break :blk @as(AsmValue.Ref, @intCast(entry.index));
     } else null;
 
     const opcode_tok = self.currentToken();
@@ -613,7 +613,7 @@ fn parseRefId(self: *Assembler) !void {
         entry.value_ptr.* = .unresolved_forward_reference;
     }
 
-    const index = @intCast(AsmValue.Ref, entry.index);
+    const index = @as(AsmValue.Ref, @intCast(entry.index));
     try self.inst.operands.append(self.gpa, .{ .ref_id = index });
 }
 
@@ -645,7 +645,7 @@ fn parseString(self: *Assembler) !void {
     else
         text[1..];
 
-    const string_offset = @intCast(u32, self.inst.string_bytes.items.len);
+    const string_offset = @as(u32, @intCast(self.inst.string_bytes.items.len));
     try self.inst.string_bytes.ensureUnusedCapacity(self.gpa, literal.len + 1);
     self.inst.string_bytes.appendSliceAssumeCapacity(literal);
     self.inst.string_bytes.appendAssumeCapacity(0);
@@ -693,18 +693,18 @@ fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness
         const int = std.fmt.parseInt(i128, text, 0) catch break :invalid;
         const min = switch (signedness) {
             .unsigned => 0,
-            .signed => -(@as(i128, 1) << (@intCast(u7, width) - 1)),
+            .signed => -(@as(i128, 1) << (@as(u7, @intCast(width)) - 1)),
         };
-        const max = (@as(i128, 1) << (@intCast(u7, width) - @intFromBool(signedness == .signed))) - 1;
+        const max = (@as(i128, 1) << (@as(u7, @intCast(width)) - @intFromBool(signedness == .signed))) - 1;
         if (int < min or int > max) {
             break :invalid;
         }
 
         // Note, we store the sign-extended version here.
         if (width <= @bitSizeOf(spec.Word)) {
-            try self.inst.operands.append(self.gpa, .{ .literal32 = @truncate(u32, @bitCast(u128, int)) });
+            try self.inst.operands.append(self.gpa, .{ .literal32 = @as(u32, @truncate(@as(u128, @bitCast(int)))) });
         } else {
-            try self.inst.operands.append(self.gpa, .{ .literal64 = @truncate(u64, @bitCast(u128, int)) });
+            try self.inst.operands.append(self.gpa, .{ .literal64 = @as(u64, @truncate(@as(u128, @bitCast(int)))) });
         }
         return;
     }
@@ -725,7 +725,7 @@ fn parseContextDependentFloat(self: *Assembler, comptime width: u16) !void {
         return self.fail(tok.start, "'{s}' is not a valid {}-bit float literal", .{ text, width });
     };
 
-    const float_bits = @bitCast(Int, value);
+    const float_bits = @as(Int, @bitCast(value));
     if (width <= @bitSizeOf(spec.Word)) {
         try self.inst.operands.append(self.gpa, .{ .literal32 = float_bits });
     } else {
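
parseContextDependentInt above narrows `width` before shifting because the shift amount for an i128 must be a u7 (Log2Int of a 128-bit integer). A sketch of the signed-range computation, assuming width <= 64 as for SPIR-V literals:

    fn signedRange(width: u16) struct { min: i128, max: i128 } {
        const w = @as(u7, @intCast(width - 1)); // fits: width <= 64
        return .{
            .min = -(@as(i128, 1) << w),
            .max = (@as(i128, 1) << w) - 1,
        };
    }
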
src/codegen/spirv/Cache.zig
@@ -158,16 +158,16 @@ const Tag = enum {
         high: u32,
 
         fn encode(value: f64) Float64 {
-            const bits = @bitCast(u64, value);
+            const bits = @as(u64, @bitCast(value));
             return .{
-                .low = @truncate(u32, bits),
-                .high = @truncate(u32, bits >> 32),
+                .low = @as(u32, @truncate(bits)),
+                .high = @as(u32, @truncate(bits >> 32)),
             };
         }
 
         fn decode(self: Float64) f64 {
             const bits = @as(u64, self.low) | (@as(u64, self.high) << 32);
-            return @bitCast(f64, bits);
+            return @as(f64, @bitCast(bits));
         }
     };
 
@@ -189,8 +189,8 @@ const Tag = enum {
         fn encode(ty: Ref, value: u64) Int64 {
             return .{
                 .ty = ty,
-                .low = @truncate(u32, value),
-                .high = @truncate(u32, value >> 32),
+                .low = @as(u32, @truncate(value)),
+                .high = @as(u32, @truncate(value >> 32)),
             };
         }
 
@@ -207,13 +207,13 @@ const Tag = enum {
         fn encode(ty: Ref, value: i64) Int64 {
             return .{
                 .ty = ty,
-                .low = @truncate(u32, @bitCast(u64, value)),
-                .high = @truncate(u32, @bitCast(u64, value) >> 32),
+                .low = @as(u32, @truncate(@as(u64, @bitCast(value)))),
+                .high = @as(u32, @truncate(@as(u64, @bitCast(value)) >> 32)),
             };
         }
 
         fn decode(self: Int64) i64 {
-            return @bitCast(i64, @as(u64, self.low) | (@as(u64, self.high) << 32));
+            return @as(i64, @bitCast(@as(u64, self.low) | (@as(u64, self.high) << 32)));
         }
     };
 };
@@ -305,21 +305,21 @@ pub const Key = union(enum) {
         /// Turns this value into the corresponding 32-bit literal, 2s complement signed.
         fn toBits32(self: Int) u32 {
             return switch (self.value) {
-                .uint64 => |val| @intCast(u32, val),
-                .int64 => |val| if (val < 0) @bitCast(u32, @intCast(i32, val)) else @intCast(u32, val),
+                .uint64 => |val| @as(u32, @intCast(val)),
+                .int64 => |val| if (val < 0) @as(u32, @bitCast(@as(i32, @intCast(val)))) else @as(u32, @intCast(val)),
             };
         }
 
         fn toBits64(self: Int) u64 {
             return switch (self.value) {
                 .uint64 => |val| val,
-                .int64 => |val| @bitCast(u64, val),
+                .int64 => |val| @as(u64, @bitCast(val)),
             };
         }
 
         fn to(self: Int, comptime T: type) T {
             return switch (self.value) {
-                inline else => |val| @intCast(T, val),
+                inline else => |val| @as(T, @intCast(val)),
             };
         }
     };
@@ -357,9 +357,9 @@ pub const Key = union(enum) {
             .float => |float| {
                 std.hash.autoHash(&hasher, float.ty);
                 switch (float.value) {
-                    .float16 => |value| std.hash.autoHash(&hasher, @bitCast(u16, value)),
-                    .float32 => |value| std.hash.autoHash(&hasher, @bitCast(u32, value)),
-                    .float64 => |value| std.hash.autoHash(&hasher, @bitCast(u64, value)),
+                    .float16 => |value| std.hash.autoHash(&hasher, @as(u16, @bitCast(value))),
+                    .float32 => |value| std.hash.autoHash(&hasher, @as(u32, @bitCast(value))),
+                    .float64 => |value| std.hash.autoHash(&hasher, @as(u64, @bitCast(value))),
                 }
             },
             .function_type => |func| {
@@ -379,7 +379,7 @@ pub const Key = union(enum) {
             },
             inline else => |key| std.hash.autoHash(&hasher, key),
         }
-        return @truncate(u32, hasher.final());
+        return @as(u32, @truncate(hasher.final()));
     }
 
     fn eql(a: Key, b: Key) bool {
@@ -411,7 +411,7 @@ pub const Key = union(enum) {
 
         pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool {
             _ = b_void;
-            return ctx.self.lookup(@enumFromInt(Ref, b_index)).eql(a);
+            return ctx.self.lookup(@as(Ref, @enumFromInt(b_index))).eql(a);
         }
 
         pub fn hash(ctx: @This(), a: Key) u32 {
@@ -445,7 +445,7 @@ pub fn materialize(self: *const Self, spv: *Module) !Section {
     var section = Section{};
     errdefer section.deinit(spv.gpa);
     for (self.items.items(.result_id), 0..) |result_id, index| {
-        try self.emit(spv, result_id, @enumFromInt(Ref, index), &section);
+        try self.emit(spv, result_id, @as(Ref, @enumFromInt(index)), &section);
     }
     return section;
 }
@@ -534,7 +534,7 @@ fn emit(
             }
             for (struct_type.memberNames(), 0..) |member_name, i| {
                 if (self.getString(member_name)) |name| {
-                    try spv.memberDebugName(result_id, @intCast(u32, i), "{s}", .{name});
+                    try spv.memberDebugName(result_id, @as(u32, @intCast(i)), "{s}", .{name});
                 }
             }
             // TODO: Decorations?
@@ -557,7 +557,7 @@ fn emit(
         .float => |float| {
             const ty_id = self.resultId(float.ty);
             const lit: Lit = switch (float.value) {
-                .float16 => |value| .{ .uint32 = @bitCast(u16, value) },
+                .float16 => |value| .{ .uint32 = @as(u16, @bitCast(value)) },
                 .float32 => |value| .{ .float32 = value },
                 .float64 => |value| .{ .float64 = value },
             };
@@ -603,7 +603,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
     const adapter: Key.Adapter = .{ .self = self };
     const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter);
     if (entry.found_existing) {
-        return @enumFromInt(Ref, entry.index);
+        return @as(Ref, @enumFromInt(entry.index));
     }
     const result_id = spv.allocId();
     const item: Item = switch (key) {
@@ -640,10 +640,10 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
         },
         .function_type => |function| blk: {
             const extra = try self.addExtra(spv, Tag.FunctionType{
-                .param_len = @intCast(u32, function.parameters.len),
+                .param_len = @as(u32, @intCast(function.parameters.len)),
                 .return_type = function.return_type,
             });
-            try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, function.parameters));
+            try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(function.parameters)));
             break :blk .{
                 .tag = .type_function,
                 .result_id = result_id,
@@ -678,12 +678,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
         .struct_type => |struct_type| blk: {
             const extra = try self.addExtra(spv, Tag.SimpleStructType{
                 .name = struct_type.name,
-                .members_len = @intCast(u32, struct_type.member_types.len),
+                .members_len = @as(u32, @intCast(struct_type.member_types.len)),
             });
-            try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, struct_type.member_types));
+            try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(struct_type.member_types)));
 
             if (struct_type.member_names) |member_names| {
-                try self.extra.appendSlice(spv.gpa, @ptrCast([]const u32, member_names));
+                try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(member_names)));
                 break :blk Item{
                     .tag = .type_struct_simple_with_member_names,
                     .result_id = result_id,
@@ -721,7 +721,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
                             .result_id = result_id,
                             .data = try self.addExtra(spv, Tag.UInt32{
                                 .ty = int.ty,
-                                .value = @intCast(u32, val),
+                                .value = @as(u32, @intCast(val)),
                             }),
                         };
                     } else if (val >= std.math.minInt(i32) and val <= std.math.maxInt(i32)) {
@@ -730,20 +730,20 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
                             .result_id = result_id,
                             .data = try self.addExtra(spv, Tag.Int32{
                                 .ty = int.ty,
-                                .value = @intCast(i32, val),
+                                .value = @as(i32, @intCast(val)),
                             }),
                         };
                     } else if (val < 0) {
                         break :blk .{
                             .tag = .int_large,
                             .result_id = result_id,
-                            .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @intCast(i64, val))),
+                            .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @as(i64, @intCast(val)))),
                         };
                     } else {
                         break :blk .{
                             .tag = .uint_large,
                             .result_id = result_id,
-                            .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @intCast(u64, val))),
+                            .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @as(u64, @intCast(val)))),
                         };
                     }
                 },
@@ -753,12 +753,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
             16 => .{
                 .tag = .float16,
                 .result_id = result_id,
-                .data = @bitCast(u16, float.value.float16),
+                .data = @as(u16, @bitCast(float.value.float16)),
             },
             32 => .{
                 .tag = .float32,
                 .result_id = result_id,
-                .data = @bitCast(u32, float.value.float32),
+                .data = @as(u32, @bitCast(float.value.float32)),
             },
             64 => .{
                 .tag = .float64,
@@ -788,7 +788,7 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
     };
     try self.items.append(spv.gpa, item);
 
-    return @enumFromInt(Ref, entry.index);
+    return @as(Ref, @enumFromInt(entry.index));
 }
 
 /// Turn a Ref back into a Key.
@@ -797,20 +797,20 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
     const item = self.items.get(@intFromEnum(ref));
     const data = item.data;
     return switch (item.tag) {
-        .type_simple => switch (@enumFromInt(Tag.SimpleType, data)) {
+        .type_simple => switch (@as(Tag.SimpleType, @enumFromInt(data))) {
             .void => .void_type,
             .bool => .bool_type,
         },
         .type_int_signed => .{ .int_type = .{
             .signedness = .signed,
-            .bits = @intCast(u16, data),
+            .bits = @as(u16, @intCast(data)),
         } },
         .type_int_unsigned => .{ .int_type = .{
             .signedness = .unsigned,
-            .bits = @intCast(u16, data),
+            .bits = @as(u16, @intCast(data)),
         } },
         .type_float => .{ .float_type = .{
-            .bits = @intCast(u16, data),
+            .bits = @as(u16, @intCast(data)),
         } },
         .type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) },
         .type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) },
@@ -819,26 +819,26 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
             return .{
                 .function_type = .{
                     .return_type = payload.data.return_type,
-                    .parameters = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.param_len]),
+                    .parameters = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.param_len])),
                 },
             };
         },
         .type_ptr_generic => .{
             .ptr_type = .{
                 .storage_class = .Generic,
-                .child_type = @enumFromInt(Ref, data),
+                .child_type = @as(Ref, @enumFromInt(data)),
             },
         },
         .type_ptr_crosswgp => .{
             .ptr_type = .{
                 .storage_class = .CrossWorkgroup,
-                .child_type = @enumFromInt(Ref, data),
+                .child_type = @as(Ref, @enumFromInt(data)),
             },
         },
         .type_ptr_function => .{
             .ptr_type = .{
                 .storage_class = .Function,
-                .child_type = @enumFromInt(Ref, data),
+                .child_type = @as(Ref, @enumFromInt(data)),
             },
         },
         .type_ptr_simple => {
@@ -852,7 +852,7 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
         },
         .type_struct_simple => {
             const payload = self.extraDataTrail(Tag.SimpleStructType, data);
-            const member_types = @ptrCast([]const Ref, self.extra.items[payload.trail..][0..payload.data.members_len]);
+            const member_types = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.members_len]));
             return .{
                 .struct_type = .{
                     .name = payload.data.name,
@@ -864,8 +864,8 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
         .type_struct_simple_with_member_names => {
             const payload = self.extraDataTrail(Tag.SimpleStructType, data);
             const trailing = self.extra.items[payload.trail..];
-            const member_types = @ptrCast([]const Ref, trailing[0..payload.data.members_len]);
-            const member_names = @ptrCast([]const String, trailing[payload.data.members_len..][0..payload.data.members_len]);
+            const member_types = @as([]const Ref, @ptrCast(trailing[0..payload.data.members_len]));
+            const member_names = @as([]const String, @ptrCast(trailing[payload.data.members_len..][0..payload.data.members_len]));
             return .{
                 .struct_type = .{
                     .name = payload.data.name,
@@ -876,11 +876,11 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
         },
         .float16 => .{ .float = .{
             .ty = self.get(.{ .float_type = .{ .bits = 16 } }),
-            .value = .{ .float16 = @bitCast(f16, @intCast(u16, data)) },
+            .value = .{ .float16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) },
         } },
         .float32 => .{ .float = .{
             .ty = self.get(.{ .float_type = .{ .bits = 32 } }),
-            .value = .{ .float32 = @bitCast(f32, data) },
+            .value = .{ .float32 = @as(f32, @bitCast(data)) },
         } },
         .float64 => .{ .float = .{
             .ty = self.get(.{ .float_type = .{ .bits = 64 } }),
@@ -923,17 +923,17 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
             } };
         },
         .undef => .{ .undef = .{
-            .ty = @enumFromInt(Ref, data),
+            .ty = @as(Ref, @enumFromInt(data)),
         } },
         .null => .{ .null = .{
-            .ty = @enumFromInt(Ref, data),
+            .ty = @as(Ref, @enumFromInt(data)),
         } },
         .bool_true => .{ .bool = .{
-            .ty = @enumFromInt(Ref, data),
+            .ty = @as(Ref, @enumFromInt(data)),
             .value = true,
         } },
         .bool_false => .{ .bool = .{
-            .ty = @enumFromInt(Ref, data),
+            .ty = @as(Ref, @enumFromInt(data)),
             .value = false,
         } },
     };
@@ -949,7 +949,7 @@ pub fn resultId(self: Self, ref: Ref) IdResult {
 fn get(self: *const Self, key: Key) Ref {
     const adapter: Key.Adapter = .{ .self = self };
     const index = self.map.getIndexAdapted(key, adapter).?;
-    return @enumFromInt(Ref, index);
+    return @as(Ref, @enumFromInt(index));
 }
 
 fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
@@ -959,12 +959,12 @@ fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
 }
 
 fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 {
-    const payload_offset = @intCast(u32, self.extra.items.len);
+    const payload_offset = @as(u32, @intCast(self.extra.items.len));
     inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
         const field_val = @field(extra, field.name);
         const word = switch (field.type) {
             u32 => field_val,
-            i32 => @bitCast(u32, field_val),
+            i32 => @as(u32, @bitCast(field_val)),
             Ref => @intFromEnum(field_val),
             StorageClass => @intFromEnum(field_val),
             String => @intFromEnum(field_val),
@@ -986,16 +986,16 @@ fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, t
         const word = self.extra.items[offset + i];
         @field(result, field.name) = switch (field.type) {
             u32 => word,
-            i32 => @bitCast(i32, word),
-            Ref => @enumFromInt(Ref, word),
-            StorageClass => @enumFromInt(StorageClass, word),
-            String => @enumFromInt(String, word),
+            i32 => @as(i32, @bitCast(word)),
+            Ref => @as(Ref, @enumFromInt(word)),
+            StorageClass => @as(StorageClass, @enumFromInt(word)),
+            String => @as(String, @enumFromInt(word)),
             else => @compileError("Invalid type: " ++ @typeName(field.type)),
         };
     }
     return .{
         .data = result,
-        .trail = offset + @intCast(u32, fields.len),
+        .trail = offset + @as(u32, @intCast(fields.len)),
     };
 }
 
@@ -1017,7 +1017,7 @@ pub const String = enum(u32) {
             _ = ctx;
             var hasher = std.hash.Wyhash.init(0);
             hasher.update(a);
-            return @truncate(u32, hasher.final());
+            return @as(u32, @truncate(hasher.final()));
         }
     };
 };
@@ -1032,10 +1032,10 @@ pub fn addString(self: *Self, spv: *Module, str: []const u8) !String {
         try self.string_bytes.ensureUnusedCapacity(spv.gpa, 1 + str.len);
         self.string_bytes.appendSliceAssumeCapacity(str);
         self.string_bytes.appendAssumeCapacity(0);
-        entry.value_ptr.* = @intCast(u32, offset);
+        entry.value_ptr.* = @as(u32, @intCast(offset));
     }
 
-    return @enumFromInt(String, entry.index);
+    return @as(String, @enumFromInt(entry.index));
 }
 
 pub fn getString(self: *const Self, ref: String) ?[]const u8 {
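
Float64.encode/decode above reinterprets an f64 through u64 before splitting it into words; @bitCast is lossless in both directions. A round-trip sketch:

    const std = @import("std");

    fn floatBits(x: f64) u64 {
        return @as(u64, @bitCast(x));
    }

    test "f64 bit round-trip" {
        const x: f64 = -0.5;
        try std.testing.expectEqual(x, @as(f64, @bitCast(floatBits(x))));
    }
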
src/codegen/spirv/Module.zig
@@ -451,8 +451,8 @@ pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
     return try self.resolveId(.{ .int = .{
         .ty = ty_ref,
         .value = switch (ty.signedness) {
-            .signed => Value{ .int64 = @intCast(i64, value) },
-            .unsigned => Value{ .uint64 = @intCast(u64, value) },
+            .signed => Value{ .int64 = @as(i64, @intCast(value)) },
+            .unsigned => Value{ .uint64 = @as(u64, @intCast(value)) },
         },
     } });
 }
@@ -516,7 +516,7 @@ pub fn allocDecl(self: *Module, kind: DeclKind) !Decl.Index {
         .begin_dep = undefined,
         .end_dep = undefined,
     });
-    const index = @enumFromInt(Decl.Index, @intCast(u32, self.decls.items.len - 1));
+    const index = @as(Decl.Index, @enumFromInt(@as(u32, @intCast(self.decls.items.len - 1))));
     switch (kind) {
         .func => {},
         // If the decl represents a global, also allocate a global node.
@@ -540,9 +540,9 @@ pub fn globalPtr(self: *Module, index: Decl.Index) ?*Global {
 
 /// Declare ALL dependencies for a decl.
 pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl.Index) !void {
-    const begin_dep = @intCast(u32, self.decl_deps.items.len);
+    const begin_dep = @as(u32, @intCast(self.decl_deps.items.len));
     try self.decl_deps.appendSlice(self.gpa, deps);
-    const end_dep = @intCast(u32, self.decl_deps.items.len);
+    const end_dep = @as(u32, @intCast(self.decl_deps.items.len));
 
     const decl = self.declPtr(decl_index);
     decl.begin_dep = begin_dep;
@@ -550,13 +550,13 @@ pub fn declareDeclDeps(self: *Module, decl_index: Decl.Index, deps: []const Decl
 }
 
 pub fn beginGlobal(self: *Module) u32 {
-    return @intCast(u32, self.globals.section.instructions.items.len);
+    return @as(u32, @intCast(self.globals.section.instructions.items.len));
 }
 
 pub fn endGlobal(self: *Module, global_index: Decl.Index, begin_inst: u32) void {
     const global = self.globalPtr(global_index).?;
     global.begin_inst = begin_inst;
-    global.end_inst = @intCast(u32, self.globals.section.instructions.items.len);
+    global.end_inst = @as(u32, @intCast(self.globals.section.instructions.items.len));
 }
 
 pub fn declareEntryPoint(self: *Module, decl_index: Decl.Index, name: []const u8) !void {
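
The begin/end bookkeeping above narrows usize lengths to u32 with @intCast; unlike @truncate this is range-checked, so a length beyond maxInt(u32) panics in safe builds instead of silently wrapping. A sketch with a hypothetical length:

    fn lastIndex(items_len: usize) u32 {
        // Safety-checked narrowing: panics in safe builds if it does not fit.
        return @as(u32, @intCast(items_len - 1));
    }
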
src/codegen/spirv/Section.zig
@@ -50,7 +50,7 @@ pub fn emitRaw(
 ) !void {
     const word_count = 1 + operand_words;
     try section.instructions.ensureUnusedCapacity(allocator, word_count);
-    section.writeWord((@intCast(Word, word_count << 16)) | @intFromEnum(opcode));
+    section.writeWord((@as(Word, @intCast(word_count << 16))) | @intFromEnum(opcode));
 }
 
 pub fn emit(
@@ -61,7 +61,7 @@ pub fn emit(
 ) !void {
     const word_count = instructionSize(opcode, operands);
     try section.instructions.ensureUnusedCapacity(allocator, word_count);
-    section.writeWord(@intCast(Word, word_count << 16) | @intFromEnum(opcode));
+    section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode));
     section.writeOperands(opcode.Operands(), operands);
 }
 
@@ -94,8 +94,8 @@ pub fn writeWords(section: *Section, words: []const Word) void {
 
 pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
     section.writeWords(&.{
-        @truncate(Word, dword),
-        @truncate(Word, dword >> @bitSizeOf(Word)),
+        @as(Word, @truncate(dword)),
+        @as(Word, @truncate(dword >> @bitSizeOf(Word))),
     });
 }
 
@@ -145,7 +145,7 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand)
             },
             .Struct => |info| {
                 if (info.layout == .Packed) {
-                    section.writeWord(@bitCast(Word, operand));
+                    section.writeWord(@as(Word, @bitCast(operand)));
                 } else {
                     section.writeExtendedMask(Operand, operand);
                 }
@@ -166,7 +166,7 @@ fn writeString(section: *Section, str: []const u8) void {
 
         var j: usize = 0;
         while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
-            word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * @bitSizeOf(u8));
+            word |= @as(Word, str[i + j]) << @as(Log2Word, @intCast(j * @bitSizeOf(u8)));
         }
 
         section.instructions.appendAssumeCapacity(word);
@@ -175,12 +175,12 @@ fn writeString(section: *Section, str: []const u8) void {
 
 fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void {
     switch (operand) {
-        .int32 => |int| section.writeWord(@bitCast(Word, int)),
-        .uint32 => |int| section.writeWord(@bitCast(Word, int)),
-        .int64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
-        .uint64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
-        .float32 => |float| section.writeWord(@bitCast(Word, float)),
-        .float64 => |float| section.writeDoubleWord(@bitCast(DoubleWord, float)),
+        .int32 => |int| section.writeWord(@as(Word, @bitCast(int))),
+        .uint32 => |int| section.writeWord(@as(Word, @bitCast(int))),
+        .int64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))),
+        .uint64 => |int| section.writeDoubleWord(@as(DoubleWord, @bitCast(int))),
+        .float32 => |float| section.writeWord(@as(Word, @bitCast(float))),
+        .float64 => |float| section.writeDoubleWord(@as(DoubleWord, @bitCast(float))),
     }
 }
 
@@ -189,10 +189,10 @@ fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand
     inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
         switch (@typeInfo(field.type)) {
             .Optional => if (@field(operand, field.name) != null) {
-                mask |= 1 << @intCast(u5, bit);
+                mask |= 1 << @as(u5, @intCast(bit));
             },
             .Bool => if (@field(operand, field.name)) {
-                mask |= 1 << @intCast(u5, bit);
+                mask |= 1 << @as(u5, @intCast(bit));
             },
             else => unreachable,
         }
@@ -392,7 +392,7 @@ test "SPIR-V Section emit() - extended mask" {
         (@as(Word, 5) << 16) | @intFromEnum(Opcode.OpLoopMerge),
         10,
         20,
-        @bitCast(Word, spec.LoopControl{ .Unroll = true, .DependencyLength = true }),
+        @as(Word, @bitCast(spec.LoopControl{ .Unroll = true, .DependencyLength = true })),
         2,
     }, section.instructions.items);
 }
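
The test above leans on packed structs having a defined bit layout: a packed struct whose fields total 32 bits can be @bitCast straight to a u32 word. A minimal sketch with a hypothetical flag set:

    const Flags = packed struct {
        unroll: bool = false,
        dont_unroll: bool = false,
        _reserved: u30 = 0,
    };

    fn flagsWord(f: Flags) u32 {
        return @as(u32, @bitCast(f)); // bit 0 = unroll, bit 1 = dont_unroll
    }
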
src/codegen/c.zig
@@ -326,7 +326,7 @@ pub const Function = struct {
             .cty_idx = try f.typeToIndex(ty, .complete),
             .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
         });
-        return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) };
+        return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) };
     }
 
     fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue {
@@ -644,7 +644,7 @@ pub const DeclGen = struct {
                 // Ensure complete type definition is visible before accessing fields.
                 _ = try dg.typeToIndex(base_ty, .complete);
                 const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) {
-                    .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod),
+                    .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@as(usize, @intCast(field.index)), mod),
                     .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
                         .One, .Many, .C => unreachable,
                         .Slice => switch (field.index) {
@@ -662,7 +662,7 @@ pub const DeclGen = struct {
                     try dg.renderCType(writer, ptr_cty);
                     try writer.writeByte(')');
                 }
-                switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) {
+                switch (fieldLocation(base_ty, ptr_ty, @as(u32, @intCast(field.index)), mod)) {
                     .begin => try dg.renderParentPtr(writer, field.base, location),
                     .field => |name| {
                         try writer.writeAll("&(");
@@ -740,11 +740,11 @@ pub const DeclGen = struct {
                     try dg.renderTypeForBuiltinFnName(writer, ty);
                     try writer.writeByte('(');
                     switch (bits) {
-                        16 => try writer.print("{x}", .{@bitCast(f16, undefPattern(i16))}),
-                        32 => try writer.print("{x}", .{@bitCast(f32, undefPattern(i32))}),
-                        64 => try writer.print("{x}", .{@bitCast(f64, undefPattern(i64))}),
-                        80 => try writer.print("{x}", .{@bitCast(f80, undefPattern(i80))}),
-                        128 => try writer.print("{x}", .{@bitCast(f128, undefPattern(i128))}),
+                        16 => try writer.print("{x}", .{@as(f16, @bitCast(undefPattern(i16)))}),
+                        32 => try writer.print("{x}", .{@as(f32, @bitCast(undefPattern(i32)))}),
+                        64 => try writer.print("{x}", .{@as(f64, @bitCast(undefPattern(i64)))}),
+                        80 => try writer.print("{x}", .{@as(f80, @bitCast(undefPattern(i80)))}),
+                        128 => try writer.print("{x}", .{@as(f128, @bitCast(undefPattern(i128)))}),
                         else => unreachable,
                     }
                     try writer.writeAll(", ");
@@ -1041,11 +1041,11 @@ pub const DeclGen = struct {
                 };
 
                 switch (bits) {
-                    16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
-                    32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
-                    64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
-                    80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
-                    128 => repr_val_big.set(@bitCast(u128, f128_val)),
+                    16 => repr_val_big.set(@as(u16, @bitCast(val.toFloat(f16, mod)))),
+                    32 => repr_val_big.set(@as(u32, @bitCast(val.toFloat(f32, mod)))),
+                    64 => repr_val_big.set(@as(u64, @bitCast(val.toFloat(f64, mod)))),
+                    80 => repr_val_big.set(@as(u80, @bitCast(val.toFloat(f80, mod)))),
+                    128 => repr_val_big.set(@as(u128, @bitCast(f128_val))),
                     else => unreachable,
                 }
 
@@ -1103,11 +1103,11 @@ pub const DeclGen = struct {
                     if (std.math.isNan(f128_val)) switch (bits) {
                         // We only actually need to pass the significand, but it will get
                         // properly masked anyway, so just pass the whole value.
-                        16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}),
-                        32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}),
-                        64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}),
-                        80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}),
-                        128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
+                        16 => try writer.print("\"0x{x}\"", .{@as(u16, @bitCast(val.toFloat(f16, mod)))}),
+                        32 => try writer.print("\"0x{x}\"", .{@as(u32, @bitCast(val.toFloat(f32, mod)))}),
+                        64 => try writer.print("\"0x{x}\"", .{@as(u64, @bitCast(val.toFloat(f64, mod)))}),
+                        80 => try writer.print("\"0x{x}\"", .{@as(u80, @bitCast(val.toFloat(f80, mod)))}),
+                        128 => try writer.print("\"0x{x}\"", .{@as(u128, @bitCast(f128_val))}),
                         else => unreachable,
                     };
                     try writer.writeAll(", ");
@@ -1225,11 +1225,11 @@ pub const DeclGen = struct {
                             var index: usize = 0;
                             while (index < ai.len) : (index += 1) {
                                 const elem_val = try val.elemValue(mod, index);
-                                const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+                                const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
                                 try literal.writeChar(elem_val_u8);
                             }
                             if (ai.sentinel) |s| {
-                                const s_u8 = @intCast(u8, s.toUnsignedInt(mod));
+                                const s_u8 = @as(u8, @intCast(s.toUnsignedInt(mod)));
                                 if (s_u8 != 0) try literal.writeChar(s_u8);
                             }
                             try literal.end();
@@ -1239,7 +1239,7 @@ pub const DeclGen = struct {
                             while (index < ai.len) : (index += 1) {
                                 if (index != 0) try writer.writeByte(',');
                                 const elem_val = try val.elemValue(mod, index);
-                                const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+                                const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
                                 try writer.print("'\\x{x}'", .{elem_val_u8});
                             }
                             if (ai.sentinel) |s| {
@@ -1840,7 +1840,7 @@ pub const DeclGen = struct {
             decl.ty,
             .{ .decl = decl_index },
             CQualifiers.init(.{ .@"const" = variable.is_const }),
-            @intCast(u32, decl.alignment.toByteUnits(0)),
+            @as(u32, @intCast(decl.alignment.toByteUnits(0))),
             .complete,
         );
         try fwd_decl_writer.writeAll(";\n");
@@ -1907,7 +1907,7 @@ pub const DeclGen = struct {
         const mod = dg.module;
         const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
             .signedness = .unsigned,
-            .bits = @intCast(u16, ty.bitSize(mod)),
+            .bits = @as(u16, @intCast(ty.bitSize(mod))),
         };
 
         if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@@ -2481,7 +2481,7 @@ fn genExports(o: *Object) !void {
     if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| {
         for (exports.items[1..], 1..) |@"export", i| {
             try fwd_decl_writer.writeAll("zig_export(");
-            try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) });
+            try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @as(u32, @intCast(i)) });
             try fwd_decl_writer.print(", {s}, {s});\n", .{
                 fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null),
                 fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null),
@@ -2510,7 +2510,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
             try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
             try w.writeAll(") {\n switch (tag) {\n");
             for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
-                const index = @intCast(u32, index_usize);
+                const index = @as(u32, @intCast(index_usize));
                 const name = mod.intern_pool.stringToSlice(name_ip);
                 const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
 
@@ -2783,7 +2783,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
     // Remember how many locals there were before entering the body so that we can free any that
     // were newly introduced. Any new locals must necessarily be logically free after the then
     // branch is complete.
-    const pre_locals_len = @intCast(LocalIndex, f.locals.items.len);
+    const pre_locals_len = @as(LocalIndex, @intCast(f.locals.items.len));
 
     for (leading_deaths) |death| {
         try die(f, inst, Air.indexToRef(death));
@@ -2804,7 +2804,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
     // them, unless they were used to store allocs.
 
     for (pre_locals_len..f.locals.items.len) |local_i| {
-        const local_index = @intCast(LocalIndex, local_i);
+        const local_index = @as(LocalIndex, @intCast(local_i));
         if (f.allocs.contains(local_index)) {
             continue;
         }
@@ -3364,7 +3364,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
         const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
         const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
 
-        const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod)));
+        const field_ty = try mod.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(mod))));
 
         try f.writeCValue(writer, local, .Other);
         try v.elem(f, writer);
@@ -3667,7 +3667,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         var mask = try BigInt.Managed.initCapacity(stack.get(), BigInt.calcTwosCompLimbCount(host_bits));
         defer mask.deinit();
 
-        try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(usize, src_bits));
+        try mask.setTwosCompIntLimit(.max, .unsigned, @as(usize, @intCast(src_bits)));
         try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
         try mask.bitNotWrap(&mask, .unsigned, host_bits);
 
@@ -4096,7 +4096,7 @@ fn airCall(
 
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const extra = f.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
+    const args = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra.end..][0..extra.data.args_len]));
 
     const resolved_args = try gpa.alloc(CValue, args.len);
     defer gpa.free(resolved_args);
@@ -4537,7 +4537,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
             wrap_cty = elem_cty.toSignedness(dest_info.signedness);
             need_bitcasts = wrap_cty.?.tag() == .zig_i128;
             bits -= 1;
-            bits %= @intCast(u16, f.byteSize(elem_cty) * 8);
+            bits %= @as(u16, @intCast(f.byteSize(elem_cty) * 8));
             bits += 1;
         }
         try writer.writeAll(" = ");
@@ -4711,7 +4711,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
     var extra_index: usize = switch_br.end;
     for (0..switch_br.data.cases_len) |case_i| {
         const case = f.air.extraData(Air.SwitchBr.Case, extra_index);
-        const items = @ptrCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]);
+        const items = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[case.end..][0..case.data.items_len]));
         const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len];
         extra_index = case.end + case.data.items_len + case_body.len;
 
@@ -4771,13 +4771,13 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.Asm, ty_pl.payload);
-    const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
-    const clobbers_len = @truncate(u31, extra.data.flags);
+    const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+    const clobbers_len = @as(u31, @truncate(extra.data.flags));
     const gpa = f.object.dg.gpa;
     var extra_i: usize = extra.end;
-    const outputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]);
+    const outputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.outputs_len]));
     extra_i += outputs.len;
-    const inputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]);
+    const inputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.inputs_len]));
     extra_i += inputs.len;
 
     const result = result: {
@@ -4794,7 +4794,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
             break :local local;
         } else .none;
 
-        const locals_begin = @intCast(LocalIndex, f.locals.items.len);
+        const locals_begin = @as(LocalIndex, @intCast(f.locals.items.len));
         const constraints_extra_begin = extra_i;
         for (outputs) |output| {
             const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
@@ -5402,7 +5402,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
                     inst_ty.intInfo(mod).signedness
                 else
                     .unsigned;
-                const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
+                const field_int_ty = try mod.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(mod))));
 
                 const temp_local = try f.allocLocal(inst, field_int_ty);
                 try f.writeCValue(writer, temp_local, .Other);
@@ -6033,7 +6033,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
     try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
 
     const repr_ty = if (ty.isRuntimeFloat())
-        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+        mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
     else
         ty;
 
@@ -6136,7 +6136,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
     try reap(f, inst, &.{ pl_op.operand, extra.operand });
 
-    const repr_bits = @intCast(u16, ty.abiSize(mod) * 8);
+    const repr_bits = @as(u16, @intCast(ty.abiSize(mod) * 8));
     const is_float = ty.isRuntimeFloat();
     const is_128 = repr_bits == 128;
     const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
@@ -6186,7 +6186,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty = ptr_ty.childType(mod);
 
     const repr_ty = if (ty.isRuntimeFloat())
-        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+        mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
     else
         ty;
 
@@ -6226,7 +6226,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
     const repr_ty = if (ty.isRuntimeFloat())
-        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+        mod.intType(.unsigned, @as(u16, @intCast(ty.abiSize(mod) * 8))) catch unreachable
     else
         ty;
 
@@ -6574,7 +6574,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
         try writer.writeAll("] = ");
 
         const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
-        const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63));
+        const src_val = try mod.intValue(Type.usize, @as(u64, @intCast(mask_elem ^ mask_elem >> 63)));
 
         try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
         try writer.writeByte('[');
@@ -6745,8 +6745,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const ip = &mod.intern_pool;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const inst_ty = f.typeOfIndex(inst);
-    const len = @intCast(usize, inst_ty.arrayLen(mod));
-    const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
+    const len = @as(usize, @intCast(inst_ty.arrayLen(mod)));
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[ty_pl.payload..][0..len]));
     const gpa = f.object.dg.gpa;
     const resolved_elements = try gpa.alloc(CValue, elements.len);
     defer gpa.free(resolved_elements);
@@ -7387,7 +7387,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri
 fn undefPattern(comptime IntType: type) IntType {
     const int_info = @typeInfo(IntType).Int;
     const UnsignedType = std.meta.Int(.unsigned, int_info.bits);
-    return @bitCast(IntType, @as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
+    return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3)));
 }
 
 const FormatIntLiteralContext = struct {
@@ -7438,7 +7438,7 @@ fn formatIntLiteral(
     } else data.val.toBigInt(&int_buf, mod);
     assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
 
-    const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
+    const c_bits = @as(usize, @intCast(data.cty.byteSize(data.dg.ctypes.set, target) * 8));
     var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
     const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
 
@@ -7471,7 +7471,7 @@ fn formatIntLiteral(
             const array_data = data.cty.castTag(.array).?.data;
             break :info .{
                 .cty = data.dg.indexToCType(array_data.elem_type),
-                .count = @intCast(usize, array_data.len),
+                .count = @as(usize, @intCast(array_data.len)),
                 .endian = target.cpu.arch.endian(),
                 .homogeneous = true,
             };
@@ -7527,7 +7527,7 @@ fn formatIntLiteral(
 
         var c_limb_int_info = std.builtin.Type.Int{
             .signedness = undefined,
-            .bits = @intCast(u16, @divExact(c_bits, c_limb_info.count)),
+            .bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))),
         };
         var c_limb_cty: CType = undefined;
 
@@ -7727,7 +7727,7 @@ fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
 fn lowersToArray(ty: Type, mod: *Module) bool {
     return switch (ty.zigTypeTag(mod)) {
         .Array, .Vector => return true,
-        else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
+        else => return ty.isAbiInt(mod) and toCIntBits(@as(u32, @intCast(ty.bitSize(mod)))) == null,
     };
 }
 
@@ -7735,7 +7735,7 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi
     assert(operands.len <= Liveness.bpi - 1);
     var tomb_bits = f.liveness.getTombBits(inst);
     for (operands) |operand| {
-        const dies = @truncate(u1, tomb_bits) != 0;
+        const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
         try die(f, inst, operand);
src/codegen/llvm.zig
@@ -592,7 +592,7 @@ pub const Object = struct {
         llvm_errors[0] = llvm_slice_ty.getUndef();
         for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| {
             const name = mod.intern_pool.stringToSlice(name_nts);
-            const str_init = o.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
+            const str_init = o.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False);
             const str_global = o.llvm_module.addGlobal(str_init.typeOf(), "");
             str_global.setInitializer(str_init);
             str_global.setLinkage(.Private);
@@ -607,7 +607,7 @@ pub const Object = struct {
             llvm_error.* = llvm_slice_ty.constNamedStruct(&slice_fields, slice_fields.len);
         }
 
-        const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @intCast(c_uint, error_name_list.len));
+        const error_name_table_init = llvm_slice_ty.constArray(llvm_errors.ptr, @as(c_uint, @intCast(error_name_list.len)));
 
         const error_name_table_global = o.llvm_module.addGlobal(error_name_table_init.typeOf(), "");
         error_name_table_global.setInitializer(error_name_table_init);
@@ -1027,7 +1027,7 @@ pub const Object = struct {
                     llvm_arg_i += 1;
 
                     const param_llvm_ty = try o.lowerType(param_ty);
-                    const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
+                    const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
                     const int_llvm_ty = o.context.intType(abi_size * 8);
                     const alignment = @max(
                         param_ty.abiAlignment(mod),
@@ -1053,7 +1053,7 @@ pub const Object = struct {
                     const ptr_info = param_ty.ptrInfo(mod);
 
                     if (math.cast(u5, it.zig_index - 1)) |i| {
-                        if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
+                        if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
                             o.addArgAttr(llvm_func, llvm_arg_i, "noalias");
                         }
                     }
@@ -1083,9 +1083,9 @@ pub const Object = struct {
                     const param_llvm_ty = try o.lowerType(param_ty);
                     const param_alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
-                    const llvm_ty = o.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+                    const llvm_ty = o.context.structType(field_types.ptr, @as(c_uint, @intCast(field_types.len)), .False);
                     for (field_types, 0..) |_, field_i_usize| {
-                        const field_i = @intCast(c_uint, field_i_usize);
+                        const field_i = @as(c_uint, @intCast(field_i_usize));
                         const param = llvm_func.getParam(llvm_arg_i);
                         llvm_arg_i += 1;
                         const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, "");
@@ -1289,11 +1289,11 @@ pub const Object = struct {
             if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
             if (self.di_map.get(decl)) |di_node| {
                 if (try decl.isFunction(mod)) {
-                    const di_func = @ptrCast(*llvm.DISubprogram, di_node);
+                    const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node));
                     const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
                     di_func.replaceLinkageName(linkage_name);
                 } else {
-                    const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node);
+                    const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node));
                     const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
                     di_global.replaceLinkageName(linkage_name);
                 }
@@ -1315,11 +1315,11 @@ pub const Object = struct {
             if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
             if (self.di_map.get(decl)) |di_node| {
                 if (try decl.isFunction(mod)) {
-                    const di_func = @ptrCast(*llvm.DISubprogram, di_node);
+                    const di_func = @as(*llvm.DISubprogram, @ptrCast(di_node));
                     const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
                     di_func.replaceLinkageName(linkage_name);
                 } else {
-                    const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node);
+                    const di_global = @as(*llvm.DIGlobalVariable, @ptrCast(di_node));
                     const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
                     di_global.replaceLinkageName(linkage_name);
                 }
@@ -1390,7 +1390,7 @@ pub const Object = struct {
         const gop = try o.di_map.getOrPut(gpa, file);
         errdefer assert(o.di_map.remove(file));
         if (gop.found_existing) {
-            return @ptrCast(*llvm.DIFile, gop.value_ptr.*);
+            return @as(*llvm.DIFile, @ptrCast(gop.value_ptr.*));
         }
         const dir_path_z = d: {
             var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
@@ -1514,7 +1514,7 @@ pub const Object = struct {
                     if (@sizeOf(usize) == @sizeOf(u64)) {
                         enumerators[i] = dib.createEnumerator2(
                             field_name_z,
-                            @intCast(c_uint, bigint.limbs.len),
+                            @as(c_uint, @intCast(bigint.limbs.len)),
                             bigint.limbs.ptr,
                             int_info.bits,
                             int_info.signedness == .unsigned,
@@ -1538,7 +1538,7 @@ pub const Object = struct {
                     ty.abiSize(mod) * 8,
                     ty.abiAlignment(mod) * 8,
                     enumerators.ptr,
-                    @intCast(c_int, enumerators.len),
+                    @as(c_int, @intCast(enumerators.len)),
                     try o.lowerDebugType(int_ty, .full),
                     "",
                 );
@@ -1713,7 +1713,7 @@ pub const Object = struct {
                     ty.abiSize(mod) * 8,
                     ty.abiAlignment(mod) * 8,
                     try o.lowerDebugType(ty.childType(mod), .full),
-                    @intCast(i64, ty.arrayLen(mod)),
+                    @as(i64, @intCast(ty.arrayLen(mod))),
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
                 try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
@@ -2018,7 +2018,7 @@ pub const Object = struct {
                             0, // flags
                             null, // derived from
                             di_fields.items.ptr,
-                            @intCast(c_int, di_fields.items.len),
+                            @as(c_int, @intCast(di_fields.items.len)),
                             0, // run time lang
                             null, // vtable holder
                             "", // unique id
@@ -2105,7 +2105,7 @@ pub const Object = struct {
                     0, // flags
                     null, // derived from
                     di_fields.items.ptr,
-                    @intCast(c_int, di_fields.items.len),
+                    @as(c_int, @intCast(di_fields.items.len)),
                     0, // run time lang
                     null, // vtable holder
                     "", // unique id
@@ -2217,7 +2217,7 @@ pub const Object = struct {
                     ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     di_fields.items.ptr,
-                    @intCast(c_int, di_fields.items.len),
+                    @as(c_int, @intCast(di_fields.items.len)),
                     0, // run time lang
                     "", // unique id
                 );
@@ -2330,7 +2330,7 @@ pub const Object = struct {
 
                 const fn_di_ty = dib.createSubroutineType(
                     param_di_types.items.ptr,
-                    @intCast(c_int, param_di_types.items.len),
+                    @as(c_int, @intCast(param_di_types.items.len)),
                     0,
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -2487,7 +2487,7 @@ pub const Object = struct {
         }
 
         if (fn_info.alignment.toByteUnitsOptional()) |a| {
-            llvm_fn.setAlignment(@intCast(c_uint, a));
+            llvm_fn.setAlignment(@as(c_uint, @intCast(a)));
         }
 
         // Function attributes that are independent of analysis results of the function body.
@@ -2710,7 +2710,7 @@ pub const Object = struct {
                 if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null);
                 const elem_llvm_ty = try o.lowerType(elem_ty);
                 const total_len = t.arrayLen(mod) + @intFromBool(t.sentinel(mod) != null);
-                return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
+                return elem_llvm_ty.arrayType(@as(c_uint, @intCast(total_len)));
             },
             .Vector => {
                 const elem_type = try o.lowerType(t.childType(mod));
@@ -2732,7 +2732,7 @@ pub const Object = struct {
                 };
                 const offset = child_ty.abiSize(mod) + 1;
                 const abi_size = t.abiSize(mod);
-                const padding = @intCast(c_uint, abi_size - offset);
+                const padding = @as(c_uint, @intCast(abi_size - offset));
                 if (padding == 0) {
                     return o.context.structType(&fields_buf, 2, .False);
                 }
@@ -2761,7 +2761,7 @@ pub const Object = struct {
                         std.mem.alignForward(u64, error_size, payload_align) +
                         payload_size;
                     const abi_size = std.mem.alignForward(u64, payload_end, error_align);
-                    const padding = @intCast(c_uint, abi_size - payload_end);
+                    const padding = @as(c_uint, @intCast(abi_size - payload_end));
                     if (padding == 0) {
                         return o.context.structType(&fields_buf, 2, .False);
                     }
@@ -2774,7 +2774,7 @@ pub const Object = struct {
                         std.mem.alignForward(u64, payload_size, error_align) +
                         error_size;
                     const abi_size = std.mem.alignForward(u64, error_end, payload_align);
-                    const padding = @intCast(c_uint, abi_size - error_end);
+                    const padding = @as(c_uint, @intCast(abi_size - error_end));
                     if (padding == 0) {
                         return o.context.structType(&fields_buf, 2, .False);
                     }
@@ -2811,7 +2811,7 @@ pub const Object = struct {
 
                             const padding_len = offset - prev_offset;
                             if (padding_len > 0) {
-                                const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                                const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                                 try llvm_field_types.append(gpa, llvm_array_ty);
                             }
                             const field_llvm_ty = try o.lowerType(field_ty.toType());
@@ -2824,14 +2824,14 @@ pub const Object = struct {
                             offset = std.mem.alignForward(u64, offset, big_align);
                             const padding_len = offset - prev_offset;
                             if (padding_len > 0) {
-                                const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                                const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                                 try llvm_field_types.append(gpa, llvm_array_ty);
                             }
                         }
 
                         llvm_struct_ty.structSetBody(
                             llvm_field_types.items.ptr,
-                            @intCast(c_uint, llvm_field_types.items.len),
+                            @as(c_uint, @intCast(llvm_field_types.items.len)),
                             .False,
                         );
 
@@ -2880,7 +2880,7 @@ pub const Object = struct {
 
                     const padding_len = offset - prev_offset;
                     if (padding_len > 0) {
-                        const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                        const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                         try llvm_field_types.append(gpa, llvm_array_ty);
                     }
                     const field_llvm_ty = try o.lowerType(field.ty);
@@ -2893,14 +2893,14 @@ pub const Object = struct {
                     offset = std.mem.alignForward(u64, offset, big_align);
                     const padding_len = offset - prev_offset;
                     if (padding_len > 0) {
-                        const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                        const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                         try llvm_field_types.append(gpa, llvm_array_ty);
                     }
                 }
 
                 llvm_struct_ty.structSetBody(
                     llvm_field_types.items.ptr,
-                    @intCast(c_uint, llvm_field_types.items.len),
+                    @as(c_uint, @intCast(llvm_field_types.items.len)),
                     llvm.Bool.fromBool(any_underaligned_fields),
                 );
 
@@ -2914,7 +2914,7 @@ pub const Object = struct {
                 const union_obj = mod.typeToUnion(t).?;
 
                 if (union_obj.layout == .Packed) {
-                    const bitsize = @intCast(c_uint, t.bitSize(mod));
+                    const bitsize = @as(c_uint, @intCast(t.bitSize(mod)));
                     const int_llvm_ty = o.context.intType(bitsize);
                     gop.value_ptr.* = int_llvm_ty;
                     return int_llvm_ty;
@@ -2939,9 +2939,9 @@ pub const Object = struct {
                         break :t llvm_aligned_field_ty;
                     }
                     const padding_len = if (layout.tag_size == 0)
-                        @intCast(c_uint, layout.abi_size - layout.most_aligned_field_size)
+                        @as(c_uint, @intCast(layout.abi_size - layout.most_aligned_field_size))
                     else
-                        @intCast(c_uint, layout.payload_size - layout.most_aligned_field_size);
+                        @as(c_uint, @intCast(layout.payload_size - layout.most_aligned_field_size));
                     const fields: [2]*llvm.Type = .{
                         llvm_aligned_field_ty,
                         o.context.intType(8).arrayType(padding_len),
@@ -3020,7 +3020,7 @@ pub const Object = struct {
             },
             .abi_sized_int => {
                 const param_ty = fn_info.param_types[it.zig_index - 1].toType();
-                const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
+                const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
                 try llvm_params.append(o.context.intType(abi_size * 8));
             },
             .slice => {
@@ -3045,7 +3045,7 @@ pub const Object = struct {
             .float_array => |count| {
                 const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                 const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
-                const field_count = @intCast(c_uint, count);
+                const field_count = @as(c_uint, @intCast(count));
                 const arr_ty = float_ty.arrayType(field_count);
                 try llvm_params.append(arr_ty);
             },
@@ -3059,7 +3059,7 @@ pub const Object = struct {
         return llvm.functionType(
             llvm_ret_ty,
             llvm_params.items.ptr,
-            @intCast(c_uint, llvm_params.items.len),
+            @as(c_uint, @intCast(llvm_params.items.len)),
             llvm.Bool.fromBool(fn_info.is_var_args),
         );
     }
@@ -3219,7 +3219,7 @@ pub const Object = struct {
                     }
                     if (@sizeOf(usize) == @sizeOf(u64)) {
                         break :v llvm_type.constIntOfArbitraryPrecision(
-                            @intCast(c_uint, bigint.limbs.len),
+                            @as(c_uint, @intCast(bigint.limbs.len)),
                             bigint.limbs.ptr,
                         );
                     }
@@ -3234,19 +3234,19 @@ pub const Object = struct {
                 const llvm_ty = try o.lowerType(tv.ty);
                 switch (tv.ty.floatBits(target)) {
                     16 => {
-                        const repr = @bitCast(u16, tv.val.toFloat(f16, mod));
+                        const repr = @as(u16, @bitCast(tv.val.toFloat(f16, mod)));
                         const llvm_i16 = o.context.intType(16);
                         const int = llvm_i16.constInt(repr, .False);
                         return int.constBitCast(llvm_ty);
                     },
                     32 => {
-                        const repr = @bitCast(u32, tv.val.toFloat(f32, mod));
+                        const repr = @as(u32, @bitCast(tv.val.toFloat(f32, mod)));
                         const llvm_i32 = o.context.intType(32);
                         const int = llvm_i32.constInt(repr, .False);
                         return int.constBitCast(llvm_ty);
                     },
                     64 => {
-                        const repr = @bitCast(u64, tv.val.toFloat(f64, mod));
+                        const repr = @as(u64, @bitCast(tv.val.toFloat(f64, mod)));
                         const llvm_i64 = o.context.intType(64);
                         const int = llvm_i64.constInt(repr, .False);
                         return int.constBitCast(llvm_ty);
@@ -3265,7 +3265,7 @@ pub const Object = struct {
                         }
                     },
                     128 => {
-                        var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod));
+                        var buf: [2]u64 = @as([2]u64, @bitCast(tv.val.toFloat(f128, mod)));
                         // LLVM seems to require that the lower half of the f128 be placed first
                         // in the buffer.
                         if (native_endian == .Big) {
@@ -3343,7 +3343,7 @@ pub const Object = struct {
                 .array_type => switch (aggregate.storage) {
                     .bytes => |bytes| return o.context.constString(
                         bytes.ptr,
-                        @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
+                        @as(c_uint, @intCast(tv.ty.arrayLenIncludingSentinel(mod))),
                         .True, // Don't null terminate. Bytes has the sentinel, if any.
                     ),
                     .elems => |elem_vals| {
@@ -3358,21 +3358,21 @@ pub const Object = struct {
                         if (need_unnamed) {
                             return o.context.constStruct(
                                 llvm_elems.ptr,
-                                @intCast(c_uint, llvm_elems.len),
+                                @as(c_uint, @intCast(llvm_elems.len)),
                                 .True,
                             );
                         } else {
                             const llvm_elem_ty = try o.lowerType(elem_ty);
                             return llvm_elem_ty.constArray(
                                 llvm_elems.ptr,
-                                @intCast(c_uint, llvm_elems.len),
+                                @as(c_uint, @intCast(llvm_elems.len)),
                             );
                         }
                     },
                     .repeated_elem => |val| {
                         const elem_ty = tv.ty.childType(mod);
                         const sentinel = tv.ty.sentinel(mod);
-                        const len = @intCast(usize, tv.ty.arrayLen(mod));
+                        const len = @as(usize, @intCast(tv.ty.arrayLen(mod)));
                         const len_including_sent = len + @intFromBool(sentinel != null);
                         const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
                         defer gpa.free(llvm_elems);
@@ -3393,14 +3393,14 @@ pub const Object = struct {
                         if (need_unnamed) {
                             return o.context.constStruct(
                                 llvm_elems.ptr,
-                                @intCast(c_uint, llvm_elems.len),
+                                @as(c_uint, @intCast(llvm_elems.len)),
                                 .True,
                             );
                         } else {
                             const llvm_elem_ty = try o.lowerType(elem_ty);
                             return llvm_elem_ty.constArray(
                                 llvm_elems.ptr,
-                                @intCast(c_uint, llvm_elems.len),
+                                @as(c_uint, @intCast(llvm_elems.len)),
                             );
                         }
                     },
@@ -3425,7 +3425,7 @@ pub const Object = struct {
                     }
                     return llvm.constVector(
                         llvm_elems.ptr,
-                        @intCast(c_uint, llvm_elems.len),
+                        @as(c_uint, @intCast(llvm_elems.len)),
                     );
                 },
                 .anon_struct_type => |tuple| {
@@ -3450,7 +3450,7 @@ pub const Object = struct {
 
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
-                            const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                            const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                             // TODO make this and all other padding elsewhere in debug
                             // builds be 0xaa not undef.
                             llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
@@ -3472,7 +3472,7 @@ pub const Object = struct {
                         offset = std.mem.alignForward(u64, offset, big_align);
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
-                            const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                            const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                             llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
                         }
                     }
@@ -3480,14 +3480,14 @@ pub const Object = struct {
                     if (need_unnamed) {
                         return o.context.constStruct(
                             llvm_fields.items.ptr,
-                            @intCast(c_uint, llvm_fields.items.len),
+                            @as(c_uint, @intCast(llvm_fields.items.len)),
                             .False,
                         );
                     } else {
                         const llvm_struct_ty = try o.lowerType(tv.ty);
                         return llvm_struct_ty.constNamedStruct(
                             llvm_fields.items.ptr,
-                            @intCast(c_uint, llvm_fields.items.len),
+                            @as(c_uint, @intCast(llvm_fields.items.len)),
                         );
                     }
                 },
@@ -3498,7 +3498,7 @@ pub const Object = struct {
                     if (struct_obj.layout == .Packed) {
                         assert(struct_obj.haveLayout());
                         const big_bits = struct_obj.backing_int_ty.bitSize(mod);
-                        const int_llvm_ty = o.context.intType(@intCast(c_uint, big_bits));
+                        const int_llvm_ty = o.context.intType(@as(c_uint, @intCast(big_bits)));
                         const fields = struct_obj.fields.values();
                         comptime assert(Type.packed_struct_layout_version == 2);
                         var running_int: *llvm.Value = int_llvm_ty.constNull();
@@ -3510,7 +3510,7 @@ pub const Object = struct {
                                 .ty = field.ty,
                                 .val = try tv.val.fieldValue(mod, i),
                             });
-                            const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+                            const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
                             const small_int_ty = o.context.intType(ty_bit_size);
                             const small_int_val = if (field.ty.isPtrAtRuntime(mod))
                                 non_int_val.constPtrToInt(small_int_ty)
@@ -3547,7 +3547,7 @@ pub const Object = struct {
 
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
-                            const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                            const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                             // TODO make this and all other padding elsewhere in debug
                             // builds be 0xaa not undef.
                             llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
@@ -3569,7 +3569,7 @@ pub const Object = struct {
                         offset = std.mem.alignForward(u64, offset, big_align);
                         const padding_len = offset - prev_offset;
                         if (padding_len > 0) {
-                            const llvm_array_ty = o.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+                            const llvm_array_ty = o.context.intType(8).arrayType(@as(c_uint, @intCast(padding_len)));
                             llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
                         }
                     }
@@ -3577,13 +3577,13 @@ pub const Object = struct {
                     if (need_unnamed) {
                         return o.context.constStruct(
                             llvm_fields.items.ptr,
-                            @intCast(c_uint, llvm_fields.items.len),
+                            @as(c_uint, @intCast(llvm_fields.items.len)),
                             .False,
                         );
                     } else {
                         return llvm_struct_ty.constNamedStruct(
                             llvm_fields.items.ptr,
-                            @intCast(c_uint, llvm_fields.items.len),
+                            @as(c_uint, @intCast(llvm_fields.items.len)),
                         );
                     }
                 },
@@ -3616,7 +3616,7 @@ pub const Object = struct {
                     if (!field_ty.hasRuntimeBits(mod))
                         return llvm_union_ty.constNull();
                     const non_int_val = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val });
-                    const ty_bit_size = @intCast(u16, field_ty.bitSize(mod));
+                    const ty_bit_size = @as(u16, @intCast(field_ty.bitSize(mod)));
                     const small_int_ty = o.context.intType(ty_bit_size);
                     const small_int_val = if (field_ty.isPtrAtRuntime(mod))
                         non_int_val.constPtrToInt(small_int_ty)
@@ -3632,7 +3632,7 @@ pub const Object = struct {
                 var need_unnamed: bool = layout.most_aligned_field != field_index;
                 const payload = p: {
                     if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-                        const padding_len = @intCast(c_uint, layout.payload_size);
+                        const padding_len = @as(c_uint, @intCast(layout.payload_size));
                         break :p o.context.intType(8).arrayType(padding_len).getUndef();
                     }
                     const field = try lowerValue(o, .{ .ty = field_ty, .val = tag_and_val.val });
@@ -3641,7 +3641,7 @@ pub const Object = struct {
                     if (field_size == layout.payload_size) {
                         break :p field;
                     }
-                    const padding_len = @intCast(c_uint, layout.payload_size - field_size);
+                    const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size));
                     const fields: [2]*llvm.Value = .{
                         field, o.context.intType(8).arrayType(padding_len).getUndef(),
                     };
@@ -3706,7 +3706,7 @@ pub const Object = struct {
             }
             if (@sizeOf(usize) == @sizeOf(u64)) {
                 break :v llvm_type.constIntOfArbitraryPrecision(
-                    @intCast(c_uint, bigint.limbs.len),
+                    @as(c_uint, @intCast(bigint.limbs.len)),
                     bigint.limbs.ptr,
                 );
             }
@@ -3799,7 +3799,7 @@ pub const Object = struct {
                 const parent_llvm_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
                 const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
 
-                const field_index = @intCast(u32, field_ptr.index);
+                const field_index = @as(u32, @intCast(field_ptr.index));
                 const llvm_u32 = o.context.intType(32);
                 switch (parent_ty.zigTypeTag(mod)) {
                     .Union => {
@@ -3834,7 +3834,7 @@ pub const Object = struct {
                                 var b: usize = 0;
                                 for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
                                     if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-                                    b += @intCast(usize, field.ty.bitSize(mod));
+                                    b += @as(usize, @intCast(field.ty.bitSize(mod)));
                                 }
                                 break :b b;
                             };
@@ -3992,9 +3992,9 @@ pub const Object = struct {
     ) void {
         const llvm_attr = o.context.createStringAttribute(
             name.ptr,
-            @intCast(c_uint, name.len),
+            @as(c_uint, @intCast(name.len)),
             value.ptr,
-            @intCast(c_uint, value.len),
+            @as(c_uint, @intCast(value.len)),
         );
         val.addAttributeAtIndex(index, llvm_attr);
     }
@@ -4026,14 +4026,14 @@ pub const Object = struct {
             .Enum => ty.intTagType(mod),
             .Float => {
                 if (!is_rmw_xchg) return null;
-                return o.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
+                return o.context.intType(@as(c_uint, @intCast(ty.abiSize(mod) * 8)));
             },
             .Bool => return o.context.intType(8),
             else => return null,
         };
         const bit_count = int_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
-            return o.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8));
+            return o.context.intType(@as(c_uint, @intCast(int_ty.abiSize(mod) * 8)));
         } else {
             return null;
         }
@@ -4051,7 +4051,7 @@ pub const Object = struct {
         if (param_ty.isPtrAtRuntime(mod)) {
             const ptr_info = param_ty.ptrInfo(mod);
             if (math.cast(u5, param_index)) |i| {
-                if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
+                if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
                     o.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
                 }
             }
@@ -4550,7 +4550,7 @@ pub const FuncGen = struct {
     fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.Call, pl_op.payload);
-        const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+        const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
         const o = self.dg.object;
         const mod = o.module;
         const callee_ty = self.typeOf(pl_op.operand);
@@ -4638,7 +4638,7 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
-                const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
+                const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod)));
                 const int_llvm_ty = self.context.intType(abi_size * 8);
 
                 if (isByRef(param_ty, mod)) {
@@ -4683,10 +4683,10 @@ pub const FuncGen = struct {
                     break :p p;
                 };
 
-                const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
+                const llvm_ty = self.context.structType(llvm_types.ptr, @as(c_uint, @intCast(llvm_types.len)), .False);
                 try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
                 for (llvm_types, 0..) |field_ty, i_usize| {
-                    const i = @intCast(c_uint, i_usize);
+                    const i = @as(c_uint, @intCast(i_usize));
                     const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
                     const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
                     load_inst.setAlignment(target.ptrBitWidth() / 8);
@@ -4742,7 +4742,7 @@ pub const FuncGen = struct {
             try o.lowerType(zig_fn_ty),
             llvm_fn,
             llvm_args.items.ptr,
-            @intCast(c_uint, llvm_args.items.len),
+            @as(c_uint, @intCast(llvm_args.items.len)),
             toLlvmCallConv(fn_info.cc, target),
             attr,
             "",
@@ -4788,7 +4788,7 @@ pub const FuncGen = struct {
                     const llvm_arg_i = it.llvm_index - 2;
 
                     if (math.cast(u5, it.zig_index - 1)) |i| {
-                        if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
+                        if (@as(u1, @truncate(fn_info.noalias_bits >> i)) != 0) {
                             o.addArgAttr(call, llvm_arg_i, "noalias");
                         }
                     }
@@ -5213,7 +5213,7 @@ pub const FuncGen = struct {
         phi_node.addIncoming(
             breaks.items(.val).ptr,
             breaks.items(.bb).ptr,
-            @intCast(c_uint, breaks.len),
+            @as(c_uint, @intCast(breaks.len)),
         );
         return phi_node;
     }
@@ -5379,7 +5379,7 @@ pub const FuncGen = struct {
 
         while (case_i < switch_br.data.cases_len) : (case_i += 1) {
             const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-            const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+            const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
             const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
             extra_index = case.end + case.data.items_len + case_body.len;
 
@@ -5479,7 +5479,7 @@ pub const FuncGen = struct {
             }
         }
 
-        const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod));
+        const operand_bits = @as(u16, @intCast(operand_scalar_ty.bitSize(mod)));
         const rt_int_bits = compilerRtIntBits(operand_bits);
         const rt_int_ty = self.context.intType(rt_int_bits);
         var extended = e: {
@@ -5540,7 +5540,7 @@ pub const FuncGen = struct {
             }
         }
 
-        const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod)));
+        const rt_int_bits = compilerRtIntBits(@as(u16, @intCast(dest_scalar_ty.bitSize(mod))));
         const ret_ty = self.context.intType(rt_int_bits);
         const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
             // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -5806,12 +5806,12 @@ pub const FuncGen = struct {
                         const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
                         const elem_llvm_ty = try o.lowerType(field_ty);
                         if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+                            const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
                             const same_size_int = self.context.intType(elem_bits);
                             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                             return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
                         } else if (field_ty.isPtrAtRuntime(mod)) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+                            const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
                             const same_size_int = self.context.intType(elem_bits);
                             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                             return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -5828,12 +5828,12 @@ pub const FuncGen = struct {
                     const containing_int = struct_llvm_val;
                     const elem_llvm_ty = try o.lowerType(field_ty);
                     if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+                        const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
                         const same_size_int = self.context.intType(elem_bits);
                         const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                         return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
                     } else if (field_ty.isPtrAtRuntime(mod)) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
+                        const elem_bits = @as(c_uint, @intCast(field_ty.bitSize(mod)));
                         const same_size_int = self.context.intType(elem_bits);
                         const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                         return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -5924,8 +5924,8 @@ pub const FuncGen = struct {
     fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value {
         const di_scope = self.di_scope orelse return null;
         const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
-        self.prev_dbg_line = @intCast(c_uint, self.base_line + dbg_stmt.line + 1);
-        self.prev_dbg_column = @intCast(c_uint, dbg_stmt.column + 1);
+        self.prev_dbg_line = @as(c_uint, @intCast(self.base_line + dbg_stmt.line + 1));
+        self.prev_dbg_column = @as(c_uint, @intCast(dbg_stmt.column + 1));
         const inlined_at = if (self.dbg_inlined.items.len > 0)
             self.dbg_inlined.items[self.dbg_inlined.items.len - 1].loc
         else
@@ -5949,7 +5949,7 @@ pub const FuncGen = struct {
         const cur_debug_location = self.builder.getCurrentDebugLocation2();
 
         try self.dbg_inlined.append(self.gpa, .{
-            .loc = @ptrCast(*llvm.DILocation, cur_debug_location),
+            .loc = @as(*llvm.DILocation, @ptrCast(cur_debug_location)),
             .scope = self.di_scope.?,
             .base_line = self.base_line,
         });
@@ -6107,13 +6107,13 @@ pub const FuncGen = struct {
         const o = self.dg.object;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Asm, ty_pl.payload);
-        const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
-        const clobbers_len = @truncate(u31, extra.data.flags);
+        const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+        const clobbers_len = @as(u31, @truncate(extra.data.flags));
         var extra_i: usize = extra.end;
 
-        const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+        const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
         extra_i += outputs.len;
-        const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+        const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
         extra_i += inputs.len;
 
         var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
@@ -6390,7 +6390,7 @@ pub const FuncGen = struct {
             1 => llvm_ret_types[0],
             else => self.context.structType(
                 llvm_ret_types.ptr,
-                @intCast(c_uint, return_count),
+                @as(c_uint, @intCast(return_count)),
                 .False,
             ),
         };
@@ -6398,7 +6398,7 @@ pub const FuncGen = struct {
         const llvm_fn_ty = llvm.functionType(
             ret_llvm_ty,
             llvm_param_types.ptr,
-            @intCast(c_uint, param_count),
+            @as(c_uint, @intCast(param_count)),
             .False,
         );
         const asm_fn = llvm.getInlineAsm(
@@ -6416,7 +6416,7 @@ pub const FuncGen = struct {
             llvm_fn_ty,
             asm_fn,
             llvm_param_values.ptr,
-            @intCast(c_uint, param_count),
+            @as(c_uint, @intCast(param_count)),
             .C,
             .Auto,
             "",
@@ -6433,7 +6433,7 @@ pub const FuncGen = struct {
             if (llvm_ret_indirect[i]) continue;
 
             const output_value = if (return_count > 1) b: {
-                break :b self.builder.buildExtractValue(call, @intCast(c_uint, llvm_ret_i), "");
+                break :b self.builder.buildExtractValue(call, @as(c_uint, @intCast(llvm_ret_i)), "");
             } else call;
 
             if (output != .none) {
@@ -7315,7 +7315,7 @@ pub const FuncGen = struct {
         result_vector: *llvm.Value,
         vector_len: usize,
     ) !*llvm.Value {
-        const args_len = @intCast(c_uint, args_vectors.len);
+        const args_len = @as(c_uint, @intCast(args_vectors.len));
         const llvm_i32 = self.context.intType(32);
         assert(args_len <= 3);
 
@@ -7345,7 +7345,7 @@ pub const FuncGen = struct {
             const alias = o.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len);
             break :b if (alias) |a| a.getAliasee() else null;
         } orelse b: {
-            const params_len = @intCast(c_uint, param_types.len);
+            const params_len = @as(c_uint, @intCast(param_types.len));
             const fn_type = llvm.functionType(return_type, param_types.ptr, params_len, .False);
             const f = o.llvm_module.addFunction(fn_name, fn_type);
             break :b f;
@@ -8319,8 +8319,8 @@ pub const FuncGen = struct {
             return null;
         const ordering = toLlvmAtomicOrdering(atomic_load.order);
         const opt_abi_llvm_ty = o.getAtomicAbiType(elem_ty, false);
-        const ptr_alignment = @intCast(u32, ptr_info.flags.alignment.toByteUnitsOptional() orelse
-            ptr_info.child.toType().abiAlignment(mod));
+        const ptr_alignment = @as(u32, @intCast(ptr_info.flags.alignment.toByteUnitsOptional() orelse
+            ptr_info.child.toType().abiAlignment(mod)));
         const ptr_volatile = llvm.Bool.fromBool(ptr_info.flags.is_volatile);
         const elem_llvm_ty = try o.lowerType(elem_ty);
 
@@ -8696,10 +8696,10 @@ pub const FuncGen = struct {
         const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid");
         const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid");
         const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
-        const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
+        const switch_instr = self.builder.buildSwitch(operand, invalid_block, @as(c_uint, @intCast(names.len)));
 
         for (names) |name| {
-            const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
+            const err_int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?));
             const this_tag_int_value = try o.lowerValue(.{
                 .ty = Type.err_int,
                 .val = try mod.intValue(Type.err_int, err_int),
@@ -8779,10 +8779,10 @@ pub const FuncGen = struct {
         const named_block = self.context.appendBasicBlock(fn_val, "Named");
         const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
         const tag_int_value = fn_val.getParam(0);
-        const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len));
+        const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @as(c_uint, @intCast(enum_type.names.len)));
 
         for (enum_type.names, 0..) |_, field_index_usize| {
-            const field_index = @intCast(u32, field_index_usize);
+            const field_index = @as(u32, @intCast(field_index_usize));
             const this_tag_int_value = int: {
                 break :int try o.lowerValue(.{
                     .ty = enum_ty,
@@ -8855,16 +8855,16 @@ pub const FuncGen = struct {
 
         const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
         const tag_int_value = fn_val.getParam(0);
-        const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len));
+        const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @as(c_uint, @intCast(enum_type.names.len)));
 
         const array_ptr_indices = [_]*llvm.Value{
             usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
         };
 
         for (enum_type.names, 0..) |name_ip, field_index_usize| {
-            const field_index = @intCast(u32, field_index_usize);
+            const field_index = @as(u32, @intCast(field_index_usize));
             const name = mod.intern_pool.stringToSlice(name_ip);
-            const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
+            const str_init = self.context.constString(name.ptr, @as(c_uint, @intCast(name.len)), .False);
             const str_init_llvm_ty = str_init.typeOf();
             const str_global = o.llvm_module.addGlobal(str_init_llvm_ty, "");
             str_global.setInitializer(str_init);
@@ -8986,7 +8986,7 @@ pub const FuncGen = struct {
                 val.* = llvm_i32.getUndef();
             } else {
                 const int = elem.toSignedInt(mod);
-                const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
+                const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len));
                 val.* = llvm_i32.constInt(unsigned, .False);
             }
         }
@@ -9150,8 +9150,8 @@ pub const FuncGen = struct {
         const mod = o.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const result_ty = self.typeOfIndex(inst);
-        const len = @intCast(usize, result_ty.arrayLen(mod));
-        const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+        const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
+        const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
         const llvm_result_ty = try o.lowerType(result_ty);
 
         switch (result_ty.zigTypeTag(mod)) {
@@ -9171,7 +9171,7 @@ pub const FuncGen = struct {
                     const struct_obj = mod.typeToStruct(result_ty).?;
                     assert(struct_obj.haveLayout());
                     const big_bits = struct_obj.backing_int_ty.bitSize(mod);
-                    const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
+                    const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits)));
                     const fields = struct_obj.fields.values();
                     comptime assert(Type.packed_struct_layout_version == 2);
                     var running_int: *llvm.Value = int_llvm_ty.constNull();
@@ -9181,7 +9181,7 @@ pub const FuncGen = struct {
                         if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                         const non_int_val = try self.resolveInst(elem);
-                        const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+                        const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
                         const small_int_ty = self.context.intType(ty_bit_size);
                         const small_int_val = if (field.ty.isPtrAtRuntime(mod))
                             self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
@@ -9251,7 +9251,7 @@ pub const FuncGen = struct {
                 for (elements, 0..) |elem, i| {
                     const indices: [2]*llvm.Value = .{
                         llvm_usize.constNull(),
-                        llvm_usize.constInt(@intCast(c_uint, i), .False),
+                        llvm_usize.constInt(@as(c_uint, @intCast(i)), .False),
                     };
                     const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
                     const llvm_elem = try self.resolveInst(elem);
@@ -9260,7 +9260,7 @@ pub const FuncGen = struct {
                 if (array_info.sentinel) |sent_val| {
                     const indices: [2]*llvm.Value = .{
                         llvm_usize.constNull(),
-                        llvm_usize.constInt(@intCast(c_uint, array_info.len), .False),
+                        llvm_usize.constInt(@as(c_uint, @intCast(array_info.len)), .False),
                     };
                     const elem_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
                     const llvm_elem = try self.resolveValue(.{
@@ -9289,10 +9289,10 @@ pub const FuncGen = struct {
 
         if (union_obj.layout == .Packed) {
             const big_bits = union_ty.bitSize(mod);
-            const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
+            const int_llvm_ty = self.context.intType(@as(c_uint, @intCast(big_bits)));
             const field = union_obj.fields.values()[extra.field_index];
             const non_int_val = try self.resolveInst(extra.init);
-            const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+            const ty_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
             const small_int_ty = self.context.intType(ty_bit_size);
             const small_int_val = if (field.ty.isPtrAtRuntime(mod))
                 self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
@@ -9332,13 +9332,13 @@ pub const FuncGen = struct {
         const llvm_union_ty = t: {
             const payload = p: {
                 if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) {
-                    const padding_len = @intCast(c_uint, layout.payload_size);
+                    const padding_len = @as(c_uint, @intCast(layout.payload_size));
                     break :p self.context.intType(8).arrayType(padding_len);
                 }
                 if (field_size == layout.payload_size) {
                     break :p field_llvm_ty;
                 }
-                const padding_len = @intCast(c_uint, layout.payload_size - field_size);
+                const padding_len = @as(c_uint, @intCast(layout.payload_size - field_size));
                 const fields: [2]*llvm.Type = .{
                     field_llvm_ty, self.context.intType(8).arrayType(padding_len),
                 };
@@ -9766,8 +9766,8 @@ pub const FuncGen = struct {
         const elem_ty = info.child.toType();
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
 
-        const ptr_alignment = @intCast(u32, info.flags.alignment.toByteUnitsOptional() orelse
-            elem_ty.abiAlignment(mod));
+        const ptr_alignment = @as(u32, @intCast(info.flags.alignment.toByteUnitsOptional() orelse
+            elem_ty.abiAlignment(mod)));
         const ptr_volatile = llvm.Bool.fromBool(info.flags.is_volatile);
 
         assert(info.flags.vector_index != .runtime);
@@ -9799,7 +9799,7 @@ pub const FuncGen = struct {
         containing_int.setAlignment(ptr_alignment);
         containing_int.setVolatile(ptr_volatile);
 
-        const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
+        const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod)));
         const shift_amt = containing_int.typeOf().constInt(info.packed_offset.bit_offset, .False);
         const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
         const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -9872,7 +9872,7 @@ pub const FuncGen = struct {
             assert(ordering == .NotAtomic);
             containing_int.setAlignment(ptr_alignment);
             containing_int.setVolatile(ptr_volatile);
-            const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
+            const elem_bits = @as(c_uint, @intCast(ptr_ty.childType(mod).bitSize(mod)));
             const containing_int_ty = containing_int.typeOf();
             const shift_amt = containing_int_ty.constInt(info.packed_offset.bit_offset, .False);
             // Convert to equally-sized integer type in order to perform the bit
@@ -9945,7 +9945,7 @@ pub const FuncGen = struct {
         if (!target_util.hasValgrindSupport(target)) return default_value;
 
         const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
-        const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod));
+        const usize_alignment = @as(c_uint, @intCast(Type.usize.abiSize(mod)));
 
         const array_llvm_ty = usize_llvm_ty.arrayType(6);
         const array_ptr = fg.valgrind_client_request_array orelse a: {
@@ -9957,7 +9957,7 @@ pub const FuncGen = struct {
         const zero = usize_llvm_ty.constInt(0, .False);
         for (array_elements, 0..) |elem, i| {
             const indexes = [_]*llvm.Value{
-                zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
+                zero, usize_llvm_ty.constInt(@as(c_uint, @intCast(i)), .False),
             };
             const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, "");
             const store_inst = fg.builder.buildStore(elem, elem_ptr);
@@ -10530,7 +10530,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
                     assert(classes[0] == .direct and classes[1] == .none);
                     const scalar_type = wasm_c_abi.scalarType(return_type, mod);
                     const abi_size = scalar_type.abiSize(mod);
-                    return o.context.intType(@intCast(c_uint, abi_size * 8));
+                    return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
                 },
                 .aarch64, .aarch64_be => {
                     switch (aarch64_c_abi.classifyType(return_type, mod)) {
@@ -10539,7 +10539,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
                         .byval => return o.lowerType(return_type),
                         .integer => {
                             const bit_size = return_type.bitSize(mod);
-                            return o.context.intType(@intCast(c_uint, bit_size));
+                            return o.context.intType(@as(c_uint, @intCast(bit_size)));
                         },
                         .double_integer => return o.context.intType(64).arrayType(2),
                     }
@@ -10560,7 +10560,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
                         .memory => return o.context.voidType(),
                         .integer => {
                             const bit_size = return_type.bitSize(mod);
-                            return o.context.intType(@intCast(c_uint, bit_size));
+                            return o.context.intType(@as(c_uint, @intCast(bit_size)));
                         },
                         .double_integer => {
                             var llvm_types_buffer: [2]*llvm.Type = .{
@@ -10598,7 +10598,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type {
                 return o.lowerType(return_type);
             } else {
                 const abi_size = return_type.abiSize(mod);
-                return o.context.intType(@intCast(c_uint, abi_size * 8));
+                return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
             }
         },
         .win_i128 => return o.context.intType(64).vectorType(2),
@@ -10656,7 +10656,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) !*llvm.Type
     }
     if (classes[0] == .integer and classes[1] == .none) {
         const abi_size = return_type.abiSize(mod);
-        return o.context.intType(@intCast(c_uint, abi_size * 8));
+        return o.context.intType(@as(c_uint, @intCast(abi_size * 8)));
     }
     return o.context.structType(&llvm_types_buffer, llvm_types_index, .False);
 }
@@ -11145,28 +11145,28 @@ const AnnotatedDITypePtr = enum(usize) {
 
     fn initFwd(di_type: *llvm.DIType) AnnotatedDITypePtr {
         const addr = @intFromPtr(di_type);
-        assert(@truncate(u1, addr) == 0);
-        return @enumFromInt(AnnotatedDITypePtr, addr | 1);
+        assert(@as(u1, @truncate(addr)) == 0);
+        return @as(AnnotatedDITypePtr, @enumFromInt(addr | 1));
     }
 
     fn initFull(di_type: *llvm.DIType) AnnotatedDITypePtr {
         const addr = @intFromPtr(di_type);
-        return @enumFromInt(AnnotatedDITypePtr, addr);
+        return @as(AnnotatedDITypePtr, @enumFromInt(addr));
     }
 
     fn init(di_type: *llvm.DIType, resolve: Object.DebugResolveStatus) AnnotatedDITypePtr {
         const addr = @intFromPtr(di_type);
         const bit = @intFromBool(resolve == .fwd);
-        return @enumFromInt(AnnotatedDITypePtr, addr | bit);
+        return @as(AnnotatedDITypePtr, @enumFromInt(addr | bit));
     }
 
     fn toDIType(self: AnnotatedDITypePtr) *llvm.DIType {
         const fixed_addr = @intFromEnum(self) & ~@as(usize, 1);
-        return @ptrFromInt(*llvm.DIType, fixed_addr);
+        return @as(*llvm.DIType, @ptrFromInt(fixed_addr));
     }
 
     fn isFwdOnly(self: AnnotatedDITypePtr) bool {
-        return @truncate(u1, @intFromEnum(self)) != 0;
+        return @as(u1, @truncate(@intFromEnum(self))) != 0;
     }
 };
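
Every hunk in this file follows the mechanical rewrite applied tree-wide: the destination type moves out of the cast builtin and into a wrapping `@as`, because the new single-operand builtins (`@intCast`, `@bitCast`, `@truncate`, `@enumFromInt`, `@ptrFromInt`, and friends) infer their result type from the result location. A minimal sketch of the pattern, with invented values:

    const wide: u64 = 300;

    // Old: @intCast(u32, wide). New: the operand is the only argument,
    // and @as supplies the result type explicitly.
    const a = @as(u32, @intCast(wide));

    // Where a typed result location already exists, @as can be dropped:
    const b: u32 = @intCast(wide);

An automated rewrite cannot know which call sites already provide a typed result location, which is presumably why the `@as` form appears uniformly throughout the diff.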
 
src/codegen/spirv.zig
@@ -466,7 +466,7 @@ pub const DeclGen = struct {
                 unused.* = undef;
             }
 
-            const word = @bitCast(Word, self.partial_word.buffer);
+            const word = @as(Word, @bitCast(self.partial_word.buffer));
             const result_id = try self.dg.spv.constInt(self.u32_ty_ref, word);
             try self.members.append(self.u32_ty_ref);
             try self.initializers.append(result_id);
@@ -482,7 +482,7 @@ pub const DeclGen = struct {
         }
 
         fn addUndef(self: *@This(), amt: u64) !void {
-            for (0..@intCast(usize, amt)) |_| {
+            for (0..@as(usize, @intCast(amt))) |_| {
                 try self.addByte(undef);
             }
         }
@@ -539,13 +539,13 @@ pub const DeclGen = struct {
             const mod = self.dg.module;
             const int_info = ty.intInfo(mod);
             const int_bits = switch (int_info.signedness) {
-                .signed => @bitCast(u64, val.toSignedInt(mod)),
+                .signed => @as(u64, @bitCast(val.toSignedInt(mod))),
                 .unsigned => val.toUnsignedInt(mod),
             };
 
             // TODO: Swap endianness if the compiler is big endian.
             const len = ty.abiSize(mod);
-            try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]);
+            try self.addBytes(std.mem.asBytes(&int_bits)[0..@as(usize, @intCast(len))]);
         }
 
         fn addFloat(self: *@This(), ty: Type, val: Value) !void {
@@ -557,15 +557,15 @@ pub const DeclGen = struct {
             switch (ty.floatBits(target)) {
                 16 => {
                     const float_bits = val.toFloat(f16, mod);
-                    try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+                    try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]);
                 },
                 32 => {
                     const float_bits = val.toFloat(f32, mod);
-                    try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+                    try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]);
                 },
                 64 => {
                     const float_bits = val.toFloat(f64, mod);
-                    try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
+                    try self.addBytes(std.mem.asBytes(&float_bits)[0..@as(usize, @intCast(len))]);
                 },
                 else => unreachable,
             }
@@ -664,7 +664,7 @@ pub const DeclGen = struct {
                 .int => try self.addInt(ty, val),
                 .err => |err| {
                     const int = try mod.getErrorValue(err.name);
-                    try self.addConstInt(u16, @intCast(u16, int));
+                    try self.addConstInt(u16, @as(u16, @intCast(int)));
                 },
                 .error_union => |error_union| {
                     const payload_ty = ty.errorUnionPayload(mod);
@@ -755,10 +755,10 @@ pub const DeclGen = struct {
                         switch (aggregate.storage) {
                             .bytes => |bytes| try self.addBytes(bytes),
                             .elems, .repeated_elem => {
-                                for (0..@intCast(usize, array_type.len)) |i| {
+                                for (0..@as(usize, @intCast(array_type.len))) |i| {
                                     try self.lower(elem_ty, switch (aggregate.storage) {
                                         .bytes => unreachable,
-                                        .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(),
+                                        .elems => |elem_vals| elem_vals[@as(usize, @intCast(i))].toValue(),
                                         .repeated_elem => |elem_val| elem_val.toValue(),
                                     });
                                 }
@@ -1132,7 +1132,7 @@ pub const DeclGen = struct {
 
         const payload_padding_len = layout.payload_size - active_field_size;
         if (payload_padding_len != 0) {
-            const payload_padding_ty_ref = try self.spv.arrayType(@intCast(u32, payload_padding_len), u8_ty_ref);
+            const payload_padding_ty_ref = try self.spv.arrayType(@as(u32, @intCast(payload_padding_len)), u8_ty_ref);
             member_types.appendAssumeCapacity(payload_padding_ty_ref);
             member_names.appendAssumeCapacity(try self.spv.resolveString("payload_padding"));
         }
@@ -1259,7 +1259,7 @@ pub const DeclGen = struct {
 
                 return try self.spv.resolve(.{ .vector_type = .{
                     .component_type = try self.resolveType(ty.childType(mod), repr),
-                    .component_count = @intCast(u32, ty.vectorLen(mod)),
+                    .component_count = @as(u32, @intCast(ty.vectorLen(mod))),
                 } });
             },
             .Struct => {
@@ -1588,7 +1588,7 @@ pub const DeclGen = struct {
                 init_val,
                 actual_storage_class,
                 final_storage_class == .Generic,
-                @intCast(u32, decl.alignment.toByteUnits(0)),
+                @as(u32, @intCast(decl.alignment.toByteUnits(0))),
             );
         }
     }
@@ -1856,7 +1856,7 @@ pub const DeclGen = struct {
     }
 
     fn maskStrangeInt(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, bits: u16) !IdRef {
-        const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @intCast(u6, bits)) - 1;
+        const mask_value = if (bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(bits))) - 1;
         const result_id = self.spv.allocId();
         const mask_id = try self.spv.constInt(ty_ref, mask_value);
         try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
@@ -2063,7 +2063,7 @@ pub const DeclGen = struct {
                 self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
             } else {
                 const int = elem.toSignedInt(mod);
-                const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
+                const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int + a_len));
                 self.func.body.writeOperand(spec.LiteralInteger, unsigned);
             }
         }
@@ -2689,7 +2689,7 @@ pub const DeclGen = struct {
         // are not allowed to be created from a phi node, and throw an error for those.
         const result_type_id = try self.resolveTypeId(ty);
 
-        try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
+        try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @as(u16, @intCast(incoming_blocks.items.len * 2))); // result type + result + variable/parent...
         self.func.body.writeOperand(spec.IdResultType, result_type_id);
         self.func.body.writeOperand(spec.IdRef, result_id);
 
@@ -3105,7 +3105,7 @@ pub const DeclGen = struct {
             while (case_i < num_cases) : (case_i += 1) {
                 // SPIR-V needs a literal here, whose width depends on the case condition.
                 const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-                const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+                const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
                 const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
                 extra_index = case.end + case.data.items_len + case_body.len;
 
@@ -3116,7 +3116,7 @@ pub const DeclGen = struct {
                         return self.todo("switch on runtime value???", .{});
                     };
                     const int_val = switch (cond_ty.zigTypeTag(mod)) {
-                        .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
+                        .Int => if (cond_ty.isSignedInt(mod)) @as(u64, @bitCast(value.toSignedInt(mod))) else value.toUnsignedInt(mod),
                         .Enum => blk: {
                             // TODO: figure out if cond_ty is correct (something with enum literals)
                             break :blk (try value.intFromEnum(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
@@ -3124,7 +3124,7 @@ pub const DeclGen = struct {
                         else => unreachable,
                     };
                     const int_lit: spec.LiteralContextDependentNumber = switch (cond_words) {
-                        1 => .{ .uint32 = @intCast(u32, int_val) },
+                        1 => .{ .uint32 = @as(u32, @intCast(int_val)) },
                         2 => .{ .uint64 = int_val },
                         else => unreachable,
                     };
@@ -3139,7 +3139,7 @@ pub const DeclGen = struct {
         var case_i: u32 = 0;
         while (case_i < num_cases) : (case_i += 1) {
             const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-            const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+            const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
             const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
             extra_index = case.end + case.data.items_len + case_body.len;
 
@@ -3167,15 +3167,15 @@ pub const DeclGen = struct {
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Asm, ty_pl.payload);
 
-        const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
-        const clobbers_len = @truncate(u31, extra.data.flags);
+        const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+        const clobbers_len = @as(u31, @truncate(extra.data.flags));
 
         if (!is_volatile and self.liveness.isUnused(inst)) return null;
 
         var extra_i: usize = extra.end;
-        const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
+        const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
         extra_i += outputs.len;
-        const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
+        const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
         extra_i += inputs.len;
 
         if (outputs.len > 1) {
@@ -3297,7 +3297,7 @@ pub const DeclGen = struct {
         const mod = self.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.Call, pl_op.payload);
-        const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+        const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
         const callee_ty = self.typeOf(pl_op.operand);
         const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
             .Fn => callee_ty,
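
Several hunks in this backend reinterpret `self.air.extra`, a slice of `u32`, as a slice of `Air.Inst.Ref` via `@as([]const Air.Inst.Ref, @ptrCast(...))`. A self-contained sketch of the same slice reinterpretation, using a stand-in `Index` enum rather than the real `Air.Inst.Ref`:

    const Index = enum(u32) { none = 0, _ };

    fn asRefs(words: []const u32) []const Index {
        // The element types have the same size, so @ptrCast may change the
        // slice's child type; the destination comes from the return type.
        return @ptrCast(words);
    }
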
src/link/Coff/ImportTable.zig
@@ -38,7 +38,7 @@ pub fn deinit(itab: *ImportTable, allocator: Allocator) void {
 
 /// Size of the import table does not include the sentinel.
 pub fn size(itab: ImportTable) u32 {
-    return @intCast(u32, itab.entries.items.len) * @sizeOf(u64);
+    return @as(u32, @intCast(itab.entries.items.len)) * @sizeOf(u64);
 }
 
 pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc) !ImportIndex {
@@ -49,7 +49,7 @@ pub fn addImport(itab: *ImportTable, allocator: Allocator, target: SymbolWithLoc
             break :blk index;
         } else {
             log.debug("  (allocating import entry at index {d})", .{itab.entries.items.len});
-            const index = @intCast(u32, itab.entries.items.len);
+            const index = @as(u32, @intCast(itab.entries.items.len));
             _ = itab.entries.addOneAssumeCapacity();
             break :blk index;
         }
@@ -73,7 +73,7 @@ fn getBaseAddress(ctx: Context) u32 {
     var addr = header.virtual_address;
     for (ctx.coff_file.import_tables.values(), 0..) |other_itab, i| {
         if (ctx.index == i) break;
-        addr += @intCast(u32, other_itab.entries.items.len * @sizeOf(u64)) + 8;
+        addr += @as(u32, @intCast(other_itab.entries.items.len * @sizeOf(u64))) + 8;
     }
     return addr;
 }
src/link/Coff/Relocation.zig
@@ -126,23 +126,23 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
     var buffer = ctx.code[self.offset..];
     switch (self.type) {
         .got_page, .import_page, .page => {
-            const source_page = @intCast(i32, ctx.source_vaddr >> 12);
-            const target_page = @intCast(i32, ctx.target_vaddr >> 12);
-            const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
+            const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
+            const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
+            const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
             var inst = aarch64.Instruction{
                 .pc_relative_address = mem.bytesToValue(meta.TagPayload(
                     aarch64.Instruction,
                     aarch64.Instruction.pc_relative_address,
                 ), buffer[0..4]),
             };
-            inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
-            inst.pc_relative_address.immlo = @truncate(u2, pages);
+            inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+            inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
             mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
         },
         .got_pageoff, .import_pageoff, .pageoff => {
             assert(!self.pcrel);
 
-            const narrowed = @truncate(u12, @intCast(u64, ctx.target_vaddr));
+            const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr))));
             if (isArithmeticOp(buffer[0..4])) {
                 var inst = aarch64.Instruction{
                     .add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
@@ -182,7 +182,7 @@ fn resolveAarch64(self: Relocation, ctx: Context) void {
                 2 => mem.writeIntLittle(
                     u32,
                     buffer[0..4],
-                    @truncate(u32, ctx.target_vaddr + ctx.image_base),
+                    @as(u32, @truncate(ctx.target_vaddr + ctx.image_base)),
                 ),
                 3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
                 else => unreachable,
@@ -206,17 +206,17 @@ fn resolveX86(self: Relocation, ctx: Context) void {
 
         .got, .import => {
             assert(self.pcrel);
-            const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+            const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
             mem.writeIntLittle(i32, buffer[0..4], disp);
         },
         .direct => {
             if (self.pcrel) {
-                const disp = @intCast(i32, ctx.target_vaddr) - @intCast(i32, ctx.source_vaddr) - 4;
+                const disp = @as(i32, @intCast(ctx.target_vaddr)) - @as(i32, @intCast(ctx.source_vaddr)) - 4;
                 mem.writeIntLittle(i32, buffer[0..4], disp);
             } else switch (ctx.ptr_width) {
-                .p32 => mem.writeIntLittle(u32, buffer[0..4], @intCast(u32, ctx.target_vaddr + ctx.image_base)),
+                .p32 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @intCast(ctx.target_vaddr + ctx.image_base))),
                 .p64 => switch (self.length) {
-                    2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, ctx.target_vaddr + ctx.image_base)),
+                    2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(ctx.target_vaddr + ctx.image_base))),
                     3 => mem.writeIntLittle(u64, buffer[0..8], ctx.target_vaddr + ctx.image_base),
                     else => unreachable,
                 },
@@ -226,6 +226,6 @@ fn resolveX86(self: Relocation, ctx: Context) void {
 }
 
 inline fn isArithmeticOp(inst: *const [4]u8) bool {
-    const group_decode = @truncate(u5, inst[3]);
+    const group_decode = @as(u5, @truncate(inst[3]));
     return ((group_decode >> 2) == 4);
 }
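
The `@truncate` rewrites above preserve only the low bits: the result keeps as many low bits as the destination type holds. A small illustration with invented values:

    const inst_byte: u8 = 0b1110_1010;

    // Old: @truncate(u5, inst_byte). New: operand only; @as names the type.
    const group = @as(u5, @truncate(inst_byte)); // keeps the low five bits: 0b01010
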
src/link/MachO/dyld_info/bind.zig
@@ -39,7 +39,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
         }
 
         pub fn size(self: Self) u64 {
-            return @intCast(u64, self.buffer.items.len);
+            return @as(u64, @intCast(self.buffer.items.len));
         }
 
         pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void {
@@ -95,7 +95,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
                     const sym = ctx.getSymbol(current.target);
                     const name = ctx.getSymbolName(current.target);
                     const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0;
-                    const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+                    const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
 
                     try setSymbol(name, flags, writer);
                     try setTypePointer(writer);
@@ -112,7 +112,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
                 switch (state) {
                     .start => {
                         if (current.offset < offset) {
-                            try addAddr(@bitCast(u64, @intCast(i64, current.offset) - @intCast(i64, offset)), writer);
+                            try addAddr(@as(u64, @bitCast(@as(i64, @intCast(current.offset)) - @as(i64, @intCast(offset)))), writer);
                             offset = offset - (offset - current.offset);
                         } else if (current.offset > offset) {
                             const delta = current.offset - offset;
@@ -130,7 +130,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
                         } else if (current.offset > offset) {
                             const delta = current.offset - offset;
                             state = .bind_times_skip;
-                            skip = @intCast(u64, delta);
+                            skip = @as(u64, @intCast(delta));
                             offset += skip;
                         } else unreachable;
                         i -= 1;
@@ -194,7 +194,7 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
         }
 
         pub fn size(self: Self) u64 {
-            return @intCast(u64, self.buffer.items.len);
+            return @as(u64, @intCast(self.buffer.items.len));
         }
 
         pub fn finalize(self: *Self, gpa: Allocator, ctx: Ctx) !void {
@@ -208,12 +208,12 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
             var addend: i64 = 0;
 
             for (self.entries.items) |entry| {
-                self.offsets.appendAssumeCapacity(@intCast(u32, cwriter.bytes_written));
+                self.offsets.appendAssumeCapacity(@as(u32, @intCast(cwriter.bytes_written)));
 
                 const sym = ctx.getSymbol(entry.target);
                 const name = ctx.getSymbolName(entry.target);
                 const flags: u8 = if (sym.weakRef()) macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT else 0;
-                const ordinal = @divTrunc(@bitCast(i16, sym.n_desc), macho.N_SYMBOL_RESOLVER);
+                const ordinal = @divTrunc(@as(i16, @bitCast(sym.n_desc)), macho.N_SYMBOL_RESOLVER);
 
                 try setSegmentOffset(entry.segment_id, entry.offset, writer);
                 try setSymbol(name, flags, writer);
@@ -238,20 +238,20 @@ pub fn LazyBind(comptime Ctx: type, comptime Target: type) type {
 
 fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
     log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
-    try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id));
+    try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
     try std.leb.writeULEB128(writer, offset);
 }
 
 fn setSymbol(name: []const u8, flags: u8, writer: anytype) !void {
     log.debug(">>> set symbol: {s} with flags: {x}", .{ name, flags });
-    try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @truncate(u4, flags));
+    try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | @as(u4, @truncate(flags)));
     try writer.writeAll(name);
     try writer.writeByte(0);
 }
 
 fn setTypePointer(writer: anytype) !void {
     log.debug(">>> set type: {d}", .{macho.BIND_TYPE_POINTER});
-    try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER));
+    try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.BIND_TYPE_POINTER)));
 }
 
 fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
@@ -264,13 +264,13 @@ fn setDylibOrdinal(ordinal: i16, writer: anytype) !void {
             else => unreachable, // Invalid dylib special binding
         }
         log.debug(">>> set dylib special: {d}", .{ordinal});
-        const cast = @bitCast(u16, ordinal);
-        try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, cast));
+        const cast = @as(u16, @bitCast(ordinal));
+        try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @as(u4, @truncate(cast)));
     } else {
-        const cast = @bitCast(u16, ordinal);
+        const cast = @as(u16, @bitCast(ordinal));
         log.debug(">>> set dylib ordinal: {d}", .{ordinal});
         if (cast <= 0xf) {
-            try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, cast));
+            try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @as(u4, @truncate(cast)));
         } else {
             try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
             try std.leb.writeULEB128(writer, cast);
@@ -295,7 +295,7 @@ fn doBindAddAddr(addr: u64, writer: anytype) !void {
         const imm = @divExact(addr, @sizeOf(u64));
         if (imm <= 0xf) {
             try writer.writeByte(
-                macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @truncate(u4, imm),
+                macho.BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)),
             );
             return;
         }
@@ -341,7 +341,7 @@ const TestContext = struct {
 
     fn addSymbol(ctx: *TestContext, gpa: Allocator, name: []const u8, ordinal: i16, flags: u16) !void {
         const n_strx = try ctx.addString(gpa, name);
-        var n_desc = @bitCast(u16, ordinal * macho.N_SYMBOL_RESOLVER);
+        var n_desc = @as(u16, @bitCast(ordinal * macho.N_SYMBOL_RESOLVER));
         n_desc |= flags;
         try ctx.symbols.append(gpa, .{
             .n_value = 0,
@@ -353,7 +353,7 @@ const TestContext = struct {
     }
 
     fn addString(ctx: *TestContext, gpa: Allocator, name: []const u8) !u32 {
-        const n_strx = @intCast(u32, ctx.strtab.items.len);
+        const n_strx = @as(u32, @intCast(ctx.strtab.items.len));
         try ctx.strtab.appendSlice(gpa, name);
         try ctx.strtab.append(gpa, 0);
         return n_strx;
@@ -366,7 +366,7 @@ const TestContext = struct {
     fn getSymbolName(ctx: TestContext, target: Target) []const u8 {
         const sym = ctx.getSymbol(target);
         assert(sym.n_strx < ctx.strtab.items.len);
-        return std.mem.sliceTo(@ptrCast([*:0]const u8, ctx.strtab.items.ptr + sym.n_strx), 0);
+        return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + sym.n_strx)), 0);
     }
 };
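
The bind opcodes above pack a signed dylib ordinal into an unsigned immediate by reinterpreting its bits; `@bitCast` keeps the bit pattern and changes only the type. A hedged example of the round-trip (the -2 is just an arbitrary negative ordinal):

    const ordinal: i16 = -2;
    const bits = @as(u16, @bitCast(ordinal)); // 0xFFFE: same bits, no range check
    const imm = @as(u4, @truncate(bits)); // low nibble, as the *_IMM opcodes use

Unlike `@intCast`, `@bitCast` never checks the value's range, which is exactly what the two's-complement encoding here relies on.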
 
src/link/MachO/dyld_info/Rebase.zig
@@ -31,7 +31,7 @@ pub fn deinit(rebase: *Rebase, gpa: Allocator) void {
 }
 
 pub fn size(rebase: Rebase) u64 {
-    return @intCast(u64, rebase.buffer.items.len);
+    return @as(u64, @intCast(rebase.buffer.items.len));
 }
 
 pub fn finalize(rebase: *Rebase, gpa: Allocator) !void {
@@ -145,12 +145,12 @@ fn finalizeSegment(entries: []const Entry, writer: anytype) !void {
 
 fn setTypePointer(writer: anytype) !void {
     log.debug(">>> set type: {d}", .{macho.REBASE_TYPE_POINTER});
-    try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER));
+    try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @as(u4, @truncate(macho.REBASE_TYPE_POINTER)));
 }
 
 fn setSegmentOffset(segment_id: u8, offset: u64, writer: anytype) !void {
     log.debug(">>> set segment: {d} and offset: {x}", .{ segment_id, offset });
-    try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, segment_id));
+    try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @as(u4, @truncate(segment_id)));
     try std.leb.writeULEB128(writer, offset);
 }
 
@@ -163,7 +163,7 @@ fn rebaseAddAddr(addr: u64, writer: anytype) !void {
 fn rebaseTimes(count: usize, writer: anytype) !void {
     log.debug(">>> rebase with count: {d}", .{count});
     if (count <= 0xf) {
-        try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, count));
+        try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @as(u4, @truncate(count)));
     } else {
         try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
         try std.leb.writeULEB128(writer, count);
@@ -182,7 +182,7 @@ fn addAddr(addr: u64, writer: anytype) !void {
     if (std.mem.isAlignedGeneric(u64, addr, @sizeOf(u64))) {
         const imm = @divExact(addr, @sizeOf(u64));
         if (imm <= 0xf) {
-            try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @truncate(u4, imm));
+            try writer.writeByte(macho.REBASE_OPCODE_ADD_ADDR_IMM_SCALED | @as(u4, @truncate(imm)));
             return;
         }
     }
src/link/MachO/Archive.zig
@@ -169,7 +169,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !
         };
         const object_offset = try symtab_reader.readIntLittle(u32);
 
-        const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + n_strx), 0);
+        const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + n_strx)), 0);
         const owned_name = try allocator.dupe(u8, sym_name);
         const res = try self.toc.getOrPut(allocator, owned_name);
         defer if (res.found_existing) allocator.free(owned_name);
src/link/MachO/CodeSignature.zig
@@ -72,7 +72,7 @@ const CodeDirectory = struct {
                 .hashSize = hash_size,
                 .hashType = macho.CS_HASHTYPE_SHA256,
                 .platform = 0,
-                .pageSize = @truncate(u8, std.math.log2(page_size)),
+                .pageSize = @as(u8, @truncate(std.math.log2(page_size))),
                 .spare2 = 0,
                 .scatterOffset = 0,
                 .teamOffset = 0,
@@ -110,7 +110,7 @@ const CodeDirectory = struct {
     fn size(self: CodeDirectory) u32 {
         const code_slots = self.inner.nCodeSlots * hash_size;
         const special_slots = self.inner.nSpecialSlots * hash_size;
-        return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1 + special_slots + code_slots);
+        return @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.ident.len + 1 + special_slots + code_slots));
     }
 
     fn write(self: CodeDirectory, writer: anytype) !void {
@@ -139,9 +139,9 @@ const CodeDirectory = struct {
         try writer.writeAll(self.ident);
         try writer.writeByte(0);
 
-        var i: isize = @intCast(isize, self.inner.nSpecialSlots);
+        var i: isize = @as(isize, @intCast(self.inner.nSpecialSlots));
         while (i > 0) : (i -= 1) {
-            try writer.writeAll(&self.special_slots[@intCast(usize, i - 1)]);
+            try writer.writeAll(&self.special_slots[@as(usize, @intCast(i - 1))]);
         }
 
         for (self.code_slots.items) |slot| {
@@ -186,7 +186,7 @@ const Entitlements = struct {
     }
 
     fn size(self: Entitlements) u32 {
-        return @intCast(u32, self.inner.len) + 2 * @sizeOf(u32);
+        return @as(u32, @intCast(self.inner.len)) + 2 * @sizeOf(u32);
     }
 
     fn write(self: Entitlements, writer: anytype) !void {
@@ -281,7 +281,7 @@ pub fn writeAdhocSignature(
     self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0;
     self.code_directory.inner.codeLimit = opts.file_size;
 
-    const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size);
+    const total_pages = @as(u32, @intCast(mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size));
 
     try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
     self.code_directory.code_slots.items.len = total_pages;
@@ -331,7 +331,7 @@ pub fn writeAdhocSignature(
     }
 
     self.code_directory.inner.hashOffset =
-        @sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size);
+        @sizeOf(macho.CodeDirectory) + @as(u32, @intCast(self.code_directory.ident.len + 1 + self.code_directory.inner.nSpecialSlots * hash_size));
     self.code_directory.inner.length = self.code_directory.size();
     header.length += self.code_directory.size();
 
@@ -339,7 +339,7 @@ pub fn writeAdhocSignature(
     try writer.writeIntBig(u32, header.length);
     try writer.writeIntBig(u32, header.count);
 
-    var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @intCast(u32, blobs.items.len);
+    var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @as(u32, @intCast(blobs.items.len));
     for (blobs.items) |blob| {
         try writer.writeIntBig(u32, blob.slotType());
         try writer.writeIntBig(u32, offset);
@@ -383,7 +383,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
         ssize += @sizeOf(macho.BlobIndex) + sig.size();
     }
     ssize += n_special_slots * hash_size;
-    return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64)));
+    return @as(u32, @intCast(mem.alignForward(u64, ssize, @sizeOf(u64))));
 }
 
 pub fn clear(self: *CodeSignature, allocator: Allocator) void {
src/link/MachO/dead_strip.zig
@@ -27,10 +27,10 @@ pub fn gcAtoms(zld: *Zld, resolver: *const SymbolResolver) !void {
     defer arena.deinit();
 
     var roots = AtomTable.init(arena.allocator());
-    try roots.ensureUnusedCapacity(@intCast(u32, zld.globals.items.len));
+    try roots.ensureUnusedCapacity(@as(u32, @intCast(zld.globals.items.len)));
 
     var alive = AtomTable.init(arena.allocator());
-    try alive.ensureTotalCapacity(@intCast(u32, zld.atoms.items.len));
+    try alive.ensureTotalCapacity(@as(u32, @intCast(zld.atoms.items.len)));
 
     try collectRoots(zld, &roots, resolver);
     try mark(zld, roots, &alive);
@@ -99,8 +99,8 @@ fn collectRoots(zld: *Zld, roots: *AtomTable, resolver: *const SymbolResolver) !
                 const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
                     source_sym.n_sect - 1
                 else sect_id: {
-                    const nbase = @intCast(u32, object.in_symtab.?.len);
-                    const sect_id = @intCast(u8, atom.sym_index - nbase);
+                    const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+                    const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
                     break :sect_id sect_id;
                 };
                 const source_sect = object.getSourceSection(sect_id);
@@ -148,7 +148,7 @@ fn markLive(zld: *Zld, atom_index: AtomIndex, alive: *AtomTable) void {
 
     for (relocs) |rel| {
         const target = switch (cpu_arch) {
-            .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+            .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
                 .ARM64_RELOC_ADDEND => continue,
                 else => Atom.parseRelocTarget(zld, .{
                     .object_id = atom.getFile().?,
@@ -208,7 +208,7 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable) bool {
 
     for (relocs) |rel| {
         const target = switch (cpu_arch) {
-            .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+            .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
                 .ARM64_RELOC_ADDEND => continue,
                 else => Atom.parseRelocTarget(zld, .{
                     .object_id = atom.getFile().?,
@@ -264,8 +264,8 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
                 const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
                     source_sym.n_sect - 1
                 else blk: {
-                    const nbase = @intCast(u32, object.in_symtab.?.len);
-                    const sect_id = @intCast(u8, atom.sym_index - nbase);
+                    const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+                    const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
                     break :blk sect_id;
                 };
                 const source_sect = object.getSourceSection(sect_id);
@@ -283,7 +283,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
     for (zld.objects.items, 0..) |_, object_id| {
         // Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so,
         // marking all references as live.
-        try markUnwindRecords(zld, @intCast(u32, object_id), alive);
+        try markUnwindRecords(zld, @as(u32, @intCast(object_id)), alive);
     }
 }
 
@@ -329,7 +329,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void {
                     .object_id = object_id,
                     .rel = rel,
                     .code = mem.asBytes(&record),
-                    .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+                    .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
                 });
                 const target_sym = zld.getSymbol(target);
                 if (!target_sym.undf()) {
@@ -344,7 +344,7 @@ fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void {
                     .object_id = object_id,
                     .rel = rel,
                     .code = mem.asBytes(&record),
-                    .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+                    .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
                 });
                 const target_object = zld.objects.items[target.getFile().?];
                 const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?;
@@ -377,7 +377,7 @@ fn markEhFrameRecord(zld: *Zld, object_id: u32, atom_index: AtomIndex, alive: *A
                     .object_id = object_id,
                     .rel = rel,
                     .code = fde.data,
-                    .base_offset = @intCast(i32, fde_offset) + 4,
+                    .base_offset = @as(i32, @intCast(fde_offset)) + 4,
                 });
                 const target_sym = zld.getSymbol(target);
                 if (!target_sym.undf()) blk: {
src/link/MachO/DebugSymbols.zig
@@ -64,9 +64,9 @@ pub const Reloc = struct {
 /// has been called to get a viable debug symbols output.
 pub fn populateMissingMetadata(self: *DebugSymbols) !void {
     if (self.dwarf_segment_cmd_index == null) {
-        self.dwarf_segment_cmd_index = @intCast(u8, self.segments.items.len);
+        self.dwarf_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
 
-        const off = @intCast(u64, self.page_size);
+        const off = @as(u64, @intCast(self.page_size));
         const ideal_size: u16 = 200 + 128 + 160 + 250;
         const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
 
@@ -86,7 +86,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
         try self.dwarf.strtab.buffer.append(self.allocator, 0);
         self.debug_str_section_index = try self.allocateSection(
             "__debug_str",
-            @intCast(u32, self.dwarf.strtab.buffer.items.len),
+            @as(u32, @intCast(self.dwarf.strtab.buffer.items.len)),
             0,
         );
         self.debug_string_table_dirty = true;
@@ -113,7 +113,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
     }
 
     if (self.linkedit_segment_cmd_index == null) {
-        self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len);
+        self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
         try self.segments.append(self.allocator, .{
             .segname = makeStaticString("__LINKEDIT"),
             .maxprot = macho.PROT.READ,
@@ -128,7 +128,7 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
     var sect = macho.section_64{
         .sectname = makeStaticString(sectname),
         .segname = segment.segname,
-        .size = @intCast(u32, size),
+        .size = @as(u32, @intCast(size)),
         .@"align" = alignment,
     };
     const alignment_pow_2 = try math.powi(u32, 2, alignment);
@@ -141,9 +141,9 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
         off + size,
     });
 
-    sect.offset = @intCast(u32, off);
+    sect.offset = @as(u32, @intCast(off));
 
-    const index = @intCast(u8, self.sections.items.len);
+    const index = @as(u8, @intCast(self.sections.items.len));
     try self.sections.append(self.allocator, sect);
     segment.cmdsize += @sizeOf(macho.section_64);
     segment.nsects += 1;
@@ -176,7 +176,7 @@ pub fn growSection(self: *DebugSymbols, sect_index: u8, needed_size: u32, requir
             if (amt != existing_size) return error.InputOutput;
         }
 
-        sect.offset = @intCast(u32, new_offset);
+        sect.offset = @as(u32, @intCast(new_offset));
     }
 
     sect.size = needed_size;
@@ -286,7 +286,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
     {
         const sect_index = self.debug_str_section_index.?;
         if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
-            const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
+            const needed_size = @as(u32, @intCast(self.dwarf.strtab.buffer.items.len));
             try self.growSection(sect_index, needed_size, false);
             try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
             self.debug_string_table_dirty = false;
@@ -307,7 +307,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
 
     const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
     try self.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
-    try self.writeHeader(macho_file, ncmds, @intCast(u32, lc_buffer.items.len));
+    try self.writeHeader(macho_file, ncmds, @as(u32, @intCast(lc_buffer.items.len)));
 
     assert(!self.debug_abbrev_section_dirty);
     assert(!self.debug_aranges_section_dirty);
@@ -378,7 +378,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
     // Write segment/section headers from the binary file first.
     const end = macho_file.linkedit_segment_cmd_index.?;
     for (macho_file.segments.items[0..end], 0..) |seg, i| {
-        const indexes = macho_file.getSectionIndexes(@intCast(u8, i));
+        const indexes = macho_file.getSectionIndexes(@as(u8, @intCast(i)));
         var out_seg = seg;
         out_seg.fileoff = 0;
         out_seg.filesize = 0;
@@ -407,7 +407,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
     }
     // Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
     for (self.segments.items, 0..) |seg, i| {
-        const indexes = self.getSectionIndexes(@intCast(u8, i));
+        const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
         try writer.writeStruct(seg);
         for (self.sections.items[indexes.start..indexes.end]) |header| {
             try writer.writeStruct(header);
@@ -473,7 +473,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
 
     for (macho_file.locals.items, 0..) |sym, sym_id| {
         if (sym.n_strx == 0) continue; // no name, skip
-        const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
+        const sym_loc = MachO.SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null };
         if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
         if (macho_file.getGlobal(macho_file.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
         var out_sym = sym;
@@ -501,10 +501,10 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
     const needed_size = nsyms * @sizeOf(macho.nlist_64);
     seg.filesize = offset + needed_size - seg.fileoff;
 
-    self.symtab_cmd.symoff = @intCast(u32, offset);
-    self.symtab_cmd.nsyms = @intCast(u32, nsyms);
+    self.symtab_cmd.symoff = @as(u32, @intCast(offset));
+    self.symtab_cmd.nsyms = @as(u32, @intCast(nsyms));
 
-    const locals_off = @intCast(u32, offset);
+    const locals_off = @as(u32, @intCast(offset));
     const locals_size = nlocals * @sizeOf(macho.nlist_64);
     const exports_off = locals_off + locals_size;
     const exports_size = nexports * @sizeOf(macho.nlist_64);
@@ -521,13 +521,13 @@ fn writeStrtab(self: *DebugSymbols) !void {
     defer tracy.end();
 
     const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
-    const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64));
+    const symtab_size = @as(u32, @intCast(self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)));
     const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64));
     const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64));
 
     seg.filesize = offset + needed_size - seg.fileoff;
-    self.symtab_cmd.stroff = @intCast(u32, offset);
-    self.symtab_cmd.strsize = @intCast(u32, needed_size);
+    self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+    self.symtab_cmd.strsize = @as(u32, @intCast(needed_size));
 
     log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
 
@@ -542,8 +542,8 @@ fn writeStrtab(self: *DebugSymbols) !void {
 pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
     var start: u8 = 0;
     const nsects = for (self.segments.items, 0..) |seg, i| {
-        if (i == segment_index) break @intCast(u8, seg.nsects);
-        start += @intCast(u8, seg.nsects);
+        if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+        start += @as(u8, @intCast(seg.nsects));
     } else 0;
     return .{ .start = start, .end = start + nsects };
 }
src/link/MachO/DwarfInfo.zig
@@ -70,7 +70,7 @@ pub fn genSubprogramLookupByName(
                         low_pc = addr;
                     }
                     if (try attr.getConstant(self)) |constant| {
-                        low_pc = @intCast(u64, constant);
+                        low_pc = @as(u64, @intCast(constant));
                     }
                 },
                 dwarf.AT.high_pc => {
@@ -78,7 +78,7 @@ pub fn genSubprogramLookupByName(
                         high_pc = addr;
                     }
                     if (try attr.getConstant(self)) |constant| {
-                        high_pc = @intCast(u64, constant);
+                        high_pc = @as(u64, @intCast(constant));
                     }
                 },
                 else => {},
@@ -261,7 +261,7 @@ pub const Attribute = struct {
 
         switch (self.form) {
             dwarf.FORM.string => {
-                return mem.sliceTo(@ptrCast([*:0]const u8, debug_info.ptr), 0);
+                return mem.sliceTo(@as([*:0]const u8, @ptrCast(debug_info.ptr)), 0);
             },
             dwarf.FORM.strp => {
                 const off = if (cuh.is_64bit)
@@ -499,5 +499,5 @@ fn findAbbrevEntrySize(self: DwarfInfo, da_off: usize, da_len: usize, di_off: us
 
 fn getString(self: DwarfInfo, off: u64) []const u8 {
     assert(off < self.debug_str.len);
-    return mem.sliceTo(@ptrCast([*:0]const u8, self.debug_str.ptr + @intCast(usize, off)), 0);
+    return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.debug_str.ptr + @as(usize, @intCast(off)))), 0);
 }
src/link/MachO/Dylib.zig
@@ -75,7 +75,7 @@ pub const Id = struct {
                 .int => |int| {
                     var out: u32 = 0;
                     const major = math.cast(u16, int) orelse return error.Overflow;
-                    out += @intCast(u32, major) << 16;
+                    out += @as(u32, @intCast(major)) << 16;
                     return out;
                 },
                 .float => |float| {
@@ -106,9 +106,9 @@ pub const Id = struct {
             out += try fmt.parseInt(u8, values[2], 10);
         }
         if (count > 1) {
-            out += @intCast(u32, try fmt.parseInt(u8, values[1], 10)) << 8;
+            out += @as(u32, @intCast(try fmt.parseInt(u8, values[1], 10))) << 8;
         }
-        out += @intCast(u32, try fmt.parseInt(u16, values[0], 10)) << 16;
+        out += @as(u32, @intCast(try fmt.parseInt(u16, values[0], 10))) << 16;
 
         return out;
     }
@@ -164,11 +164,11 @@ pub fn parseFromBinary(
         switch (cmd.cmd()) {
             .SYMTAB => {
                 const symtab_cmd = cmd.cast(macho.symtab_command).?;
-                const symtab = @ptrCast(
+                const symtab = @as(
                     [*]const macho.nlist_64,
                     // Alignment is guaranteed as a dylib is a final linked image and has to have sections
                     // properly aligned in order to be correctly loaded by the loader.
-                    @alignCast(@alignOf(macho.nlist_64), &data[symtab_cmd.symoff]),
+                    @ptrCast(@alignCast(&data[symtab_cmd.symoff])),
                 )[0..symtab_cmd.nsyms];
                 const strtab = data[symtab_cmd.stroff..][0..symtab_cmd.strsize];
 
@@ -176,7 +176,7 @@ pub fn parseFromBinary(
                     const add_to_symtab = sym.ext() and (sym.sect() or sym.indr());
                     if (!add_to_symtab) continue;
 
-                    const sym_name = mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx), 0);
+                    const sym_name = mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.ptr + sym.n_strx)), 0);
                     try self.symbols.putNoClobber(allocator, try allocator.dupe(u8, sym_name), false);
                 }
             },
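
The symtab hunk above could not follow the purely mechanical pattern: `@alignCast` no longer takes an explicit alignment argument, so the old `@ptrCast(T, @alignCast(alignment, p))` shape becomes `@as(T, @ptrCast(@alignCast(p)))`, with the alignment inferred from the result type. A hypothetical helper showing the same pattern (the function name is invented):

    const std = @import("std");
    const macho = std.macho;

    fn firstNlist(data: []const u8) *const macho.nlist_64 {
        // @alignCast performs a safety-checked alignment increase; both its
        // alignment and @ptrCast's destination come from the result type.
        return @as(*const macho.nlist_64, @ptrCast(@alignCast(&data[0])));
    }
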
src/link/MachO/eh_frame.zig
@@ -36,7 +36,7 @@ pub fn scanRelocs(zld: *Zld) !void {
                 try cies.putNoClobber(cie_offset, {});
                 it.seekTo(cie_offset);
                 const cie = (try it.next()).?;
-                try cie.scanRelocs(zld, @intCast(u32, object_id), cie_offset);
+                try cie.scanRelocs(zld, @as(u32, @intCast(object_id)), cie_offset);
             }
         }
     }
@@ -110,7 +110,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
     var eh_frame_offset: u32 = 0;
 
     for (zld.objects.items, 0..) |*object, object_id| {
-        try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len));
+        try eh_records.ensureUnusedCapacity(2 * @as(u32, @intCast(object.exec_atoms.items.len)));
 
         var cies = std.AutoHashMap(u32, u32).init(gpa);
         defer cies.deinit();
@@ -139,7 +139,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
                 eh_it.seekTo(cie_offset);
                 const source_cie_record = (try eh_it.next()).?;
                 var cie_record = try source_cie_record.toOwned(gpa);
-                try cie_record.relocate(zld, @intCast(u32, object_id), .{
+                try cie_record.relocate(zld, @as(u32, @intCast(object_id)), .{
                     .source_offset = cie_offset,
                     .out_offset = eh_frame_offset,
                     .sect_addr = sect.addr,
@@ -151,7 +151,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
 
             var fde_record = try source_fde_record.toOwned(gpa);
             fde_record.setCiePointer(eh_frame_offset + 4 - gop.value_ptr.*);
-            try fde_record.relocate(zld, @intCast(u32, object_id), .{
+            try fde_record.relocate(zld, @as(u32, @intCast(object_id)), .{
                 .source_offset = fde_record_offset,
                 .out_offset = eh_frame_offset,
                 .sect_addr = sect.addr,
@@ -194,7 +194,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
             UnwindInfo.UnwindEncoding.setDwarfSectionOffset(
                 &record.compactUnwindEncoding,
                 cpu_arch,
-                @intCast(u24, eh_frame_offset),
+                @as(u24, @intCast(eh_frame_offset)),
             );
 
             const cie_record = eh_records.get(
@@ -268,7 +268,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
         }) u64 {
             assert(rec.tag == .fde);
             const addend = mem.readIntLittle(i64, rec.data[4..][0..8]);
-            return @intCast(u64, @intCast(i64, ctx.base_addr + ctx.base_offset + 8) + addend);
+            return @as(u64, @intCast(@as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8)) + addend));
         }
 
         pub fn setTargetSymbolAddress(rec: *Record, value: u64, ctx: struct {
@@ -276,7 +276,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
             base_offset: u64,
         }) !void {
             assert(rec.tag == .fde);
-            const addend = @intCast(i64, value) - @intCast(i64, ctx.base_addr + ctx.base_offset + 8);
+            const addend = @as(i64, @intCast(value)) - @as(i64, @intCast(ctx.base_addr + ctx.base_offset + 8));
             mem.writeIntLittle(i64, rec.data[4..][0..8], addend);
         }
 
@@ -291,7 +291,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
             for (relocs) |rel| {
                 switch (cpu_arch) {
                     .aarch64 => {
-                        const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+                        const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
                         switch (rel_type) {
                             .ARM64_RELOC_SUBTRACTOR,
                             .ARM64_RELOC_UNSIGNED,
@@ -301,7 +301,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                         }
                     },
                     .x86_64 => {
-                        const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+                        const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
                         switch (rel_type) {
                             .X86_64_RELOC_GOT => {},
                             else => unreachable,
@@ -313,7 +313,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                     .object_id = object_id,
                     .rel = rel,
                     .code = rec.data,
-                    .base_offset = @intCast(i32, source_offset) + 4,
+                    .base_offset = @as(i32, @intCast(source_offset)) + 4,
                 });
                 return target;
             }
@@ -335,40 +335,40 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                     .object_id = object_id,
                     .rel = rel,
                     .code = rec.data,
-                    .base_offset = @intCast(i32, ctx.source_offset) + 4,
+                    .base_offset = @as(i32, @intCast(ctx.source_offset)) + 4,
                 });
-                const rel_offset = @intCast(u32, rel.r_address - @intCast(i32, ctx.source_offset) - 4);
+                const rel_offset = @as(u32, @intCast(rel.r_address - @as(i32, @intCast(ctx.source_offset)) - 4));
                 const source_addr = ctx.sect_addr + rel_offset + ctx.out_offset + 4;
 
                 switch (cpu_arch) {
                     .aarch64 => {
-                        const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+                        const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
                         switch (rel_type) {
                             .ARM64_RELOC_SUBTRACTOR => {
                                 // Address of the __eh_frame in the source object file
                             },
                             .ARM64_RELOC_POINTER_TO_GOT => {
                                 const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
-                                const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
+                                const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse
                                     return error.Overflow;
                                 mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], result);
                             },
                             .ARM64_RELOC_UNSIGNED => {
                                 assert(rel.r_extern == 1);
                                 const target_addr = try Atom.getRelocTargetAddress(zld, target, false, false);
-                                const result = @intCast(i64, target_addr) - @intCast(i64, source_addr);
-                                mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @intCast(i64, result));
+                                const result = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
+                                mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @as(i64, @intCast(result)));
                             },
                             else => unreachable,
                         }
                     },
                     .x86_64 => {
-                        const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+                        const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
                         switch (rel_type) {
                             .X86_64_RELOC_GOT => {
                                 const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
                                 const addend = mem.readIntLittle(i32, rec.data[rel_offset..][0..4]);
-                                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+                                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
                                 const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
                                 mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], disp);
                             },
@@ -392,7 +392,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
 
         pub fn getAugmentationString(rec: Record) []const u8 {
             assert(rec.tag == .cie);
-            return mem.sliceTo(@ptrCast([*:0]const u8, rec.data.ptr + 5), 0);
+            return mem.sliceTo(@as([*:0]const u8, @ptrCast(rec.data.ptr + 5)), 0);
         }
 
         pub fn getPersonalityPointer(rec: Record, ctx: struct {
@@ -418,7 +418,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                 'P' => {
                     const enc = try reader.readByte();
                     const offset = ctx.base_offset + 13 + aug_str.len + creader.bytes_read;
-                    const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
+                    const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader);
                     return ptr;
                 },
                 'L' => {
@@ -441,7 +441,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
             const reader = stream.reader();
             _ = try reader.readByte();
             const offset = ctx.base_offset + 25;
-            const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
+            const ptr = try getEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), reader);
             return ptr;
         }
 
@@ -454,7 +454,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
             var stream = std.io.fixedBufferStream(rec.data[21..]);
             const writer = stream.writer();
             const offset = ctx.base_offset + 25;
-            try setEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), value, writer);
+            try setEncodedPointer(enc, @as(i64, @intCast(ctx.base_addr + offset)), value, writer);
         }
 
         fn getLsdaEncoding(rec: Record) !?u8 {
@@ -494,11 +494,11 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
             if (enc == EH_PE.omit) return null;
 
             var ptr: i64 = switch (enc & 0x0F) {
-                EH_PE.absptr => @bitCast(i64, try reader.readIntLittle(u64)),
-                EH_PE.udata2 => @bitCast(i16, try reader.readIntLittle(u16)),
-                EH_PE.udata4 => @bitCast(i32, try reader.readIntLittle(u32)),
-                EH_PE.udata8 => @bitCast(i64, try reader.readIntLittle(u64)),
-                EH_PE.uleb128 => @bitCast(i64, try leb.readULEB128(u64, reader)),
+                EH_PE.absptr => @as(i64, @bitCast(try reader.readIntLittle(u64))),
+                EH_PE.udata2 => @as(i16, @bitCast(try reader.readIntLittle(u16))),
+                EH_PE.udata4 => @as(i32, @bitCast(try reader.readIntLittle(u32))),
+                EH_PE.udata8 => @as(i64, @bitCast(try reader.readIntLittle(u64))),
+                EH_PE.uleb128 => @as(i64, @bitCast(try leb.readULEB128(u64, reader))),
                 EH_PE.sdata2 => try reader.readIntLittle(i16),
                 EH_PE.sdata4 => try reader.readIntLittle(i32),
                 EH_PE.sdata8 => try reader.readIntLittle(i64),
@@ -517,13 +517,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                 else => return null,
             }
 
-            return @bitCast(u64, ptr);
+            return @as(u64, @bitCast(ptr));
         }
 
         fn setEncodedPointer(enc: u8, pcrel_offset: i64, value: u64, writer: anytype) !void {
             if (enc == EH_PE.omit) return;
 
-            var actual = @intCast(i64, value);
+            var actual = @as(i64, @intCast(value));
 
             switch (enc & 0x70) {
                 EH_PE.absptr => {},
@@ -537,13 +537,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
             }
 
             switch (enc & 0x0F) {
-                EH_PE.absptr => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
-                EH_PE.udata2 => try writer.writeIntLittle(u16, @bitCast(u16, @intCast(i16, actual))),
-                EH_PE.udata4 => try writer.writeIntLittle(u32, @bitCast(u32, @intCast(i32, actual))),
-                EH_PE.udata8 => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
-                EH_PE.uleb128 => try leb.writeULEB128(writer, @bitCast(u64, actual)),
-                EH_PE.sdata2 => try writer.writeIntLittle(i16, @intCast(i16, actual)),
-                EH_PE.sdata4 => try writer.writeIntLittle(i32, @intCast(i32, actual)),
+                EH_PE.absptr => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))),
+                EH_PE.udata2 => try writer.writeIntLittle(u16, @as(u16, @bitCast(@as(i16, @intCast(actual))))),
+                EH_PE.udata4 => try writer.writeIntLittle(u32, @as(u32, @bitCast(@as(i32, @intCast(actual))))),
+                EH_PE.udata8 => try writer.writeIntLittle(u64, @as(u64, @bitCast(actual))),
+                EH_PE.uleb128 => try leb.writeULEB128(writer, @as(u64, @bitCast(actual))),
+                EH_PE.sdata2 => try writer.writeIntLittle(i16, @as(i16, @intCast(actual))),
+                EH_PE.sdata4 => try writer.writeIntLittle(i32, @as(i32, @intCast(actual))),
                 EH_PE.sdata8 => try writer.writeIntLittle(i64, actual),
                 EH_PE.sleb128 => try leb.writeILEB128(writer, actual),
                 else => unreachable,
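
`getEncodedPointer` and `setEncodedPointer` above switch on the low nibble of the DWARF EH encoding byte, bit-casting unsigned reads into the signed accumulator. A reduced sketch of the read side, covering only the 8-byte formats (the `EH_PE` constants are the standard DWARF values; the error name is ours):

    const std = @import("std");

    const EH_PE = struct {
        const absptr: u8 = 0x00;
        const udata8: u8 = 0x04;
        const sdata8: u8 = 0x0C;
        const omit: u8 = 0xFF;
    };

    /// The real routine also handles the 2- and 4-byte and LEB128
    /// variants, plus the pcrel adjustment in the high nibble.
    fn readPtr(enc: u8, reader: anytype) !?i64 {
        if (enc == EH_PE.omit) return null;
        return switch (enc & 0x0F) {
            EH_PE.absptr, EH_PE.udata8 => @as(i64, @bitCast(try reader.readIntLittle(u64))),
            EH_PE.sdata8 => try reader.readIntLittle(i64),
            else => return error.UnsupportedEncoding,
        };
    }

In the surrounding code the reader comes from `std.io.fixedBufferStream` over the record bytes.
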
src/link/MachO/load_commands.zig
@@ -114,7 +114,7 @@ fn calcLCsSize(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx
         }
     }
 
-    return @intCast(u32, sizeofcmds);
+    return @as(u32, @intCast(sizeofcmds));
 }
 
 pub fn calcMinHeaderPad(gpa: Allocator, options: *const link.Options, ctx: CalcLCsSizeCtx) !u64 {
@@ -140,7 +140,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
     var pos: usize = 0;
     while (true) {
         if (pos >= lc_buffer.len) break;
-        const cmd = @ptrCast(*align(1) const macho.load_command, lc_buffer.ptr + pos).*;
+        const cmd = @as(*align(1) const macho.load_command, @ptrCast(lc_buffer.ptr + pos)).*;
         ncmds += 1;
         pos += cmd.cmdsize;
     }
@@ -149,11 +149,11 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
 
 pub fn writeDylinkerLC(lc_writer: anytype) !void {
     const name_len = mem.sliceTo(default_dyld_path, 0).len;
-    const cmdsize = @intCast(u32, mem.alignForward(
+    const cmdsize = @as(u32, @intCast(mem.alignForward(
         u64,
         @sizeOf(macho.dylinker_command) + name_len,
         @sizeOf(u64),
-    ));
+    )));
     try lc_writer.writeStruct(macho.dylinker_command{
         .cmd = .LOAD_DYLINKER,
         .cmdsize = cmdsize,
@@ -176,11 +176,11 @@ const WriteDylibLCCtx = struct {
 
 fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void {
     const name_len = ctx.name.len + 1;
-    const cmdsize = @intCast(u32, mem.alignForward(
+    const cmdsize = @as(u32, @intCast(mem.alignForward(
         u64,
         @sizeOf(macho.dylib_command) + name_len,
         @sizeOf(u64),
-    ));
+    )));
     try lc_writer.writeStruct(macho.dylib_command{
         .cmd = ctx.cmd,
         .cmdsize = cmdsize,
@@ -217,8 +217,8 @@ pub fn writeDylibIdLC(gpa: Allocator, options: *const link.Options, lc_writer: a
     try writeDylibLC(.{
         .cmd = .ID_DYLIB,
         .name = install_name,
-        .current_version = @intCast(u32, curr.major << 16 | curr.minor << 8 | curr.patch),
-        .compatibility_version = @intCast(u32, compat.major << 16 | compat.minor << 8 | compat.patch),
+        .current_version = @as(u32, @intCast(curr.major << 16 | curr.minor << 8 | curr.patch)),
+        .compatibility_version = @as(u32, @intCast(compat.major << 16 | compat.minor << 8 | compat.patch)),
     }, lc_writer);
 }
 
@@ -253,11 +253,11 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an
 
     while (try it.next()) |rpath| {
         const rpath_len = rpath.len + 1;
-        const cmdsize = @intCast(u32, mem.alignForward(
+        const cmdsize = @as(u32, @intCast(mem.alignForward(
             u64,
             @sizeOf(macho.rpath_command) + rpath_len,
             @sizeOf(u64),
-        ));
+        )));
         try lc_writer.writeStruct(macho.rpath_command{
             .cmdsize = cmdsize,
             .path = @sizeOf(macho.rpath_command),
@@ -275,12 +275,12 @@ pub fn writeBuildVersionLC(options: *const link.Options, lc_writer: anytype) !vo
     const cmdsize = @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version);
     const platform_version = blk: {
         const ver = options.target.os.version_range.semver.min;
-        const platform_version = @intCast(u32, ver.major << 16 | ver.minor << 8);
+        const platform_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8));
         break :blk platform_version;
     };
     const sdk_version = if (options.native_darwin_sdk) |sdk| blk: {
         const ver = sdk.version;
-        const sdk_version = @intCast(u32, ver.major << 16 | ver.minor << 8);
+        const sdk_version = @as(u32, @intCast(ver.major << 16 | ver.minor << 8));
         break :blk sdk_version;
     } else platform_version;
     const is_simulator_abi = options.target.abi == .simulator;
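
Each `cmdsize` computation above rounds the command-plus-payload length up to 8 bytes before narrowing it into the header's `u32` field, since 64-bit Mach-O load command sizes must be a multiple of `@sizeOf(u64)`. A one-function sketch (`cmdSize` is our name):

    const std = @import("std");

    fn cmdSize(base: u64, name_len: u64) u32 {
        // @intCast still carries a range check; the rounded size must fit in u32.
        return @as(u32, @intCast(std.mem.alignForward(u64, base + name_len, @sizeOf(u64))));
    }

    test "cmdSize rounds up to 8" {
        try std.testing.expectEqual(@as(u32, 40), cmdSize(32, 5));
    }
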
src/link/MachO/Object.zig
@@ -164,7 +164,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
         else => {},
     } else return;
 
-    self.in_symtab = @ptrCast([*]align(1) const macho.nlist_64, self.contents.ptr + symtab.symoff)[0..symtab.nsyms];
+    self.in_symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(self.contents.ptr + symtab.symoff))[0..symtab.nsyms];
     self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
 
     self.symtab = try allocator.alloc(macho.nlist_64, self.in_symtab.?.len + nsects);
@@ -202,7 +202,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
     defer sorted_all_syms.deinit();
 
     for (self.in_symtab.?, 0..) |_, index| {
-        sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
+        sorted_all_syms.appendAssumeCapacity(.{ .index = @as(u32, @intCast(index)) });
     }
 
     // We sort by type: defined < undefined, and
@@ -225,18 +225,18 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
             }
         }
         if (sym.sect() and section_index_lookup == null) {
-            section_index_lookup = .{ .start = @intCast(u32, i), .len = 1 };
+            section_index_lookup = .{ .start = @as(u32, @intCast(i)), .len = 1 };
         }
 
         prev_sect_id = sym.n_sect;
 
         self.symtab[i] = sym;
         self.source_symtab_lookup[i] = sym_id.index;
-        self.reverse_symtab_lookup[sym_id.index] = @intCast(u32, i);
-        self.source_address_lookup[i] = if (sym.undf()) -1 else @intCast(i64, sym.n_value);
+        self.reverse_symtab_lookup[sym_id.index] = @as(u32, @intCast(i));
+        self.source_address_lookup[i] = if (sym.undf()) -1 else @as(i64, @intCast(sym.n_value));
 
-        const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1;
-        self.strtab_lookup[i] = @intCast(u32, sym_name_len);
+        const sym_name_len = mem.sliceTo(@as([*:0]const u8, @ptrCast(self.in_strtab.?.ptr + sym.n_strx)), 0).len + 1;
+        self.strtab_lookup[i] = @as(u32, @intCast(sym_name_len));
     }
 
     // If there were no undefined symbols, make sure we populate the
@@ -267,7 +267,7 @@ const SymbolAtIndex = struct {
 
     fn getSymbolName(self: SymbolAtIndex, ctx: Context) []const u8 {
         const off = self.getSymbol(ctx).n_strx;
-        return mem.sliceTo(@ptrCast([*:0]const u8, ctx.in_strtab.?.ptr + off), 0);
+        return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.in_strtab.?.ptr + off)), 0);
     }
 
     fn getSymbolSeniority(self: SymbolAtIndex, ctx: Context) u2 {
@@ -338,7 +338,7 @@ fn filterSymbolsBySection(symbols: []macho.nlist_64, n_sect: u8) struct {
         .n_sect = n_sect,
     });
 
-    return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
+    return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) };
 }
 
 fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr: u64) struct {
@@ -360,7 +360,7 @@ fn filterSymbolsByAddress(symbols: []macho.nlist_64, start_addr: u64, end_addr:
         .addr = end_addr,
     });
 
-    return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
+    return .{ .index = @as(u32, @intCast(index)), .len = @as(u32, @intCast(len)) };
 }
 
 const SortedSection = struct {
@@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
         };
         if (sect.size == 0) continue;
 
-        const sect_id = @intCast(u8, id);
+        const sect_id = @as(u8, @intCast(id));
         const sym = self.getSectionAliasSymbolPtr(sect_id);
         sym.* = .{
             .n_strx = 0,
@@ -417,7 +417,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
             const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;
             if (sect.size == 0) continue;
 
-            const sect_id = @intCast(u8, id);
+            const sect_id = @as(u8, @intCast(id));
             const sym_index = self.getSectionAliasSymbolIndex(sect_id);
             const atom_index = try self.createAtomFromSubsection(
                 zld,
@@ -459,7 +459,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
     defer gpa.free(sorted_sections);
 
     for (sections, 0..) |sect, id| {
-        sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
+        sorted_sections[id] = .{ .header = sect, .id = @as(u8, @intCast(id)) };
     }
 
     mem.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress);
@@ -651,7 +651,7 @@ fn filterRelocs(
     const start = @import("zld.zig").bsearch(macho.relocation_info, relocs, Predicate{ .addr = end_addr });
     const len = @import("zld.zig").lsearch(macho.relocation_info, relocs[start..], LPredicate{ .addr = start_addr });
 
-    return .{ .start = @intCast(u32, start), .len = @intCast(u32, len) };
+    return .{ .start = @as(u32, @intCast(start)), .len = @as(u32, @intCast(len)) };
 }
 
 /// Parse all relocs for the input section, and sort in descending order.
@@ -659,7 +659,7 @@ fn filterRelocs(
 /// section in a sorted manner which is simply not true.
 fn parseRelocs(self: *Object, gpa: Allocator, sect_id: u8) !void {
     const section = self.getSourceSection(sect_id);
-    const start = @intCast(u32, self.relocations.items.len);
+    const start = @as(u32, @intCast(self.relocations.items.len));
     if (self.getSourceRelocs(section)) |relocs| {
         try self.relocations.ensureUnusedCapacity(gpa, relocs.len);
         self.relocations.appendUnalignedSliceAssumeCapacity(relocs);
@@ -677,8 +677,8 @@ fn cacheRelocs(self: *Object, zld: *Zld, atom_index: AtomIndex) !void {
         // If there was no matching symbol present in the source symtab, this means
         // we are dealing with either an entire section, or part of it, but also
         // starting at the beginning.
-        const nbase = @intCast(u32, self.in_symtab.?.len);
-        const sect_id = @intCast(u8, atom.sym_index - nbase);
+        const nbase = @as(u32, @intCast(self.in_symtab.?.len));
+        const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
         break :blk sect_id;
     };
     const source_sect = self.getSourceSection(source_sect_id);
@@ -745,7 +745,7 @@ fn parseEhFrameSection(self: *Object, zld: *Zld, object_id: u32) !void {
                             .object_id = object_id,
                             .rel = rel,
                             .code = it.data[offset..],
-                            .base_offset = @intCast(i32, offset),
+                            .base_offset = @as(i32, @intCast(offset)),
                         });
                         break :blk target;
                     },
@@ -798,7 +798,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
         _ = try zld.initSection("__TEXT", "__unwind_info", .{});
     }
 
-    try self.unwind_records_lookup.ensureTotalCapacity(gpa, @intCast(u32, self.exec_atoms.items.len));
+    try self.unwind_records_lookup.ensureTotalCapacity(gpa, @as(u32, @intCast(self.exec_atoms.items.len)));
 
     const unwind_records = self.getUnwindRecords();
 
@@ -834,14 +834,14 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
             .object_id = object_id,
             .rel = rel,
             .code = mem.asBytes(&record),
-            .base_offset = @intCast(i32, offset),
+            .base_offset = @as(i32, @intCast(offset)),
         });
         log.debug("unwind record {d} tracks {s}", .{ record_id, zld.getSymbolName(target) });
         if (target.getFile() != object_id) {
             self.unwind_relocs_lookup[record_id].dead = true;
         } else {
             const atom_index = self.getAtomIndexForSymbol(target.sym_index).?;
-            self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @intCast(u32, record_id));
+            self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @as(u32, @intCast(record_id)));
         }
     }
 }
@@ -869,7 +869,7 @@ pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname:
     const sections = self.getSourceSections();
     for (sections, 0..) |sect, i| {
         if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
-            return @intCast(u8, i);
+            return @as(u8, @intCast(i));
     } else return null;
 }
 
@@ -898,7 +898,7 @@ pub fn parseDataInCode(self: *Object, gpa: Allocator) !void {
         }
     } else return;
     const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry));
-    const dice = @ptrCast([*]align(1) const macho.data_in_code_entry, self.contents.ptr + cmd.dataoff)[0..ndice];
+    const dice = @as([*]align(1) const macho.data_in_code_entry, @ptrCast(self.contents.ptr + cmd.dataoff))[0..ndice];
     try self.data_in_code.ensureTotalCapacityPrecise(gpa, dice.len);
     self.data_in_code.appendUnalignedSliceAssumeCapacity(dice);
     mem.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan);
@@ -945,12 +945,12 @@ pub fn parseDwarfInfo(self: Object) DwarfInfo {
 }
 
 pub fn getSectionContents(self: Object, sect: macho.section_64) []const u8 {
-    const size = @intCast(usize, sect.size);
+    const size = @as(usize, @intCast(sect.size));
     return self.contents[sect.offset..][0..size];
 }
 
 pub fn getSectionAliasSymbolIndex(self: Object, sect_id: u8) u32 {
-    const start = @intCast(u32, self.in_symtab.?.len);
+    const start = @as(u32, @intCast(self.in_symtab.?.len));
     return start + sect_id;
 }
 
@@ -964,7 +964,7 @@ pub fn getSectionAliasSymbolPtr(self: *Object, sect_id: u8) *macho.nlist_64 {
 
 fn getSourceRelocs(self: Object, sect: macho.section_64) ?[]align(1) const macho.relocation_info {
     if (sect.nreloc == 0) return null;
-    return @ptrCast([*]align(1) const macho.relocation_info, self.contents.ptr + sect.reloff)[0..sect.nreloc];
+    return @as([*]align(1) const macho.relocation_info, @ptrCast(self.contents.ptr + sect.reloff))[0..sect.nreloc];
 }
 
 pub fn getRelocs(self: Object, sect_id: u8) []const macho.relocation_info {
@@ -1005,25 +1005,25 @@ pub fn getSymbolByAddress(self: Object, addr: u64, sect_hint: ?u8) u32 {
             const target_sym_index = @import("zld.zig").lsearch(
                 i64,
                 self.source_address_lookup[lookup.start..][0..lookup.len],
-                Predicate{ .addr = @intCast(i64, addr) },
+                Predicate{ .addr = @as(i64, @intCast(addr)) },
             );
             if (target_sym_index > 0) {
-                return @intCast(u32, lookup.start + target_sym_index - 1);
+                return @as(u32, @intCast(lookup.start + target_sym_index - 1));
             }
         }
         return self.getSectionAliasSymbolIndex(sect_id);
     }
 
     const target_sym_index = @import("zld.zig").lsearch(i64, self.source_address_lookup, Predicate{
-        .addr = @intCast(i64, addr),
+        .addr = @as(i64, @intCast(addr)),
     });
     assert(target_sym_index > 0);
-    return @intCast(u32, target_sym_index - 1);
+    return @as(u32, @intCast(target_sym_index - 1));
 }
 
 pub fn getGlobal(self: Object, sym_index: u32) ?u32 {
     if (self.globals_lookup[sym_index] == -1) return null;
-    return @intCast(u32, self.globals_lookup[sym_index]);
+    return @as(u32, @intCast(self.globals_lookup[sym_index]));
 }
 
 pub fn getAtomIndexForSymbol(self: Object, sym_index: u32) ?AtomIndex {
@@ -1041,7 +1041,7 @@ pub fn getUnwindRecords(self: Object) []align(1) const macho.compact_unwind_entr
     const sect = self.getSourceSection(sect_id);
     const data = self.getSectionContents(sect);
     const num_entries = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
-    return @ptrCast([*]align(1) const macho.compact_unwind_entry, data)[0..num_entries];
+    return @as([*]align(1) const macho.compact_unwind_entry, @ptrCast(data))[0..num_entries];
 }
 
 pub fn hasEhFrameRecords(self: Object) bool {
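
Object.zig reinterprets byte ranges of the mapped file as record arrays throughout. File contents are only byte-aligned, hence the `[*]align(1)` pointers; only the `@ptrCast` spelling changed. A generic sketch of the pattern (`unalignedView` is a hypothetical helper):

    const std = @import("std");
    const macho = std.macho;

    fn unalignedView(comptime T: type, bytes: []const u8, off: usize, n: usize) []align(1) const T {
        return @as([*]align(1) const T, @ptrCast(bytes.ptr + off))[0..n];
    }

    // e.g. the data-in-code table:
    //   const dice = unalignedView(macho.data_in_code_entry, contents, cmd.dataoff, ndice);
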
src/link/MachO/Relocation.zig
@@ -94,9 +94,9 @@ pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, cod
         .tlv_initializer => blk: {
             assert(self.addend == 0); // Addend here makes no sense.
             const header = macho_file.sections.items(.header)[macho_file.thread_data_section_index.?];
-            break :blk @intCast(i64, target_base_addr - header.addr);
+            break :blk @as(i64, @intCast(target_base_addr - header.addr));
         },
-        else => @intCast(i64, target_base_addr) + self.addend,
+        else => @as(i64, @intCast(target_base_addr)) + self.addend,
     };
 
     log.debug("  ({x}: [() => 0x{x} ({s})) ({s})", .{
@@ -119,7 +119,7 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
         .branch => {
             const displacement = math.cast(
                 i28,
-                @intCast(i64, target_addr) - @intCast(i64, source_addr),
+                @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)),
             ) orelse unreachable; // TODO codegen should never allow for jump larger than i28 displacement
             var inst = aarch64.Instruction{
                 .unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload(
@@ -127,25 +127,25 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
                     aarch64.Instruction.unconditional_branch_immediate,
                 ), buffer[0..4]),
             };
-            inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
+            inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2))));
             mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
         },
         .page, .got_page => {
-            const source_page = @intCast(i32, source_addr >> 12);
-            const target_page = @intCast(i32, target_addr >> 12);
-            const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
+            const source_page = @as(i32, @intCast(source_addr >> 12));
+            const target_page = @as(i32, @intCast(target_addr >> 12));
+            const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
             var inst = aarch64.Instruction{
                 .pc_relative_address = mem.bytesToValue(meta.TagPayload(
                     aarch64.Instruction,
                     aarch64.Instruction.pc_relative_address,
                 ), buffer[0..4]),
             };
-            inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
-            inst.pc_relative_address.immlo = @truncate(u2, pages);
+            inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+            inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
             mem.writeIntLittle(u32, buffer[0..4], inst.toU32());
         },
         .pageoff, .got_pageoff => {
-            const narrowed = @truncate(u12, @intCast(u64, target_addr));
+            const narrowed = @as(u12, @truncate(@as(u64, @intCast(target_addr))));
             if (isArithmeticOp(buffer[0..4])) {
                 var inst = aarch64.Instruction{
                     .add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
@@ -180,8 +180,8 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
             }
         },
         .tlv_initializer, .unsigned => switch (self.length) {
-            2 => mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr))),
-            3 => mem.writeIntLittle(u64, buffer[0..8], @bitCast(u64, target_addr)),
+            2 => mem.writeIntLittle(u32, buffer[0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr))))),
+            3 => mem.writeIntLittle(u64, buffer[0..8], @as(u64, @bitCast(target_addr))),
             else => unreachable,
         },
         .got, .signed, .tlv => unreachable, // Invalid target architecture.
@@ -191,16 +191,16 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: i64, code: []
 fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8) void {
     switch (self.type) {
         .branch, .got, .tlv, .signed => {
-            const displacement = @intCast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4);
-            mem.writeIntLittle(u32, code[self.offset..][0..4], @bitCast(u32, displacement));
+            const displacement = @as(i32, @intCast(@as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr)) - 4));
+            mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @bitCast(displacement)));
         },
         .tlv_initializer, .unsigned => {
             switch (self.length) {
                 2 => {
-                    mem.writeIntLittle(u32, code[self.offset..][0..4], @truncate(u32, @bitCast(u64, target_addr)));
+                    mem.writeIntLittle(u32, code[self.offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(target_addr)))));
                 },
                 3 => {
-                    mem.writeIntLittle(u64, code[self.offset..][0..8], @bitCast(u64, target_addr));
+                    mem.writeIntLittle(u64, code[self.offset..][0..8], @as(u64, @bitCast(target_addr)));
                 },
                 else => unreachable,
             }
@@ -210,24 +210,24 @@ fn resolveX8664(self: Relocation, source_addr: u64, target_addr: i64, code: []u8
 }
 
 pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
-    const group_decode = @truncate(u5, inst[3]);
+    const group_decode = @as(u5, @truncate(inst[3]));
     return ((group_decode >> 2) == 4);
 }
 
 pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 {
-    const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr + 4 + correction);
+    const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr + 4 + correction));
     return math.cast(i32, disp) orelse error.Overflow;
 }
 
 pub fn calcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 {
-    const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr);
+    const disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
     return math.cast(i28, disp) orelse error.Overflow;
 }
 
 pub fn calcNumberOfPages(source_addr: u64, target_addr: u64) i21 {
-    const source_page = @intCast(i32, source_addr >> 12);
-    const target_page = @intCast(i32, target_addr >> 12);
-    const pages = @intCast(i21, target_page - source_page);
+    const source_page = @as(i32, @intCast(source_addr >> 12));
+    const target_page = @as(i32, @intCast(target_addr >> 12));
+    const pages = @as(i21, @intCast(target_page - source_page));
     return pages;
 }
 
@@ -241,7 +241,7 @@ pub const PageOffsetInstKind = enum {
 };
 
 pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 {
-    const narrowed = @truncate(u12, target_addr);
+    const narrowed = @as(u12, @truncate(target_addr));
     return switch (kind) {
         .arithmetic, .load_store_8 => narrowed,
         .load_store_16 => try math.divExact(u12, narrowed, 2),
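
The AArch64 helpers above do the ADRP page arithmetic; under the new syntax the casts read inside-out but the math is unchanged. Restated compactly (same computation as `calcNumberOfPages` and `calcPageOffset`):

    const std = @import("std");

    fn pageDelta(source_addr: u64, target_addr: u64) i21 {
        // 4 KiB pages: compare the addresses with the low 12 bits dropped.
        const source_page = @as(i32, @intCast(source_addr >> 12));
        const target_page = @as(i32, @intCast(target_addr >> 12));
        return @as(i21, @intCast(target_page - source_page));
    }

    fn pageOffset(target_addr: u64) u12 {
        // The low 12 bits become the ADD/LDR immediate.
        return @as(u12, @truncate(target_addr));
    }

    test "page math" {
        try std.testing.expectEqual(@as(i21, 1), pageDelta(0x1000, 0x2fff));
        try std.testing.expectEqual(@as(u12, 0xfff), pageOffset(0x2fff));
    }
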
src/link/MachO/thunks.zig
@@ -131,7 +131,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
         log.debug("GROUP END at {d}", .{group_end});
 
         // Insert thunk at group_end
-        const thunk_index = @intCast(u32, zld.thunks.items.len);
+        const thunk_index = @as(u32, @intCast(zld.thunks.items.len));
         try zld.thunks.append(gpa, .{ .start_index = undefined, .len = 0 });
 
         // Scan relocs in the group and create trampolines for any unreachable callsite.
@@ -174,7 +174,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
         }
     }
 
-    header.size = @intCast(u32, offset);
+    header.size = @as(u32, @intCast(offset));
 }
 
 fn allocateThunk(
@@ -223,7 +223,7 @@ fn scanRelocs(
 
     const base_offset = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
         const source_sect = object.getSourceSection(source_sym.n_sect - 1);
-        break :blk @intCast(i32, source_sym.n_value - source_sect.addr);
+        break :blk @as(i32, @intCast(source_sym.n_value - source_sect.addr));
     } else 0;
 
     const code = Atom.getAtomCode(zld, atom_index);
@@ -289,7 +289,7 @@ fn scanRelocs(
 }
 
 inline fn relocNeedsThunk(rel: macho.relocation_info) bool {
-    const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+    const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
     return rel_type == .ARM64_RELOC_BRANCH26;
 }
 
@@ -315,7 +315,7 @@ fn isReachable(
 
     if (!allocated.contains(target_atom_index)) return false;
 
-    const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset);
+    const source_addr = source_sym.n_value + @as(u32, @intCast(rel.r_address - base_offset));
     const is_via_got = Atom.relocRequiresGot(zld, rel);
     const target_addr = Atom.getRelocTargetAddress(zld, target, is_via_got, false) catch unreachable;
     _ = Relocation.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch
@@ -349,7 +349,7 @@ fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex {
         const end_addr = start_addr + thunk.getSize();
 
         if (start_addr <= sym.n_value and sym.n_value < end_addr) {
-            return @intCast(u32, i);
+            return @as(u32, @intCast(i));
         }
     }
     return null;
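
`@enumFromInt` likewise loses its type argument; the enum type now flows in from the enclosing `@as`. The `relocNeedsThunk` rewrite above, in isolation:

    const macho = @import("std").macho;

    inline fn relocNeedsThunk(rel: macho.relocation_info) bool {
        const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
        return rel_type == .ARM64_RELOC_BRANCH26;
    }
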
src/link/MachO/Trie.zig
@@ -220,7 +220,7 @@ pub const Node = struct {
             try writer.writeByte(0);
         }
         // Write number of edges (max legal number of edges is 256).
-        try writer.writeByte(@intCast(u8, self.edges.items.len));
+        try writer.writeByte(@as(u8, @intCast(self.edges.items.len)));
 
         for (self.edges.items) |edge| {
             // Write edge label and offset to next node in trie.
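
The `writeByte` call above narrows the edge count through `@intCast`, which keeps its range check under the new spelling. A small demonstration, assuming a safe build mode:

    const std = @import("std");

    test "intCast keeps its range check" {
        const n: usize = 255;
        try std.testing.expectEqual(@as(u8, 255), @as(u8, @intCast(n)));
        // With n == 256 this would panic in safe builds, exactly as the
        // old @intCast(u8, n) did.
    }
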
src/link/MachO/UnwindInfo.zig
@@ -87,7 +87,7 @@ const Page = struct {
             const record_id = page.page_encodings[index];
             const record = info.records.items[record_id];
             if (record.compactUnwindEncoding == enc) {
-                return @intCast(u8, index);
+                return @as(u8, @intCast(index));
             }
         }
         return null;
@@ -150,14 +150,14 @@ const Page = struct {
 
                 for (info.records.items[page.start..][0..page.count]) |record| {
                     try writer.writeStruct(macho.unwind_info_regular_second_level_entry{
-                        .functionOffset = @intCast(u32, record.rangeStart),
+                        .functionOffset = @as(u32, @intCast(record.rangeStart)),
                         .encoding = record.compactUnwindEncoding,
                     });
                 }
             },
             .compressed => {
                 const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) +
-                    @intCast(u16, page.page_encodings_count) * @sizeOf(u32);
+                    @as(u16, @intCast(page.page_encodings_count)) * @sizeOf(u32);
                 try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{
                     .entryPageOffset = entry_offset,
                     .entryCount = page.count,
@@ -183,8 +183,8 @@ const Page = struct {
                         break :blk ncommon + page.getPageEncoding(info, record.compactUnwindEncoding).?;
                     };
                     const compressed = macho.UnwindInfoCompressedEntry{
-                        .funcOffset = @intCast(u24, record.rangeStart - first_entry.rangeStart),
-                        .encodingIndex = @intCast(u8, enc_index),
+                        .funcOffset = @as(u24, @intCast(record.rangeStart - first_entry.rangeStart)),
+                        .encodingIndex = @as(u8, @intCast(enc_index)),
                     };
                     try writer.writeStruct(compressed);
                 }
@@ -214,15 +214,15 @@ pub fn scanRelocs(zld: *Zld) !void {
             if (!UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
                 if (getPersonalityFunctionReloc(
                     zld,
-                    @intCast(u32, object_id),
+                    @as(u32, @intCast(object_id)),
                     record_id,
                 )) |rel| {
                     // Personality function; add GOT pointer.
                     const target = Atom.parseRelocTarget(zld, .{
-                        .object_id = @intCast(u32, object_id),
+                        .object_id = @as(u32, @intCast(object_id)),
                         .rel = rel,
                         .code = mem.asBytes(&record),
-                        .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+                        .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
                     });
                     try Atom.addGotEntry(zld, target);
                 }
@@ -258,18 +258,18 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
                 var record = unwind_records[record_id];
 
                 if (UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
-                    try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
+                    try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record);
                 } else {
                     if (getPersonalityFunctionReloc(
                         zld,
-                        @intCast(u32, object_id),
+                        @as(u32, @intCast(object_id)),
                         record_id,
                     )) |rel| {
                         const target = Atom.parseRelocTarget(zld, .{
-                            .object_id = @intCast(u32, object_id),
+                            .object_id = @as(u32, @intCast(object_id)),
                             .rel = rel,
                             .code = mem.asBytes(&record),
-                            .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+                            .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
                         });
                         const personality_index = info.getPersonalityFunction(target) orelse inner: {
                             const personality_index = info.personalities_count;
@@ -282,14 +282,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
                         UnwindEncoding.setPersonalityIndex(&record.compactUnwindEncoding, personality_index + 1);
                     }
 
-                    if (getLsdaReloc(zld, @intCast(u32, object_id), record_id)) |rel| {
+                    if (getLsdaReloc(zld, @as(u32, @intCast(object_id)), record_id)) |rel| {
                         const target = Atom.parseRelocTarget(zld, .{
-                            .object_id = @intCast(u32, object_id),
+                            .object_id = @as(u32, @intCast(object_id)),
                             .rel = rel,
                             .code = mem.asBytes(&record),
-                            .base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
+                            .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))),
                         });
-                        record.lsda = @bitCast(u64, target);
+                        record.lsda = @as(u64, @bitCast(target));
                     }
                 }
                 break :blk record;
@@ -302,7 +302,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
                     if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| {
                         if (object.eh_frame_relocs_lookup.get(fde_offset).?.dead) continue;
                         var record = nullRecord();
-                        try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
+                        try info.collectPersonalityFromDwarf(zld, @as(u32, @intCast(object_id)), atom_index, &record);
                         switch (cpu_arch) {
                             .aarch64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_ARM64_MODE.DWARF),
                             .x86_64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_X86_64_MODE.DWARF),
@@ -320,7 +320,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
             const sym = zld.getSymbol(sym_loc);
             assert(sym.n_desc != N_DEAD);
             record.rangeStart = sym.n_value;
-            record.rangeLength = @intCast(u32, atom.size);
+            record.rangeLength = @as(u32, @intCast(atom.size));
 
             records.appendAssumeCapacity(record);
             atom_indexes.appendAssumeCapacity(atom_index);
@@ -329,7 +329,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
 
     // Fold records
     try info.records.ensureTotalCapacity(info.gpa, records.items.len);
-    try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));
+    try info.records_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(atom_indexes.items.len)));
 
     var maybe_prev: ?macho.compact_unwind_entry = null;
     for (records.items, 0..) |record, i| {
@@ -341,15 +341,15 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
                     (prev.personalityFunction != record.personalityFunction) or
                     record.lsda > 0)
                 {
-                    const record_id = @intCast(RecordIndex, info.records.items.len);
+                    const record_id = @as(RecordIndex, @intCast(info.records.items.len));
                     info.records.appendAssumeCapacity(record);
                     maybe_prev = record;
                     break :blk record_id;
                 } else {
-                    break :blk @intCast(RecordIndex, info.records.items.len - 1);
+                    break :blk @as(RecordIndex, @intCast(info.records.items.len - 1));
                 }
             } else {
-                const record_id = @intCast(RecordIndex, info.records.items.len);
+                const record_id = @as(RecordIndex, @intCast(info.records.items.len));
                 info.records.appendAssumeCapacity(record);
                 maybe_prev = record;
                 break :blk record_id;
@@ -459,14 +459,14 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
                 }
             }
 
-            page.count = @intCast(u16, i - page.start);
+            page.count = @as(u16, @intCast(i - page.start));
 
             if (i < info.records.items.len and page.count < max_regular_second_level_entries) {
                 page.kind = .regular;
-                page.count = @intCast(u16, @min(
+                page.count = @as(u16, @intCast(@min(
                     max_regular_second_level_entries,
                     info.records.items.len - page.start,
-                ));
+                )));
                 i = page.start + page.count;
             } else {
                 page.kind = .compressed;
@@ -479,11 +479,11 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
     }
 
     // Save indices of records requiring LSDA relocation
-    try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
+    try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @as(u32, @intCast(info.records.items.len)));
     for (info.records.items, 0..) |rec, i| {
-        info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
+        info.lsdas_lookup.putAssumeCapacityNoClobber(@as(RecordIndex, @intCast(i)), @as(u32, @intCast(info.lsdas.items.len)));
         if (rec.lsda == 0) continue;
-        try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
+        try info.lsdas.append(info.gpa, @as(RecordIndex, @intCast(i)));
     }
 }
 
@@ -506,7 +506,7 @@ fn collectPersonalityFromDwarf(
 
     if (cie.getPersonalityPointerReloc(
         zld,
-        @intCast(u32, object_id),
+        @as(u32, @intCast(object_id)),
         cie_offset,
     )) |target| {
         const personality_index = info.getPersonalityFunction(target) orelse inner: {
@@ -532,8 +532,8 @@ fn calcRequiredSize(info: UnwindInfo) usize {
     var total_size: usize = 0;
     total_size += @sizeOf(macho.unwind_info_section_header);
     total_size +=
-        @intCast(usize, info.common_encodings_count) * @sizeOf(macho.compact_unwind_encoding_t);
-    total_size += @intCast(usize, info.personalities_count) * @sizeOf(u32);
+        @as(usize, @intCast(info.common_encodings_count)) * @sizeOf(macho.compact_unwind_encoding_t);
+    total_size += @as(usize, @intCast(info.personalities_count)) * @sizeOf(u32);
     total_size += (info.pages.items.len + 1) * @sizeOf(macho.unwind_info_section_header_index_entry);
     total_size += info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry);
     total_size += info.pages.items.len * second_level_page_bytes;
@@ -557,7 +557,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
         const atom_index = zld.getGotAtomIndexForSymbol(target).?;
         const atom = zld.getAtom(atom_index);
         const sym = zld.getSymbol(atom.getSymbolWithLoc());
-        personalities[i] = @intCast(u32, sym.n_value - seg.vmaddr);
+        personalities[i] = @as(u32, @intCast(sym.n_value - seg.vmaddr));
         log.debug("  {d}: 0x{x} ({s})", .{ i, personalities[i], zld.getSymbolName(target) });
     }
 
@@ -570,7 +570,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
         }
 
         if (rec.compactUnwindEncoding > 0 and !UnwindEncoding.isDwarf(rec.compactUnwindEncoding, cpu_arch)) {
-            const lsda_target = @bitCast(SymbolWithLoc, rec.lsda);
+            const lsda_target = @as(SymbolWithLoc, @bitCast(rec.lsda));
             if (lsda_target.getFile()) |_| {
                 const sym = zld.getSymbol(lsda_target);
                 rec.lsda = sym.n_value - seg.vmaddr;
@@ -601,7 +601,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
     const personalities_offset: u32 = common_encodings_offset + common_encodings_count * @sizeOf(u32);
     const personalities_count: u32 = info.personalities_count;
     const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32);
-    const indexes_count: u32 = @intCast(u32, info.pages.items.len + 1);
+    const indexes_count: u32 = @as(u32, @intCast(info.pages.items.len + 1));
 
     try writer.writeStruct(macho.unwind_info_section_header{
         .commonEncodingsArraySectionOffset = common_encodings_offset,
@@ -615,34 +615,34 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
     try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));
     try writer.writeAll(mem.sliceAsBytes(personalities[0..info.personalities_count]));
 
-    const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
-    const lsda_base_offset = @intCast(u32, pages_base_offset -
-        (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
+    const pages_base_offset = @as(u32, @intCast(size - (info.pages.items.len * second_level_page_bytes)));
+    const lsda_base_offset = @as(u32, @intCast(pages_base_offset -
+        (info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry))));
     for (info.pages.items, 0..) |page, i| {
         assert(page.count > 0);
         const first_entry = info.records.items[page.start];
         try writer.writeStruct(macho.unwind_info_section_header_index_entry{
-            .functionOffset = @intCast(u32, first_entry.rangeStart),
-            .secondLevelPagesSectionOffset = @intCast(u32, pages_base_offset + i * second_level_page_bytes),
+            .functionOffset = @as(u32, @intCast(first_entry.rangeStart)),
+            .secondLevelPagesSectionOffset = @as(u32, @intCast(pages_base_offset + i * second_level_page_bytes)),
             .lsdaIndexArraySectionOffset = lsda_base_offset +
                 info.lsdas_lookup.get(page.start).? * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
         });
     }
 
     const last_entry = info.records.items[info.records.items.len - 1];
-    const sentinel_address = @intCast(u32, last_entry.rangeStart + last_entry.rangeLength);
+    const sentinel_address = @as(u32, @intCast(last_entry.rangeStart + last_entry.rangeLength));
     try writer.writeStruct(macho.unwind_info_section_header_index_entry{
         .functionOffset = sentinel_address,
         .secondLevelPagesSectionOffset = 0,
         .lsdaIndexArraySectionOffset = lsda_base_offset +
-            @intCast(u32, info.lsdas.items.len) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
+            @as(u32, @intCast(info.lsdas.items.len)) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
     });
 
     for (info.lsdas.items) |record_id| {
         const record = info.records.items[record_id];
         try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
-            .functionOffset = @intCast(u32, record.rangeStart),
-            .lsdaOffset = @intCast(u32, record.lsda),
+            .functionOffset = @as(u32, @intCast(record.rangeStart)),
+            .lsdaOffset = @as(u32, @intCast(record.lsda)),
         });
     }
 
@@ -674,7 +674,7 @@ fn getRelocs(zld: *Zld, object_id: u32, record_id: usize) []const macho.relocati
 }
 
 fn isPersonalityFunction(record_id: usize, rel: macho.relocation_info) bool {
-    const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
+    const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry)));
     const rel_offset = rel.r_address - base_offset;
     return rel_offset == 16;
 }
@@ -703,7 +703,7 @@ fn getPersonalityFunction(info: UnwindInfo, global_index: SymbolWithLoc) ?u2 {
 }
 
 fn isLsda(record_id: usize, rel: macho.relocation_info) bool {
-    const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
+    const base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry)));
     const rel_offset = rel.r_address - base_offset;
     return rel_offset == 24;
 }
@@ -754,45 +754,45 @@ fn getCommonEncoding(info: UnwindInfo, enc: macho.compact_unwind_encoding_t) ?u7
 pub const UnwindEncoding = struct {
     pub fn getMode(enc: macho.compact_unwind_encoding_t) u4 {
         comptime assert(macho.UNWIND_ARM64_MODE_MASK == macho.UNWIND_X86_64_MODE_MASK);
-        return @truncate(u4, (enc & macho.UNWIND_ARM64_MODE_MASK) >> 24);
+        return @as(u4, @truncate((enc & macho.UNWIND_ARM64_MODE_MASK) >> 24));
     }
 
     pub fn isDwarf(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) bool {
         const mode = getMode(enc);
         return switch (cpu_arch) {
-            .aarch64 => @enumFromInt(macho.UNWIND_ARM64_MODE, mode) == .DWARF,
-            .x86_64 => @enumFromInt(macho.UNWIND_X86_64_MODE, mode) == .DWARF,
+            .aarch64 => @as(macho.UNWIND_ARM64_MODE, @enumFromInt(mode)) == .DWARF,
+            .x86_64 => @as(macho.UNWIND_X86_64_MODE, @enumFromInt(mode)) == .DWARF,
             else => unreachable,
         };
     }
 
     pub fn setMode(enc: *macho.compact_unwind_encoding_t, mode: anytype) void {
-        enc.* |= @intCast(u32, @intFromEnum(mode)) << 24;
+        enc.* |= @as(u32, @intCast(@intFromEnum(mode))) << 24;
     }
 
     pub fn hasLsda(enc: macho.compact_unwind_encoding_t) bool {
-        const has_lsda = @truncate(u1, (enc & macho.UNWIND_HAS_LSDA) >> 31);
+        const has_lsda = @as(u1, @truncate((enc & macho.UNWIND_HAS_LSDA) >> 31));
         return has_lsda == 1;
     }
 
     pub fn setHasLsda(enc: *macho.compact_unwind_encoding_t, has_lsda: bool) void {
-        const mask = @intCast(u32, @intFromBool(has_lsda)) << 31;
+        const mask = @as(u32, @intCast(@intFromBool(has_lsda))) << 31;
         enc.* |= mask;
     }
 
     pub fn getPersonalityIndex(enc: macho.compact_unwind_encoding_t) u2 {
-        const index = @truncate(u2, (enc & macho.UNWIND_PERSONALITY_MASK) >> 28);
+        const index = @as(u2, @truncate((enc & macho.UNWIND_PERSONALITY_MASK) >> 28));
         return index;
     }
 
     pub fn setPersonalityIndex(enc: *macho.compact_unwind_encoding_t, index: u2) void {
-        const mask = @intCast(u32, index) << 28;
+        const mask = @as(u32, @intCast(index)) << 28;
         enc.* |= mask;
     }
 
     pub fn getDwarfSectionOffset(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) u24 {
         assert(isDwarf(enc, cpu_arch));
-        const offset = @truncate(u24, enc);
+        const offset = @as(u24, @truncate(enc));
         return offset;
     }
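
The UnwindEncoding getters above all follow the same mechanical rewrite: the destination type moves out of the two-argument @truncate and into a wrapping @as, so the builtin infers its result type. A minimal standalone sketch of the pattern, with the mask inlined as an assumed stand-in for macho.UNWIND_PERSONALITY_MASK (not taken from this commit):

    const std = @import("std");

    fn getPersonalityIndex(enc: u32) u2 {
        // Old syntax: @truncate(u2, (enc & mask) >> 28)
        const mask: u32 = 0x3000_0000; // assumed value of UNWIND_PERSONALITY_MASK
        return @as(u2, @truncate((enc & mask) >> 28));
    }

    test "personality index extraction" {
        try std.testing.expectEqual(@as(u2, 2), getPersonalityIndex(0x2000_0000));
    }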
 
src/link/MachO/zld.zig
@@ -103,7 +103,7 @@ pub const Zld = struct {
         const cpu_arch = self.options.target.cpu.arch;
         const mtime: u64 = mtime: {
             const stat = file.stat() catch break :mtime 0;
-            break :mtime @intCast(u64, @divFloor(stat.mtime, 1_000_000_000));
+            break :mtime @as(u64, @intCast(@divFloor(stat.mtime, 1_000_000_000)));
         };
         const file_stat = try file.stat();
         const file_size = math.cast(usize, file_stat.size) orelse return error.Overflow;
@@ -220,7 +220,7 @@ pub const Zld = struct {
         const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null);
         defer gpa.free(contents);
 
-        const dylib_id = @intCast(u16, self.dylibs.items.len);
+        const dylib_id = @as(u16, @intCast(self.dylibs.items.len));
         var dylib = Dylib{ .weak = opts.weak };
 
         dylib.parseFromBinary(
@@ -535,7 +535,7 @@ pub const Zld = struct {
 
     pub fn createEmptyAtom(self: *Zld, sym_index: u32, size: u64, alignment: u32) !AtomIndex {
         const gpa = self.gpa;
-        const index = @intCast(AtomIndex, self.atoms.items.len);
+        const index = @as(AtomIndex, @intCast(self.atoms.items.len));
         const atom = try self.atoms.addOne(gpa);
         atom.* = Atom.empty;
         atom.sym_index = sym_index;
@@ -596,7 +596,7 @@ pub const Zld = struct {
         const global_index = self.dyld_stub_binder_index orelse return;
         const target = self.globals.items[global_index];
         const atom_index = try self.createGotAtom();
-        const got_index = @intCast(u32, self.got_entries.items.len);
+        const got_index = @as(u32, @intCast(self.got_entries.items.len));
         try self.got_entries.append(gpa, .{
             .target = target,
             .atom_index = atom_index,
@@ -874,7 +874,7 @@ pub const Zld = struct {
         }
 
         for (self.objects.items, 0..) |_, object_id| {
-            try self.resolveSymbolsInObject(@intCast(u32, object_id), resolver);
+            try self.resolveSymbolsInObject(@as(u32, @intCast(object_id)), resolver);
         }
 
         try self.resolveSymbolsInArchives(resolver);
@@ -1024,7 +1024,7 @@ pub const Zld = struct {
                 };
                 assert(offsets.items.len > 0);
 
-                const object_id = @intCast(u16, self.objects.items.len);
+                const object_id = @as(u16, @intCast(self.objects.items.len));
                 const object = archive.parseObject(gpa, cpu_arch, offsets.items[0]) catch |e| switch (e) {
                     error.MismatchedCpuArchitecture => {
                         log.err("CPU architecture mismatch found in {s}", .{archive.name});
@@ -1055,14 +1055,14 @@ pub const Zld = struct {
             for (self.dylibs.items, 0..) |dylib, id| {
                 if (!dylib.symbols.contains(sym_name)) continue;
 
-                const dylib_id = @intCast(u16, id);
+                const dylib_id = @as(u16, @intCast(id));
                 if (!self.referenced_dylibs.contains(dylib_id)) {
                     try self.referenced_dylibs.putNoClobber(self.gpa, dylib_id, {});
                 }
 
                 const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
                 sym.n_type |= macho.N_EXT;
-                sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER;
+                sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER;
 
                 if (dylib.weak) {
                     sym.n_desc |= macho.N_WEAK_REF;
@@ -1099,9 +1099,9 @@ pub const Zld = struct {
                 _ = resolver.unresolved.swapRemove(global_index);
                 continue;
             } else if (allow_undef) {
-                const n_desc = @bitCast(
+                const n_desc = @as(
                     u16,
-                    macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @intCast(i16, macho.N_SYMBOL_RESOLVER),
+                    @bitCast(macho.BIND_SPECIAL_DYLIB_FLAT_LOOKUP * @as(i16, @intCast(macho.N_SYMBOL_RESOLVER))),
                 );
                 sym.n_type = macho.N_EXT;
                 sym.n_desc = n_desc;
@@ -1238,7 +1238,7 @@ pub const Zld = struct {
             const segname = header.segName();
             const segment_id = self.getSegmentByName(segname) orelse blk: {
                 log.debug("creating segment '{s}'", .{segname});
-                const segment_id = @intCast(u8, self.segments.items.len);
+                const segment_id = @as(u8, @intCast(self.segments.items.len));
                 const protection = getSegmentMemoryProtection(segname);
                 try self.segments.append(self.gpa, .{
                     .cmdsize = @sizeOf(macho.segment_command_64),
@@ -1269,7 +1269,7 @@ pub const Zld = struct {
     pub fn allocateSymbol(self: *Zld) !u32 {
         try self.locals.ensureUnusedCapacity(self.gpa, 1);
         log.debug("  (allocating symbol index {d})", .{self.locals.items.len});
-        const index = @intCast(u32, self.locals.items.len);
+        const index = @as(u32, @intCast(self.locals.items.len));
         _ = self.locals.addOneAssumeCapacity();
         self.locals.items[index] = .{
             .n_strx = 0,
@@ -1282,7 +1282,7 @@ pub const Zld = struct {
     }
 
     fn addGlobal(self: *Zld, sym_loc: SymbolWithLoc) !u32 {
-        const global_index = @intCast(u32, self.globals.items.len);
+        const global_index = @as(u32, @intCast(self.globals.items.len));
         try self.globals.append(self.gpa, sym_loc);
         return global_index;
     }
@@ -1489,7 +1489,7 @@ pub const Zld = struct {
                 if (mem.eql(u8, header.sectName(), "__stub_helper")) continue;
 
                 // Create jump/branch range extenders if needed.
-                try thunks.createThunks(self, @intCast(u8, sect_id));
+                try thunks.createThunks(self, @as(u8, @intCast(sect_id)));
             }
         }
     }
@@ -1502,7 +1502,7 @@ pub const Zld = struct {
                 .dylibs = self.dylibs.items,
                 .referenced_dylibs = self.referenced_dylibs.keys(),
             }) else 0;
-            try self.allocateSegment(@intCast(u8, segment_index), base_size);
+            try self.allocateSegment(@as(u8, @intCast(segment_index)), base_size);
         }
     }
 
@@ -1536,12 +1536,12 @@ pub const Zld = struct {
         for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
             const alignment = try math.powi(u32, 2, header.@"align");
             const start_aligned = mem.alignForward(u64, start, alignment);
-            const n_sect = @intCast(u8, indexes.start + sect_id + 1);
+            const n_sect = @as(u8, @intCast(indexes.start + sect_id + 1));
 
             header.offset = if (header.isZerofill())
                 0
             else
-                @intCast(u32, segment.fileoff + start_aligned);
+                @as(u32, @intCast(segment.fileoff + start_aligned));
             header.addr = segment.vmaddr + start_aligned;
 
             var atom_index = slice.items(.first_atom_index)[indexes.start + sect_id];
@@ -1617,7 +1617,7 @@ pub const Zld = struct {
     ) !u8 {
         const gpa = self.gpa;
         log.debug("creating section '{s},{s}'", .{ segname, sectname });
-        const index = @intCast(u8, self.sections.slice().len);
+        const index = @as(u8, @intCast(self.sections.slice().len));
         try self.sections.append(gpa, .{
             .segment_index = undefined, // Segments will be created automatically later down the pipeline
             .header = .{
@@ -1673,12 +1673,12 @@ pub const Zld = struct {
                 },
             }
         };
-        return (@intCast(u8, segment_precedence) << 4) + section_precedence;
+        return (@as(u8, @intCast(segment_precedence)) << 4) + section_precedence;
     }
 
     fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
         for (self.segments.items, 0..) |seg, i| {
-            const indexes = self.getSectionIndexes(@intCast(u8, i));
+            const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
             var out_seg = seg;
             out_seg.cmdsize = @sizeOf(macho.segment_command_64);
             out_seg.nsects = 0;
@@ -1790,7 +1790,7 @@ pub const Zld = struct {
             }
 
             const segment_index = slice.items(.segment_index)[sect_id];
-            const segment = self.getSegment(@intCast(u8, sect_id));
+            const segment = self.getSegment(@as(u8, @intCast(sect_id)));
             if (segment.maxprot & macho.PROT.WRITE == 0) continue;
 
             log.debug("{s},{s}", .{ header.segName(), header.sectName() });
@@ -1820,12 +1820,12 @@ pub const Zld = struct {
                     for (relocs) |rel| {
                         switch (cpu_arch) {
                             .aarch64 => {
-                                const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+                                const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
                                 if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
                                 if (rel.r_length != 3) continue;
                             },
                             .x86_64 => {
-                                const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+                                const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
                                 if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
                                 if (rel.r_length != 3) continue;
                             },
@@ -1841,9 +1841,9 @@ pub const Zld = struct {
                         const target_sym = self.getSymbol(target);
                         if (target_sym.undf()) continue;
 
-                        const base_offset = @intCast(i32, sym.n_value - segment.vmaddr);
+                        const base_offset = @as(i32, @intCast(sym.n_value - segment.vmaddr));
                         const rel_offset = rel.r_address - ctx.base_offset;
-                        const offset = @intCast(u64, base_offset + rel_offset);
+                        const offset = @as(u64, @intCast(base_offset + rel_offset));
                         log.debug("    | rebase at {x}", .{offset});
 
                         try rebase.entries.append(self.gpa, .{
@@ -1882,7 +1882,7 @@ pub const Zld = struct {
             const sym = entry.getAtomSymbol(self);
             const base_offset = sym.n_value - seg.vmaddr;
 
-            const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+            const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
             log.debug("    | bind at {x}, import('{s}') in dylib({d})", .{
                 base_offset,
                 bind_sym_name,
@@ -1929,7 +1929,7 @@ pub const Zld = struct {
             }
 
             const segment_index = slice.items(.segment_index)[sect_id];
-            const segment = self.getSegment(@intCast(u8, sect_id));
+            const segment = self.getSegment(@as(u8, @intCast(sect_id)));
             if (segment.maxprot & macho.PROT.WRITE == 0) continue;
 
             const cpu_arch = self.options.target.cpu.arch;
@@ -1959,12 +1959,12 @@ pub const Zld = struct {
                     for (relocs) |rel| {
                         switch (cpu_arch) {
                             .aarch64 => {
-                                const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+                                const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
                                 if (rel_type != .ARM64_RELOC_UNSIGNED) continue;
                                 if (rel.r_length != 3) continue;
                             },
                             .x86_64 => {
-                                const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+                                const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
                                 if (rel_type != .X86_64_RELOC_UNSIGNED) continue;
                                 if (rel.r_length != 3) continue;
                             },
@@ -1983,11 +1983,11 @@ pub const Zld = struct {
                         if (!bind_sym.undf()) continue;
 
                         const base_offset = sym.n_value - segment.vmaddr;
-                        const rel_offset = @intCast(u32, rel.r_address - ctx.base_offset);
-                        const offset = @intCast(u64, base_offset + rel_offset);
+                        const rel_offset = @as(u32, @intCast(rel.r_address - ctx.base_offset));
+                        const offset = @as(u64, @intCast(base_offset + rel_offset));
                         const addend = mem.readIntLittle(i64, code[rel_offset..][0..8]);
 
-                        const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+                        const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
                         log.debug("    | bind at {x}, import('{s}') in dylib({d})", .{
                             base_offset,
                             bind_sym_name,
@@ -2039,7 +2039,7 @@ pub const Zld = struct {
             const stub_entry = self.stubs.items[count];
             const bind_sym = stub_entry.getTargetSymbol(self);
             const bind_sym_name = stub_entry.getTargetSymbolName(self);
-            const dylib_ordinal = @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER);
+            const dylib_ordinal = @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER);
             log.debug("    | lazy bind at {x}, import('{s}') in dylib({d})", .{
                 base_offset,
                 bind_sym_name,
@@ -2165,14 +2165,14 @@ pub const Zld = struct {
         try self.file.pwriteAll(buffer, rebase_off);
         try self.populateLazyBindOffsetsInStubHelper(lazy_bind);
 
-        self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
-        self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
-        self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
-        self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
-        self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
-        self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
-        self.dyld_info_cmd.export_off = @intCast(u32, export_off);
-        self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
+        self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off));
+        self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned));
+        self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off));
+        self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned));
+        self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off));
+        self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned));
+        self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off));
+        self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned));
     }
 
     fn populateLazyBindOffsetsInStubHelper(self: *Zld, lazy_bind: LazyBind) !void {
@@ -2246,7 +2246,7 @@ pub const Zld = struct {
 
         var last_off: u32 = 0;
         for (addresses.items) |addr| {
-            const offset = @intCast(u32, addr - text_seg.vmaddr);
+            const offset = @as(u32, @intCast(addr - text_seg.vmaddr));
             const diff = offset - last_off;
 
             if (diff == 0) continue;
@@ -2258,7 +2258,7 @@ pub const Zld = struct {
         var buffer = std.ArrayList(u8).init(gpa);
         defer buffer.deinit();
 
-        const max_size = @intCast(usize, offsets.items.len * @sizeOf(u64));
+        const max_size = @as(usize, @intCast(offsets.items.len * @sizeOf(u64)));
         try buffer.ensureTotalCapacity(max_size);
 
         for (offsets.items) |offset| {
@@ -2281,8 +2281,8 @@ pub const Zld = struct {
 
         try self.file.pwriteAll(buffer.items, offset);
 
-        self.function_starts_cmd.dataoff = @intCast(u32, offset);
-        self.function_starts_cmd.datasize = @intCast(u32, needed_size_aligned);
+        self.function_starts_cmd.dataoff = @as(u32, @intCast(offset));
+        self.function_starts_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
     }
 
     fn filterDataInCode(
@@ -2324,8 +2324,8 @@ pub const Zld = struct {
                 const source_addr = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
                     source_sym.n_value
                 else blk: {
-                    const nbase = @intCast(u32, object.in_symtab.?.len);
-                    const source_sect_id = @intCast(u8, atom.sym_index - nbase);
+                    const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+                    const source_sect_id = @as(u8, @intCast(atom.sym_index - nbase));
                     break :blk object.getSourceSection(source_sect_id).addr;
                 };
                 const filtered_dice = filterDataInCode(dice, source_addr, source_addr + atom.size);
@@ -2363,8 +2363,8 @@ pub const Zld = struct {
 
         try self.file.pwriteAll(buffer, offset);
 
-        self.data_in_code_cmd.dataoff = @intCast(u32, offset);
-        self.data_in_code_cmd.datasize = @intCast(u32, needed_size_aligned);
+        self.data_in_code_cmd.dataoff = @as(u32, @intCast(offset));
+        self.data_in_code_cmd.datasize = @as(u32, @intCast(needed_size_aligned));
     }
 
     fn writeSymtabs(self: *Zld) !void {
@@ -2428,7 +2428,7 @@ pub const Zld = struct {
             if (!sym.undf()) continue; // not an import, skip
             if (sym.n_desc == N_DEAD) continue;
 
-            const new_index = @intCast(u32, imports.items.len);
+            const new_index = @as(u32, @intCast(imports.items.len));
             var out_sym = sym;
             out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global));
             try imports.append(out_sym);
@@ -2443,9 +2443,9 @@ pub const Zld = struct {
             }
         }
 
-        const nlocals = @intCast(u32, locals.items.len);
-        const nexports = @intCast(u32, exports.items.len);
-        const nimports = @intCast(u32, imports.items.len);
+        const nlocals = @as(u32, @intCast(locals.items.len));
+        const nexports = @as(u32, @intCast(exports.items.len));
+        const nimports = @as(u32, @intCast(imports.items.len));
         const nsyms = nlocals + nexports + nimports;
 
         const seg = self.getLinkeditSegmentPtr();
@@ -2465,7 +2465,7 @@ pub const Zld = struct {
         log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
         try self.file.pwriteAll(buffer.items, offset);
 
-        self.symtab_cmd.symoff = @intCast(u32, offset);
+        self.symtab_cmd.symoff = @as(u32, @intCast(offset));
         self.symtab_cmd.nsyms = nsyms;
 
         return SymtabCtx{
@@ -2493,8 +2493,8 @@ pub const Zld = struct {
 
         try self.file.pwriteAll(buffer, offset);
 
-        self.symtab_cmd.stroff = @intCast(u32, offset);
-        self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
+        self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+        self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned));
     }
 
     const SymtabCtx = struct {
@@ -2506,8 +2506,8 @@ pub const Zld = struct {
 
     fn writeDysymtab(self: *Zld, ctx: SymtabCtx) !void {
         const gpa = self.gpa;
-        const nstubs = @intCast(u32, self.stubs.items.len);
-        const ngot_entries = @intCast(u32, self.got_entries.items.len);
+        const nstubs = @as(u32, @intCast(self.stubs.items.len));
+        const ngot_entries = @as(u32, @intCast(self.got_entries.items.len));
         const nindirectsyms = nstubs * 2 + ngot_entries;
         const iextdefsym = ctx.nlocalsym;
         const iundefsym = iextdefsym + ctx.nextdefsym;
@@ -2572,7 +2572,7 @@ pub const Zld = struct {
         self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
         self.dysymtab_cmd.iundefsym = iundefsym;
         self.dysymtab_cmd.nundefsym = ctx.nundefsym;
-        self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset);
+        self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset));
         self.dysymtab_cmd.nindirectsyms = nindirectsyms;
     }
 
@@ -2599,8 +2599,8 @@ pub const Zld = struct {
         // except for code signature data.
         try self.file.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
 
-        self.codesig_cmd.dataoff = @intCast(u32, offset);
-        self.codesig_cmd.datasize = @intCast(u32, needed_size);
+        self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
+        self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
     }
 
     fn writeCodeSignature(self: *Zld, comp: *const Compilation, code_sig: *CodeSignature) !void {
@@ -2689,7 +2689,7 @@ pub const Zld = struct {
 
     fn getSegmentByName(self: Zld, segname: []const u8) ?u8 {
         for (self.segments.items, 0..) |seg, i| {
-            if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
+            if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i));
         } else return null;
     }
 
@@ -2714,15 +2714,15 @@ pub const Zld = struct {
         // TODO investigate caching with a hashmap
         for (self.sections.items(.header), 0..) |header, i| {
             if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
-                return @intCast(u8, i);
+                return @as(u8, @intCast(i));
         } else return null;
     }
 
     pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } {
         var start: u8 = 0;
         const nsects = for (self.segments.items, 0..) |seg, i| {
-            if (i == segment_index) break @intCast(u8, seg.nsects);
-            start += @intCast(u8, seg.nsects);
+            if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+            start += @as(u8, @intCast(seg.nsects));
         } else 0;
         return .{ .start = start, .end = start + nsects };
     }
@@ -2879,7 +2879,7 @@ pub const Zld = struct {
         var name_lookup: ?DwarfInfo.SubprogramLookupByName = if (object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS == 0) blk: {
             var name_lookup = DwarfInfo.SubprogramLookupByName.init(gpa);
             errdefer name_lookup.deinit();
-            try name_lookup.ensureUnusedCapacity(@intCast(u32, object.atoms.items.len));
+            try name_lookup.ensureUnusedCapacity(@as(u32, @intCast(object.atoms.items.len)));
             try debug_info.genSubprogramLookupByName(compile_unit, lookup, &name_lookup);
             break :blk name_lookup;
         } else null;
@@ -3069,7 +3069,7 @@ pub const Zld = struct {
                 @memset(&buf, '_');
                 scoped_log.debug("    %{d}: {s} @{x} in sect({d}), {s}", .{
                     sym_id,
-                    object.getSymbolName(@intCast(u32, sym_id)),
+                    object.getSymbolName(@as(u32, @intCast(sym_id))),
                     sym.n_value,
                     sym.n_sect,
                     logSymAttributes(sym, &buf),
@@ -3252,7 +3252,7 @@ pub const Zld = struct {
     }
 };
 
-pub const N_DEAD: u16 = @bitCast(u16, @as(i16, -1));
+pub const N_DEAD: u16 = @as(u16, @bitCast(@as(i16, -1)));
 
 const Section = struct {
     header: macho.section_64,
@@ -3791,7 +3791,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
         }
 
         for (zld.objects.items, 0..) |*object, object_id| {
-            try object.splitIntoAtoms(&zld, @intCast(u32, object_id));
+            try object.splitIntoAtoms(&zld, @as(u32, @intCast(object_id)));
         }
 
         if (gc_sections) {
@@ -3929,7 +3929,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
             } else sym.n_value;
 
             try lc_writer.writeStruct(macho.entry_point_command{
-                .entryoff = @intCast(u32, addr - seg.vmaddr),
+                .entryoff = @as(u32, @intCast(addr - seg.vmaddr)),
                 .stacksize = options.stack_size_override orelse 0,
             });
         } else {
@@ -3943,7 +3943,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
         });
         try load_commands.writeBuildVersionLC(zld.options, lc_writer);
 
-        const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+        const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len));
         try lc_writer.writeStruct(zld.uuid_cmd);
 
         try load_commands.writeLoadDylibLCs(zld.dylibs.items, zld.referenced_dylibs.keys(), lc_writer);
@@ -3954,7 +3954,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
 
         const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
         try zld.file.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
-        try zld.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len));
+        try zld.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len)));
         try zld.writeUuid(comp, uuid_cmd_offset, requires_codesig);
 
         if (codesig) |*csig| {
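
The @divTrunc(@as(i16, @bitCast(...))) sites above pack and unpack the dylib ordinal stored in a symbol's n_desc field. A hedged sketch of that round trip, with N_SYMBOL_RESOLVER declared locally as an assumed stand-in for macho.N_SYMBOL_RESOLVER:

    const std = @import("std");

    const N_SYMBOL_RESOLVER = 0x100; // assumed stand-in for macho.N_SYMBOL_RESOLVER

    fn packOrdinal(ordinal: u16) u16 {
        return ordinal * N_SYMBOL_RESOLVER;
    }

    fn unpackOrdinal(n_desc: u16) i16 {
        // Old syntax: @divTrunc(@bitCast(i16, n_desc), N_SYMBOL_RESOLVER)
        return @divTrunc(@as(i16, @bitCast(n_desc)), N_SYMBOL_RESOLVER);
    }

    test "dylib ordinal round-trips through n_desc" {
        try std.testing.expectEqual(@as(i16, 3), unpackOrdinal(packOrdinal(3)));
    }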
src/link/MachO/ZldAtom.zig
@@ -117,8 +117,8 @@ pub fn getSectionAlias(zld: *Zld, atom_index: AtomIndex) ?SymbolWithLoc {
     assert(atom.getFile() != null);
 
     const object = zld.objects.items[atom.getFile().?];
-    const nbase = @intCast(u32, object.in_symtab.?.len);
-    const ntotal = @intCast(u32, object.symtab.len);
+    const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+    const ntotal = @as(u32, @intCast(object.symtab.len));
     var sym_index: u32 = nbase;
     while (sym_index < ntotal) : (sym_index += 1) {
         if (object.getAtomIndexForSymbol(sym_index)) |other_atom_index| {
@@ -144,8 +144,8 @@ pub fn calcInnerSymbolOffset(zld: *Zld, atom_index: AtomIndex, sym_index: u32) u
     const base_addr = if (object.getSourceSymbol(atom.sym_index)) |sym|
         sym.n_value
     else blk: {
-        const nbase = @intCast(u32, object.in_symtab.?.len);
-        const sect_id = @intCast(u8, atom.sym_index - nbase);
+        const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+        const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
         const source_sect = object.getSourceSection(sect_id);
         break :blk source_sect.addr;
     };
@@ -177,15 +177,15 @@ pub fn getRelocContext(zld: *Zld, atom_index: AtomIndex) RelocContext {
     if (object.getSourceSymbol(atom.sym_index)) |source_sym| {
         const source_sect = object.getSourceSection(source_sym.n_sect - 1);
         return .{
-            .base_addr = @intCast(i64, source_sect.addr),
-            .base_offset = @intCast(i32, source_sym.n_value - source_sect.addr),
+            .base_addr = @as(i64, @intCast(source_sect.addr)),
+            .base_offset = @as(i32, @intCast(source_sym.n_value - source_sect.addr)),
         };
     }
-    const nbase = @intCast(u32, object.in_symtab.?.len);
-    const sect_id = @intCast(u8, atom.sym_index - nbase);
+    const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+    const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
     const source_sect = object.getSourceSection(sect_id);
     return .{
-        .base_addr = @intCast(i64, source_sect.addr),
+        .base_addr = @as(i64, @intCast(source_sect.addr)),
         .base_offset = 0,
     };
 }
@@ -204,8 +204,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
     log.debug("parsing reloc target in object({d}) '{s}' ", .{ ctx.object_id, object.name });
 
     const sym_index = if (ctx.rel.r_extern == 0) sym_index: {
-        const sect_id = @intCast(u8, ctx.rel.r_symbolnum - 1);
-        const rel_offset = @intCast(u32, ctx.rel.r_address - ctx.base_offset);
+        const sect_id = @as(u8, @intCast(ctx.rel.r_symbolnum - 1));
+        const rel_offset = @as(u32, @intCast(ctx.rel.r_address - ctx.base_offset));
 
         const address_in_section = if (ctx.rel.r_pcrel == 0) blk: {
             break :blk if (ctx.rel.r_length == 3)
@@ -214,7 +214,7 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
                 mem.readIntLittle(u32, ctx.code[rel_offset..][0..4]);
         } else blk: {
             assert(zld.options.target.cpu.arch == .x86_64);
-            const correction: u3 = switch (@enumFromInt(macho.reloc_type_x86_64, ctx.rel.r_type)) {
+            const correction: u3 = switch (@as(macho.reloc_type_x86_64, @enumFromInt(ctx.rel.r_type))) {
                 .X86_64_RELOC_SIGNED => 0,
                 .X86_64_RELOC_SIGNED_1 => 1,
                 .X86_64_RELOC_SIGNED_2 => 2,
@@ -222,8 +222,8 @@ pub fn parseRelocTarget(zld: *Zld, ctx: struct {
                 else => unreachable,
             };
             const addend = mem.readIntLittle(i32, ctx.code[rel_offset..][0..4]);
-            const target_address = @intCast(i64, ctx.base_addr) + ctx.rel.r_address + 4 + correction + addend;
-            break :blk @intCast(u64, target_address);
+            const target_address = @as(i64, @intCast(ctx.base_addr)) + ctx.rel.r_address + 4 + correction + addend;
+            break :blk @as(u64, @intCast(target_address));
         };
 
         // Find containing atom
@@ -272,7 +272,7 @@ pub fn getRelocTargetAtomIndex(zld: *Zld, target: SymbolWithLoc, is_via_got: boo
 
 fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
     for (relocs) |rel| {
-        const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+        const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
 
         switch (rel_type) {
             .ARM64_RELOC_ADDEND, .ARM64_RELOC_SUBTRACTOR => continue,
@@ -318,7 +318,7 @@ fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) cons
 
 fn scanAtomRelocsX86(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
     for (relocs) |rel| {
-        const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+        const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
 
         switch (rel_type) {
             .X86_64_RELOC_SUBTRACTOR => continue,
@@ -364,7 +364,7 @@ fn addTlvPtrEntry(zld: *Zld, target: SymbolWithLoc) !void {
 
     const gpa = zld.gpa;
     const atom_index = try zld.createTlvPtrAtom();
-    const tlv_ptr_index = @intCast(u32, zld.tlv_ptr_entries.items.len);
+    const tlv_ptr_index = @as(u32, @intCast(zld.tlv_ptr_entries.items.len));
     try zld.tlv_ptr_entries.append(gpa, .{
         .target = target,
         .atom_index = atom_index,
@@ -376,7 +376,7 @@ pub fn addGotEntry(zld: *Zld, target: SymbolWithLoc) !void {
     if (zld.got_table.contains(target)) return;
     const gpa = zld.gpa;
     const atom_index = try zld.createGotAtom();
-    const got_index = @intCast(u32, zld.got_entries.items.len);
+    const got_index = @as(u32, @intCast(zld.got_entries.items.len));
     try zld.got_entries.append(gpa, .{
         .target = target,
         .atom_index = atom_index,
@@ -393,7 +393,7 @@ pub fn addStub(zld: *Zld, target: SymbolWithLoc) !void {
     _ = try zld.createStubHelperAtom();
     _ = try zld.createLazyPointerAtom();
     const atom_index = try zld.createStubAtom();
-    const stubs_index = @intCast(u32, zld.stubs.items.len);
+    const stubs_index = @as(u32, @intCast(zld.stubs.items.len));
     try zld.stubs.append(gpa, .{
         .target = target,
         .atom_index = atom_index,
@@ -489,7 +489,7 @@ fn resolveRelocsArm64(
     var subtractor: ?SymbolWithLoc = null;
 
     for (atom_relocs) |rel| {
-        const rel_type = @enumFromInt(macho.reloc_type_arm64, rel.r_type);
+        const rel_type = @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type));
 
         switch (rel_type) {
             .ARM64_RELOC_ADDEND => {
@@ -529,7 +529,7 @@ fn resolveRelocsArm64(
             .base_addr = context.base_addr,
             .base_offset = context.base_offset,
         });
-        const rel_offset = @intCast(u32, rel.r_address - context.base_offset);
+        const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset));
 
         log.debug("  RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{
             @tagName(rel_type),
@@ -590,7 +590,7 @@ fn resolveRelocsArm64(
                         aarch64.Instruction.unconditional_branch_immediate,
                     ), code),
                 };
-                inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
+                inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(displacement >> 2))));
                 mem.writeIntLittle(u32, code, inst.toU32());
             },
 
@@ -598,11 +598,11 @@ fn resolveRelocsArm64(
             .ARM64_RELOC_GOT_LOAD_PAGE21,
             .ARM64_RELOC_TLVP_LOAD_PAGE21,
             => {
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
 
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
 
-                const pages = @bitCast(u21, Relocation.calcNumberOfPages(source_addr, adjusted_target_addr));
+                const pages = @as(u21, @bitCast(Relocation.calcNumberOfPages(source_addr, adjusted_target_addr)));
                 const code = atom_code[rel_offset..][0..4];
                 var inst = aarch64.Instruction{
                     .pc_relative_address = mem.bytesToValue(meta.TagPayload(
@@ -610,14 +610,14 @@ fn resolveRelocsArm64(
                         aarch64.Instruction.pc_relative_address,
                     ), code),
                 };
-                inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
-                inst.pc_relative_address.immlo = @truncate(u2, pages);
+                inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
+                inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
                 mem.writeIntLittle(u32, code, inst.toU32());
                 addend = null;
             },
 
             .ARM64_RELOC_PAGEOFF12 => {
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
 
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
 
@@ -656,7 +656,7 @@ fn resolveRelocsArm64(
 
             .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
                 const code = atom_code[rel_offset..][0..4];
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
 
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
 
@@ -674,7 +674,7 @@ fn resolveRelocsArm64(
 
             .ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
                 const code = atom_code[rel_offset..][0..4];
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + (addend orelse 0));
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + (addend orelse 0)));
 
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
 
@@ -725,7 +725,7 @@ fn resolveRelocsArm64(
                         .sh = 0,
                         .s = 0,
                         .op = 0,
-                        .sf = @truncate(u1, reg_info.size),
+                        .sf = @as(u1, @truncate(reg_info.size)),
                     },
                 };
                 mem.writeIntLittle(u32, code, inst.toU32());
@@ -734,9 +734,9 @@ fn resolveRelocsArm64(
 
             .ARM64_RELOC_POINTER_TO_GOT => {
                 log.debug("    | target_addr = 0x{x}", .{target_addr});
-                const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
+                const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse
                     return error.Overflow;
-                mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @bitCast(u32, result));
+                mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @bitCast(result)));
             },
 
             .ARM64_RELOC_UNSIGNED => {
@@ -747,7 +747,7 @@ fn resolveRelocsArm64(
 
                 if (rel.r_extern == 0) {
                     const base_addr = if (target.sym_index >= object.source_address_lookup.len)
-                        @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+                        @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
                     else
                         object.source_address_lookup[target.sym_index];
                     ptr_addend -= base_addr;
@@ -756,17 +756,17 @@ fn resolveRelocsArm64(
                 const result = blk: {
                     if (subtractor) |sub| {
                         const sym = zld.getSymbol(sub);
-                        break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + ptr_addend;
+                        break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + ptr_addend;
                     } else {
-                        break :blk @intCast(i64, target_addr) + ptr_addend;
+                        break :blk @as(i64, @intCast(target_addr)) + ptr_addend;
                     }
                 };
                 log.debug("    | target_addr = 0x{x}", .{result});
 
                 if (rel.r_length == 3) {
-                    mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result));
+                    mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result)));
                 } else {
-                    mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result)));
+                    mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result)))));
                 }
 
                 subtractor = null;
@@ -791,7 +791,7 @@ fn resolveRelocsX86(
     var subtractor: ?SymbolWithLoc = null;
 
     for (atom_relocs) |rel| {
-        const rel_type = @enumFromInt(macho.reloc_type_x86_64, rel.r_type);
+        const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
 
         switch (rel_type) {
             .X86_64_RELOC_SUBTRACTOR => {
@@ -823,7 +823,7 @@ fn resolveRelocsX86(
             .base_addr = context.base_addr,
             .base_offset = context.base_offset,
         });
-        const rel_offset = @intCast(u32, rel.r_address - context.base_offset);
+        const rel_offset = @as(u32, @intCast(rel.r_address - context.base_offset));
 
         log.debug("  RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{
             @tagName(rel_type),
@@ -851,7 +851,7 @@ fn resolveRelocsX86(
         switch (rel_type) {
             .X86_64_RELOC_BRANCH => {
                 const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
                 const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
                 mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
@@ -861,7 +861,7 @@ fn resolveRelocsX86(
             .X86_64_RELOC_GOT_LOAD,
             => {
                 const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
                 const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
                 mem.writeIntLittle(i32, atom_code[rel_offset..][0..4], disp);
@@ -869,7 +869,7 @@ fn resolveRelocsX86(
 
             .X86_64_RELOC_TLV => {
                 const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
                 const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
 
@@ -897,14 +897,14 @@ fn resolveRelocsX86(
 
                 if (rel.r_extern == 0) {
                     const base_addr = if (target.sym_index >= object.source_address_lookup.len)
-                        @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+                        @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
                     else
                         object.source_address_lookup[target.sym_index];
-                    addend += @intCast(i32, @intCast(i64, context.base_addr) + rel.r_address + 4 -
-                        @intCast(i64, base_addr));
+                    addend += @as(i32, @intCast(@as(i64, @intCast(context.base_addr)) + rel.r_address + 4 -
+                        @as(i64, @intCast(base_addr))));
                 }
 
-                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
+                const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
 
                 log.debug("    | target_addr = 0x{x}", .{adjusted_target_addr});
 
@@ -920,7 +920,7 @@ fn resolveRelocsX86(
 
                 if (rel.r_extern == 0) {
                     const base_addr = if (target.sym_index >= object.source_address_lookup.len)
-                        @intCast(i64, object.getSourceSection(@intCast(u8, rel.r_symbolnum - 1)).addr)
+                        @as(i64, @intCast(object.getSourceSection(@as(u8, @intCast(rel.r_symbolnum - 1))).addr))
                     else
                         object.source_address_lookup[target.sym_index];
                     addend -= base_addr;
@@ -929,17 +929,17 @@ fn resolveRelocsX86(
                 const result = blk: {
                     if (subtractor) |sub| {
                         const sym = zld.getSymbol(sub);
-                        break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + addend;
+                        break :blk @as(i64, @intCast(target_addr)) - @as(i64, @intCast(sym.n_value)) + addend;
                     } else {
-                        break :blk @intCast(i64, target_addr) + addend;
+                        break :blk @as(i64, @intCast(target_addr)) + addend;
                     }
                 };
                 log.debug("    | target_addr = 0x{x}", .{result});
 
                 if (rel.r_length == 3) {
-                    mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @bitCast(u64, result));
+                    mem.writeIntLittle(u64, atom_code[rel_offset..][0..8], @as(u64, @bitCast(result)));
                 } else {
-                    mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @truncate(u32, @bitCast(u64, result)));
+                    mem.writeIntLittle(u32, atom_code[rel_offset..][0..4], @as(u32, @truncate(@as(u64, @bitCast(result)))));
                 }
 
                 subtractor = null;
@@ -958,19 +958,19 @@ pub fn getAtomCode(zld: *Zld, atom_index: AtomIndex) []const u8 {
         // If there was no matching symbol present in the source symtab, this means
         // we are dealing with either an entire section, or part of it, but also
         // starting at the beginning.
-        const nbase = @intCast(u32, object.in_symtab.?.len);
-        const sect_id = @intCast(u8, atom.sym_index - nbase);
+        const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+        const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
         const source_sect = object.getSourceSection(sect_id);
         assert(!source_sect.isZerofill());
         const code = object.getSectionContents(source_sect);
-        const code_len = @intCast(usize, atom.size);
+        const code_len = @as(usize, @intCast(atom.size));
         return code[0..code_len];
     };
     const source_sect = object.getSourceSection(source_sym.n_sect - 1);
     assert(!source_sect.isZerofill());
     const code = object.getSectionContents(source_sect);
-    const offset = @intCast(usize, source_sym.n_value - source_sect.addr);
-    const code_len = @intCast(usize, atom.size);
+    const offset = @as(usize, @intCast(source_sym.n_value - source_sect.addr));
+    const code_len = @as(usize, @intCast(atom.size));
     return code[offset..][0..code_len];
 }
 
@@ -986,8 +986,8 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_
         // If there was no matching symbol present in the source symtab, this means
         // we are dealing with either an entire section, or part of it, but also
         // starting at the beginning.
-        const nbase = @intCast(u32, object.in_symtab.?.len);
-        const sect_id = @intCast(u8, atom.sym_index - nbase);
+        const nbase = @as(u32, @intCast(object.in_symtab.?.len));
+        const sect_id = @as(u8, @intCast(atom.sym_index - nbase));
         break :blk sect_id;
     };
     const source_sect = object.getSourceSection(source_sect_id);
@@ -998,14 +998,14 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []const macho.relocation_
 
 pub fn relocRequiresGot(zld: *Zld, rel: macho.relocation_info) bool {
     switch (zld.options.target.cpu.arch) {
-        .aarch64 => switch (@enumFromInt(macho.reloc_type_arm64, rel.r_type)) {
+        .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) {
             .ARM64_RELOC_GOT_LOAD_PAGE21,
             .ARM64_RELOC_GOT_LOAD_PAGEOFF12,
             .ARM64_RELOC_POINTER_TO_GOT,
             => return true,
             else => return false,
         },
-        .x86_64 => switch (@enumFromInt(macho.reloc_type_x86_64, rel.r_type)) {
+        .x86_64 => switch (@as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type))) {
             .X86_64_RELOC_GOT,
             .X86_64_RELOC_GOT_LOAD,
             => return true,
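
resolveRelocsArm64 and resolveRelocsX86 above repeatedly widen an unsigned address to i64, apply a signed addend, and cast the result back, which under the new syntax nests one @intCast inside another. A minimal sketch of that nesting; the names are illustrative, not the linker's:

    const std = @import("std");

    fn applyAddend(target_addr: u64, addend: i64) u64 {
        // Old syntax: @intCast(u64, @intCast(i64, target_addr) + addend)
        return @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
    }

    test "negative addend adjusts the target address" {
        try std.testing.expectEqual(@as(u64, 0x1000), applyAddend(0x1008, -8));
    }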
src/link/tapi/Tokenizer.zig
@@ -67,11 +67,11 @@ pub const TokenIterator = struct {
     }
 
     pub fn seekBy(self: *TokenIterator, offset: isize) void {
-        const new_pos = @bitCast(isize, self.pos) + offset;
+        const new_pos = @as(isize, @bitCast(self.pos)) + offset;
         if (new_pos < 0) {
             self.pos = 0;
         } else {
-            self.pos = @intCast(usize, new_pos);
+            self.pos = @as(usize, @intCast(new_pos));
         }
     }
 };
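
The seekBy change above keeps the usize-to-isize @bitCast so the offset arithmetic happens in signed space before clamping. The same clamp as a free function, for illustration only:

    const std = @import("std");

    fn seekBy(pos: usize, offset: isize) usize {
        // Reinterpret the unsigned position, add the signed offset, clamp at zero.
        const new_pos = @as(isize, @bitCast(pos)) + offset;
        if (new_pos < 0) return 0;
        return @as(usize, @intCast(new_pos));
    }

    test "seeking clamps at zero" {
        try std.testing.expectEqual(@as(usize, 0), seekBy(2, -5));
        try std.testing.expectEqual(@as(usize, 7), seekBy(2, 5));
    }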
src/link/Wasm/Atom.zig
@@ -114,7 +114,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
             .R_WASM_GLOBAL_INDEX_I32,
             .R_WASM_MEMORY_ADDR_I32,
             .R_WASM_SECTION_OFFSET_I32,
-            => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @intCast(u32, value)),
+            => std.mem.writeIntLittle(u32, atom.code.items[reloc.offset..][0..4], @as(u32, @intCast(value))),
             .R_WASM_TABLE_INDEX_I64,
             .R_WASM_MEMORY_ADDR_I64,
             => std.mem.writeIntLittle(u64, atom.code.items[reloc.offset..][0..8], value),
@@ -127,7 +127,7 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
             .R_WASM_TABLE_NUMBER_LEB,
             .R_WASM_TYPE_INDEX_LEB,
             .R_WASM_MEMORY_ADDR_TLS_SLEB,
-            => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @intCast(u32, value)),
+            => leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @as(u32, @intCast(value))),
             .R_WASM_MEMORY_ADDR_LEB64,
             .R_WASM_MEMORY_ADDR_SLEB64,
             .R_WASM_TABLE_INDEX_SLEB64,
@@ -173,24 +173,24 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
             if (symbol.isUndefined()) {
                 return 0;
             }
-            const va = @intCast(i64, symbol.virtual_address);
-            return @intCast(u32, va + relocation.addend);
+            const va = @as(i64, @intCast(symbol.virtual_address));
+            return @as(u32, @intCast(va + relocation.addend));
         },
         .R_WASM_EVENT_INDEX_LEB => return symbol.index,
         .R_WASM_SECTION_OFFSET_I32 => {
             const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
             const target_atom = wasm_bin.getAtom(target_atom_index);
-            const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
-            return @intCast(u32, rel_value);
+            const rel_value = @as(i32, @intCast(target_atom.offset)) + relocation.addend;
+            return @as(u32, @intCast(rel_value));
         },
         .R_WASM_FUNCTION_OFFSET_I32 => {
             const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
-                return @bitCast(u32, @as(i32, -1));
+                return @as(u32, @bitCast(@as(i32, -1)));
             };
             const target_atom = wasm_bin.getAtom(target_atom_index);
             const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
-            const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
-            return @intCast(u32, rel_value);
+            const rel_value = @as(i32, @intCast(target_atom.offset + offset)) + relocation.addend;
+            return @as(u32, @intCast(rel_value));
         },
         .R_WASM_MEMORY_ADDR_TLS_SLEB,
         .R_WASM_MEMORY_ADDR_TLS_SLEB64,
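
The R_WASM_FUNCTION_OFFSET_I32 branch above returns @as(u32, @bitCast(@as(i32, -1))) when no target atom exists; the nested casts reinterpret the bits of -1 rather than converting the value. A one-line check of that sentinel, as a sketch rather than code from the commit:

    const std = @import("std");

    test "-1 sentinel reinterprets as all-ones u32" {
        const sentinel = @as(u32, @bitCast(@as(i32, -1)));
        try std.testing.expectEqual(@as(u32, 0xFFFF_FFFF), sentinel);
    }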
src/link/Wasm/Object.zig
@@ -93,7 +93,7 @@ const RelocatableData = struct {
         const data_alignment = object.segment_info[relocatable_data.index].alignment;
         if (data_alignment == 0) return 1;
         // Decode from power of 2 to natural alignment
-        return @as(u32, 1) << @intCast(u5, data_alignment);
+        return @as(u32, 1) << @as(u5, @intCast(data_alignment));
     }
 
     /// Returns the symbol kind that corresponds to the relocatable section
@@ -130,7 +130,7 @@ pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_siz
     const size = maybe_max_size orelse size: {
         errdefer gpa.free(object.name);
         const stat = try file.stat();
-        break :size @intCast(usize, stat.size);
+        break :size @as(usize, @intCast(stat.size));
     };
 
     const file_contents = try gpa.alloc(u8, size);
@@ -365,7 +365,7 @@ fn Parser(comptime ReaderType: type) type {
                 const len = try readLeb(u32, parser.reader.reader());
                 var limited_reader = std.io.limitedReader(parser.reader.reader(), len);
                 const reader = limited_reader.reader();
-                switch (@enumFromInt(std.wasm.Section, byte)) {
+                switch (@as(std.wasm.Section, @enumFromInt(byte))) {
                     .custom => {
                         const name_len = try readLeb(u32, reader);
                         const name = try gpa.alloc(u8, name_len);
@@ -375,13 +375,13 @@ fn Parser(comptime ReaderType: type) type {
                         if (std.mem.eql(u8, name, "linking")) {
                             is_object_file.* = true;
                             parser.object.relocatable_data = relocatable_data.items; // at this point no new relocatable sections will appear so we're free to store them.
-                            try parser.parseMetadata(gpa, @intCast(usize, reader.context.bytes_left));
+                            try parser.parseMetadata(gpa, @as(usize, @intCast(reader.context.bytes_left)));
                         } else if (std.mem.startsWith(u8, name, "reloc")) {
                             try parser.parseRelocations(gpa);
                         } else if (std.mem.eql(u8, name, "target_features")) {
                             try parser.parseFeatures(gpa);
                         } else if (std.mem.startsWith(u8, name, ".debug")) {
-                            const debug_size = @intCast(u32, reader.context.bytes_left);
+                            const debug_size = @as(u32, @intCast(reader.context.bytes_left));
                             const debug_content = try gpa.alloc(u8, debug_size);
                             errdefer gpa.free(debug_content);
                             try reader.readNoEof(debug_content);
@@ -514,7 +514,7 @@ fn Parser(comptime ReaderType: type) type {
                         const count = try readLeb(u32, reader);
                         while (index < count) : (index += 1) {
                             const code_len = try readLeb(u32, reader);
-                            const offset = @intCast(u32, start - reader.context.bytes_left);
+                            const offset = @as(u32, @intCast(start - reader.context.bytes_left));
                             const data = try gpa.alloc(u8, code_len);
                             errdefer gpa.free(data);
                             try reader.readNoEof(data);
@@ -538,7 +538,7 @@ fn Parser(comptime ReaderType: type) type {
                             _ = flags; // TODO: Do we need to check flags to detect passive/active memory?
                             _ = data_offset;
                             const data_len = try readLeb(u32, reader);
-                            const offset = @intCast(u32, start - reader.context.bytes_left);
+                            const offset = @as(u32, @intCast(start - reader.context.bytes_left));
                             const data = try gpa.alloc(u8, data_len);
                             errdefer gpa.free(data);
                             try reader.readNoEof(data);
@@ -645,7 +645,7 @@ fn Parser(comptime ReaderType: type) type {
         /// such as access to the `import` section to find the name of a symbol.
         fn parseSubsection(parser: *ObjectParser, gpa: Allocator, reader: anytype) !void {
             const sub_type = try leb.readULEB128(u8, reader);
-            log.debug("Found subsection: {s}", .{@tagName(@enumFromInt(types.SubsectionType, sub_type))});
+            log.debug("Found subsection: {s}", .{@tagName(@as(types.SubsectionType, @enumFromInt(sub_type)))});
             const payload_len = try leb.readULEB128(u32, reader);
             if (payload_len == 0) return;
 
@@ -655,7 +655,7 @@ fn Parser(comptime ReaderType: type) type {
             // every subsection contains a 'count' field
             const count = try leb.readULEB128(u32, limited_reader);
 
-            switch (@enumFromInt(types.SubsectionType, sub_type)) {
+            switch (@as(types.SubsectionType, @enumFromInt(sub_type))) {
                 .WASM_SEGMENT_INFO => {
                     const segments = try gpa.alloc(types.Segment, count);
                     errdefer gpa.free(segments);
@@ -714,7 +714,7 @@ fn Parser(comptime ReaderType: type) type {
                         errdefer gpa.free(symbols);
                         for (symbols) |*symbol| {
                             symbol.* = .{
-                                .kind = @enumFromInt(types.ComdatSym.Type, try leb.readULEB128(u8, reader)),
+                                .kind = @as(types.ComdatSym.Type, @enumFromInt(try leb.readULEB128(u8, reader))),
                                 .index = try leb.readULEB128(u32, reader),
                             };
                         }
@@ -758,7 +758,7 @@ fn Parser(comptime ReaderType: type) type {
         /// requires access to `Object` to find the name of a symbol when it's
         /// an import and flag `WASM_SYM_EXPLICIT_NAME` is not set.
         fn parseSymbol(parser: *ObjectParser, gpa: Allocator, reader: anytype) !Symbol {
-            const tag = @enumFromInt(Symbol.Tag, try leb.readULEB128(u8, reader));
+            const tag = @as(Symbol.Tag, @enumFromInt(try leb.readULEB128(u8, reader)));
             const flags = try leb.readULEB128(u32, reader);
             var symbol: Symbol = .{
                 .flags = flags,
@@ -846,7 +846,7 @@ fn readLeb(comptime T: type, reader: anytype) !T {
 /// Asserts `T` is an enum
 fn readEnum(comptime T: type, reader: anytype) !T {
     switch (@typeInfo(T)) {
-        .Enum => |enum_type| return @enumFromInt(T, try readLeb(enum_type.tag_type, reader)),
+        .Enum => |enum_type| return @as(T, @enumFromInt(try readLeb(enum_type.tag_type, reader))),
         else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)),
     }
 }
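
The `readEnum` helper above shows where the explicit `@as` is genuinely required under the single-argument builtins: the destination type `T` is a comptime parameter, so there is no result location for `@enumFromInt` to infer from. A minimal runnable sketch of the same shape, with a hypothetical `Opcode` enum and a plain byte reader (it assumes a u8 tag for brevity, whereas the linker's helper reads a LEB128 of the enum's actual tag type):

    const std = @import("std");

    // Hypothetical enum standing in for the linker's section/symbol tags.
    const Opcode = enum(u8) { nop = 0, call = 1 };

    fn readEnumByte(comptime T: type, reader: anytype) !T {
        switch (@typeInfo(T)) {
            // T is only known via the comptime parameter, so @as(T, ...) must stay.
            .Enum => return @as(T, @enumFromInt(try reader.readByte())),
            else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)),
        }
    }

    test "readEnumByte lifts a raw byte into the enum" {
        var fbs = std.io.fixedBufferStream(&[_]u8{1});
        try std.testing.expectEqual(Opcode.call, try readEnumByte(Opcode, fbs.reader()));
    }
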
@@ -867,7 +867,7 @@ fn readLimits(reader: anytype) !std.wasm.Limits {
 
 fn readInit(reader: anytype) !std.wasm.InitExpression {
     const opcode = try reader.readByte();
-    const init_expr: std.wasm.InitExpression = switch (@enumFromInt(std.wasm.Opcode, opcode)) {
+    const init_expr: std.wasm.InitExpression = switch (@as(std.wasm.Opcode, @enumFromInt(opcode))) {
         .i32_const => .{ .i32_const = try readLeb(i32, reader) },
         .global_get => .{ .global_get = try readLeb(u32, reader) },
         else => @panic("TODO: initexpression for other opcodes"),
@@ -899,7 +899,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
         switch (symbol.tag) {
             .function, .data, .section => if (!symbol.isUndefined()) {
                 const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index });
-                const sym_idx = @intCast(u32, symbol_index);
+                const sym_idx = @as(u32, @intCast(symbol_index));
                 if (!gop.found_existing) {
                     gop.value_ptr.* = std.ArrayList(u32).init(gpa);
                 }
@@ -910,11 +910,11 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
     }
 
     for (object.relocatable_data, 0..) |relocatable_data, index| {
-        const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse {
+        const final_index = (try wasm_bin.getMatchingSegment(object_index, @as(u32, @intCast(index)))) orelse {
             continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
         };
 
-        const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
+        const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len));
         const atom = try wasm_bin.managed_atoms.addOne(gpa);
         atom.* = Atom.empty;
         atom.file = object_index;
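
Throughout this file the rewrite is purely mechanical: the old two-argument form `@intCast(u32, x)` becomes `@as(u32, @intCast(x))`, because the single-argument builtins take their destination type from the result location, and the `@as` wrapper pins that location to the type the call previously named. A small self-contained sketch of the narrowing pattern, using arbitrary values rather than the linker's:

    const std = @import("std");

    test "@as supplies the result type that @intCast previously named" {
        const items_len: usize = 7;
        // Old syntax: @intCast(u32, items_len).
        const atom_index = @as(u32, @intCast(items_len));
        try std.testing.expectEqual(@as(u32, 7), atom_index);
    }

`@intCast` keeps its safety check either way: narrowing a value that does not fit the destination type is still checked illegal behavior, so the rewrite changes spelling, not semantics.
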
src/link/Wasm/types.zig
@@ -205,7 +205,7 @@ pub const Feature = struct {
 
         /// From a given cpu feature, returns its linker feature
         pub fn fromCpuFeature(feature: std.Target.wasm.Feature) Tag {
-            return @enumFromInt(Tag, @intFromEnum(feature));
+            return @as(Tag, @enumFromInt(@intFromEnum(feature)));
         }
 
         pub fn format(tag: Tag, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
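
The `fromCpuFeature` hunk above is the enum-to-enum case: `@intFromEnum` lowers the source enum to its integer tag and `@enumFromInt` lifts that integer into the destination enum, which works because the two enums keep their tag values in lockstep. A runnable sketch with two hypothetical mirrored enums standing in for `std.Target.wasm.Feature` and the linker `Tag`:

    const std = @import("std");

    // Hypothetical mirrored enums; the real types share tag values the same way.
    const CpuFeature = enum(u8) { atomics = 0, simd128 = 1 };
    const LinkerTag = enum(u8) { atomics = 0, simd128 = 1 };

    fn fromCpuFeature(feature: CpuFeature) LinkerTag {
        // New syntax: @enumFromInt takes one operand; @as names the target enum.
        return @as(LinkerTag, @enumFromInt(@intFromEnum(feature)));
    }

    test "mirrored enums convert through their shared tag values" {
        try std.testing.expectEqual(LinkerTag.simd128, fromCpuFeature(.simd128));
    }
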
src/link/C.zig
@@ -292,7 +292,7 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo
     {
         var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
         defer export_names.deinit(gpa);
-        try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len));
+        try export_names.ensureTotalCapacity(gpa, @as(u32, @intCast(module.decl_exports.entries.len)));
         for (module.decl_exports.values()) |exports| for (exports.items) |@"export"|
             try export_names.put(gpa, @"export".opts.name, {});
 
@@ -426,7 +426,7 @@ fn flushCTypes(
                 return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count];
             }
         };
-        const decl_idx = @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + decl_i);
+        const decl_idx = @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + decl_i));
         const ctx = Context{
             .arena = global_ctypes.arena.allocator(),
             .ctypes_map = f.ctypes_map.items,
@@ -437,7 +437,7 @@ fn flushCTypes(
             .store = &global_ctypes.set,
         });
         const global_idx =
-            @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index);
+            @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + gop.index));
         f.ctypes_map.appendAssumeCapacity(global_idx);
         if (!gop.found_existing) {
             errdefer _ = global_ctypes.set.map.pop();
@@ -538,7 +538,7 @@ fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) Flush
 
 fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void {
     const gpa = self.base.allocator;
-    try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count()));
+    try f.lazy_fns.ensureUnusedCapacity(gpa, @as(Flush.LazyFns.Size, @intCast(lazy_fns.count())));
 
     var it = lazy_fns.iterator();
     while (it.next()) |entry| {
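
The capacity hunks above illustrate a related point: when a call site's parameter type is concrete, the parameter is itself a result location, so a bare `@intCast(x)` would infer the same destination type that the `@as(u32, ...)` wrapper spells out. A mechanical migration cannot prove that at every site, so it conservatively keeps the wrapper. A hedged sketch with a made-up function:

    const std = @import("std");

    // Hypothetical function with a concrete u32 parameter.
    fn ensureCapacity32(new_capacity: u32) u32 {
        return new_capacity;
    }

    test "a concrete parameter type can drive @intCast on its own" {
        const entries_len: usize = 640;
        // The conservative rewrite keeps the destination type explicit...
        const a = ensureCapacity32(@as(u32, @intCast(entries_len)));
        // ...but the parameter type alone is enough for inference.
        const b = ensureCapacity32(@intCast(entries_len));
        try std.testing.expectEqual(a, b);
    }
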
src/link/Coff.zig
@@ -358,7 +358,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     });
 
     if (self.text_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.program_code_size_hint);
+        const file_size = @as(u32, @intCast(self.base.options.program_code_size_hint));
         self.text_section_index = try self.allocateSection(".text", file_size, .{
             .CNT_CODE = 1,
             .MEM_EXECUTE = 1,
@@ -367,7 +367,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.got_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
+        const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size();
         self.got_section_index = try self.allocateSection(".got", file_size, .{
             .CNT_INITIALIZED_DATA = 1,
             .MEM_READ = 1,
@@ -392,7 +392,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.idata_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.symbol_count_hint) * self.ptr_width.size();
+        const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * self.ptr_width.size();
         self.idata_section_index = try self.allocateSection(".idata", file_size, .{
             .CNT_INITIALIZED_DATA = 1,
             .MEM_READ = 1,
@@ -400,7 +400,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.reloc_section_index == null) {
-        const file_size = @intCast(u32, self.base.options.symbol_count_hint) * @sizeOf(coff.BaseRelocation);
+        const file_size = @as(u32, @intCast(self.base.options.symbol_count_hint)) * @sizeOf(coff.BaseRelocation);
         self.reloc_section_index = try self.allocateSection(".reloc", file_size, .{
             .CNT_INITIALIZED_DATA = 1,
             .MEM_DISCARDABLE = 1,
@@ -409,7 +409,7 @@ fn populateMissingMetadata(self: *Coff) !void {
     }
 
     if (self.strtab_offset == null) {
-        const file_size = @intCast(u32, self.strtab.len());
+        const file_size = @as(u32, @intCast(self.strtab.len()));
         self.strtab_offset = self.findFreeSpace(file_size, @alignOf(u32)); // 4-byte alignment seems like a good idea here
         log.debug("found strtab free space 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + file_size });
     }
@@ -430,7 +430,7 @@ fn populateMissingMetadata(self: *Coff) !void {
 }
 
 fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.SectionHeaderFlags) !u16 {
-    const index = @intCast(u16, self.sections.slice().len);
+    const index = @as(u16, @intCast(self.sections.slice().len));
     const off = self.findFreeSpace(size, default_file_alignment);
     // Memory is always allocated in sequence
     // TODO: investigate if we can allocate .text last; this way it would never need to grow in memory!
@@ -652,7 +652,7 @@ pub fn allocateSymbol(self: *Coff) !u32 {
             break :blk index;
         } else {
             log.debug("  (allocating symbol index {d})", .{self.locals.items.len});
-            const index = @intCast(u32, self.locals.items.len);
+            const index = @as(u32, @intCast(self.locals.items.len));
             _ = self.locals.addOneAssumeCapacity();
             break :blk index;
         }
@@ -680,7 +680,7 @@ fn allocateGlobal(self: *Coff) !u32 {
             break :blk index;
         } else {
             log.debug("  (allocating global index {d})", .{self.globals.items.len});
-            const index = @intCast(u32, self.globals.items.len);
+            const index = @as(u32, @intCast(self.globals.items.len));
             _ = self.globals.addOneAssumeCapacity();
             break :blk index;
         }
@@ -704,7 +704,7 @@ fn addGotEntry(self: *Coff, target: SymbolWithLoc) !void {
 
 pub fn createAtom(self: *Coff) !Atom.Index {
     const gpa = self.base.allocator;
-    const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+    const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len));
     const atom = try self.atoms.addOne(gpa);
     const sym_index = try self.allocateSymbol();
     try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
@@ -776,7 +776,7 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void {
             self.resolveRelocs(atom_index, relocs.items, mem_code, slide);
 
             const vaddr = sym.value + slide;
-            const pvaddr = @ptrFromInt(*anyopaque, vaddr);
+            const pvaddr = @as(*anyopaque, @ptrFromInt(vaddr));
 
             log.debug("writing to memory at address {x}", .{vaddr});
 
@@ -830,7 +830,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
     const sect_id = self.got_section_index.?;
 
     if (self.got_table_count_dirty) {
-        const needed_size = @intCast(u32, self.got_table.entries.items.len * self.ptr_width.size());
+        const needed_size = @as(u32, @intCast(self.got_table.entries.items.len * self.ptr_width.size()));
         try self.growSection(sect_id, needed_size);
         self.got_table_count_dirty = false;
     }
@@ -847,7 +847,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
     switch (self.ptr_width) {
         .p32 => {
             var buf: [4]u8 = undefined;
-            mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + self.getImageBase()));
+            mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + self.getImageBase())));
             try self.base.file.?.pwriteAll(&buf, file_offset);
         },
         .p64 => {
@@ -862,7 +862,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
             const gpa = self.base.allocator;
             const slide = @intFromPtr(self.hot_state.loaded_base_address.?);
             const actual_vmaddr = vmaddr + slide;
-            const pvaddr = @ptrFromInt(*anyopaque, actual_vmaddr);
+            const pvaddr = @as(*anyopaque, @ptrFromInt(actual_vmaddr));
             log.debug("writing GOT entry to memory at address {x}", .{actual_vmaddr});
             if (build_options.enable_logging) {
                 switch (self.ptr_width) {
@@ -880,7 +880,7 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
             switch (self.ptr_width) {
                 .p32 => {
                     var buf: [4]u8 = undefined;
-                    mem.writeIntLittle(u32, &buf, @intCast(u32, entry_value + slide));
+                    mem.writeIntLittle(u32, &buf, @as(u32, @intCast(entry_value + slide)));
                     writeMem(handle, pvaddr, &buf) catch |err| {
                         log.warn("writing to protected memory failed with error: {s}", .{@errorName(err)});
                     };
@@ -1107,7 +1107,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
         const atom = self.getAtom(atom_index);
         const sym = atom.getSymbolPtr(self);
         try self.setSymbolName(sym, sym_name);
-        sym.section_number = @enumFromInt(coff.SectionNumber, self.rdata_section_index.? + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.rdata_section_index.? + 1));
     }
 
     const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{
@@ -1125,7 +1125,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
 
     const required_alignment = tv.ty.abiAlignment(mod);
     const atom = self.getAtomPtr(atom_index);
-    atom.size = @intCast(u32, code.len);
+    atom.size = @as(u32, @intCast(code.len));
     atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
     errdefer self.freeAtom(atom_index);
 
@@ -1241,10 +1241,10 @@ fn updateLazySymbolAtom(
         },
     };
 
-    const code_len = @intCast(u32, code.len);
+    const code_len = @as(u32, @intCast(code.len));
     const symbol = atom.getSymbolPtr(self);
     try self.setSymbolName(symbol, name);
-    symbol.section_number = @enumFromInt(coff.SectionNumber, section_index + 1);
+    symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
     symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
 
     const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
@@ -1336,12 +1336,12 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
     const atom = self.getAtom(atom_index);
     const sym_index = atom.getSymbolIndex().?;
     const sect_index = decl_metadata.section;
-    const code_len = @intCast(u32, code.len);
+    const code_len = @as(u32, @intCast(code.len));
 
     if (atom.size != 0) {
         const sym = atom.getSymbolPtr(self);
         try self.setSymbolName(sym, decl_name);
-        sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
         sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
 
         const capacity = atom.capacity(self);
@@ -1365,7 +1365,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
     } else {
         const sym = atom.getSymbolPtr(self);
         try self.setSymbolName(sym, decl_name);
-        sym.section_number = @enumFromInt(coff.SectionNumber, sect_index + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
         sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
 
         const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
@@ -1502,7 +1502,7 @@ pub fn updateDeclExports(
         const sym = self.getSymbolPtr(sym_loc);
         try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name));
         sym.value = decl_sym.value;
-        sym.section_number = @enumFromInt(coff.SectionNumber, self.text_section_index.? + 1);
+        sym.section_number = @as(coff.SectionNumber, @enumFromInt(self.text_section_index.? + 1));
         sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL };
 
         switch (exp.opts.linkage) {
@@ -1728,12 +1728,12 @@ pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link
     try Atom.addRelocation(self, atom_index, .{
         .type = .direct,
         .target = target,
-        .offset = @intCast(u32, reloc_info.offset),
+        .offset = @as(u32, @intCast(reloc_info.offset)),
         .addend = reloc_info.addend,
         .pcrel = false,
         .length = 3,
     });
-    try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
+    try Atom.addBaseRelocation(self, atom_index, @as(u32, @intCast(reloc_info.offset)));
 
     return 0;
 }
@@ -1804,7 +1804,7 @@ fn writeBaseRelocations(self: *Coff) !void {
                     gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
                 }
                 try gop.value_ptr.append(.{
-                    .offset = @intCast(u12, rva - page),
+                    .offset = @as(u12, @intCast(rva - page)),
                     .type = .DIR64,
                 });
             }
@@ -1818,14 +1818,14 @@ fn writeBaseRelocations(self: *Coff) !void {
                 const sym = self.getSymbol(entry);
                 if (sym.section_number == .UNDEFINED) continue;
 
-                const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size());
+                const rva = @as(u32, @intCast(header.virtual_address + index * self.ptr_width.size()));
                 const page = mem.alignBackward(u32, rva, self.page_size);
                 const gop = try page_table.getOrPut(page);
                 if (!gop.found_existing) {
                     gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
                 }
                 try gop.value_ptr.append(.{
-                    .offset = @intCast(u12, rva - page),
+                    .offset = @as(u12, @intCast(rva - page)),
                     .type = .DIR64,
                 });
             }
@@ -1860,9 +1860,9 @@ fn writeBaseRelocations(self: *Coff) !void {
             });
         }
 
-        const block_size = @intCast(
+        const block_size = @as(
             u32,
-            entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry),
+            @intCast(entries.items.len * @sizeOf(coff.BaseRelocation) + @sizeOf(coff.BaseRelocationDirectoryEntry)),
         );
         try buffer.ensureUnusedCapacity(block_size);
         buffer.appendSliceAssumeCapacity(mem.asBytes(&coff.BaseRelocationDirectoryEntry{
@@ -1873,7 +1873,7 @@ fn writeBaseRelocations(self: *Coff) !void {
     }
 
     const header = &self.sections.items(.header)[self.reloc_section_index.?];
-    const needed_size = @intCast(u32, buffer.items.len);
+    const needed_size = @as(u32, @intCast(buffer.items.len));
     try self.growSection(self.reloc_section_index.?, needed_size);
 
     try self.base.file.?.pwriteAll(buffer.items, header.pointer_to_raw_data);
@@ -1904,12 +1904,12 @@ fn writeImportTables(self: *Coff) !void {
         const itable = self.import_tables.values()[i];
         iat_size += itable.size() + 8;
         dir_table_size += @sizeOf(coff.ImportDirectoryEntry);
-        lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName);
+        lookup_table_size += @as(u32, @intCast(itable.entries.items.len + 1)) * @sizeOf(coff.ImportLookupEntry64.ByName);
         for (itable.entries.items) |entry| {
             const sym_name = self.getSymbolName(entry);
-            names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2);
+            names_table_size += 2 + mem.alignForward(u32, @as(u32, @intCast(sym_name.len + 1)), 2);
         }
-        dll_names_size += @intCast(u32, lib_name.len + ext.len + 1);
+        dll_names_size += @as(u32, @intCast(lib_name.len + ext.len + 1));
     }
 
     const needed_size = iat_size + dir_table_size + lookup_table_size + names_table_size + dll_names_size;
@@ -1948,7 +1948,7 @@ fn writeImportTables(self: *Coff) !void {
             const import_name = self.getSymbolName(entry);
 
             // IAT and lookup table entry
-            const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @intCast(u31, header.virtual_address + names_table_offset) };
+            const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @as(u31, @intCast(header.virtual_address + names_table_offset)) };
             @memcpy(
                 buffer.items[iat_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)],
                 mem.asBytes(&lookup),
@@ -1964,7 +1964,7 @@ fn writeImportTables(self: *Coff) !void {
             mem.writeIntLittle(u16, buffer.items[names_table_offset..][0..2], 0); // Hint set to 0 until we learn how to parse DLLs
             names_table_offset += 2;
             @memcpy(buffer.items[names_table_offset..][0..import_name.len], import_name);
-            names_table_offset += @intCast(u32, import_name.len);
+            names_table_offset += @as(u32, @intCast(import_name.len));
             buffer.items[names_table_offset] = 0;
             names_table_offset += 1;
             if (!mem.isAlignedGeneric(usize, names_table_offset, @sizeOf(u16))) {
@@ -1986,9 +1986,9 @@ fn writeImportTables(self: *Coff) !void {
 
         // DLL name
         @memcpy(buffer.items[dll_names_offset..][0..lib_name.len], lib_name);
-        dll_names_offset += @intCast(u32, lib_name.len);
+        dll_names_offset += @as(u32, @intCast(lib_name.len));
         @memcpy(buffer.items[dll_names_offset..][0..ext.len], ext);
-        dll_names_offset += @intCast(u32, ext.len);
+        dll_names_offset += @as(u32, @intCast(ext.len));
         buffer.items[dll_names_offset] = 0;
         dll_names_offset += 1;
     }
@@ -2027,11 +2027,11 @@ fn writeStrtab(self: *Coff) !void {
     if (self.strtab_offset == null) return;
 
     const allocated_size = self.allocatedSize(self.strtab_offset.?);
-    const needed_size = @intCast(u32, self.strtab.len());
+    const needed_size = @as(u32, @intCast(self.strtab.len()));
 
     if (needed_size > allocated_size) {
         self.strtab_offset = null;
-        self.strtab_offset = @intCast(u32, self.findFreeSpace(needed_size, @alignOf(u32)));
+        self.strtab_offset = @as(u32, @intCast(self.findFreeSpace(needed_size, @alignOf(u32))));
     }
 
     log.debug("writing strtab from 0x{x} to 0x{x}", .{ self.strtab_offset.?, self.strtab_offset.? + needed_size });
@@ -2042,7 +2042,7 @@ fn writeStrtab(self: *Coff) !void {
     buffer.appendSliceAssumeCapacity(self.strtab.items());
     // Here we use a trick: we do not commit the size of the strtab to the strtab buffer itself;
     // instead, we write the length of the strtab into a temporary buffer that goes to file.
-    mem.writeIntLittle(u32, buffer.items[0..4], @intCast(u32, self.strtab.len()));
+    mem.writeIntLittle(u32, buffer.items[0..4], @as(u32, @intCast(self.strtab.len())));
 
     try self.base.file.?.pwriteAll(buffer.items, self.strtab_offset.?);
 }
@@ -2081,11 +2081,11 @@ fn writeHeader(self: *Coff) !void {
     }
 
     const timestamp = std.time.timestamp();
-    const size_of_optional_header = @intCast(u16, self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize());
+    const size_of_optional_header = @as(u16, @intCast(self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize()));
     var coff_header = coff.CoffHeader{
         .machine = coff.MachineType.fromTargetCpuArch(self.base.options.target.cpu.arch),
-        .number_of_sections = @intCast(u16, self.sections.slice().len), // TODO what if we prune a section
-        .time_date_stamp = @truncate(u32, @bitCast(u64, timestamp)),
+        .number_of_sections = @as(u16, @intCast(self.sections.slice().len)), // TODO what if we prune a section
+        .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))),
         .pointer_to_symbol_table = self.strtab_offset orelse 0,
         .number_of_symbols = 0,
         .size_of_optional_header = size_of_optional_header,
@@ -2135,7 +2135,7 @@ fn writeHeader(self: *Coff) !void {
                 .address_of_entry_point = self.entry_addr orelse 0,
                 .base_of_code = base_of_code,
                 .base_of_data = base_of_data,
-                .image_base = @intCast(u32, image_base),
+                .image_base = @as(u32, @intCast(image_base)),
                 .section_alignment = self.page_size,
                 .file_alignment = default_file_alignment,
                 .major_operating_system_version = 6,
@@ -2155,7 +2155,7 @@ fn writeHeader(self: *Coff) !void {
                 .size_of_heap_reserve = default_size_of_heap_reserve,
                 .size_of_heap_commit = default_size_of_heap_commit,
                 .loader_flags = 0,
-                .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len),
+                .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)),
             };
             writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
         },
@@ -2189,7 +2189,7 @@ fn writeHeader(self: *Coff) !void {
                 .size_of_heap_reserve = default_size_of_heap_reserve,
                 .size_of_heap_commit = default_size_of_heap_commit,
                 .loader_flags = 0,
-                .number_of_rva_and_sizes = @intCast(u32, self.data_directories.len),
+                .number_of_rva_and_sizes = @as(u32, @intCast(self.data_directories.len)),
             };
             writer.writeAll(mem.asBytes(&opt_header)) catch unreachable;
         },
@@ -2210,7 +2210,7 @@ fn detectAllocCollision(self: *Coff, start: u32, size: u32) ?u32 {
     const end = start + padToIdeal(size);
 
     if (self.strtab_offset) |off| {
-        const tight_size = @intCast(u32, self.strtab.len());
+        const tight_size = @as(u32, @intCast(self.strtab.len()));
         const increased_size = padToIdeal(tight_size);
         const test_end = off + increased_size;
         if (end > off and start < test_end) {
@@ -2265,28 +2265,28 @@ fn allocatedVirtualSize(self: *Coff, start: u32) u32 {
 
 inline fn getSizeOfHeaders(self: Coff) u32 {
     const msdos_hdr_size = msdos_stub.len + 4;
-    return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() +
-        self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize());
+    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize() +
+        self.getDataDirectoryHeadersSize() + self.getSectionHeadersSize()));
 }
 
 inline fn getOptionalHeaderSize(self: Coff) u32 {
     return switch (self.ptr_width) {
-        .p32 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE32)),
-        .p64 => @intCast(u32, @sizeOf(coff.OptionalHeaderPE64)),
+        .p32 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE32))),
+        .p64 => @as(u32, @intCast(@sizeOf(coff.OptionalHeaderPE64))),
     };
 }
 
 inline fn getDataDirectoryHeadersSize(self: Coff) u32 {
-    return @intCast(u32, self.data_directories.len * @sizeOf(coff.ImageDataDirectory));
+    return @as(u32, @intCast(self.data_directories.len * @sizeOf(coff.ImageDataDirectory)));
 }
 
 inline fn getSectionHeadersSize(self: Coff) u32 {
-    return @intCast(u32, self.sections.slice().len * @sizeOf(coff.SectionHeader));
+    return @as(u32, @intCast(self.sections.slice().len * @sizeOf(coff.SectionHeader)));
 }
 
 inline fn getDataDirectoryHeadersOffset(self: Coff) u32 {
     const msdos_hdr_size = msdos_stub.len + 4;
-    return @intCast(u32, msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize());
+    return @as(u32, @intCast(msdos_hdr_size + @sizeOf(coff.CoffHeader) + self.getOptionalHeaderSize()));
 }
 
 inline fn getSectionHeadersOffset(self: Coff) u32 {
@@ -2473,7 +2473,7 @@ fn logSymtab(self: *Coff) void {
         };
         log.debug("    %{d}: {?s} @{x} in {s}({d}), {s}", .{
             sym_id,
-            self.getSymbolName(.{ .sym_index = @intCast(u32, sym_id), .file = null }),
+            self.getSymbolName(.{ .sym_index = @as(u32, @intCast(sym_id)), .file = null }),
             sym.value,
             where,
             def_index,
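
Two Coff hunks above go beyond simple narrowing. `@ptrFromInt` follows the same shape as the integer casts (`@as(*anyopaque, @ptrFromInt(vaddr))`), and the `time_date_stamp` field shows how a nested chain migrates: every builtin in the chain gets its own `@as`, so the expression still reads inside-out. A small sketch of the nested case, using an arbitrary value:

    const std = @import("std");

    test "nested casts carry one @as per builtin" {
        const timestamp: i64 = -1;
        // Old syntax: @truncate(u32, @bitCast(u64, timestamp)).
        // New syntax: bit-reinterpret to u64 first, then truncate to u32.
        const stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp))));
        try std.testing.expectEqual(@as(u32, 0xffff_ffff), stamp);
    }
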
src/link/Dwarf.zig
@@ -138,7 +138,7 @@ pub const DeclState = struct {
     /// which we use as our target of the relocation.
     fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
         const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: {
-            const sym_index = @intCast(u32, self.abbrev_table.items.len);
+            const sym_index = @as(u32, @intCast(self.abbrev_table.items.len));
             try self.abbrev_table.append(self.gpa, .{
                 .atom_index = atom_index,
                 .type = ty,
@@ -225,7 +225,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     var index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, Type.bool, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try dbg_info_buffer.ensureUnusedCapacity(6);
                     dbg_info_buffer.appendAssumeCapacity(0);
@@ -237,7 +237,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     const offset = abi_size - payload_ty.abiSize(mod);
                     try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
@@ -249,7 +249,7 @@ pub const DeclState = struct {
                 if (ty.isSlice(mod)) {
                     // Slices are structs: struct { .ptr = *, .len = N }
                     const ptr_bits = target.ptrBitWidth();
-                    const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8));
+                    const ptr_bytes = @as(u8, @intCast(@divExact(ptr_bits, 8)));
                     // DW.AT.structure_type
                     try dbg_info_buffer.ensureUnusedCapacity(2);
                     dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_type));
@@ -267,7 +267,7 @@ pub const DeclState = struct {
                     var index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
                     const ptr_ty = ty.slicePtrFieldType(mod);
-                    try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, ptr_ty, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try dbg_info_buffer.ensureUnusedCapacity(6);
                     dbg_info_buffer.appendAssumeCapacity(0);
@@ -279,7 +279,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try dbg_info_buffer.ensureUnusedCapacity(2);
                     dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
@@ -291,7 +291,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     const index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index)));
                 }
             },
             .Array => {
@@ -302,13 +302,13 @@ pub const DeclState = struct {
                 // DW.AT.type, DW.FORM.ref4
                 var index = dbg_info_buffer.items.len;
                 try dbg_info_buffer.resize(index + 4);
-                try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
+                try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @as(u32, @intCast(index)));
                 // DW.AT.subrange_type
                 try dbg_info_buffer.append(@intFromEnum(AbbrevKind.array_dim));
                 // DW.AT.type, DW.FORM.ref4
                 index = dbg_info_buffer.items.len;
                 try dbg_info_buffer.resize(index + 4);
-                try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
+                try self.addTypeRelocGlobal(atom_index, Type.usize, @as(u32, @intCast(index)));
                 // DW.AT.count, DW.FORM.udata
                 const len = ty.arrayLenIncludingSentinel(mod);
                 try leb128.writeULEB128(dbg_info_buffer.writer(), len);
@@ -334,7 +334,7 @@ pub const DeclState = struct {
                             // DW.AT.type, DW.FORM.ref4
                             var index = dbg_info_buffer.items.len;
                             try dbg_info_buffer.resize(index + 4);
-                            try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index));
+                            try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @as(u32, @intCast(index)));
                             // DW.AT.data_member_location, DW.FORM.udata
                             const field_off = ty.structFieldOffset(field_index, mod);
                             try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -367,7 +367,7 @@ pub const DeclState = struct {
                             // DW.AT.type, DW.FORM.ref4
                             var index = dbg_info_buffer.items.len;
                             try dbg_info_buffer.resize(index + 4);
-                            try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
+                            try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index)));
                             // DW.AT.data_member_location, DW.FORM.udata
                             const field_off = ty.structFieldOffset(field_index, mod);
                             try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -404,7 +404,7 @@ pub const DeclState = struct {
                         // TODO do not assume a 64-bit enum value - it could be bigger.
                         // See https://github.com/ziglang/zig/issues/645
                         const field_int_val = try value.toValue().intFromEnum(ty, mod);
-                        break :value @bitCast(u64, field_int_val.toSignedInt(mod));
+                        break :value @as(u64, @bitCast(field_int_val.toSignedInt(mod)));
                     };
                     mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
                 }
@@ -439,7 +439,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     const inner_union_index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(inner_union_index + 4);
-                    try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5);
+                    try self.addTypeRelocLocal(atom_index, @as(u32, @intCast(inner_union_index)), 5);
                     // DW.AT.data_member_location, DW.FORM.udata
                     try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset);
                 }
@@ -468,7 +468,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     const index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try dbg_info_buffer.append(0);
                 }
@@ -485,7 +485,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     const index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset);
 
@@ -521,7 +521,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     const index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, payload_ty, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try leb128.writeULEB128(dbg_info_buffer.writer(), payload_off);
                 }
@@ -536,7 +536,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     const index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, error_ty, @as(u32, @intCast(index)));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try leb128.writeULEB128(dbg_info_buffer.writer(), error_off);
                 }
@@ -640,7 +640,7 @@ pub const DeclState = struct {
         try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
         const index = dbg_info.items.len;
         try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
-        try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
+        try self.addTypeRelocGlobal(atom_index, ty, @as(u32, @intCast(index))); // DW.AT.type, DW.FORM.ref4
         dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
     }
 
@@ -723,20 +723,20 @@ pub const DeclState = struct {
             .memory,
             .linker_load,
             => {
-                const ptr_width = @intCast(u8, @divExact(target.ptrBitWidth(), 8));
+                const ptr_width = @as(u8, @intCast(@divExact(target.ptrBitWidth(), 8)));
                 try dbg_info.ensureUnusedCapacity(2 + ptr_width);
                 dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
                     1 + ptr_width + @intFromBool(is_ptr),
                     DW.OP.addr, // literal address
                 });
-                const offset = @intCast(u32, dbg_info.items.len);
+                const offset = @as(u32, @intCast(dbg_info.items.len));
                 const addr = switch (loc) {
                     .memory => |x| x,
                     else => 0,
                 };
                 switch (ptr_width) {
                     0...4 => {
-                        try dbg_info.writer().writeInt(u32, @intCast(u32, addr), endian);
+                        try dbg_info.writer().writeInt(u32, @as(u32, @intCast(addr)), endian);
                     },
                     5...8 => {
                         try dbg_info.writer().writeInt(u64, addr, endian);
@@ -765,19 +765,19 @@ pub const DeclState = struct {
                     if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
                 });
                 if (child_ty.isSignedInt(mod)) {
-                    try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x));
+                    try leb128.writeILEB128(dbg_info.writer(), @as(i64, @bitCast(x)));
                 } else {
                     try leb128.writeULEB128(dbg_info.writer(), x);
                 }
                 try dbg_info.append(DW.OP.stack_value);
-                dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2);
+                dbg_info.items[fixup] += @as(u8, @intCast(dbg_info.items.len - fixup - 2));
             },
 
             .undef => {
                 // DW.AT.location, DW.FORM.exprloc
                 // uleb128(exprloc_len)
                 // DW.OP.implicit_value uleb128(len_of_bytes) bytes
-                const abi_size = @intCast(u32, child_ty.abiSize(mod));
+                const abi_size = @as(u32, @intCast(child_ty.abiSize(mod)));
                 var implicit_value_len = std.ArrayList(u8).init(self.gpa);
                 defer implicit_value_len.deinit();
                 try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
@@ -807,7 +807,7 @@ pub const DeclState = struct {
         try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
         const index = dbg_info.items.len;
         try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
-        try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index));
+        try self.addTypeRelocGlobal(atom_index, child_ty, @as(u32, @intCast(index)));
         dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
     }
 
@@ -963,7 +963,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
                 func.lbrace_line,
                 func.rbrace_line,
             });
-            const line = @intCast(u28, decl.src_line + func.lbrace_line);
+            const line = @as(u28, @intCast(decl.src_line + func.lbrace_line));
 
             const ptr_width_bytes = self.ptrWidthBytes();
             dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{
@@ -1013,7 +1013,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
             dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
             //
             if (fn_ret_has_bits) {
-                try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
+                try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @as(u32, @intCast(dbg_info_buffer.items.len)));
                 dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
             }
 
@@ -1055,11 +1055,11 @@ pub fn commitDeclState(
                 .p32 => {
                     {
                         const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4];
-                        mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian);
+                        mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian);
                     }
                     {
                         const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4];
-                        mem.writeInt(u32, ptr, @intCast(u32, sym_addr), target_endian);
+                        mem.writeInt(u32, ptr, @as(u32, @intCast(sym_addr)), target_endian);
                     }
                 },
                 .p64 => {
@@ -1079,7 +1079,7 @@ pub fn commitDeclState(
                     sym_size,
                 });
                 const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4];
-                mem.writeInt(u32, ptr, @intCast(u32, sym_size), target_endian);
+                mem.writeInt(u32, ptr, @as(u32, @intCast(sym_size)), target_endian);
             }
 
             try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS.extended_op, 1, DW.LNE.end_sequence });
@@ -1091,7 +1091,7 @@ pub fn commitDeclState(
             // probably need to edit that logic too.
             const src_fn_index = self.src_fn_decls.get(decl_index).?;
             const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
-            src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
+            src_fn.len = @as(u32, @intCast(dbg_line_buffer.items.len));
 
             if (self.src_fn_last_index) |last_index| blk: {
                 if (src_fn_index == last_index) break :blk;
@@ -1254,12 +1254,12 @@ pub fn commitDeclState(
             };
             if (deferred) continue;
 
-            symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
+            symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len));
             try decl_state.addDbgInfoType(mod, di_atom_index, ty);
         }
     }
 
-    try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
+    try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len)));
 
     while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
         if (reloc.target) |target| {
@@ -1402,7 +1402,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32)
         self.di_atom_first_index = atom_index;
         self.di_atom_last_index = atom_index;
 
-        atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes()));
+        atom.off = @as(u32, @intCast(padToIdeal(self.dbgInfoHeaderBytes())));
     }
 }
 
@@ -1513,7 +1513,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.
         func.lbrace_line,
         func.rbrace_line,
     });
-    const line = @intCast(u28, decl.src_line + func.lbrace_line);
+    const line = @as(u28, @intCast(decl.src_line + func.lbrace_line));
     var data: [4]u8 = undefined;
     leb128.writeUnsignedFixed(4, &data, line);
 
@@ -1791,10 +1791,10 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
     const dbg_info_end = self.getDebugInfoEnd().? + 1;
     const init_len = dbg_info_end - after_init_len;
     if (self.bin_file.tag == .macho) {
-        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len));
+        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len)));
     } else switch (self.ptr_width) {
         .p32 => {
-            mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, init_len), target_endian);
+            mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(init_len)), target_endian);
         },
         .p64 => {
             di_buf.appendNTimesAssumeCapacity(0xff, 4);
@@ -1804,11 +1804,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
     mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // DWARF version
     const abbrev_offset = self.abbrev_table_offset.?;
     if (self.bin_file.tag == .macho) {
-        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset));
+        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset)));
         di_buf.appendAssumeCapacity(8); // address size
     } else switch (self.ptr_width) {
         .p32 => {
-            mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, abbrev_offset), target_endian);
+            mem.writeInt(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(abbrev_offset)), target_endian);
             di_buf.appendAssumeCapacity(4); // address size
         },
         .p64 => {
@@ -1828,9 +1828,9 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
         mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), 0); // DW.AT.stmt_list, DW.FORM.sec_offset
         mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), low_pc);
         mem.writeIntLittle(u64, di_buf.addManyAsArrayAssumeCapacity(8), high_pc);
-        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, name_strp));
-        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, comp_dir_strp));
-        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, producer_strp));
+        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(name_strp)));
+        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(comp_dir_strp)));
+        mem.writeIntLittle(u32, di_buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(producer_strp)));
     } else {
         self.writeAddrAssumeCapacity(&di_buf, 0); // DW.AT.stmt_list, DW.FORM.sec_offset
         self.writeAddrAssumeCapacity(&di_buf, low_pc);
@@ -1885,7 +1885,7 @@ fn resolveCompilationDir(module: *Module, buffer: *[std.fs.MAX_PATH_BYTES]u8) []
 fn writeAddrAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), addr: u64) void {
     const target_endian = self.target.cpu.arch.endian();
     switch (self.ptr_width) {
-        .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian),
+        .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian),
         .p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian),
     }
 }
@@ -2152,10 +2152,10 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
     // Go back and populate the initial length.
     const init_len = di_buf.items.len - after_init_len;
     if (self.bin_file.tag == .macho) {
-        mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len));
+        mem.writeIntLittle(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len)));
     } else switch (self.ptr_width) {
         .p32 => {
-            mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @intCast(u32, init_len), target_endian);
+            mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @as(u32, @intCast(init_len)), target_endian);
         },
         .p64 => {
             // initial length - length of the .debug_aranges contribution for this compilation unit,
@@ -2165,7 +2165,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
         },
     }
 
-    const needed_size = @intCast(u32, di_buf.items.len);
+    const needed_size = @as(u32, @intCast(di_buf.items.len));
     switch (self.bin_file.tag) {
         .elf => {
             const elf_file = self.bin_file.cast(File.Elf).?;
@@ -2293,7 +2293,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
         di_buf.appendSliceAssumeCapacity(file);
         di_buf.appendSliceAssumeCapacity(&[_]u8{
             0, // null byte for the relative path name
-            @intCast(u8, dir_index), // directory_index
+            @as(u8, @intCast(dir_index)), // directory_index
             0, // mtime (TODO supply this)
             0, // file size bytes (TODO supply this)
         });
@@ -2304,11 +2304,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
 
     switch (self.bin_file.tag) {
         .macho => {
-            mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len));
+            mem.writeIntLittle(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len)));
         },
         else => switch (self.ptr_width) {
             .p32 => {
-                mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @intCast(u32, header_len), target_endian);
+                mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @as(u32, @intCast(header_len)), target_endian);
             },
             .p64 => {
                 mem.writeInt(u64, di_buf.items[before_header_len..][0..8], header_len, target_endian);
@@ -2348,7 +2348,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
             .macho => {
                 const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
                 const sect_index = d_sym.debug_line_section_index.?;
-                const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta);
+                const needed_size = @as(u32, @intCast(d_sym.getSection(sect_index).size + delta));
                 try d_sym.growSection(sect_index, needed_size, true);
                 const file_pos = d_sym.getSection(sect_index).offset + first_fn.off;
 
@@ -2384,11 +2384,11 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
     const init_len = self.getDebugLineProgramEnd().? - before_init_len - init_len_size;
     switch (self.bin_file.tag) {
         .macho => {
-            mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len));
+            mem.writeIntLittle(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len)));
         },
         else => switch (self.ptr_width) {
             .p32 => {
-                mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @intCast(u32, init_len), target_endian);
+                mem.writeInt(u32, di_buf.items[before_init_len..][0..4], @as(u32, @intCast(init_len)), target_endian);
             },
             .p64 => {
                 mem.writeInt(u64, di_buf.items[before_init_len + 4 ..][0..8], init_len, target_endian);
@@ -2477,7 +2477,7 @@ fn dbgLineNeededHeaderBytes(self: Dwarf, dirs: []const []const u8, files: []cons
     }
     size += 1; // file names sentinel
 
-    return @intCast(u32, size);
+    return @as(u32, @intCast(size));
 }
 
 /// The reloc offset for the line offset of a function from the previous function's line.
@@ -2516,7 +2516,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
 
         const di_atom_index = try self.createAtom(.di_atom);
         log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
-        try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
+        try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len)));
         log.debug("writeDeclDebugInfo in flushModule", .{});
         try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
 
@@ -2581,7 +2581,7 @@ fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 {
             else => unreachable,
         }
     }
-    return @intCast(u28, gop.index + 1);
+    return @as(u28, @intCast(gop.index + 1));
 }
 
 fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
@@ -2614,7 +2614,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
 
         const dir_index: u28 = blk: {
             const dirs_gop = dirs.getOrPutAssumeCapacity(dir_path);
-            break :blk @intCast(u28, dirs_gop.index + 1);
+            break :blk @as(u28, @intCast(dirs_gop.index + 1));
         };
 
         files_dir_indexes.appendAssumeCapacity(dir_index);
@@ -2679,12 +2679,12 @@ fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index {
     const index = blk: {
         switch (kind) {
             .src_fn => {
-                const index = @intCast(Atom.Index, self.src_fns.items.len);
+                const index = @as(Atom.Index, @intCast(self.src_fns.items.len));
                 _ = try self.src_fns.addOne(self.allocator);
                 break :blk index;
             },
             .di_atom => {
-                const index = @intCast(Atom.Index, self.di_atoms.items.len);
+                const index = @as(Atom.Index, @intCast(self.di_atoms.items.len));
                 _ = try self.di_atoms.addOne(self.allocator);
                 break :blk index;
             },
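
The Dwarf hunks repeat the `@as(u32, @intCast(...))` wrapping dozens of times, including odd widths such as `u28` for line numbers. Worth noting for hand-written code: a typed declaration is also a result location, so the `@as` can be dropped when the destination already carries a type annotation. A sketch under that assumption, with hypothetical line numbers:

    const std = @import("std");

    test "a typed declaration is a result location for @intCast" {
        const src_line: u32 = 120;
        const lbrace_line: u32 = 3;
        // Mechanical form: const line = @as(u28, @intCast(src_line + lbrace_line));
        // Equivalent when the declaration names the type itself:
        const line: u28 = @intCast(src_line + lbrace_line);
        try std.testing.expectEqual(@as(u28, 123), line);
    }
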
src/link/Elf.zig
@@ -455,7 +455,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     const ptr_size: u8 = self.ptrWidthBytes();
 
     if (self.phdr_table_index == null) {
-        self.phdr_table_index = @intCast(u16, self.program_headers.items.len);
+        self.phdr_table_index = @as(u16, @intCast(self.program_headers.items.len));
         const p_align: u16 = switch (self.ptr_width) {
             .p32 => @alignOf(elf.Elf32_Phdr),
             .p64 => @alignOf(elf.Elf64_Phdr),
@@ -474,7 +474,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_table_load_index == null) {
-        self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len);
+        self.phdr_table_load_index = @as(u16, @intCast(self.program_headers.items.len));
         // TODO Same as for GOT
         const phdr_addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x1000000 else 0x1000;
         const p_align = self.page_size;
@@ -492,7 +492,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_re_index == null) {
-        self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
+        self.phdr_load_re_index = @as(u16, @intCast(self.program_headers.items.len));
         const file_size = self.base.options.program_code_size_hint;
         const p_align = self.page_size;
         const off = self.findFreeSpace(file_size, p_align);
@@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_got_index == null) {
-        self.phdr_got_index = @intCast(u16, self.program_headers.items.len);
+        self.phdr_got_index = @as(u16, @intCast(self.program_headers.items.len));
         const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
         // We really only need ptr alignment, but since we are using PROGBITS,
         // Linux requires page alignment.
@@ -538,7 +538,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_ro_index == null) {
-        self.phdr_load_ro_index = @intCast(u16, self.program_headers.items.len);
+        self.phdr_load_ro_index = @as(u16, @intCast(self.program_headers.items.len));
         // TODO Find a hint about how much data needs to be in rodata?
         const file_size = 1024;
         // Same reason as for GOT
@@ -561,7 +561,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_rw_index == null) {
-        self.phdr_load_rw_index = @intCast(u16, self.program_headers.items.len);
+        self.phdr_load_rw_index = @as(u16, @intCast(self.program_headers.items.len));
         // TODO Find a hint about how much data needs to be in data?
         const file_size = 1024;
         // Same reason as for GOT
@@ -584,7 +584,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.shstrtab_index == null) {
-        self.shstrtab_index = @intCast(u16, self.sections.slice().len);
+        self.shstrtab_index = @as(u16, @intCast(self.sections.slice().len));
         assert(self.shstrtab.buffer.items.len == 0);
         try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
         const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
@@ -609,7 +609,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.text_section_index == null) {
-        self.text_section_index = @intCast(u16, self.sections.slice().len);
+        self.text_section_index = @as(u16, @intCast(self.sections.slice().len));
         const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
 
         try self.sections.append(gpa, .{
@@ -631,7 +631,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.got_section_index == null) {
-        self.got_section_index = @intCast(u16, self.sections.slice().len);
+        self.got_section_index = @as(u16, @intCast(self.sections.slice().len));
         const phdr = &self.program_headers.items[self.phdr_got_index.?];
 
         try self.sections.append(gpa, .{
@@ -653,7 +653,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.rodata_section_index == null) {
-        self.rodata_section_index = @intCast(u16, self.sections.slice().len);
+        self.rodata_section_index = @as(u16, @intCast(self.sections.slice().len));
         const phdr = &self.program_headers.items[self.phdr_load_ro_index.?];
 
         try self.sections.append(gpa, .{
@@ -675,7 +675,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.data_section_index == null) {
-        self.data_section_index = @intCast(u16, self.sections.slice().len);
+        self.data_section_index = @as(u16, @intCast(self.sections.slice().len));
         const phdr = &self.program_headers.items[self.phdr_load_rw_index.?];
 
         try self.sections.append(gpa, .{
@@ -697,7 +697,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.symtab_section_index == null) {
-        self.symtab_section_index = @intCast(u16, self.sections.slice().len);
+        self.symtab_section_index = @as(u16, @intCast(self.sections.slice().len));
         const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
         const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
         const file_size = self.base.options.symbol_count_hint * each_size;
@@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
                 .sh_size = file_size,
                 // The section header index of the associated string table.
                 .sh_link = self.shstrtab_index.?,
-                .sh_info = @intCast(u32, self.local_symbols.items.len),
+                .sh_info = @as(u32, @intCast(self.local_symbols.items.len)),
                 .sh_addralign = min_align,
                 .sh_entsize = each_size,
             },
@@ -726,7 +726,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
 
     if (self.dwarf) |*dw| {
         if (self.debug_str_section_index == null) {
-            self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
+            self.debug_str_section_index = @as(u16, @intCast(self.sections.slice().len));
             assert(dw.strtab.buffer.items.len == 0);
             try dw.strtab.buffer.append(gpa, 0);
             try self.sections.append(gpa, .{
@@ -749,7 +749,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_info_section_index == null) {
-            self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
+            self.debug_info_section_index = @as(u16, @intCast(self.sections.slice().len));
 
             const file_size_hint = 200;
             const p_align = 1;
@@ -778,7 +778,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_abbrev_section_index == null) {
-            self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
+            self.debug_abbrev_section_index = @as(u16, @intCast(self.sections.slice().len));
 
             const file_size_hint = 128;
             const p_align = 1;
@@ -807,7 +807,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_aranges_section_index == null) {
-            self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
+            self.debug_aranges_section_index = @as(u16, @intCast(self.sections.slice().len));
 
             const file_size_hint = 160;
             const p_align = 16;
@@ -836,7 +836,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         }
 
         if (self.debug_line_section_index == null) {
-            self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
+            self.debug_line_section_index = @as(u16, @intCast(self.sections.slice().len));
 
             const file_size_hint = 250;
             const p_align = 1;
@@ -1100,7 +1100,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
                 });
 
                 switch (self.ptr_width) {
-                    .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@intCast(u32, target_vaddr)), file_offset),
+                    .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@as(u32, @intCast(target_vaddr))), file_offset),
                     .p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset),
                 }
 
@@ -1170,7 +1170,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
 
         if (needed_size > allocated_size) {
             phdr_table.p_offset = 0; // free the space
-            phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align));
+            phdr_table.p_offset = self.findFreeSpace(needed_size, @as(u32, @intCast(phdr_table.p_align)));
         }
 
         phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align);
@@ -2004,7 +2004,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
 fn writeDwarfAddrAssumeCapacity(self: *Elf, buf: *std.ArrayList(u8), addr: u64) void {
     const target_endian = self.base.options.target.cpu.arch.endian();
     switch (self.ptr_width) {
-        .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(u32, addr), target_endian),
+        .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @as(u32, @intCast(addr)), target_endian),
         .p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, target_endian),
     }
 }
@@ -2064,15 +2064,15 @@ fn writeElfHeader(self: *Elf) !void {
     const phdr_table_offset = self.program_headers.items[self.phdr_table_index.?].p_offset;
     switch (self.ptr_width) {
         .p32 => {
-            mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian);
+            mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(e_entry)), endian);
             index += 4;
 
             // e_phoff
-            mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, phdr_table_offset), endian);
+            mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(phdr_table_offset)), endian);
             index += 4;
 
             // e_shoff
-            mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.shdr_table_offset.?), endian);
+            mem.writeInt(u32, hdr_buf[index..][0..4], @as(u32, @intCast(self.shdr_table_offset.?)), endian);
             index += 4;
         },
         .p64 => {
@@ -2108,7 +2108,7 @@ fn writeElfHeader(self: *Elf) !void {
     mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian);
     index += 2;
 
-    const e_phnum = @intCast(u16, self.program_headers.items.len);
+    const e_phnum = @as(u16, @intCast(self.program_headers.items.len));
     mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian);
     index += 2;
 
@@ -2119,7 +2119,7 @@ fn writeElfHeader(self: *Elf) !void {
     mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
     index += 2;
 
-    const e_shnum = @intCast(u16, self.sections.slice().len);
+    const e_shnum = @as(u16, @intCast(self.sections.slice().len));
     mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
     index += 2;
 
@@ -2223,7 +2223,7 @@ fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment:
 
 pub fn createAtom(self: *Elf) !Atom.Index {
     const gpa = self.base.allocator;
-    const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+    const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len));
     const atom = try self.atoms.addOne(gpa);
     const local_sym_index = try self.allocateLocalSymbol();
     try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index);
@@ -2367,7 +2367,7 @@ pub fn allocateLocalSymbol(self: *Elf) !u32 {
             break :blk index;
         } else {
             log.debug("  (allocating symbol index {d})", .{self.local_symbols.items.len});
-            const index = @intCast(u32, self.local_symbols.items.len);
+            const index = @as(u32, @intCast(self.local_symbols.items.len));
             _ = self.local_symbols.addOneAssumeCapacity();
             break :blk index;
         }
@@ -2557,7 +2557,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
                     .iov_len = code.len,
                 }};
                 var remote_vec: [1]std.os.iovec_const = .{.{
-                    .iov_base = @ptrFromInt([*]u8, @intCast(usize, local_sym.st_value)),
+                    .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(local_sym.st_value)))),
                     .iov_len = code.len,
                 }};
                 const rc = std.os.linux.process_vm_writev(pid, &code_vec, &remote_vec, 0);
@@ -2910,7 +2910,7 @@ pub fn updateDeclExports(
                 continue;
             },
         };
-        const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
+        const stt_bits: u8 = @as(u4, @truncate(decl_sym.st_info));
         if (decl_metadata.getExport(self, exp_name)) |i| {
             const sym = &self.global_symbols.items[i];
             sym.* = .{
@@ -2926,7 +2926,7 @@ pub fn updateDeclExports(
                 _ = self.global_symbols.addOneAssumeCapacity();
                 break :blk self.global_symbols.items.len - 1;
             };
-            try decl_metadata.exports.append(gpa, @intCast(u32, i));
+            try decl_metadata.exports.append(gpa, @as(u32, @intCast(i)));
             self.global_symbols.items[i] = .{
                 .st_name = try self.shstrtab.insert(gpa, exp_name),
                 .st_info = (stb_bits << 4) | stt_bits,
@@ -3030,12 +3030,12 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void
     switch (entry_size) {
         2 => {
             var buf: [2]u8 = undefined;
-            mem.writeInt(u16, &buf, @intCast(u16, got_value), endian);
+            mem.writeInt(u16, &buf, @as(u16, @intCast(got_value)), endian);
             try self.base.file.?.pwriteAll(&buf, off);
         },
         4 => {
             var buf: [4]u8 = undefined;
-            mem.writeInt(u32, &buf, @intCast(u32, got_value), endian);
+            mem.writeInt(u32, &buf, @as(u32, @intCast(got_value)), endian);
             try self.base.file.?.pwriteAll(&buf, off);
         },
         8 => {
@@ -3051,7 +3051,7 @@ fn writeOffsetTableEntry(self: *Elf, index: @TypeOf(self.got_table).Index) !void
                             .iov_len = buf.len,
                         }};
                         var remote_vec: [1]std.os.iovec_const = .{.{
-                            .iov_base = @ptrFromInt([*]u8, @intCast(usize, vaddr)),
+                            .iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(vaddr)))),
                             .iov_len = buf.len,
                         }};
                         const rc = std.os.linux.process_vm_writev(pid, &local_vec, &remote_vec, 0);
@@ -3086,7 +3086,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
         };
         const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
         try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true);
-        syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len);
+        syms_sect.sh_info = @as(u32, @intCast(self.local_symbols.items.len));
     }
     const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian();
     const off = switch (self.ptr_width) {
@@ -3101,8 +3101,8 @@ fn writeSymbol(self: *Elf, index: usize) !void {
             var sym = [1]elf.Elf32_Sym{
                 .{
                     .st_name = local.st_name,
-                    .st_value = @intCast(u32, local.st_value),
-                    .st_size = @intCast(u32, local.st_size),
+                    .st_value = @as(u32, @intCast(local.st_value)),
+                    .st_size = @as(u32, @intCast(local.st_size)),
                     .st_info = local.st_info,
                     .st_other = local.st_other,
                     .st_shndx = local.st_shndx,
@@ -3148,8 +3148,8 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
                 const global = self.global_symbols.items[i];
                 sym.* = .{
                     .st_name = global.st_name,
-                    .st_value = @intCast(u32, global.st_value),
-                    .st_size = @intCast(u32, global.st_size),
+                    .st_value = @as(u32, @intCast(global.st_value)),
+                    .st_size = @as(u32, @intCast(global.st_size)),
                     .st_info = global.st_info,
                     .st_other = global.st_other,
                     .st_shndx = global.st_shndx,
@@ -3194,19 +3194,19 @@ fn ptrWidthBytes(self: Elf) u8 {
 /// Does not necessarily match `ptrWidthBytes`; for example, it can be 2 bytes
 /// in a 32-bit ELF file.
 fn archPtrWidthBytes(self: Elf) u8 {
-    return @intCast(u8, self.base.options.target.ptrBitWidth() / 8);
+    return @as(u8, @intCast(self.base.options.target.ptrBitWidth() / 8));
 }
 
 fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
     return .{
         .p_type = phdr.p_type,
         .p_flags = phdr.p_flags,
-        .p_offset = @intCast(u32, phdr.p_offset),
-        .p_vaddr = @intCast(u32, phdr.p_vaddr),
-        .p_paddr = @intCast(u32, phdr.p_paddr),
-        .p_filesz = @intCast(u32, phdr.p_filesz),
-        .p_memsz = @intCast(u32, phdr.p_memsz),
-        .p_align = @intCast(u32, phdr.p_align),
+        .p_offset = @as(u32, @intCast(phdr.p_offset)),
+        .p_vaddr = @as(u32, @intCast(phdr.p_vaddr)),
+        .p_paddr = @as(u32, @intCast(phdr.p_paddr)),
+        .p_filesz = @as(u32, @intCast(phdr.p_filesz)),
+        .p_memsz = @as(u32, @intCast(phdr.p_memsz)),
+        .p_align = @as(u32, @intCast(phdr.p_align)),
     };
 }
 
@@ -3214,14 +3214,14 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
     return .{
         .sh_name = shdr.sh_name,
         .sh_type = shdr.sh_type,
-        .sh_flags = @intCast(u32, shdr.sh_flags),
-        .sh_addr = @intCast(u32, shdr.sh_addr),
-        .sh_offset = @intCast(u32, shdr.sh_offset),
-        .sh_size = @intCast(u32, shdr.sh_size),
+        .sh_flags = @as(u32, @intCast(shdr.sh_flags)),
+        .sh_addr = @as(u32, @intCast(shdr.sh_addr)),
+        .sh_offset = @as(u32, @intCast(shdr.sh_offset)),
+        .sh_size = @as(u32, @intCast(shdr.sh_size)),
         .sh_link = shdr.sh_link,
         .sh_info = shdr.sh_info,
-        .sh_addralign = @intCast(u32, shdr.sh_addralign),
-        .sh_entsize = @intCast(u32, shdr.sh_entsize),
+        .sh_addralign = @as(u32, @intCast(shdr.sh_addralign)),
+        .sh_entsize = @as(u32, @intCast(shdr.sh_entsize)),
     };
 }
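
The two-argument pointer builtins migrate the same way, and nested casts get one `@as` wrapper per layer. A sketch of the address conversion used for `process_vm_writev` above (the helper name `remoteBase` is hypothetical; it assumes the address is nonzero and fits in `usize`):

    fn remoteBase(st_value: u64) [*]u8 {
        // Old: @ptrFromInt([*]u8, @intCast(usize, st_value))
        // New: both builtins infer their result types, so each is pinned:
        return @as([*]u8, @ptrFromInt(@as(usize, @intCast(st_value))));
    }
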
 
src/link/MachO.zig
@@ -741,7 +741,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
             };
             const sym = self.getSymbol(global);
             try lc_writer.writeStruct(macho.entry_point_command{
-                .entryoff = @intCast(u32, sym.n_value - seg.vmaddr),
+                .entryoff = @as(u32, @intCast(sym.n_value - seg.vmaddr)),
                 .stacksize = self.base.options.stack_size_override orelse 0,
             });
         },
@@ -757,7 +757,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
     });
     try load_commands.writeBuildVersionLC(&self.base.options, lc_writer);
 
-    const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @intCast(u32, lc_buffer.items.len);
+    const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + @as(u32, @intCast(lc_buffer.items.len));
     try lc_writer.writeStruct(self.uuid_cmd);
 
     try load_commands.writeLoadDylibLCs(self.dylibs.items, self.referenced_dylibs.keys(), lc_writer);
@@ -768,7 +768,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
 
     const ncmds = load_commands.calcNumOfLCs(lc_buffer.items);
     try self.base.file.?.pwriteAll(lc_buffer.items, @sizeOf(macho.mach_header_64));
-    try self.writeHeader(ncmds, @intCast(u32, lc_buffer.items.len));
+    try self.writeHeader(ncmds, @as(u32, @intCast(lc_buffer.items.len)));
     try self.writeUuid(comp, uuid_cmd_offset, requires_codesig);
 
     if (codesig) |*csig| {
@@ -992,7 +992,7 @@ pub fn parseDylib(
     const contents = try file.readToEndAllocOptions(gpa, file_size, file_size, @alignOf(u64), null);
     defer gpa.free(contents);
 
-    const dylib_id = @intCast(u16, self.dylibs.items.len);
+    const dylib_id = @as(u16, @intCast(self.dylibs.items.len));
     var dylib = Dylib{ .weak = opts.weak };
 
     dylib.parseFromBinary(
@@ -1412,7 +1412,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
 
 pub fn createAtom(self: *MachO) !Atom.Index {
     const gpa = self.base.allocator;
-    const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+    const atom_index = @as(Atom.Index, @intCast(self.atoms.items.len));
     const atom = try self.atoms.addOne(gpa);
     const sym_index = try self.allocateSymbol();
     try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
@@ -1588,14 +1588,14 @@ fn resolveSymbolsInDylibs(self: *MachO, actions: *std.ArrayList(ResolveAction))
         for (self.dylibs.items, 0..) |dylib, id| {
             if (!dylib.symbols.contains(sym_name)) continue;
 
-            const dylib_id = @intCast(u16, id);
+            const dylib_id = @as(u16, @intCast(id));
             if (!self.referenced_dylibs.contains(dylib_id)) {
                 try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {});
             }
 
             const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;
             sym.n_type |= macho.N_EXT;
-            sym.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER;
+            sym.n_desc = @as(u16, @intCast(ordinal + 1)) * macho.N_SYMBOL_RESOLVER;
 
             if (dylib.weak) {
                 sym.n_desc |= macho.N_WEAK_REF;
@@ -1789,7 +1789,7 @@ fn allocateSymbol(self: *MachO) !u32 {
             break :blk index;
         } else {
             log.debug("  (allocating symbol index {d})", .{self.locals.items.len});
-            const index = @intCast(u32, self.locals.items.len);
+            const index = @as(u32, @intCast(self.locals.items.len));
             _ = self.locals.addOneAssumeCapacity();
             break :blk index;
         }
@@ -1815,7 +1815,7 @@ fn allocateGlobal(self: *MachO) !u32 {
             break :blk index;
         } else {
             log.debug("  (allocating symbol index {d})", .{self.globals.items.len});
-            const index = @intCast(u32, self.globals.items.len);
+            const index = @as(u32, @intCast(self.globals.items.len));
             _ = self.globals.addOneAssumeCapacity();
             break :blk index;
         }
@@ -2563,12 +2563,12 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
     try Atom.addRelocation(self, atom_index, .{
         .type = .unsigned,
         .target = .{ .sym_index = sym_index, .file = null },
-        .offset = @intCast(u32, reloc_info.offset),
+        .offset = @as(u32, @intCast(reloc_info.offset)),
         .addend = reloc_info.addend,
         .pcrel = false,
         .length = 3,
     });
-    try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset));
+    try Atom.addRebase(self, atom_index, @as(u32, @intCast(reloc_info.offset)));
 
     return 0;
 }
@@ -2582,7 +2582,7 @@ fn populateMissingMetadata(self: *MachO) !void {
 
     if (self.pagezero_segment_cmd_index == null) {
         if (pagezero_vmsize > 0) {
-            self.pagezero_segment_cmd_index = @intCast(u8, self.segments.items.len);
+            self.pagezero_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
             try self.segments.append(gpa, .{
                 .segname = makeStaticString("__PAGEZERO"),
                 .vmsize = pagezero_vmsize,
@@ -2593,7 +2593,7 @@ fn populateMissingMetadata(self: *MachO) !void {
 
     if (self.header_segment_cmd_index == null) {
         // The first __TEXT segment is immovable and covers the MachO header and load commands.
-        self.header_segment_cmd_index = @intCast(u8, self.segments.items.len);
+        self.header_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
         const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size);
         const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
 
@@ -2719,7 +2719,7 @@ fn populateMissingMetadata(self: *MachO) !void {
     }
 
     if (self.linkedit_segment_cmd_index == null) {
-        self.linkedit_segment_cmd_index = @intCast(u8, self.segments.items.len);
+        self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
 
         try self.segments.append(gpa, .{
             .segname = makeStaticString("__LINKEDIT"),
@@ -2752,8 +2752,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
     const gpa = self.base.allocator;
     // In an incremental context, we create one section per segment pairing. This way,
     // we can move the segment in the raw file as we please.
-    const segment_id = @intCast(u8, self.segments.items.len);
-    const section_id = @intCast(u8, self.sections.slice().len);
+    const segment_id = @as(u8, @intCast(self.segments.items.len));
+    const section_id = @as(u8, @intCast(self.sections.slice().len));
     const vmaddr = blk: {
         const prev_segment = self.segments.items[segment_id - 1];
         break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size);
@@ -2788,7 +2788,7 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
         .sectname = makeStaticString(sectname),
         .segname = makeStaticString(segname),
         .addr = mem.alignForward(u64, vmaddr, opts.alignment),
-        .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment),
+        .offset = mem.alignForward(u32, @as(u32, @intCast(off)), opts.alignment),
         .size = opts.size,
         .@"align" = math.log2(opts.alignment),
         .flags = opts.flags,
@@ -2832,7 +2832,7 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void {
             current_size,
         );
         if (amt != current_size) return error.InputOutput;
-        header.offset = @intCast(u32, new_offset);
+        header.offset = @as(u32, @intCast(new_offset));
         segment.fileoff = new_offset;
     }
 
@@ -2862,7 +2862,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
 
     // TODO: enforce order by increasing VM addresses in self.sections container.
     for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
-        const index = @intCast(u8, sect_id + 1 + next_sect_id);
+        const index = @as(u8, @intCast(sect_id + 1 + next_sect_id));
         const next_segment = self.getSegmentPtr(index);
         next_header.addr += diff;
         next_segment.vmaddr += diff;
@@ -2972,7 +2972,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
         self.segment_table_dirty = true;
     }
 
-    const align_pow = @intCast(u32, math.log2(alignment));
+    const align_pow = @as(u32, @intCast(math.log2(alignment)));
     if (header.@"align" < align_pow) {
         header.@"align" = align_pow;
     }
@@ -3015,7 +3015,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u
 
 fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
     for (self.segments.items, 0..) |seg, i| {
-        const indexes = self.getSectionIndexes(@intCast(u8, i));
+        const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
         try writer.writeStruct(seg);
         for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
             try writer.writeStruct(header);
@@ -3029,7 +3029,7 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
     seg.vmsize = 0;
 
     for (self.segments.items, 0..) |segment, id| {
-        if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
+        if (self.linkedit_segment_cmd_index.? == @as(u8, @intCast(id))) continue;
         if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
             seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size);
         }
@@ -3115,7 +3115,7 @@ fn collectBindDataFromTableSection(self: *MachO, sect_id: u8, bind: anytype, tab
         log.debug("    | bind at {x}, import('{s}') in dylib({d})", .{
             base_offset + offset,
             self.getSymbolName(entry),
-            @divTrunc(@bitCast(i16, bind_sym.n_desc), macho.N_SYMBOL_RESOLVER),
+            @divTrunc(@as(i16, @bitCast(bind_sym.n_desc)), macho.N_SYMBOL_RESOLVER),
         });
         if (bind_sym.weakRef()) {
             log.debug("    | marking as weak ref ", .{});
@@ -3150,7 +3150,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
             const bind_sym = self.getSymbol(binding.target);
             const bind_sym_name = self.getSymbolName(binding.target);
             const dylib_ordinal = @divTrunc(
-                @bitCast(i16, bind_sym.n_desc),
+                @as(i16, @bitCast(bind_sym.n_desc)),
                 macho.N_SYMBOL_RESOLVER,
             );
             log.debug("    | bind at {x}, import('{s}') in dylib({d})", .{
@@ -3285,14 +3285,14 @@ fn writeDyldInfoData(self: *MachO) !void {
     try self.base.file.?.pwriteAll(buffer, rebase_off);
     try self.populateLazyBindOffsetsInStubHelper(lazy_bind);
 
-    self.dyld_info_cmd.rebase_off = @intCast(u32, rebase_off);
-    self.dyld_info_cmd.rebase_size = @intCast(u32, rebase_size_aligned);
-    self.dyld_info_cmd.bind_off = @intCast(u32, bind_off);
-    self.dyld_info_cmd.bind_size = @intCast(u32, bind_size_aligned);
-    self.dyld_info_cmd.lazy_bind_off = @intCast(u32, lazy_bind_off);
-    self.dyld_info_cmd.lazy_bind_size = @intCast(u32, lazy_bind_size_aligned);
-    self.dyld_info_cmd.export_off = @intCast(u32, export_off);
-    self.dyld_info_cmd.export_size = @intCast(u32, export_size_aligned);
+    self.dyld_info_cmd.rebase_off = @as(u32, @intCast(rebase_off));
+    self.dyld_info_cmd.rebase_size = @as(u32, @intCast(rebase_size_aligned));
+    self.dyld_info_cmd.bind_off = @as(u32, @intCast(bind_off));
+    self.dyld_info_cmd.bind_size = @as(u32, @intCast(bind_size_aligned));
+    self.dyld_info_cmd.lazy_bind_off = @as(u32, @intCast(lazy_bind_off));
+    self.dyld_info_cmd.lazy_bind_size = @as(u32, @intCast(lazy_bind_size_aligned));
+    self.dyld_info_cmd.export_off = @as(u32, @intCast(export_off));
+    self.dyld_info_cmd.export_size = @as(u32, @intCast(export_size_aligned));
 }
 
 fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void {
@@ -3337,7 +3337,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
 
     for (self.locals.items, 0..) |sym, sym_id| {
         if (sym.n_strx == 0) continue; // no name, skip
-        const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
+        const sym_loc = SymbolWithLoc{ .sym_index = @as(u32, @intCast(sym_id)), .file = null };
         if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
         if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
         try locals.append(sym);
@@ -3363,16 +3363,16 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
         const sym = self.getSymbol(global);
         if (sym.n_strx == 0) continue; // no name, skip
         if (!sym.undf()) continue; // not an import, skip
-        const new_index = @intCast(u32, imports.items.len);
+        const new_index = @as(u32, @intCast(imports.items.len));
         var out_sym = sym;
         out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(global));
         try imports.append(out_sym);
         try imports_table.putNoClobber(global, new_index);
     }
 
-    const nlocals = @intCast(u32, locals.items.len);
-    const nexports = @intCast(u32, exports.items.len);
-    const nimports = @intCast(u32, imports.items.len);
+    const nlocals = @as(u32, @intCast(locals.items.len));
+    const nexports = @as(u32, @intCast(exports.items.len));
+    const nimports = @as(u32, @intCast(imports.items.len));
     const nsyms = nlocals + nexports + nimports;
 
     const seg = self.getLinkeditSegmentPtr();
@@ -3392,7 +3392,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
     log.debug("writing symtab from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
     try self.base.file.?.pwriteAll(buffer.items, offset);
 
-    self.symtab_cmd.symoff = @intCast(u32, offset);
+    self.symtab_cmd.symoff = @as(u32, @intCast(offset));
     self.symtab_cmd.nsyms = nsyms;
 
     return SymtabCtx{
@@ -3421,8 +3421,8 @@ fn writeStrtab(self: *MachO) !void {
 
     try self.base.file.?.pwriteAll(buffer, offset);
 
-    self.symtab_cmd.stroff = @intCast(u32, offset);
-    self.symtab_cmd.strsize = @intCast(u32, needed_size_aligned);
+    self.symtab_cmd.stroff = @as(u32, @intCast(offset));
+    self.symtab_cmd.strsize = @as(u32, @intCast(needed_size_aligned));
 }
 
 const SymtabCtx = struct {
@@ -3434,8 +3434,8 @@ const SymtabCtx = struct {
 
 fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
     const gpa = self.base.allocator;
-    const nstubs = @intCast(u32, self.stub_table.lookup.count());
-    const ngot_entries = @intCast(u32, self.got_table.lookup.count());
+    const nstubs = @as(u32, @intCast(self.stub_table.lookup.count()));
+    const ngot_entries = @as(u32, @intCast(self.got_table.lookup.count()));
     const nindirectsyms = nstubs * 2 + ngot_entries;
     const iextdefsym = ctx.nlocalsym;
     const iundefsym = iextdefsym + ctx.nextdefsym;
@@ -3503,7 +3503,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
     self.dysymtab_cmd.nextdefsym = ctx.nextdefsym;
     self.dysymtab_cmd.iundefsym = iundefsym;
     self.dysymtab_cmd.nundefsym = ctx.nundefsym;
-    self.dysymtab_cmd.indirectsymoff = @intCast(u32, offset);
+    self.dysymtab_cmd.indirectsymoff = @as(u32, @intCast(offset));
     self.dysymtab_cmd.nindirectsyms = nindirectsyms;
 }
 
@@ -3530,8 +3530,8 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
     // except for code signature data.
     try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
 
-    self.codesig_cmd.dataoff = @intCast(u32, offset);
-    self.codesig_cmd.datasize = @intCast(u32, needed_size);
+    self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
+    self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
 }
 
 fn writeCodeSignature(self: *MachO, comp: *const Compilation, code_sig: *CodeSignature) !void {
@@ -3711,7 +3711,7 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 {
 
 fn getSegmentByName(self: MachO, segname: []const u8) ?u8 {
     for (self.segments.items, 0..) |seg, i| {
-        if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
+        if (mem.eql(u8, segname, seg.segName())) return @as(u8, @intCast(i));
     } else return null;
 }
 
@@ -3734,15 +3734,15 @@ pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8)
     // TODO investigate caching with a hashmap
     for (self.sections.items(.header), 0..) |header, i| {
         if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
-            return @intCast(u8, i);
+            return @as(u8, @intCast(i));
     } else return null;
 }
 
 pub fn getSectionIndexes(self: MachO, segment_index: u8) struct { start: u8, end: u8 } {
     var start: u8 = 0;
     const nsects = for (self.segments.items, 0..) |seg, i| {
-        if (i == segment_index) break @intCast(u8, seg.nsects);
-        start += @intCast(u8, seg.nsects);
+        if (i == segment_index) break @as(u8, @intCast(seg.nsects));
+        start += @as(u8, @intCast(seg.nsects));
     } else 0;
     return .{ .start = start, .end = start + nsects };
 }
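
`@bitCast` follows the same pattern: it reinterprets bits at a fixed width, and the new syntax infers that width from the result type. A sketch of the dylib-ordinal computation above (the helper name `dylibOrdinal` is hypothetical; the local constant mirrors `macho.N_SYMBOL_RESOLVER`):

    const N_SYMBOL_RESOLVER: i16 = 0x100;

    fn dylibOrdinal(n_desc: u16) i16 {
        // Old: @divTrunc(@bitCast(i16, n_desc), macho.N_SYMBOL_RESOLVER)
        // New: @bitCast reinterprets the u16 as i16; @as pins the result type.
        return @divTrunc(@as(i16, @bitCast(n_desc)), N_SYMBOL_RESOLVER);
    }
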
src/link/Plan9.zig
@@ -295,7 +295,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
             .sym_index = blk: {
                 try self.syms.append(gpa, undefined);
                 try self.syms.append(gpa, undefined);
-                break :blk @intCast(u32, self.syms.items.len - 1);
+                break :blk @as(u32, @intCast(self.syms.items.len - 1));
             },
         };
         try fn_map_res.value_ptr.functions.put(gpa, decl_index, out);
@@ -485,7 +485,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo
         .ty = decl.ty,
         .val = decl_val,
     }, &code_buffer, .{ .none = {} }, .{
-        .parent_atom_index = @intCast(Atom.Index, atom_idx),
+        .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)),
     });
     const code = switch (res) {
         .ok => code_buffer.items,
@@ -562,10 +562,10 @@ pub fn flush(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.Node) li
 
 pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void {
     if (delta_line > 0 and delta_line < 65) {
-        const toappend = @intCast(u8, delta_line);
+        const toappend = @as(u8, @intCast(delta_line));
         try l.append(toappend);
     } else if (delta_line < 0 and delta_line > -65) {
-        const toadd: u8 = @intCast(u8, -delta_line + 64);
+        const toadd: u8 = @as(u8, @intCast(-delta_line + 64));
         try l.append(toadd);
     } else if (delta_line != 0) {
         try l.append(0);
@@ -675,7 +675,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
                 const out = entry.value_ptr.*;
                 {
                     // connect the previous decl to the next
-                    const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount);
+                    const delta_line = @as(i32, @intCast(out.start_line)) - @as(i32, @intCast(linecount));
 
                     try changeLine(&linecountinfo, delta_line);
                     // TODO change the pc too (maybe?)
@@ -692,7 +692,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
                 atom.offset = off;
                 log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
                 if (!self.sixtyfour_bit) {
-                    mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+                    mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
                 } else {
                     mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
                 }
@@ -721,7 +721,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             text_i += code.len;
             text_atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
             } else {
                 mem.writeInt(u64, got_table[text_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
             }
@@ -749,7 +749,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             data_i += code.len;
             atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
             } else {
                 mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
             }
@@ -772,7 +772,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
                 data_i += code.len;
                 atom.offset = off;
                 if (!self.sixtyfour_bit) {
-                    mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+                    mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
                 } else {
                     mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
                 }
@@ -792,7 +792,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             data_i += code.len;
             data_atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
             } else {
                 mem.writeInt(u64, got_table[data_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
             }
@@ -815,13 +815,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
     // generate the header
     self.hdr = .{
         .magic = self.magic,
-        .text = @intCast(u32, text_i),
-        .data = @intCast(u32, data_i),
-        .syms = @intCast(u32, syms.len),
+        .text = @as(u32, @intCast(text_i)),
+        .data = @as(u32, @intCast(data_i)),
+        .syms = @as(u32, @intCast(syms.len)),
         .bss = 0,
         .spsz = 0,
-        .pcsz = @intCast(u32, linecountinfo.items.len),
-        .entry = @intCast(u32, self.entry_val.?),
+        .pcsz = @as(u32, @intCast(linecountinfo.items.len)),
+        .entry = @as(u32, @intCast(self.entry_val.?)),
     };
     @memcpy(hdr_slice, self.hdr.toU8s()[0..hdr_size]);
     // write the fat header for 64 bit entry points
@@ -847,13 +847,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
                 const code = source_atom.code.getCode(self);
 
                 if (reloc.pcrel) {
-                    const disp = @intCast(i32, target_offset) - @intCast(i32, source_atom.offset.?) - 4 - @intCast(i32, offset);
-                    mem.writeInt(i32, code[@intCast(usize, offset)..][0..4], @intCast(i32, disp), self.base.options.target.cpu.arch.endian());
+                    const disp = @as(i32, @intCast(target_offset)) - @as(i32, @intCast(source_atom.offset.?)) - 4 - @as(i32, @intCast(offset));
+                    mem.writeInt(i32, code[@as(usize, @intCast(offset))..][0..4], @as(i32, @intCast(disp)), self.base.options.target.cpu.arch.endian());
                 } else {
                     if (!self.sixtyfour_bit) {
-                        mem.writeInt(u32, code[@intCast(usize, offset)..][0..4], @intCast(u32, target_offset + addend), self.base.options.target.cpu.arch.endian());
+                        mem.writeInt(u32, code[@as(usize, @intCast(offset))..][0..4], @as(u32, @intCast(target_offset + addend)), self.base.options.target.cpu.arch.endian());
                     } else {
-                        mem.writeInt(u64, code[@intCast(usize, offset)..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian());
+                        mem.writeInt(u64, code[@as(usize, @intCast(offset))..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian());
                     }
                 }
                 log.debug("relocating the address of '{s}' + {d} into '{s}' + {d} (({s}[{d}] = 0x{x} + 0x{x})", .{ target_symbol.name, addend, source_atom_symbol.name, offset, source_atom_symbol.name, offset, target_offset, addend });
@@ -960,7 +960,7 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
 
 fn createAtom(self: *Plan9) !Atom.Index {
     const gpa = self.base.allocator;
-    const index = @intCast(Atom.Index, self.atoms.items.len);
+    const index = @as(Atom.Index, @intCast(self.atoms.items.len));
     const atom = try self.atoms.addOne(gpa);
     atom.* = .{
         .type = .t,
@@ -1060,7 +1060,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
         &required_alignment,
         &code_buffer,
         .none,
-        .{ .parent_atom_index = @intCast(Atom.Index, atom_index) },
+        .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_index)) },
     );
     const code = switch (res) {
         .ok => code_buffer.items,
@@ -1188,7 +1188,7 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void {
     // log.debug("write sym{{name: {s}, value: {x}}}", .{ sym.name, sym.value });
     if (sym.type == .bad) return; // we don't want to write free'd symbols
     if (!self.sixtyfour_bit) {
-        try w.writeIntBig(u32, @intCast(u32, sym.value));
+        try w.writeIntBig(u32, @as(u32, @intCast(sym.value)));
     } else {
         try w.writeIntBig(u64, sym.value);
     }
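
Where casts are mixed into arithmetic, the rewrite preserves the exact evaluation order. A sketch of the pc-relative displacement computed in the relocation loop above (the helper name `pcRelDisp` is hypothetical; it assumes every operand fits in `i32`):

    fn pcRelDisp(target_offset: u64, source_offset: u64, offset: u64) i32 {
        // Each operand is narrowed (with a safety check) before the
        // subtraction, exactly as in the old @intCast(i32, ...) form.
        return @as(i32, @intCast(target_offset)) -
            @as(i32, @intCast(source_offset)) - 4 -
            @as(i32, @intCast(offset));
    }
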
src/link/strtab.zig
@@ -45,7 +45,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
                 const off = entry.key_ptr.*;
                 const save = entry.value_ptr.*;
                 if (!save) continue;
-                const new_off = @intCast(u32, buffer.items.len);
+                const new_off = @as(u32, @intCast(buffer.items.len));
                 buffer.appendSliceAssumeCapacity(self.getAssumeExists(off));
                 idx_map.putAssumeCapacityNoClobber(off, new_off);
             }
@@ -73,7 +73,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
             }
 
             try self.buffer.ensureUnusedCapacity(gpa, string.len + 1);
-            const new_off = @intCast(u32, self.buffer.items.len);
+            const new_off = @as(u32, @intCast(self.buffer.items.len));
 
             log.debug("writing new string '{s}' at offset 0x{x}", .{ string, new_off });
 
@@ -103,7 +103,7 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
         pub fn get(self: Self, off: u32) ?[]const u8 {
             log.debug("getting string at 0x{x}", .{off});
             if (off >= self.buffer.items.len) return null;
-            return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.items.ptr + off), 0);
+            return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.buffer.items.ptr + off)), 0);
         }
 
         pub fn getAssumeExists(self: Self, off: u32) []const u8 {
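
`@ptrCast` gets the same treatment. A sketch of the string-table lookup above (the helper name `stringAt` is hypothetical; it assumes `off` points at the start of a 0-terminated string inside `buffer`):

    const std = @import("std");

    fn stringAt(buffer: []const u8, off: u32) [:0]const u8 {
        // Old: mem.sliceTo(@ptrCast([*:0]const u8, buffer.ptr + off), 0)
        // New: @ptrCast infers the pointer type, pinned here with @as.
        return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(buffer.ptr + off)), 0);
    }
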
src/link/table_section.zig
@@ -18,7 +18,7 @@ pub fn TableSection(comptime Entry: type) type {
                     break :blk index;
                 } else {
                     log.debug("  (allocating entry at index {d})", .{self.entries.items.len});
-                    const index = @intCast(u32, self.entries.items.len);
+                    const index = @as(u32, @intCast(self.entries.items.len));
                     _ = self.entries.addOneAssumeCapacity();
                     break :blk index;
                 }
src/link/Wasm.zig
@@ -317,7 +317,7 @@ pub const StringTable = struct {
         }
 
         try table.string_data.ensureUnusedCapacity(allocator, string.len + 1);
-        const offset = @intCast(u32, table.string_data.items.len);
+        const offset = @as(u32, @intCast(table.string_data.items.len));
 
         log.debug("writing new string '{s}' at offset 0x{x}", .{ string, offset });
 
@@ -333,7 +333,7 @@ pub const StringTable = struct {
     /// Asserts offset does not exceed bounds.
     pub fn get(table: StringTable, off: u32) []const u8 {
         assert(off < table.string_data.items.len);
-        return mem.sliceTo(@ptrCast([*:0]const u8, table.string_data.items.ptr + off), 0);
+        return mem.sliceTo(@as([*:0]const u8, @ptrCast(table.string_data.items.ptr + off)), 0);
     }
 
     /// Returns the offset of a given string when it exists.
@@ -396,7 +396,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
         // For object files we will import the stack pointer symbol
         if (options.output_mode == .Obj) {
             symbol.setUndefined(true);
-            symbol.index = @intCast(u32, wasm_bin.imported_globals_count);
+            symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count));
             wasm_bin.imported_globals_count += 1;
             try wasm_bin.imports.putNoClobber(
                 allocator,
@@ -408,7 +408,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
                 },
             );
         } else {
-            symbol.index = @intCast(u32, wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len);
+            symbol.index = @as(u32, @intCast(wasm_bin.imported_globals_count + wasm_bin.wasm_globals.items.len));
             symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
             const global = try wasm_bin.wasm_globals.addOne(allocator);
             global.* = .{
@@ -431,7 +431,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
         };
         if (options.output_mode == .Obj or options.import_table) {
             symbol.setUndefined(true);
-            symbol.index = @intCast(u32, wasm_bin.imported_tables_count);
+            symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count));
             wasm_bin.imported_tables_count += 1;
             try wasm_bin.imports.put(allocator, loc, .{
                 .module_name = try wasm_bin.string_table.put(allocator, wasm_bin.host_name),
@@ -439,7 +439,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
                 .kind = .{ .table = table },
             });
         } else {
-            symbol.index = @intCast(u32, wasm_bin.imported_tables_count + wasm_bin.tables.items.len);
+            symbol.index = @as(u32, @intCast(wasm_bin.imported_tables_count + wasm_bin.tables.items.len));
             try wasm_bin.tables.append(allocator, table);
             if (options.export_table) {
                 symbol.setFlag(.WASM_SYM_EXPORTED);
@@ -519,7 +519,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol
 }
 
 fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc {
-    const sym_index = @intCast(u32, wasm.symbols.items.len);
+    const sym_index = @as(u32, @intCast(wasm.symbols.items.len));
     const loc: SymbolLoc = .{ .index = sym_index, .file = null };
     try wasm.symbols.append(wasm.base.allocator, .{
         .name = name_offset,
@@ -588,7 +588,7 @@ pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom.
 
 /// Creates a new empty `Atom` and returns its `Atom.Index`
 fn createAtom(wasm: *Wasm) !Atom.Index {
-    const index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+    const index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
     const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
     atom.* = Atom.empty;
     atom.sym_index = try wasm.allocateSymbol();
@@ -669,7 +669,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
     log.debug("Resolving symbols in object: '{s}'", .{object.name});
 
     for (object.symtable, 0..) |symbol, i| {
-        const sym_index = @intCast(u32, i);
+        const sym_index = @as(u32, @intCast(i));
         const location: SymbolLoc = .{
             .file = object_index,
             .index = sym_index,
@@ -830,7 +830,7 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
             // Symbol is found in unparsed object file within current archive.
             // Parse object and resolve symbols again before we check remaining
             // undefined symbols.
-            const object_file_index = @intCast(u16, wasm.objects.items.len);
+            const object_file_index = @as(u16, @intCast(wasm.objects.items.len));
             var object = try archive.parseObject(wasm.base.allocator, offset.items[0]);
             try wasm.objects.append(wasm.base.allocator, object);
             try wasm.resolveSymbolsInObject(object_file_index);
@@ -1046,7 +1046,7 @@ fn setupTLSRelocationsFunction(wasm: *Wasm) !void {
 
         try writer.writeByte(std.wasm.opcode(.i32_add));
         try writer.writeByte(std.wasm.opcode(.global_set));
-        try leb.writeULEB128(writer, wasm.imported_globals_count + @intCast(u32, wasm.wasm_globals.items.len + got_index));
+        try leb.writeULEB128(writer, wasm.imported_globals_count + @as(u32, @intCast(wasm.wasm_globals.items.len + got_index)));
     }
     try writer.writeByte(std.wasm.opcode(.end));
 
@@ -1091,7 +1091,7 @@ fn validateFeatures(
     // linked object file so we can test them.
     for (wasm.objects.items, 0..) |object, object_index| {
         for (object.features) |feature| {
-            const value = @intCast(u16, object_index) << 1 | @as(u1, 1);
+            const value = @as(u16, @intCast(object_index)) << 1 | @as(u1, 1);
             switch (feature.prefix) {
                 .used => {
                     used[@intFromEnum(feature.tag)] = value;
@@ -1117,12 +1117,12 @@ fn validateFeatures(
     // and insert it into the 'allowed' set. When features are not inferred,
     // we validate that a used feature is allowed.
     for (used, 0..) |used_set, used_index| {
-        const is_enabled = @truncate(u1, used_set) != 0;
+        const is_enabled = @as(u1, @truncate(used_set)) != 0;
         if (infer) {
             allowed[used_index] = is_enabled;
             emit_features_count.* += @intFromBool(is_enabled);
         } else if (is_enabled and !allowed[used_index]) {
-            log.err("feature '{}' not allowed, but used by linked object", .{@enumFromInt(types.Feature.Tag, used_index)});
+            log.err("feature '{}' not allowed, but used by linked object", .{@as(types.Feature.Tag, @enumFromInt(used_index))});
             log.err("  defined in '{s}'", .{wasm.objects.items[used_set >> 1].name});
             valid_feature_set = false;
         }
@@ -1134,7 +1134,7 @@ fn validateFeatures(
 
     if (wasm.base.options.shared_memory) {
         const disallowed_feature = disallowed[@intFromEnum(types.Feature.Tag.shared_mem)];
-        if (@truncate(u1, disallowed_feature) != 0) {
+        if (@as(u1, @truncate(disallowed_feature)) != 0) {
             log.err(
                 "shared-memory is disallowed by '{s}' because it wasn't compiled with 'atomics' and 'bulk-memory' features enabled",
                 .{wasm.objects.items[disallowed_feature >> 1].name},
@@ -1163,7 +1163,7 @@ fn validateFeatures(
             if (feature.prefix == .disallowed) continue; // already defined in 'disallowed' set.
             // from here a feature is always used
             const disallowed_feature = disallowed[@intFromEnum(feature.tag)];
-            if (@truncate(u1, disallowed_feature) != 0) {
+            if (@as(u1, @truncate(disallowed_feature)) != 0) {
                 log.err("feature '{}' is disallowed, but used by linked object", .{feature.tag});
                 log.err("  disallowed by '{s}'", .{wasm.objects.items[disallowed_feature >> 1].name});
                 log.err("  used in '{s}'", .{object.name});
@@ -1175,9 +1175,9 @@ fn validateFeatures(
 
         // validate the linked object file has each required feature
         for (required, 0..) |required_feature, feature_index| {
-            const is_required = @truncate(u1, required_feature) != 0;
+            const is_required = @as(u1, @truncate(required_feature)) != 0;
             if (is_required and !object_used_features[feature_index]) {
-                log.err("feature '{}' is required but not used in linked object", .{@enumFromInt(types.Feature.Tag, feature_index)});
+                log.err("feature '{}' is required but not used in linked object", .{@as(types.Feature.Tag, @enumFromInt(feature_index))});
                 log.err("  required by '{s}'", .{wasm.objects.items[required_feature >> 1].name});
                 log.err("  missing in '{s}'", .{object.name});
                 valid_feature_set = false;
@@ -1333,7 +1333,7 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 {
         wasm.symbols.items[index] = symbol;
         return index;
     }
-    const index = @intCast(u32, wasm.symbols.items.len);
+    const index = @as(u32, @intCast(wasm.symbols.items.len));
     wasm.symbols.appendAssumeCapacity(symbol);
     return index;
 }
@@ -1485,7 +1485,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8
     try atom.code.appendSlice(wasm.base.allocator, code);
     try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
 
-    atom.size = @intCast(u32, code.len);
+    atom.size = @as(u32, @intCast(code.len));
     if (code.len == 0) return;
     atom.alignment = decl.getAlignment(mod);
 }
@@ -1589,7 +1589,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
     };
 
     const atom = wasm.getAtomPtr(atom_index);
-    atom.size = @intCast(u32, code.len);
+    atom.size = @as(u32, @intCast(code.len));
     try atom.code.appendSlice(wasm.base.allocator, code);
     return atom.sym_index;
 }
@@ -1617,7 +1617,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3
     symbol.setUndefined(true);
 
     const sym_index = if (wasm.symbols_free_list.popOrNull()) |index| index else blk: {
-        var index = @intCast(u32, wasm.symbols.items.len);
+        var index = @as(u32, @intCast(wasm.symbols.items.len));
         try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
         wasm.symbols.items.len += 1;
         break :blk index;
@@ -1654,15 +1654,15 @@ pub fn getDeclVAddr(
         try wasm.addTableFunction(target_symbol_index);
         try atom.relocs.append(wasm.base.allocator, .{
             .index = target_symbol_index,
-            .offset = @intCast(u32, reloc_info.offset),
+            .offset = @as(u32, @intCast(reloc_info.offset)),
             .relocation_type = if (is_wasm32) .R_WASM_TABLE_INDEX_I32 else .R_WASM_TABLE_INDEX_I64,
         });
     } else {
         try atom.relocs.append(wasm.base.allocator, .{
             .index = target_symbol_index,
-            .offset = @intCast(u32, reloc_info.offset),
+            .offset = @as(u32, @intCast(reloc_info.offset)),
             .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_I32 else .R_WASM_MEMORY_ADDR_I64,
-            .addend = @intCast(i32, reloc_info.addend),
+            .addend = @as(i32, @intCast(reloc_info.addend)),
         });
     }
     // we do not know the final address at this point,
@@ -1840,7 +1840,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
 
 /// Appends a new entry to the indirect function table
 pub fn addTableFunction(wasm: *Wasm, symbol_index: u32) !void {
-    const index = @intCast(u32, wasm.function_table.count());
+    const index = @as(u32, @intCast(wasm.function_table.count()));
     try wasm.function_table.put(wasm.base.allocator, .{ .file = null, .index = symbol_index }, index);
 }
 
@@ -1971,7 +1971,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
     const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm);
     const final_index: u32 = switch (kind) {
         .function => result: {
-            const index = @intCast(u32, wasm.functions.count() + wasm.imported_functions_count);
+            const index = @as(u32, @intCast(wasm.functions.count() + wasm.imported_functions_count));
             const type_index = wasm.atom_types.get(atom_index).?;
             try wasm.functions.putNoClobber(
                 wasm.base.allocator,
@@ -1982,7 +1982,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
             symbol.index = index;
 
             if (wasm.code_section_index == null) {
-                wasm.code_section_index = @intCast(u32, wasm.segments.items.len);
+                wasm.code_section_index = @as(u32, @intCast(wasm.segments.items.len));
                 try wasm.segments.append(wasm.base.allocator, .{
                     .alignment = atom.alignment,
                     .size = atom.size,
@@ -2020,12 +2020,12 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
                 const index = gop.value_ptr.*;
                 wasm.segments.items[index].size += atom.size;
 
-                symbol.index = @intCast(u32, wasm.segment_info.getIndex(index).?);
+                symbol.index = @as(u32, @intCast(wasm.segment_info.getIndex(index).?));
                 // segment info already exists, so free its memory
                 wasm.base.allocator.free(segment_name);
                 break :result index;
             } else {
-                const index = @intCast(u32, wasm.segments.items.len);
+                const index = @as(u32, @intCast(wasm.segments.items.len));
                 var flags: u32 = 0;
                 if (wasm.base.options.shared_memory) {
                     flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
@@ -2038,7 +2038,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
                 });
                 gop.value_ptr.* = index;
 
-                const info_index = @intCast(u32, wasm.segment_info.count());
+                const info_index = @as(u32, @intCast(wasm.segment_info.count()));
                 try wasm.segment_info.put(wasm.base.allocator, index, segment_info);
                 symbol.index = info_index;
                 break :result index;
@@ -2074,13 +2074,13 @@ fn allocateDebugAtoms(wasm: *Wasm) !void {
     const allocAtom = struct {
         fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void {
             const index = maybe_index.* orelse idx: {
-                const index = @intCast(u32, bin.segments.items.len);
+                const index = @as(u32, @intCast(bin.segments.items.len));
                 try bin.appendDummySegment();
                 maybe_index.* = index;
                 break :idx index;
             };
             const atom = bin.getAtomPtr(atom_index);
-            atom.size = @intCast(u32, atom.code.items.len);
+            atom.size = @as(u32, @intCast(atom.code.items.len));
             bin.symbols.items[atom.sym_index].index = index;
             try bin.appendAtomAtIndex(index, atom_index);
         }
@@ -2215,7 +2215,7 @@ fn setupInitFunctions(wasm: *Wasm) !void {
             log.debug("appended init func '{s}'\n", .{object.string_table.get(symbol.name)});
             wasm.init_funcs.appendAssumeCapacity(.{
                 .index = init_func.symbol_index,
-                .file = @intCast(u16, file_index),
+                .file = @as(u16, @intCast(file_index)),
                 .priority = init_func.priority,
             });
         }
@@ -2248,7 +2248,7 @@ fn setupErrorsLen(wasm: *Wasm) !void {
         atom.deinit(wasm);
         break :blk index;
     } else new_atom: {
-        const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+        const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
         try wasm.symbol_atom.put(wasm.base.allocator, loc, atom_index);
         try wasm.managed_atoms.append(wasm.base.allocator, undefined);
         break :new_atom atom_index;
@@ -2257,7 +2257,7 @@ fn setupErrorsLen(wasm: *Wasm) !void {
     atom.* = Atom.empty;
     atom.sym_index = loc.index;
     atom.size = 2;
-    try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @intCast(u16, errors_len));
+    try atom.code.writer(wasm.base.allocator).writeIntLittle(u16, @as(u16, @intCast(errors_len)));
 
     try wasm.parseAtom(atom_index, .{ .data = .read_only });
 }
@@ -2325,7 +2325,7 @@ fn createSyntheticFunction(
     const symbol = loc.getSymbol(wasm);
     const ty_index = try wasm.putOrGetFuncType(func_ty);
     // create function with above type
-    const func_index = wasm.imported_functions_count + @intCast(u32, wasm.functions.count());
+    const func_index = wasm.imported_functions_count + @as(u32, @intCast(wasm.functions.count()));
     try wasm.functions.putNoClobber(
         wasm.base.allocator,
         .{ .file = null, .index = func_index },
@@ -2334,10 +2334,10 @@ fn createSyntheticFunction(
     symbol.index = func_index;
 
     // create the atom that will be output into the final binary
-    const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+    const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
     const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
     atom.* = .{
-        .size = @intCast(u32, function_body.items.len),
+        .size = @as(u32, @intCast(function_body.items.len)),
         .offset = 0,
         .sym_index = loc.index,
         .file = null,
@@ -2369,10 +2369,10 @@ pub fn createFunction(
 ) !u32 {
     const loc = try wasm.createSyntheticSymbol(symbol_name, .function);
 
-    const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+    const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
     const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
     atom.* = .{
-        .size = @intCast(u32, function_body.items.len),
+        .size = @as(u32, @intCast(function_body.items.len)),
         .offset = 0,
         .sym_index = loc.index,
         .file = null,
@@ -2386,7 +2386,7 @@ pub fn createFunction(
     symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); // ensure function does not get exported
 
     const section_index = wasm.code_section_index orelse idx: {
-        const index = @intCast(u32, wasm.segments.items.len);
+        const index = @as(u32, @intCast(wasm.segments.items.len));
         try wasm.appendDummySegment();
         break :idx index;
     };
@@ -2438,7 +2438,7 @@ fn initializeTLSFunction(wasm: *Wasm) !void {
         try writer.writeByte(std.wasm.opcode(.misc_prefix));
         try leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_init));
         // segment immediate
-        try leb.writeULEB128(writer, @intCast(u32, data_index));
+        try leb.writeULEB128(writer, @as(u32, @intCast(data_index)));
         // memory index immediate (always 0)
         try leb.writeULEB128(writer, @as(u32, 0));
     }
@@ -2567,16 +2567,16 @@ fn mergeSections(wasm: *Wasm) !void {
                 if (!gop.found_existing) {
                     gop.value_ptr.* = object.functions[index];
                 }
-                symbol.index = @intCast(u32, gop.index) + wasm.imported_functions_count;
+                symbol.index = @as(u32, @intCast(gop.index)) + wasm.imported_functions_count;
             },
             .global => {
                 const original_global = object.globals[index];
-                symbol.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+                symbol.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
                 try wasm.wasm_globals.append(wasm.base.allocator, original_global);
             },
             .table => {
                 const original_table = object.tables[index];
-                symbol.index = @intCast(u32, wasm.tables.items.len) + wasm.imported_tables_count;
+                symbol.index = @as(u32, @intCast(wasm.tables.items.len)) + wasm.imported_tables_count;
                 try wasm.tables.append(wasm.base.allocator, original_table);
             },
             else => unreachable,
@@ -2596,7 +2596,7 @@ fn mergeTypes(wasm: *Wasm) !void {
     // type inserted. If we do this for the same function multiple times,
     // it will be overwritten with the incorrect type.
     var dirty = std.AutoHashMap(u32, void).init(wasm.base.allocator);
-    try dirty.ensureUnusedCapacity(@intCast(u32, wasm.functions.count()));
+    try dirty.ensureUnusedCapacity(@as(u32, @intCast(wasm.functions.count())));
     defer dirty.deinit();
 
     for (wasm.resolved_symbols.keys()) |sym_loc| {
@@ -2660,10 +2660,10 @@ fn setupExports(wasm: *Wasm) !void {
             break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
         };
         const exp: types.Export = if (symbol.tag == .data) exp: {
-            const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
+            const global_index = @as(u32, @intCast(wasm.imported_globals_count + wasm.wasm_globals.items.len));
             try wasm.wasm_globals.append(wasm.base.allocator, .{
                 .global_type = .{ .valtype = .i32, .mutable = false },
-                .init = .{ .i32_const = @intCast(i32, symbol.virtual_address) },
+                .init = .{ .i32_const = @as(i32, @intCast(symbol.virtual_address)) },
             });
             break :exp .{
                 .name = export_name,
@@ -2734,10 +2734,10 @@ fn setupMemory(wasm: *Wasm) !void {
         memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
         memory_ptr += stack_size;
         // We always put the stack pointer global at index 0
-        wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
+        wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
     }
 
-    var offset: u32 = @intCast(u32, memory_ptr);
+    var offset: u32 = @as(u32, @intCast(memory_ptr));
     var data_seg_it = wasm.data_segments.iterator();
     while (data_seg_it.next()) |entry| {
         const segment = &wasm.segments.items[entry.value_ptr.*];
@@ -2747,26 +2747,26 @@ fn setupMemory(wasm: *Wasm) !void {
         if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
             if (wasm.findGlobalSymbol("__tls_size")) |loc| {
                 const sym = loc.getSymbol(wasm);
-                sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+                sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
                 try wasm.wasm_globals.append(wasm.base.allocator, .{
                     .global_type = .{ .valtype = .i32, .mutable = false },
-                    .init = .{ .i32_const = @intCast(i32, segment.size) },
+                    .init = .{ .i32_const = @as(i32, @intCast(segment.size)) },
                 });
             }
             if (wasm.findGlobalSymbol("__tls_align")) |loc| {
                 const sym = loc.getSymbol(wasm);
-                sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+                sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
                 try wasm.wasm_globals.append(wasm.base.allocator, .{
                     .global_type = .{ .valtype = .i32, .mutable = false },
-                    .init = .{ .i32_const = @intCast(i32, segment.alignment) },
+                    .init = .{ .i32_const = @as(i32, @intCast(segment.alignment)) },
                 });
             }
             if (wasm.findGlobalSymbol("__tls_base")) |loc| {
                 const sym = loc.getSymbol(wasm);
-                sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+                sym.index = @as(u32, @intCast(wasm.wasm_globals.items.len)) + wasm.imported_globals_count;
                 try wasm.wasm_globals.append(wasm.base.allocator, .{
                     .global_type = .{ .valtype = .i32, .mutable = wasm.base.options.shared_memory },
-                    .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @intCast(i32, memory_ptr) },
+                    .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @as(i32, @intCast(memory_ptr)) },
                 });
             }
         }
@@ -2782,21 +2782,21 @@ fn setupMemory(wasm: *Wasm) !void {
         memory_ptr = mem.alignForward(u64, memory_ptr, 4);
         const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data);
         const sym = loc.getSymbol(wasm);
-        sym.virtual_address = @intCast(u32, memory_ptr);
+        sym.virtual_address = @as(u32, @intCast(memory_ptr));
         memory_ptr += 4;
     }
 
     if (!place_stack_first and !is_obj) {
         memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
         memory_ptr += stack_size;
-        wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
+        wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
     }
 
     // One of the linked object files has a reference to the __heap_base symbol.
     // We must set its virtual address so it can be used in relocations.
     if (wasm.findGlobalSymbol("__heap_base")) |loc| {
         const symbol = loc.getSymbol(wasm);
-        symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment));
+        symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment)));
     }
 
     // Set up the max number of pages
@@ -2821,12 +2821,12 @@ fn setupMemory(wasm: *Wasm) !void {
     memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size);
     // In case we do not import memory, but define it ourselves,
     // set the minimum number of pages on the memory section.
-    wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size);
+    wasm.memories.limits.min = @as(u32, @intCast(memory_ptr / page_size));
     log.debug("Total memory pages: {d}", .{wasm.memories.limits.min});
 
     if (wasm.findGlobalSymbol("__heap_end")) |loc| {
         const symbol = loc.getSymbol(wasm);
-        symbol.virtual_address = @intCast(u32, memory_ptr);
+        symbol.virtual_address = @as(u32, @intCast(memory_ptr));
     }
 
     if (wasm.base.options.max_memory) |max_memory| {
@@ -2842,7 +2842,7 @@ fn setupMemory(wasm: *Wasm) !void {
             log.err("Maximum memory exceeds maxmium amount {d}", .{max_memory_allowed});
             return error.MemoryTooBig;
         }
-        wasm.memories.limits.max = @intCast(u32, max_memory / page_size);
+        wasm.memories.limits.max = @as(u32, @intCast(max_memory / page_size));
         wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_HAS_MAX);
         if (wasm.base.options.shared_memory) {
             wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_IS_SHARED);
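
The page arithmetic in this function follows from wasm sizing linear memory in 64 KiB pages (`std.wasm.page_size`). A worked example with an illustrative `memory_ptr` value:

    // 64 KiB pages: align up, then count pages (200_000 is illustrative).
    const page_size = std.wasm.page_size;                          // 65536
    const aligned = std.mem.alignForward(u64, 200_000, page_size); // 262_144
    const min_pages = aligned / page_size;                         // 4 pages
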
@@ -2857,7 +2857,7 @@ fn setupMemory(wasm: *Wasm) !void {
 pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32) !?u32 {
     const object: Object = wasm.objects.items[object_index];
     const relocatable_data = object.relocatable_data[relocatable_index];
-    const index = @intCast(u32, wasm.segments.items.len);
+    const index = @as(u32, @intCast(wasm.segments.items.len));
 
     switch (relocatable_data.type) {
         .data => {
@@ -3023,10 +3023,10 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
     const mod = wasm.base.options.module.?;
     for (mod.global_error_set.keys()) |error_name_nts| {
         const error_name = mod.intern_pool.stringToSlice(error_name_nts);
-        const len = @intCast(u32, error_name.len + 1); // names are 0-terminated
+        const len = @as(u32, @intCast(error_name.len + 1)); // names are 0-terminated
 
         const slice_ty = Type.slice_const_u8_sentinel_0;
-        const offset = @intCast(u32, atom.code.items.len);
+        const offset = @as(u32, @intCast(atom.code.items.len));
         // first we create the data for the slice of the name
         try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated
         try atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1);
@@ -3035,9 +3035,9 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
             .index = names_atom.sym_index,
             .relocation_type = .R_WASM_MEMORY_ADDR_I32,
             .offset = offset,
-            .addend = @intCast(i32, addend),
+            .addend = @as(i32, @intCast(addend)),
         });
-        atom.size += @intCast(u32, slice_ty.abiSize(mod));
+        atom.size += @as(u32, @intCast(slice_ty.abiSize(mod)));
         addend += len;
 
         // as we updated the error name table, we now store the actual name within the names atom
@@ -3063,7 +3063,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
 /// This initializes the index, appends a new segment,
 /// and finally, creates a managed `Atom`.
 pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index {
-    const new_index = @intCast(u32, wasm.segments.items.len);
+    const new_index = @as(u32, @intCast(wasm.segments.items.len));
     index.* = new_index;
     try wasm.appendDummySegment();
 
@@ -3294,7 +3294,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
     try wasm.parseInputFiles(positionals.items);
 
     for (wasm.objects.items, 0..) |_, object_index| {
-        try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
+        try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index)));
     }
 
     var emit_features_count: u32 = 0;
@@ -3309,7 +3309,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
     try wasm.setupImports();
 
     for (wasm.objects.items, 0..) |*object, object_index| {
-        try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm);
+        try object.parseIntoAtoms(gpa, @as(u16, @intCast(object_index)), wasm);
     }
 
     try wasm.allocateAtoms();
@@ -3382,7 +3382,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
     try wasm.parseInputFiles(positionals.items);
 
     for (wasm.objects.items, 0..) |_, object_index| {
-        try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
+        try wasm.resolveSymbolsInObject(@as(u16, @intCast(object_index)));
     }
 
     var emit_features_count: u32 = 0;
@@ -3446,7 +3446,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
     }
 
     for (wasm.objects.items, 0..) |*object, object_index| {
-        try object.parseIntoAtoms(wasm.base.allocator, @intCast(u16, object_index), wasm);
+        try object.parseIntoAtoms(wasm.base.allocator, @as(u16, @intCast(object_index)), wasm);
     }
 
     try wasm.allocateAtoms();
@@ -3497,11 +3497,11 @@ fn writeToFile(
         log.debug("Writing type section. Count: ({d})", .{wasm.func_types.items.len});
         for (wasm.func_types.items) |func_type| {
             try leb.writeULEB128(binary_writer, std.wasm.function_type);
-            try leb.writeULEB128(binary_writer, @intCast(u32, func_type.params.len));
+            try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.params.len)));
             for (func_type.params) |param_ty| {
                 try leb.writeULEB128(binary_writer, std.wasm.valtype(param_ty));
             }
-            try leb.writeULEB128(binary_writer, @intCast(u32, func_type.returns.len));
+            try leb.writeULEB128(binary_writer, @as(u32, @intCast(func_type.returns.len)));
             for (func_type.returns) |ret_ty| {
                 try leb.writeULEB128(binary_writer, std.wasm.valtype(ret_ty));
             }
@@ -3511,8 +3511,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .type,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, wasm.func_types.items.len),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(wasm.func_types.items.len)),
         );
         section_count += 1;
     }
@@ -3543,8 +3543,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .import,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, wasm.imports.count() + @intFromBool(import_memory)),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(wasm.imports.count() + @intFromBool(import_memory))),
         );
         section_count += 1;
     }
@@ -3560,8 +3560,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .function,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, wasm.functions.count()),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(wasm.functions.count())),
         );
         section_count += 1;
     }
@@ -3579,8 +3579,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .table,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, wasm.tables.items.len),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(wasm.tables.items.len)),
         );
         section_count += 1;
     }
@@ -3594,7 +3594,7 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .memory,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
             @as(u32, 1), // wasm currently only supports 1 linear memory segment
         );
         section_count += 1;
@@ -3614,8 +3614,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .global,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, wasm.wasm_globals.items.len),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(wasm.wasm_globals.items.len)),
         );
         section_count += 1;
     }
@@ -3626,14 +3626,14 @@ fn writeToFile(
 
         for (wasm.exports.items) |exp| {
             const name = wasm.string_table.get(exp.name);
-            try leb.writeULEB128(binary_writer, @intCast(u32, name.len));
+            try leb.writeULEB128(binary_writer, @as(u32, @intCast(name.len)));
             try binary_writer.writeAll(name);
             try leb.writeULEB128(binary_writer, @intFromEnum(exp.kind));
             try leb.writeULEB128(binary_writer, exp.index);
         }
 
         if (!import_memory) {
-            try leb.writeULEB128(binary_writer, @intCast(u32, "memory".len));
+            try leb.writeULEB128(binary_writer, @as(u32, @intCast("memory".len)));
             try binary_writer.writeAll("memory");
             try binary_writer.writeByte(std.wasm.externalKind(.memory));
             try leb.writeULEB128(binary_writer, @as(u32, 0));
@@ -3643,8 +3643,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .@"export",
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, wasm.exports.items.len) + @intFromBool(!import_memory),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(wasm.exports.items.len)) + @intFromBool(!import_memory),
         );
         section_count += 1;
     }
@@ -3665,7 +3665,7 @@ fn writeToFile(
         if (flags == 0x02) {
             try leb.writeULEB128(binary_writer, @as(u8, 0)); // represents funcref
         }
-        try leb.writeULEB128(binary_writer, @intCast(u32, wasm.function_table.count()));
+        try leb.writeULEB128(binary_writer, @as(u32, @intCast(wasm.function_table.count())));
         var symbol_it = wasm.function_table.keyIterator();
         while (symbol_it.next()) |symbol_loc_ptr| {
             try leb.writeULEB128(binary_writer, symbol_loc_ptr.*.getSymbol(wasm).index);
@@ -3675,7 +3675,7 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .element,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
             @as(u32, 1),
         );
         section_count += 1;
@@ -3689,8 +3689,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .data_count,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, data_segments_count),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(data_segments_count)),
         );
     }
 
@@ -3731,13 +3731,13 @@ fn writeToFile(
             try binary_writer.writeAll(sorted_atom.code.items);
         }
 
-        code_section_size = @intCast(u32, binary_bytes.items.len - header_offset - header_size);
+        code_section_size = @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size));
         try writeVecSectionHeader(
             binary_bytes.items,
             header_offset,
             .code,
             code_section_size,
-            @intCast(u32, wasm.functions.count()),
+            @as(u32, @intCast(wasm.functions.count())),
         );
         code_section_index = section_count;
         section_count += 1;
@@ -3765,7 +3765,7 @@ fn writeToFile(
             }
             // when a segment is passive, it's initialized during runtime.
             if (!segment.isPassive()) {
-                try emitInit(binary_writer, .{ .i32_const = @bitCast(i32, segment.offset) });
+                try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(segment.offset)) });
             }
             // offset into data section
             try leb.writeULEB128(binary_writer, segment.size);
@@ -3808,8 +3808,8 @@ fn writeToFile(
             binary_bytes.items,
             header_offset,
             .data,
-            @intCast(u32, binary_bytes.items.len - header_offset - header_size),
-            @intCast(u32, segment_count),
+            @as(u32, @intCast(binary_bytes.items.len - header_offset - header_size)),
+            @as(u32, @intCast(segment_count)),
         );
         data_section_index = section_count;
         section_count += 1;
@@ -3927,7 +3927,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: []
     if (data.len == 0) return;
     const header_offset = try reserveCustomSectionHeader(binary_bytes);
     const writer = binary_bytes.writer();
-    try leb.writeULEB128(writer, @intCast(u32, name.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
     try writer.writeAll(name);
 
     const start = binary_bytes.items.len - header_offset;
@@ -3937,7 +3937,7 @@ fn emitDebugSection(binary_bytes: *std.ArrayList(u8), data: []const u8, name: []
     try writeCustomSectionHeader(
         binary_bytes.items,
         header_offset,
-        @intCast(u32, binary_bytes.items.len - header_offset - 6),
+        @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
     );
 }
 
@@ -3946,7 +3946,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
 
     const writer = binary_bytes.writer();
     const producers = "producers";
-    try leb.writeULEB128(writer, @intCast(u32, producers.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(producers.len)));
     try writer.writeAll(producers);
 
     try leb.writeULEB128(writer, @as(u32, 2)); // 2 fields: Language + processed-by
@@ -3958,7 +3958,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
     // language field
     {
         const language = "language";
-        try leb.writeULEB128(writer, @intCast(u32, language.len));
+        try leb.writeULEB128(writer, @as(u32, @intCast(language.len)));
         try writer.writeAll(language);
 
         // field_value_count (TODO: Parse object files for producer sections to detect their language)
@@ -3969,7 +3969,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
             try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig"
             try writer.writeAll("Zig");
 
-            try leb.writeULEB128(writer, @intCast(u32, version.len));
+            try leb.writeULEB128(writer, @as(u32, @intCast(version.len)));
             try writer.writeAll(version);
         }
     }
@@ -3977,7 +3977,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
     // processed-by field
     {
         const processed_by = "processed-by";
-        try leb.writeULEB128(writer, @intCast(u32, processed_by.len));
+        try leb.writeULEB128(writer, @as(u32, @intCast(processed_by.len)));
         try writer.writeAll(processed_by);
 
         // field_value_count (TODO: Parse object files for producer sections to detect other used tools)
@@ -3988,7 +3988,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
             try leb.writeULEB128(writer, @as(u32, 3)); // len of "Zig"
             try writer.writeAll("Zig");
 
-            try leb.writeULEB128(writer, @intCast(u32, version.len));
+            try leb.writeULEB128(writer, @as(u32, @intCast(version.len)));
             try writer.writeAll(version);
         }
     }
@@ -3996,7 +3996,7 @@ fn emitProducerSection(binary_bytes: *std.ArrayList(u8)) !void {
     try writeCustomSectionHeader(
         binary_bytes.items,
         header_offset,
-        @intCast(u32, binary_bytes.items.len - header_offset - 6),
+        @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
     );
 }
 
@@ -4005,17 +4005,17 @@ fn emitBuildIdSection(binary_bytes: *std.ArrayList(u8), build_id: []const u8) !v
 
     const writer = binary_bytes.writer();
     const hdr_build_id = "build_id";
-    try leb.writeULEB128(writer, @intCast(u32, hdr_build_id.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(hdr_build_id.len)));
     try writer.writeAll(hdr_build_id);
 
     try leb.writeULEB128(writer, @as(u32, 1));
-    try leb.writeULEB128(writer, @intCast(u32, build_id.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(build_id.len)));
     try writer.writeAll(build_id);
 
     try writeCustomSectionHeader(
         binary_bytes.items,
         header_offset,
-        @intCast(u32, binary_bytes.items.len - header_offset - 6),
+        @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
     );
 }
 
@@ -4024,17 +4024,17 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
 
     const writer = binary_bytes.writer();
     const target_features = "target_features";
-    try leb.writeULEB128(writer, @intCast(u32, target_features.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(target_features.len)));
     try writer.writeAll(target_features);
 
     try leb.writeULEB128(writer, features_count);
     for (enabled_features, 0..) |enabled, feature_index| {
         if (enabled) {
-            const feature: types.Feature = .{ .prefix = .used, .tag = @enumFromInt(types.Feature.Tag, feature_index) };
+            const feature: types.Feature = .{ .prefix = .used, .tag = @as(types.Feature.Tag, @enumFromInt(feature_index)) };
             try leb.writeULEB128(writer, @intFromEnum(feature.prefix));
             var buf: [100]u8 = undefined;
             const string = try std.fmt.bufPrint(&buf, "{}", .{feature.tag});
-            try leb.writeULEB128(writer, @intCast(u32, string.len));
+            try leb.writeULEB128(writer, @as(u32, @intCast(string.len)));
             try writer.writeAll(string);
         }
     }
@@ -4042,7 +4042,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
     try writeCustomSectionHeader(
         binary_bytes.items,
         header_offset,
-        @intCast(u32, binary_bytes.items.len - header_offset - 6),
+        @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
     );
 }
 
@@ -4092,7 +4092,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
 
     const header_offset = try reserveCustomSectionHeader(binary_bytes);
     const writer = binary_bytes.writer();
-    try leb.writeULEB128(writer, @intCast(u32, "name".len));
+    try leb.writeULEB128(writer, @as(u32, @intCast("name".len)));
     try writer.writeAll("name");
 
     try wasm.emitNameSubsection(.function, funcs.values(), writer);
@@ -4102,7 +4102,7 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem
     try writeCustomSectionHeader(
         binary_bytes.items,
         header_offset,
-        @intCast(u32, binary_bytes.items.len - header_offset - 6),
+        @as(u32, @intCast(binary_bytes.items.len - header_offset - 6)),
     );
 }
 
@@ -4112,17 +4112,17 @@ fn emitNameSubsection(wasm: *Wasm, section_id: std.wasm.NameSubsection, names: a
     defer section_list.deinit();
     const sub_writer = section_list.writer();
 
-    try leb.writeULEB128(sub_writer, @intCast(u32, names.len));
+    try leb.writeULEB128(sub_writer, @as(u32, @intCast(names.len)));
     for (names) |name| {
         log.debug("Emit symbol '{s}' type({s})", .{ name.name, @tagName(section_id) });
         try leb.writeULEB128(sub_writer, name.index);
-        try leb.writeULEB128(sub_writer, @intCast(u32, name.name.len));
+        try leb.writeULEB128(sub_writer, @as(u32, @intCast(name.name.len)));
         try sub_writer.writeAll(name.name);
     }
 
     // From now on, write to the actual writer
     try leb.writeULEB128(writer, @intFromEnum(section_id));
-    try leb.writeULEB128(writer, @intCast(u32, section_list.items.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(section_list.items.len)));
     try writer.writeAll(section_list.items);
 }
 
@@ -4146,11 +4146,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
         },
         .f32_const => |val| {
             try writer.writeByte(std.wasm.opcode(.f32_const));
-            try writer.writeIntLittle(u32, @bitCast(u32, val));
+            try writer.writeIntLittle(u32, @as(u32, @bitCast(val)));
         },
         .f64_const => |val| {
             try writer.writeByte(std.wasm.opcode(.f64_const));
-            try writer.writeIntLittle(u64, @bitCast(u64, val));
+            try writer.writeIntLittle(u64, @as(u64, @bitCast(val)));
         },
         .global_get => |val| {
             try writer.writeByte(std.wasm.opcode(.global_get));
@@ -4162,11 +4162,11 @@ fn emitInit(writer: anytype, init_expr: std.wasm.InitExpression) !void {
 
 fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void {
     const module_name = wasm.string_table.get(import.module_name);
-    try leb.writeULEB128(writer, @intCast(u32, module_name.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(module_name.len)));
     try writer.writeAll(module_name);
 
     const name = wasm.string_table.get(import.name);
-    try leb.writeULEB128(writer, @intCast(u32, name.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
     try writer.writeAll(name);
 
     try writer.writeByte(@intFromEnum(import.kind));
@@ -4594,7 +4594,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
 fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
     // section id + fixed leb contents size + fixed leb vector length
     const header_size = 1 + 5 + 5;
-    const offset = @intCast(u32, bytes.items.len);
+    const offset = @as(u32, @intCast(bytes.items.len));
     try bytes.appendSlice(&[_]u8{0} ** header_size);
     return offset;
 }
@@ -4602,7 +4602,7 @@ fn reserveVecSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
 fn reserveCustomSectionHeader(bytes: *std.ArrayList(u8)) !u32 {
     // unlike a regular section, we don't emit the count
     const header_size = 1 + 5;
-    const offset = @intCast(u32, bytes.items.len);
+    const offset = @as(u32, @intCast(bytes.items.len));
     try bytes.appendSlice(&[_]u8{0} ** header_size);
     return offset;
 }
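
Both reserve helpers exist because a section's byte size is only known after its payload has been written: a fixed-width five-byte LEB128 slot is reserved up front and patched in place afterwards, so offsets recorded in the meantime stay valid. A sketch of that pattern under the same assumption (`bytes` and `payload` are illustrative names; the real code patches through the write*SectionHeader helpers rather than a raw @memcpy):

    var size_buf: [5]u8 = undefined;
    const header_offset = bytes.items.len;
    try bytes.appendSlice(&[_]u8{0} ** 5);  // placeholder: fixed-width size slot
    try bytes.writer().writeAll(payload);   // section body, length unknown up front
    leb.writeUnsignedFixed(5, &size_buf, @as(u32, @intCast(bytes.items.len - header_offset - 5)));
    @memcpy(bytes.items[header_offset..][0..5], &size_buf);
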
@@ -4638,7 +4638,7 @@ fn emitLinkSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
     try wasm.emitSymbolTable(binary_bytes, symbol_table);
     try wasm.emitSegmentInfo(binary_bytes);
 
-    const size = @intCast(u32, binary_bytes.items.len - offset - 6);
+    const size = @as(u32, @intCast(binary_bytes.items.len - offset - 6));
     try writeCustomSectionHeader(binary_bytes.items, offset, size);
 }
 
@@ -4661,7 +4661,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
         const sym_name = if (wasm.export_names.get(sym_loc)) |exp_name| wasm.string_table.get(exp_name) else sym_loc.getName(wasm);
         switch (symbol.tag) {
             .data => {
-                try leb.writeULEB128(writer, @intCast(u32, sym_name.len));
+                try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len)));
                 try writer.writeAll(sym_name);
 
                 if (symbol.isDefined()) {
@@ -4678,7 +4678,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
             else => {
                 try leb.writeULEB128(writer, symbol.index);
                 if (symbol.isDefined()) {
-                    try leb.writeULEB128(writer, @intCast(u32, sym_name.len));
+                    try leb.writeULEB128(writer, @as(u32, @intCast(sym_name.len)));
                     try writer.writeAll(sym_name);
                 }
             },
@@ -4686,7 +4686,7 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
     }
 
     var buf: [10]u8 = undefined;
-    leb.writeUnsignedFixed(5, buf[0..5], @intCast(u32, binary_bytes.items.len - table_offset + 5));
+    leb.writeUnsignedFixed(5, buf[0..5], @as(u32, @intCast(binary_bytes.items.len - table_offset + 5)));
     leb.writeUnsignedFixed(5, buf[5..], symbol_count);
     try binary_bytes.insertSlice(table_offset, &buf);
 }
@@ -4696,28 +4696,28 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void {
     try leb.writeULEB128(writer, @intFromEnum(types.SubsectionType.WASM_SEGMENT_INFO));
     const segment_offset = binary_bytes.items.len;
 
-    try leb.writeULEB128(writer, @intCast(u32, wasm.segment_info.count()));
+    try leb.writeULEB128(writer, @as(u32, @intCast(wasm.segment_info.count())));
     for (wasm.segment_info.values()) |segment_info| {
         log.debug("Emit segment: {s} align({d}) flags({b})", .{
             segment_info.name,
             @ctz(segment_info.alignment),
             segment_info.flags,
         });
-        try leb.writeULEB128(writer, @intCast(u32, segment_info.name.len));
+        try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len)));
         try writer.writeAll(segment_info.name);
         try leb.writeULEB128(writer, @ctz(segment_info.alignment));
         try leb.writeULEB128(writer, segment_info.flags);
     }
 
     var buf: [5]u8 = undefined;
-    leb.writeUnsignedFixed(5, &buf, @intCast(u32, binary_bytes.items.len - segment_offset));
+    leb.writeUnsignedFixed(5, &buf, @as(u32, @intCast(binary_bytes.items.len - segment_offset)));
     try binary_bytes.insertSlice(segment_offset, &buf);
 }
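
getULEB128Size below counts 7-bit groups, since ULEB128 carries seven payload bits per byte. A hypothetical in-file test of the expected result:

    test "getULEB128Size counts 7-bit groups" {
        // 300 = 0b1_0010_1100: nine significant bits, so two 7-bit groups.
        try std.testing.expectEqual(@as(u32, 2), getULEB128Size(@as(u32, 300)));
    }
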
 
 pub fn getULEB128Size(uint_value: anytype) u32 {
     const T = @TypeOf(uint_value);
     const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
-    var value = @intCast(U, uint_value);
+    var value = @as(U, @intCast(uint_value));
 
     var size: u32 = 0;
     while (value != 0) : (size += 1) {
@@ -4739,7 +4739,7 @@ fn emitCodeRelocations(
 
     // write custom section information
     const name = "reloc.CODE";
-    try leb.writeULEB128(writer, @intCast(u32, name.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
     try writer.writeAll(name);
     try leb.writeULEB128(writer, section_index);
     const reloc_start = binary_bytes.items.len;
@@ -4769,7 +4769,7 @@ fn emitCodeRelocations(
     var buf: [5]u8 = undefined;
     leb.writeUnsignedFixed(5, &buf, count);
     try binary_bytes.insertSlice(reloc_start, &buf);
-    const size = @intCast(u32, binary_bytes.items.len - header_offset - 6);
+    const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6));
     try writeCustomSectionHeader(binary_bytes.items, header_offset, size);
 }
 
@@ -4785,7 +4785,7 @@ fn emitDataRelocations(
 
     // write custom section information
     const name = "reloc.DATA";
-    try leb.writeULEB128(writer, @intCast(u32, name.len));
+    try leb.writeULEB128(writer, @as(u32, @intCast(name.len)));
     try writer.writeAll(name);
     try leb.writeULEB128(writer, section_index);
     const reloc_start = binary_bytes.items.len;
@@ -4821,7 +4821,7 @@ fn emitDataRelocations(
     var buf: [5]u8 = undefined;
     leb.writeUnsignedFixed(5, &buf, count);
     try binary_bytes.insertSlice(reloc_start, &buf);
-    const size = @intCast(u32, binary_bytes.items.len - header_offset - 6);
+    const size = @as(u32, @intCast(binary_bytes.items.len - header_offset - 6));
     try writeCustomSectionHeader(binary_bytes.items, header_offset, size);
 }
 
@@ -4852,7 +4852,7 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 {
     }
 
     // functype does not exist.
-    const index = @intCast(u32, wasm.func_types.items.len);
+    const index = @as(u32, @intCast(wasm.func_types.items.len));
     const params = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.params);
     errdefer wasm.base.allocator.free(params);
     const returns = try wasm.base.allocator.dupe(std.wasm.Valtype, func_type.returns);
src/Liveness/Verify.zig
@@ -325,8 +325,8 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             .aggregate_init => {
                 const ty_pl = data[inst].ty_pl;
                 const aggregate_ty = self.air.getRefType(ty_pl.ty);
-                const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
-                const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+                const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
+                const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
 
                 var bt = self.liveness.iterateBigTomb(inst);
                 for (elements) |element| {
@@ -337,9 +337,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             .call, .call_always_tail, .call_never_tail, .call_never_inline => {
                 const pl_op = data[inst].pl_op;
                 const extra = self.air.extraData(Air.Call, pl_op.payload);
-                const args = @ptrCast(
+                const args = @as(
                     []const Air.Inst.Ref,
-                    self.air.extra[extra.end..][0..extra.data.args_len],
+                    @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]),
                 );
 
                 var bt = self.liveness.iterateBigTomb(inst);
@@ -353,14 +353,14 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                 const ty_pl = data[inst].ty_pl;
                 const extra = self.air.extraData(Air.Asm, ty_pl.payload);
                 var extra_i = extra.end;
-                const outputs = @ptrCast(
+                const outputs = @as(
                     []const Air.Inst.Ref,
-                    self.air.extra[extra_i..][0..extra.data.outputs_len],
+                    @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]),
                 );
                 extra_i += outputs.len;
-                const inputs = @ptrCast(
+                const inputs = @as(
                     []const Air.Inst.Ref,
-                    self.air.extra[extra_i..][0..extra.data.inputs_len],
+                    @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]),
                 );
                 extra_i += inputs.len;
 
@@ -521,9 +521,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 
                 while (case_i < switch_br.data.cases_len) : (case_i += 1) {
                     const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-                    const items = @ptrCast(
+                    const items = @as(
                         []const Air.Inst.Ref,
-                        self.air.extra[case.end..][0..case.data.items_len],
+                        @ptrCast(self.air.extra[case.end..][0..case.data.items_len]),
                     );
                     const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
                     extra_index = case.end + items.len + case_body.len;
@@ -576,7 +576,7 @@ fn verifyInstOperands(
     operands: [Liveness.bpi - 1]Air.Inst.Ref,
 ) Error!void {
     for (operands, 0..) |operand, operand_index| {
-        const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index));
+        const dies = self.liveness.operandDies(inst, @as(Liveness.OperandInt, @intCast(operand_index)));
         try self.verifyOperand(inst, operand, dies);
     }
     try self.verifyInst(inst);
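
Every hunk in this file follows the same mechanical pattern: the cast builtin loses its result-type parameter, and the type is supplied from context instead, either by a wrapping @as or, in hand-written code, by the destination's declared type. Both spellings, reusing the names from the hunk above:

    const len_fmt = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip))); // mechanical rewrite form
    const len_decl: usize = @intCast(aggregate_ty.arrayLenIp(ip));     // type from the declaration
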
src/translate_c/ast.zig
@@ -393,7 +393,7 @@ pub const Node = extern union {
 
     pub fn tag(self: Node) Tag {
         if (self.tag_if_small_enough < Tag.no_payload_count) {
-            return @enumFromInt(Tag, @intCast(std.meta.Tag(Tag), self.tag_if_small_enough));
+            return @as(Tag, @enumFromInt(@as(std.meta.Tag(Tag), @intCast(self.tag_if_small_enough))));
         } else {
             return self.ptr_otherwise.tag;
         }
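
tag() relies on Node being a pointer-sized value that does double duty; the layout sketched here is inferred from this function alone:

    // tag_if_small_enough < Tag.no_payload_count => the word itself is a
    //   payload-free Tag, recovered via the enumFromInt chain above;
    // otherwise => the word is a valid pointer, and the tag lives at
    //   ptr_otherwise.tag.
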
@@ -778,7 +778,7 @@ pub fn render(gpa: Allocator, nodes: []const Node) !std.zig.Ast {
 
     try ctx.tokens.append(gpa, .{
         .tag = .eof,
-        .start = @intCast(u32, ctx.buf.items.len),
+        .start = @as(u32, @intCast(ctx.buf.items.len)),
     });
 
     return std.zig.Ast{
@@ -808,10 +808,10 @@ const Context = struct {
 
         try c.tokens.append(c.gpa, .{
             .tag = tag,
-            .start = @intCast(u32, start_index),
+            .start = @as(u32, @intCast(start_index)),
         });
 
-        return @intCast(u32, c.tokens.len - 1);
+        return @as(u32, @intCast(c.tokens.len - 1));
     }
 
     fn addToken(c: *Context, tag: TokenTag, bytes: []const u8) Allocator.Error!TokenIndex {
@@ -827,13 +827,13 @@ const Context = struct {
     fn listToSpan(c: *Context, list: []const NodeIndex) Allocator.Error!NodeSubRange {
         try c.extra_data.appendSlice(c.gpa, list);
         return NodeSubRange{
-            .start = @intCast(NodeIndex, c.extra_data.items.len - list.len),
-            .end = @intCast(NodeIndex, c.extra_data.items.len),
+            .start = @as(NodeIndex, @intCast(c.extra_data.items.len - list.len)),
+            .end = @as(NodeIndex, @intCast(c.extra_data.items.len)),
         };
     }
 
     fn addNode(c: *Context, elem: std.zig.Ast.Node) Allocator.Error!NodeIndex {
-        const result = @intCast(NodeIndex, c.nodes.len);
+        const result = @as(NodeIndex, @intCast(c.nodes.len));
         try c.nodes.append(c.gpa, elem);
         return result;
     }
@@ -841,7 +841,7 @@ const Context = struct {
     fn addExtra(c: *Context, extra: anytype) Allocator.Error!NodeIndex {
         const fields = std.meta.fields(@TypeOf(extra));
         try c.extra_data.ensureUnusedCapacity(c.gpa, fields.len);
-        const result = @intCast(u32, c.extra_data.items.len);
+        const result = @as(u32, @intCast(c.extra_data.items.len));
         inline for (fields) |field| {
             comptime std.debug.assert(field.type == NodeIndex);
             c.extra_data.appendAssumeCapacity(@field(extra, field.name));
src/Air.zig
@@ -1106,7 +1106,7 @@ pub const VectorCmp = struct {
     op: u32,
 
     pub fn compareOperator(self: VectorCmp) std.math.CompareOperator {
-        return @enumFromInt(std.math.CompareOperator, @truncate(u3, self.op));
+        return @as(std.math.CompareOperator, @enumFromInt(@as(u3, @truncate(self.op))));
     }
 
     pub fn encodeOp(compare_operator: std.math.CompareOperator) u32 {
@@ -1151,11 +1151,11 @@ pub const Cmpxchg = struct {
     flags: u32,
 
     pub fn successOrder(self: Cmpxchg) std.builtin.AtomicOrder {
-        return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags));
+        return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags))));
     }
 
     pub fn failureOrder(self: Cmpxchg) std.builtin.AtomicOrder {
-        return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags >> 3));
+        return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags >> 3))));
     }
 };
 
@@ -1166,11 +1166,11 @@ pub const AtomicRmw = struct {
     flags: u32,
 
     pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder {
-        return @enumFromInt(std.builtin.AtomicOrder, @truncate(u3, self.flags));
+        return @as(std.builtin.AtomicOrder, @enumFromInt(@as(u3, @truncate(self.flags))));
     }
 
     pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp {
-        return @enumFromInt(std.builtin.AtomicRmwOp, @truncate(u4, self.flags >> 3));
+        return @as(std.builtin.AtomicRmwOp, @enumFromInt(@as(u4, @truncate(self.flags >> 3))));
     }
 };
 
@@ -1451,7 +1451,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
 pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type {
     const ref_int = @intFromEnum(ref);
     if (ref_int < ref_start_index) {
-        const ip_index = @enumFromInt(InternPool.Index, ref_int);
+        const ip_index = @as(InternPool.Index, @enumFromInt(ref_int));
         return ip_index.toType();
     }
     const inst_index = ref_int - ref_start_index;
@@ -1472,9 +1472,9 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => air.extra[i],
-            Inst.Ref => @enumFromInt(Inst.Ref, air.extra[i]),
-            i32 => @bitCast(i32, air.extra[i]),
-            InternPool.Index => @enumFromInt(InternPool.Index, air.extra[i]),
+            Inst.Ref => @as(Inst.Ref, @enumFromInt(air.extra[i])),
+            i32 => @as(i32, @bitCast(air.extra[i])),
+            InternPool.Index => @as(InternPool.Index, @enumFromInt(air.extra[i])),
             else => @compileError("bad field type: " ++ @typeName(field.type)),
         };
         i += 1;
@@ -1494,7 +1494,7 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
 pub const ref_start_index: u32 = InternPool.static_len;
 
 pub fn indexToRef(inst: Inst.Index) Inst.Ref {
-    return @enumFromInt(Inst.Ref, ref_start_index + inst);
+    return @as(Inst.Ref, @enumFromInt(ref_start_index + inst));
 }
 
 pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
@@ -1516,10 +1516,10 @@ pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index {
 pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
     const ref_int = @intFromEnum(inst);
     if (ref_int < ref_start_index) {
-        const ip_index = @enumFromInt(InternPool.Index, ref_int);
+        const ip_index = @as(InternPool.Index, @enumFromInt(ref_int));
         return ip_index.toValue();
     }
-    const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index);
+    const inst_index = @as(Air.Inst.Index, @intCast(ref_int - ref_start_index));
     const air_datas = air.instructions.items(.data);
     switch (air.instructions.items(.tag)[inst_index]) {
         .interned => return air_datas[inst_index].interned.toValue(),
@@ -1747,7 +1747,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .work_group_id,
         => false,
 
-        .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0,
+        .assembly => @as(u1, @truncate(air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31)) != 0,
         .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip),
         .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip),
         .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
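
The helpers in this file share one Ref encoding: values below ref_start_index name InternPool constants, and everything at or above it maps one-to-one onto instruction indices. The round trip implied by indexToRef and refToIndex, with `inst` standing for any valid instruction index:

    const ref = indexToRef(inst);                      // inst + ref_start_index, as an enum
    std.debug.assert(refToIndex(ref).? == inst);       // and back again
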
src/AstGen.zig
@@ -70,7 +70,7 @@ fn addExtra(astgen: *AstGen, extra: anytype) Allocator.Error!u32 {
 
 fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, astgen.extra.items.len);
+    const result = @as(u32, @intCast(astgen.extra.items.len));
     astgen.extra.items.len += fields.len;
     setExtra(astgen, result, extra);
     return result;
@@ -83,11 +83,11 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
         astgen.extra.items[i] = switch (field.type) {
             u32 => @field(extra, field.name),
             Zir.Inst.Ref => @intFromEnum(@field(extra, field.name)),
-            i32 => @bitCast(u32, @field(extra, field.name)),
-            Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)),
-            Zir.Inst.BuiltinCall.Flags => @bitCast(u32, @field(extra, field.name)),
-            Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)),
-            Zir.Inst.FuncFancy.Bits => @bitCast(u32, @field(extra, field.name)),
+            i32 => @as(u32, @bitCast(@field(extra, field.name))),
+            Zir.Inst.Call.Flags => @as(u32, @bitCast(@field(extra, field.name))),
+            Zir.Inst.BuiltinCall.Flags => @as(u32, @bitCast(@field(extra, field.name))),
+            Zir.Inst.SwitchBlock.Bits => @as(u32, @bitCast(@field(extra, field.name))),
+            Zir.Inst.FuncFancy.Bits => @as(u32, @bitCast(@field(extra, field.name))),
             else => @compileError("bad field type"),
         };
         i += 1;
@@ -95,18 +95,18 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
 }
 
 fn reserveExtra(astgen: *AstGen, size: usize) Allocator.Error!u32 {
-    const result = @intCast(u32, astgen.extra.items.len);
+    const result = @as(u32, @intCast(astgen.extra.items.len));
     try astgen.extra.resize(astgen.gpa, result + size);
     return result;
 }
 
 fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void {
-    const coerced = @ptrCast([]const u32, refs);
+    const coerced = @as([]const u32, @ptrCast(refs));
     return astgen.extra.appendSlice(astgen.gpa, coerced);
 }
 
 fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void {
-    const coerced = @ptrCast([]const u32, refs);
+    const coerced = @as([]const u32, @ptrCast(refs));
     astgen.extra.appendSliceAssumeCapacity(coerced);
 }
 
@@ -176,7 +176,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
             @typeInfo(Zir.Inst.CompileErrors.Item).Struct.fields.len);
 
         astgen.extra.items[err_index] = astgen.addExtraAssumeCapacity(Zir.Inst.CompileErrors{
-            .items_len = @intCast(u32, astgen.compile_errors.items.len),
+            .items_len = @as(u32, @intCast(astgen.compile_errors.items.len)),
         });
 
         for (astgen.compile_errors.items) |item| {
@@ -192,7 +192,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
             astgen.imports.count() * @typeInfo(Zir.Inst.Imports.Item).Struct.fields.len);
 
         astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{
-            .imports_len = @intCast(u32, astgen.imports.count()),
+            .imports_len = @as(u32, @intCast(astgen.imports.count())),
         });
 
         var it = astgen.imports.iterator();
@@ -1334,7 +1334,7 @@ fn fnProtoExpr(
                 var param_gz = block_scope.makeSubBlock(scope);
                 defer param_gz.unstack();
                 const param_type = try expr(&param_gz, scope, coerced_type_ri, param_type_node);
-                const param_inst_expected = @intCast(u32, astgen.instructions.len + 1);
+                const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1));
                 _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node);
                 const main_tokens = tree.nodes.items(.main_token);
                 const name_token = param.name_token orelse main_tokens[param_type_node];
@@ -1468,7 +1468,7 @@ fn arrayInitExpr(
         const array_type_inst = try typeExpr(gz, scope, array_init.ast.type_expr);
         _ = try gz.addPlNode(.validate_array_init_ty, node, Zir.Inst.ArrayInit{
             .ty = array_type_inst,
-            .init_count = @intCast(u32, array_init.ast.elements.len),
+            .init_count = @as(u32, @intCast(array_init.ast.elements.len)),
         });
         break :inst .{
             .array = array_type_inst,
@@ -1533,7 +1533,7 @@ fn arrayInitExprRlNone(
     const astgen = gz.astgen;
 
     const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{
-        .operands_len = @intCast(u32, elements.len),
+        .operands_len = @as(u32, @intCast(elements.len)),
     });
     var extra_index = try reserveExtra(astgen, elements.len);
 
@@ -1558,7 +1558,7 @@ fn arrayInitExprInner(
 
     const len = elements.len + @intFromBool(array_ty_inst != .none);
     const payload_index = try addExtra(astgen, Zir.Inst.MultiOp{
-        .operands_len = @intCast(u32, len),
+        .operands_len = @as(u32, @intCast(len)),
     });
     var extra_index = try reserveExtra(astgen, len);
     if (array_ty_inst != .none) {
@@ -1574,7 +1574,7 @@ fn arrayInitExprInner(
                 .tag = .elem_type_index,
                 .data = .{ .bin = .{
                     .lhs = array_ty_inst,
-                    .rhs = @enumFromInt(Zir.Inst.Ref, i),
+                    .rhs = @as(Zir.Inst.Ref, @enumFromInt(i)),
                 } },
             });
             break :ri ResultInfo{ .rl = .{ .coerced_ty = ty_expr } };
@@ -1619,14 +1619,14 @@ fn arrayInitExprRlPtrInner(
     const astgen = gz.astgen;
 
     const payload_index = try addExtra(astgen, Zir.Inst.Block{
-        .body_len = @intCast(u32, elements.len),
+        .body_len = @as(u32, @intCast(elements.len)),
     });
     var extra_index = try reserveExtra(astgen, elements.len);
 
     for (elements, 0..) |elem_init, i| {
         const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{
             .ptr = result_ptr,
-            .index = @intCast(u32, i),
+            .index = @as(u32, @intCast(i)),
         });
         astgen.extra.items[extra_index] = refToIndex(elem_ptr).?;
         extra_index += 1;
@@ -1776,7 +1776,7 @@ fn structInitExprRlNone(
     const tree = astgen.tree;
 
     const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{
-        .fields_len = @intCast(u32, struct_init.ast.fields.len),
+        .fields_len = @as(u32, @intCast(struct_init.ast.fields.len)),
     });
     const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).Struct.fields.len;
     var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size);
@@ -1834,7 +1834,7 @@ fn structInitExprRlPtrInner(
     const tree = astgen.tree;
 
     const payload_index = try addExtra(astgen, Zir.Inst.Block{
-        .body_len = @intCast(u32, struct_init.ast.fields.len),
+        .body_len = @as(u32, @intCast(struct_init.ast.fields.len)),
     });
     var extra_index = try reserveExtra(astgen, struct_init.ast.fields.len);
 
@@ -1866,7 +1866,7 @@ fn structInitExprRlTy(
     const tree = astgen.tree;
 
     const payload_index = try addExtra(astgen, Zir.Inst.StructInit{
-        .fields_len = @intCast(u32, struct_init.ast.fields.len),
+        .fields_len = @as(u32, @intCast(struct_init.ast.fields.len)),
     });
     const field_size = @typeInfo(Zir.Inst.StructInit.Item).Struct.fields.len;
     var extra_index: usize = try reserveExtra(astgen, struct_init.ast.fields.len * field_size);
@@ -2105,7 +2105,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
                 }
 
                 const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node);
-                const search_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+                const search_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
 
                 try genDefers(parent_gz, scope, parent_scope, .normal_only);
 
@@ -2511,17 +2511,17 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
             .call, .field_call => {
                 const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index;
                 const slot = &gz.astgen.extra.items[extra_index];
-                var flags = @bitCast(Zir.Inst.Call.Flags, slot.*);
+                var flags = @as(Zir.Inst.Call.Flags, @bitCast(slot.*));
                 flags.ensure_result_used = true;
-                slot.* = @bitCast(u32, flags);
+                slot.* = @as(u32, @bitCast(flags));
                 break :b true;
             },
             .builtin_call => {
                 const extra_index = gz.astgen.instructions.items(.data)[inst].pl_node.payload_index;
                 const slot = &gz.astgen.extra.items[extra_index];
-                var flags = @bitCast(Zir.Inst.BuiltinCall.Flags, slot.*);
+                var flags = @as(Zir.Inst.BuiltinCall.Flags, @bitCast(slot.*));
                 flags.ensure_result_used = true;
-                slot.* = @bitCast(u32, flags);
+                slot.* = @as(u32, @bitCast(flags));
                 break :b true;
             },
 
@@ -2897,7 +2897,7 @@ fn genDefers(
                                 .index = defer_scope.index,
                                 .len = defer_scope.len,
                             });
-                            const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+                            const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
                             gz.astgen.instructions.appendAssumeCapacity(.{
                                 .tag = .defer_err_code,
                                 .data = .{ .defer_err_code = .{
@@ -2976,7 +2976,7 @@ fn deferStmt(
     const sub_scope = if (!have_err_code) &defer_gen.base else blk: {
         try gz.addDbgBlockBegin();
         const ident_name = try gz.astgen.identAsString(payload_token);
-        remapped_err_code = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        remapped_err_code = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         try gz.astgen.instructions.append(gz.astgen.gpa, .{
             .tag = .extended,
             .data = .{ .extended = .{
@@ -3016,7 +3016,7 @@ fn deferStmt(
         break :blk gz.astgen.countBodyLenAfterFixups(body) + refs;
     };
 
-    const index = @intCast(u32, gz.astgen.extra.items.len);
+    const index = @as(u32, @intCast(gz.astgen.extra.items.len));
     try gz.astgen.extra.ensureUnusedCapacity(gz.astgen.gpa, body_len);
     if (have_err_code) {
         if (gz.astgen.ref_table.fetchRemove(remapped_err_code)) |kv| {
@@ -3554,7 +3554,7 @@ fn ptrType(
         gz.astgen.extra.appendAssumeCapacity(@intFromEnum(bit_end_ref));
     }
 
-    const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+    const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
     const result = indexToRef(new_index);
     gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{
         .ptr_type = .{
@@ -3645,7 +3645,7 @@ const WipMembers = struct {
     const max_decl_size = 11;
 
     fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
-        const payload_top = @intCast(u32, payload.items.len);
+        const payload_top = @as(u32, @intCast(payload.items.len));
         const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32;
         const field_bits_start = decls_start + decl_count * max_decl_size;
         const fields_start = field_bits_start + if (bits_per_field > 0) blk: {
@@ -3700,7 +3700,7 @@ const WipMembers = struct {
     fn appendToDeclSlice(self: *Self, data: []const u32) void {
         assert(self.decls_end + data.len <= self.field_bits_start);
         @memcpy(self.payload.items[self.decls_end..][0..data.len], data);
-        self.decls_end += @intCast(u32, data.len);
+        self.decls_end += @as(u32, @intCast(data.len));
     }
 
     fn appendToField(self: *Self, data: u32) void {
@@ -3713,14 +3713,14 @@ const WipMembers = struct {
         const empty_decl_slots = decls_per_u32 - (self.decl_index % decls_per_u32);
         if (self.decl_index > 0 and empty_decl_slots < decls_per_u32) {
             const index = self.payload_top + self.decl_index / decls_per_u32;
-            self.payload.items[index] >>= @intCast(u5, empty_decl_slots * bits_per_decl);
+            self.payload.items[index] >>= @as(u5, @intCast(empty_decl_slots * bits_per_decl));
         }
         if (bits_per_field > 0) {
             const fields_per_u32 = 32 / bits_per_field;
             const empty_field_slots = fields_per_u32 - (self.field_index % fields_per_u32);
             if (self.field_index > 0 and empty_field_slots < fields_per_u32) {
                 const index = self.field_bits_start + self.field_index / fields_per_u32;
-                self.payload.items[index] >>= @intCast(u5, empty_field_slots * bits_per_field);
+                self.payload.items[index] >>= @as(u5, @intCast(empty_field_slots * bits_per_field));
             }
         }
     }
@@ -3882,7 +3882,7 @@ fn fnDecl(
                 var param_gz = decl_gz.makeSubBlock(scope);
                 defer param_gz.unstack();
                 const param_type = try expr(&param_gz, params_scope, coerced_type_ri, param_type_node);
-                const param_inst_expected = @intCast(u32, astgen.instructions.len + 1);
+                const param_inst_expected = @as(u32, @intCast(astgen.instructions.len + 1));
                 _ = try param_gz.addBreakWithSrcNode(.break_inline, param_inst_expected, param_type, param_type_node);
 
                 const main_tokens = tree.nodes.items(.main_token);
@@ -4097,7 +4097,7 @@ fn fnDecl(
 
     {
         const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node));
-        const casted = @bitCast([4]u32, contents_hash);
+        const casted = @as([4]u32, @bitCast(contents_hash));
         wip_members.appendToDeclSlice(&casted);
     }
     {
@@ -4248,7 +4248,7 @@ fn globalVarDecl(
 
     {
         const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
-        const casted = @bitCast([4]u32, contents_hash);
+        const casted = @as([4]u32, @bitCast(contents_hash));
         wip_members.appendToDeclSlice(&casted);
     }
     {
@@ -4303,7 +4303,7 @@ fn comptimeDecl(
 
     {
         const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
-        const casted = @bitCast([4]u32, contents_hash);
+        const casted = @as([4]u32, @bitCast(contents_hash));
         wip_members.appendToDeclSlice(&casted);
     }
     {
@@ -4355,7 +4355,7 @@ fn usingnamespaceDecl(
 
     {
         const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
-        const casted = @bitCast([4]u32, contents_hash);
+        const casted = @as([4]u32, @bitCast(contents_hash));
         wip_members.appendToDeclSlice(&casted);
     }
     {
@@ -4542,7 +4542,7 @@ fn testDecl(
 
     {
         const contents_hash = std.zig.hashSrc(tree.getNodeSource(node));
-        const casted = @bitCast([4]u32, contents_hash);
+        const casted = @as([4]u32, @bitCast(contents_hash));
         wip_members.appendToDeclSlice(&casted);
     }
     {
@@ -4642,7 +4642,7 @@ fn structDeclInner(
     };
 
     const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members);
-    const field_count = @intCast(u32, container_decl.ast.members.len - decl_count);
+    const field_count = @as(u32, @intCast(container_decl.ast.members.len - decl_count));
 
     const bits_per_field = 4;
     const max_field_size = 5;
@@ -4750,7 +4750,7 @@ fn structDeclInner(
             const old_scratch_len = astgen.scratch.items.len;
             try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
             appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
-            wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len));
+            wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len)));
             block_scope.instructions.items.len = block_scope.instructions_top;
         } else {
             wip_members.appendToField(@intFromEnum(field_type));
@@ -4768,7 +4768,7 @@ fn structDeclInner(
             const old_scratch_len = astgen.scratch.items.len;
             try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
             appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
-            wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len));
+            wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len)));
             block_scope.instructions.items.len = block_scope.instructions_top;
         }
 
@@ -4783,7 +4783,7 @@ fn structDeclInner(
             const old_scratch_len = astgen.scratch.items.len;
             try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
             appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
-            wip_members.appendToField(@intCast(u32, astgen.scratch.items.len - old_scratch_len));
+            wip_members.appendToField(@as(u32, @intCast(astgen.scratch.items.len - old_scratch_len)));
             block_scope.instructions.items.len = block_scope.instructions_top;
         } else if (member.comptime_token) |comptime_token| {
             return astgen.failTok(comptime_token, "comptime field without default initialization value", .{});
@@ -4796,7 +4796,7 @@ fn structDeclInner(
         .fields_len = field_count,
         .decls_len = decl_count,
         .backing_int_ref = backing_int_ref,
-        .backing_int_body_len = @intCast(u32, backing_int_body_len),
+        .backing_int_body_len = @as(u32, @intCast(backing_int_body_len)),
         .known_non_opv = known_non_opv,
         .known_comptime_only = known_comptime_only,
         .is_tuple = is_tuple,
@@ -4856,7 +4856,7 @@ fn unionDeclInner(
     defer block_scope.unstack();
 
     const decl_count = try astgen.scanDecls(&namespace, members);
-    const field_count = @intCast(u32, members.len - decl_count);
+    const field_count = @as(u32, @intCast(members.len - decl_count));
 
     if (layout != .Auto and (auto_enum_tok != null or arg_node != 0)) {
         const layout_str = if (layout == .Extern) "extern" else "packed";
@@ -5151,7 +5151,7 @@ fn containerDecl(
 
             const bits_per_field = 1;
             const max_field_size = 3;
-            var wip_members = try WipMembers.init(gpa, &astgen.scratch, @intCast(u32, counts.decls), @intCast(u32, counts.total_fields), bits_per_field, max_field_size);
+            var wip_members = try WipMembers.init(gpa, &astgen.scratch, @as(u32, @intCast(counts.decls)), @as(u32, @intCast(counts.total_fields)), bits_per_field, max_field_size);
             defer wip_members.deinit();
 
             for (container_decl.ast.members) |member_node| {
@@ -5209,8 +5209,8 @@ fn containerDecl(
                 .nonexhaustive = nonexhaustive,
                 .tag_type = arg_inst,
                 .body_len = body_len,
-                .fields_len = @intCast(u32, counts.total_fields),
-                .decls_len = @intCast(u32, counts.decls),
+                .fields_len = @as(u32, @intCast(counts.total_fields)),
+                .decls_len = @as(u32, @intCast(counts.decls)),
             });
 
             wip_members.finishBits(bits_per_field);
@@ -5400,7 +5400,7 @@ fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zi
     }
 
     setExtra(astgen, payload_index, Zir.Inst.ErrorSetDecl{
-        .fields_len = @intCast(u32, fields_len),
+        .fields_len = @as(u32, @intCast(fields_len)),
     });
     const result = try gz.addPlNodePayloadIndex(.error_set_decl, node, payload_index);
     return rvalue(gz, ri, result, node);
@@ -6463,7 +6463,7 @@ fn forExpr(
     {
         var capture_token = for_full.payload_token;
         for (for_full.ast.inputs, 0..) |input, i_usize| {
-            const i = @intCast(u32, i_usize);
+            const i = @as(u32, @intCast(i_usize));
             const capture_is_ref = token_tags[capture_token] == .asterisk;
             const ident_tok = capture_token + @intFromBool(capture_is_ref);
             const is_discard = mem.eql(u8, tree.tokenSlice(ident_tok), "_");
@@ -6521,7 +6521,7 @@ fn forExpr(
     // We use a dedicated ZIR instruction to assert the lengths to assist with
     // nicer error reporting as well as fewer ZIR bytes emitted.
     const len: Zir.Inst.Ref = len: {
-        const lens_len = @intCast(u32, lens.len);
+        const lens_len = @as(u32, @intCast(lens.len));
         try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.MultiOp).Struct.fields.len + lens_len);
         const len = try parent_gz.addPlNode(.for_len, node, Zir.Inst.MultiOp{
             .operands_len = lens_len,
@@ -6591,7 +6591,7 @@ fn forExpr(
         var capture_token = for_full.payload_token;
         var capture_sub_scope: *Scope = &then_scope.base;
         for (for_full.ast.inputs, 0..) |input, i_usize| {
-            const i = @intCast(u32, i_usize);
+            const i = @as(u32, @intCast(i_usize));
             const capture_is_ref = token_tags[capture_token] == .asterisk;
             const ident_tok = capture_token + @intFromBool(capture_is_ref);
             const capture_name = tree.tokenSlice(ident_tok);
@@ -6891,7 +6891,7 @@ fn switchExpr(
 
     // If any prong has an inline tag capture, allocate a shared dummy instruction for it
     const tag_inst = if (any_has_tag_capture) tag_inst: {
-        const inst = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const inst = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         try astgen.instructions.append(astgen.gpa, .{
             .tag = .extended,
             .data = .{ .extended = .{
@@ -6984,7 +6984,7 @@ fn switchExpr(
             break :blk &tag_scope.base;
         };
 
-        const header_index = @intCast(u32, payloads.items.len);
+        const header_index = @as(u32, @intCast(payloads.items.len));
         const body_len_index = if (is_multi_case) blk: {
             payloads.items[multi_case_table + multi_case_index] = header_index;
             multi_case_index += 1;
@@ -7074,12 +7074,12 @@ fn switchExpr(
             };
             const body_len = refs_len + astgen.countBodyLenAfterFixups(case_slice);
             try payloads.ensureUnusedCapacity(gpa, body_len);
-            payloads.items[body_len_index] = @bitCast(u32, Zir.Inst.SwitchBlock.ProngInfo{
-                .body_len = @intCast(u28, body_len),
+            payloads.items[body_len_index] = @as(u32, @bitCast(Zir.Inst.SwitchBlock.ProngInfo{
+                .body_len = @as(u28, @intCast(body_len)),
                 .capture = capture,
                 .is_inline = case.inline_token != null,
                 .has_tag_capture = has_tag_capture,
-            });
+            }));
             if (astgen.ref_table.fetchRemove(switch_block)) |kv| {
                 appendPossiblyRefdBodyInst(astgen, payloads, kv.value);
             }
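The migrated form above also gains a trailing `))`: the cast now wraps the entire struct initializer instead of taking it as a second argument. Round-tripping a packed struct through its `u32` backing integer looks like this under the new syntax (the field widths here are illustrative, not the compiler's exact `ProngInfo` layout):

    const Flags = packed struct(u32) {
        body_len: u28,
        capture: bool,
        is_inline: bool,
        has_tag_capture: bool,
        reserved: bool = false,
    };

    fn store(flags: Flags) u32 {
        return @as(u32, @bitCast(flags)); // struct -> backing integer
    }

    fn load(word: u32) Flags {
        return @as(Flags, @bitCast(word)); // backing integer -> struct
    }
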
@@ -7106,7 +7106,7 @@ fn switchExpr(
             .has_else = special_prong == .@"else",
             .has_under = special_prong == .under,
             .any_has_tag_capture = any_has_tag_capture,
-            .scalar_cases_len = @intCast(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, scalar_cases_len),
+            .scalar_cases_len = @as(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, @intCast(scalar_cases_len)),
         },
     });
 
@@ -7140,7 +7140,7 @@ fn switchExpr(
             end_index += 3 + items_len + 2 * ranges_len;
         }
 
-        const body_len = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, payloads.items[body_len_index]).body_len;
+        const body_len = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(payloads.items[body_len_index])).body_len;
         end_index += body_len;
 
         switch (strat.tag) {
@@ -7579,7 +7579,7 @@ fn tunnelThroughClosure(
                 .src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token),
             } },
         });
-        gop.value_ptr.* = @intCast(Zir.Inst.Index, gz.astgen.instructions.len - 1);
+        gop.value_ptr.* = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len - 1));
     }
 
     // Add an instruction to get the value from the closure into
@@ -7680,7 +7680,7 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node:
             };
             // If the value fits into a f64 without losing any precision, store it that way.
             @setFloatMode(.Strict);
-            const smaller_float = @floatCast(f64, float_number);
+            const smaller_float = @as(f64, @floatCast(float_number));
             const bigger_again: f128 = smaller_float;
             if (bigger_again == float_number) {
                 const result = try gz.addFloat(smaller_float);
@@ -7688,12 +7688,12 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node:
             }
             // We need to use 128 bits. Break the float into 4 u32 values so we can
             // put it into the `extra` array.
-            const int_bits = @bitCast(u128, float_number);
+            const int_bits = @as(u128, @bitCast(float_number));
             const result = try gz.addPlNode(.float128, node, Zir.Inst.Float128{
-                .piece0 = @truncate(u32, int_bits),
-                .piece1 = @truncate(u32, int_bits >> 32),
-                .piece2 = @truncate(u32, int_bits >> 64),
-                .piece3 = @truncate(u32, int_bits >> 96),
+                .piece0 = @as(u32, @truncate(int_bits)),
+                .piece1 = @as(u32, @truncate(int_bits >> 32)),
+                .piece2 = @as(u32, @truncate(int_bits >> 64)),
+                .piece3 = @as(u32, @truncate(int_bits >> 96)),
             });
             return rvalue(gz, ri, result, source_node);
         },
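The hunk above stores a 128-bit float as four `u32` words. A minimal sketch of the same splitting with the new `@truncate` syntax, assuming a raw `u128` as input (for an `f128` value the bits would first be obtained with `@as(u128, @bitCast(x))`):

    fn split(bits: u128) [4]u32 {
        // Each @truncate keeps the low 32 bits of the shifted value.
        return .{
            @as(u32, @truncate(bits)),
            @as(u32, @truncate(bits >> 32)),
            @as(u32, @truncate(bits >> 64)),
            @as(u32, @truncate(bits >> 96)),
        };
    }
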
@@ -7719,22 +7719,22 @@ fn failWithNumberError(astgen: *AstGen, err: std.zig.number_literal.Error, token
             });
         },
         .digit_after_base => return astgen.failTok(token, "expected a digit after base prefix", .{}),
-        .upper_case_base => |i| return astgen.failOff(token, @intCast(u32, i), "base prefix must be lowercase", .{}),
-        .invalid_float_base => |i| return astgen.failOff(token, @intCast(u32, i), "invalid base for float literal", .{}),
-        .repeated_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "repeated digit separator", .{}),
-        .invalid_underscore_after_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before digit separator", .{}),
-        .invalid_digit => |info| return astgen.failOff(token, @intCast(u32, info.i), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }),
-        .invalid_digit_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "invalid digit '{c}' in exponent", .{bytes[i]}),
-        .duplicate_exponent => |i| return astgen.failOff(token, @intCast(u32, i), "duplicate exponent", .{}),
-        .exponent_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before exponent", .{}),
-        .special_after_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit before '{c}'", .{bytes[i]}),
-        .trailing_special => |i| return astgen.failOff(token, @intCast(u32, i), "expected digit after '{c}'", .{bytes[i - 1]}),
-        .trailing_underscore => |i| return astgen.failOff(token, @intCast(u32, i), "trailing digit separator", .{}),
+        .upper_case_base => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "base prefix must be lowercase", .{}),
+        .invalid_float_base => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "invalid base for float literal", .{}),
+        .repeated_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "repeated digit separator", .{}),
+        .invalid_underscore_after_special => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before digit separator", .{}),
+        .invalid_digit => |info| return astgen.failOff(token, @as(u32, @intCast(info.i)), "invalid digit '{c}' for {s} base", .{ bytes[info.i], @tagName(info.base) }),
+        .invalid_digit_exponent => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "invalid digit '{c}' in exponent", .{bytes[i]}),
+        .duplicate_exponent => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "duplicate exponent", .{}),
+        .exponent_after_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before exponent", .{}),
+        .special_after_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit before '{c}'", .{bytes[i]}),
+        .trailing_special => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "expected digit after '{c}'", .{bytes[i - 1]}),
+        .trailing_underscore => |i| return astgen.failOff(token, @as(u32, @intCast(i)), "trailing digit separator", .{}),
         .duplicate_period => unreachable, // Validated by tokenizer
         .invalid_character => unreachable, // Validated by tokenizer
         .invalid_exponent_sign => |i| {
             assert(bytes.len >= 2 and bytes[0] == '0' and bytes[1] == 'x'); // Validated by tokenizer
-            return astgen.failOff(token, @intCast(u32, i), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] });
+            return astgen.failOff(token, @as(u32, @intCast(i)), "sign '{c}' cannot follow digit '{c}' in hex base", .{ bytes[i], bytes[i - 1] });
         },
     }
 }
@@ -7801,7 +7801,7 @@ fn asmExpr(
             if (output_type_bits != 0) {
                 return astgen.failNode(output_node, "inline assembly allows up to one output value", .{});
             }
-            output_type_bits |= @as(u32, 1) << @intCast(u5, i);
+            output_type_bits |= @as(u32, 1) << @as(u5, @intCast(i));
             const out_type_node = node_datas[output_node].lhs;
             const out_type_inst = try typeExpr(gz, scope, out_type_node);
             outputs[i] = .{
@@ -8024,11 +8024,11 @@ fn ptrCast(
         node = node_datas[node].lhs;
     }
 
-    const flags_i = @bitCast(u5, flags);
+    const flags_i = @as(u5, @bitCast(flags));
     assert(flags_i != 0);
 
     const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true };
-    if (flags_i == @bitCast(u5, ptr_only)) {
+    if (flags_i == @as(u5, @bitCast(ptr_only))) {
         // Special case: simpler representation
         return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast");
     }
@@ -8037,7 +8037,7 @@ fn ptrCast(
         .const_cast = true,
         .volatile_cast = true,
     };
-    if ((flags_i & ~@bitCast(u5, no_result_ty_flags)) == 0) {
+    if ((flags_i & ~@as(u5, @bitCast(no_result_ty_flags))) == 0) {
         // Result type not needed
         const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
         const operand = try expr(gz, scope, .{ .rl = .none }, node);
@@ -8119,8 +8119,8 @@ fn typeOf(
     const body = typeof_scope.instructionsSlice();
     const body_len = astgen.countBodyLenAfterFixups(body);
     astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{
-        .body_len = @intCast(u32, body_len),
-        .body_index = @intCast(u32, astgen.extra.items.len),
+        .body_len = @as(u32, @intCast(body_len)),
+        .body_index = @as(u32, @intCast(astgen.extra.items.len)),
         .src_node = gz.nodeIndexToRelative(node),
     });
     try astgen.extra.ensureUnusedCapacity(gpa, body_len);
@@ -8464,7 +8464,7 @@ fn builtinCall(
                 .node = gz.nodeIndexToRelative(node),
                 .operand = operand,
             });
-            const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+            const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
             gz.astgen.instructions.appendAssumeCapacity(.{
                 .tag = .extended,
                 .data = .{ .extended = .{
@@ -9115,7 +9115,7 @@ fn callExpr(
     }
     assert(node != 0);
 
-    const call_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+    const call_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
     const call_inst = Zir.indexToRef(call_index);
     try gz.astgen.instructions.append(astgen.gpa, undefined);
     try gz.instructions.append(astgen.gpa, call_index);
@@ -9139,7 +9139,7 @@ fn callExpr(
         try astgen.scratch.ensureUnusedCapacity(astgen.gpa, countBodyLenAfterFixups(astgen, body));
         appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
 
-        astgen.scratch.items[scratch_index] = @intCast(u32, astgen.scratch.items.len - scratch_top);
+        astgen.scratch.items[scratch_index] = @as(u32, @intCast(astgen.scratch.items.len - scratch_top));
         scratch_index += 1;
     }
 
@@ -9157,8 +9157,8 @@ fn callExpr(
                 .callee = callee_obj,
                 .flags = .{
                     .pop_error_return_trace = !propagate_error_trace,
-                    .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @intFromEnum(modifier)),
-                    .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len),
+                    .packed_modifier = @as(Zir.Inst.Call.Flags.PackedModifier, @intCast(@intFromEnum(modifier))),
+                    .args_len = @as(Zir.Inst.Call.Flags.PackedArgsLen, @intCast(call.ast.params.len)),
                 },
             });
             if (call.ast.params.len != 0) {
@@ -9178,8 +9178,8 @@ fn callExpr(
                 .field_name_start = callee_field.field_name_start,
                 .flags = .{
                     .pop_error_return_trace = !propagate_error_trace,
-                    .packed_modifier = @intCast(Zir.Inst.Call.Flags.PackedModifier, @intFromEnum(modifier)),
-                    .args_len = @intCast(Zir.Inst.Call.Flags.PackedArgsLen, call.ast.params.len),
+                    .packed_modifier = @as(Zir.Inst.Call.Flags.PackedModifier, @intCast(@intFromEnum(modifier))),
+                    .args_len = @as(Zir.Inst.Call.Flags.PackedArgsLen, @intCast(call.ast.params.len)),
                 },
             });
             if (call.ast.params.len != 0) {
@@ -10552,7 +10552,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .invalid_escape_character => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "invalid escape character: '{c}'",
                 .{raw_string[bad_index]},
             );
@@ -10560,7 +10560,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_hex_digit => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected hex digit, found '{c}'",
                 .{raw_string[bad_index]},
             );
@@ -10568,7 +10568,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .empty_unicode_escape_sequence => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "empty unicode escape sequence",
                 .{},
             );
@@ -10576,7 +10576,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_hex_digit_or_rbrace => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected hex digit or '}}', found '{c}'",
                 .{raw_string[bad_index]},
             );
@@ -10584,7 +10584,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .invalid_unicode_codepoint => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "unicode escape does not correspond to a valid codepoint",
                 .{},
             );
@@ -10592,7 +10592,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_lbrace => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected '{{', found '{c}",
                 .{raw_string[bad_index]},
             );
@@ -10600,7 +10600,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_rbrace => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected '}}', found '{c}",
                 .{raw_string[bad_index]},
             );
@@ -10608,7 +10608,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .expected_single_quote => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "expected single quote ('), found '{c}",
                 .{raw_string[bad_index]},
             );
@@ -10616,7 +10616,7 @@ fn failWithStrLitError(astgen: *AstGen, err: std.zig.string_literal.Error, token
         .invalid_character => |bad_index| {
             return astgen.failOff(
                 token,
-                offset + @intCast(u32, bad_index),
+                offset + @as(u32, @intCast(bad_index)),
                 "invalid byte in string or character literal: '{c}'",
                 .{raw_string[bad_index]},
             );
@@ -10651,14 +10651,14 @@ fn appendErrorNodeNotes(
 ) Allocator.Error!void {
     @setCold(true);
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
     const notes_index: u32 = if (notes.len != 0) blk: {
         const notes_start = astgen.extra.items.len;
         try astgen.extra.ensureTotalCapacity(astgen.gpa, notes_start + 1 + notes.len);
-        astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len));
+        astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len)));
         astgen.extra.appendSliceAssumeCapacity(notes);
-        break :blk @intCast(u32, notes_start);
+        break :blk @as(u32, @intCast(notes_start));
     } else 0;
     try astgen.compile_errors.append(astgen.gpa, .{
         .msg = msg,
@@ -10743,14 +10743,14 @@ fn appendErrorTokNotesOff(
     @setCold(true);
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(gpa).print(format ++ "\x00", args);
     const notes_index: u32 = if (notes.len != 0) blk: {
         const notes_start = astgen.extra.items.len;
         try astgen.extra.ensureTotalCapacity(gpa, notes_start + 1 + notes.len);
-        astgen.extra.appendAssumeCapacity(@intCast(u32, notes.len));
+        astgen.extra.appendAssumeCapacity(@as(u32, @intCast(notes.len)));
         astgen.extra.appendSliceAssumeCapacity(notes);
-        break :blk @intCast(u32, notes_start);
+        break :blk @as(u32, @intCast(notes_start));
     } else 0;
     try astgen.compile_errors.append(gpa, .{
         .msg = msg,
@@ -10779,7 +10779,7 @@ fn errNoteTokOff(
 ) Allocator.Error!u32 {
     @setCold(true);
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
     return astgen.addExtra(Zir.Inst.CompileErrors.Item{
         .msg = msg,
@@ -10798,7 +10798,7 @@ fn errNoteNode(
 ) Allocator.Error!u32 {
     @setCold(true);
     const string_bytes = &astgen.string_bytes;
-    const msg = @intCast(u32, string_bytes.items.len);
+    const msg = @as(u32, @intCast(string_bytes.items.len));
     try string_bytes.writer(astgen.gpa).print(format ++ "\x00", args);
     return astgen.addExtra(Zir.Inst.CompileErrors.Item{
         .msg = msg,
@@ -10812,7 +10812,7 @@ fn errNoteNode(
 fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 {
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
-    const str_index = @intCast(u32, string_bytes.items.len);
+    const str_index = @as(u32, @intCast(string_bytes.items.len));
     try astgen.appendIdentStr(ident_token, string_bytes);
     const key: []const u8 = string_bytes.items[str_index..];
     const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{
@@ -10858,7 +10858,7 @@ fn docCommentAsStringFromFirst(
 
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
-    const str_index = @intCast(u32, string_bytes.items.len);
+    const str_index = @as(u32, @intCast(string_bytes.items.len));
     const token_starts = astgen.tree.tokens.items(.start);
     const token_tags = astgen.tree.tokens.items(.tag);
 
@@ -10901,7 +10901,7 @@ const IndexSlice = struct { index: u32, len: u32 };
 fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice {
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
-    const str_index = @intCast(u32, string_bytes.items.len);
+    const str_index = @as(u32, @intCast(string_bytes.items.len));
     const token_bytes = astgen.tree.tokenSlice(str_lit_token);
     try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
     const key = string_bytes.items[str_index..];
@@ -10914,7 +10914,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice {
         string_bytes.shrinkRetainingCapacity(str_index);
         return IndexSlice{
             .index = gop.key_ptr.*,
-            .len = @intCast(u32, key.len),
+            .len = @as(u32, @intCast(key.len)),
         };
     } else {
         gop.key_ptr.* = str_index;
@@ -10924,7 +10924,7 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !IndexSlice {
         try string_bytes.append(gpa, 0);
         return IndexSlice{
             .index = str_index,
-            .len = @intCast(u32, key.len),
+            .len = @as(u32, @intCast(key.len)),
         };
     }
 }
@@ -10961,15 +10961,15 @@ fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice {
     const len = string_bytes.items.len - str_index;
     try string_bytes.append(gpa, 0);
     return IndexSlice{
-        .index = @intCast(u32, str_index),
-        .len = @intCast(u32, len),
+        .index = @as(u32, @intCast(str_index)),
+        .len = @as(u32, @intCast(len)),
     };
 }
 
 fn testNameString(astgen: *AstGen, str_lit_token: Ast.TokenIndex) !u32 {
     const gpa = astgen.gpa;
     const string_bytes = &astgen.string_bytes;
-    const str_index = @intCast(u32, string_bytes.items.len);
+    const str_index = @as(u32, @intCast(string_bytes.items.len));
     const token_bytes = astgen.tree.tokenSlice(str_lit_token);
     try string_bytes.append(gpa, 0); // Indicates this is a test.
     try astgen.parseStrLit(str_lit_token, string_bytes, token_bytes, 0);
@@ -11321,7 +11321,7 @@ const GenZir = struct {
     }
 
     fn nodeIndexToRelative(gz: GenZir, node_index: Ast.Node.Index) i32 {
-        return @bitCast(i32, node_index) - @bitCast(i32, gz.decl_node_index);
+        return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(gz.decl_node_index));
     }
 
     fn tokenIndexToRelative(gz: GenZir, token: Ast.TokenIndex) u32 {
@@ -11478,7 +11478,7 @@ const GenZir = struct {
         const astgen = gz.astgen;
         const gpa = astgen.gpa;
         const ret_ref = if (args.ret_ref == .void_type) .none else args.ret_ref;
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
 
         try astgen.instructions.ensureUnusedCapacity(gpa, 1);
 
@@ -11496,8 +11496,8 @@ const GenZir = struct {
             const block = node_datas[fn_decl].rhs;
             const rbrace_start = token_starts[tree.lastToken(block)];
             astgen.advanceSourceCursor(rbrace_start);
-            const rbrace_line = @intCast(u32, astgen.source_line - gz.decl_line);
-            const rbrace_column = @intCast(u32, astgen.source_column);
+            const rbrace_line = @as(u32, @intCast(astgen.source_line - gz.decl_line));
+            const rbrace_column = @as(u32, @intCast(astgen.source_column));
 
             const columns = args.lbrace_column | (rbrace_column << 16);
             src_locs_buffer[0] = args.lbrace_line;
@@ -11733,18 +11733,18 @@ const GenZir = struct {
             astgen.extra.appendAssumeCapacity(@intFromEnum(args.init));
         }
 
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         astgen.instructions.appendAssumeCapacity(.{
             .tag = .extended,
             .data = .{ .extended = .{
                 .opcode = .variable,
-                .small = @bitCast(u16, Zir.Inst.ExtendedVar.Small{
+                .small = @as(u16, @bitCast(Zir.Inst.ExtendedVar.Small{
                     .has_lib_name = args.lib_name != 0,
                     .has_align = args.align_inst != .none,
                     .has_init = args.init != .none,
                     .is_extern = args.is_extern,
                     .is_threadlocal = args.is_threadlocal,
-                }),
+                })),
                 .operand = payload_index,
             } },
         });
@@ -11764,7 +11764,7 @@ const GenZir = struct {
         try gz.instructions.ensureUnusedCapacity(gpa, 1);
         try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
 
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = tag,
             .data = .{ .bool_br = .{
@@ -11790,12 +11790,12 @@ const GenZir = struct {
         try astgen.instructions.ensureUnusedCapacity(gpa, 1);
         try astgen.string_bytes.ensureUnusedCapacity(gpa, @sizeOf(std.math.big.Limb) * limbs.len);
 
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         astgen.instructions.appendAssumeCapacity(.{
             .tag = .int_big,
             .data = .{ .str = .{
-                .start = @intCast(u32, astgen.string_bytes.items.len),
-                .len = @intCast(u32, limbs.len),
+                .start = @as(u32, @intCast(astgen.string_bytes.items.len)),
+                .len = @as(u32, @intCast(limbs.len)),
             } },
         });
         gz.instructions.appendAssumeCapacity(new_index);
@@ -11835,7 +11835,7 @@ const GenZir = struct {
         src_node: Ast.Node.Index,
     ) !Zir.Inst.Index {
         assert(operand != .none);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         try gz.astgen.instructions.append(gz.astgen.gpa, .{
             .tag = tag,
             .data = .{ .un_node = .{
@@ -11858,7 +11858,7 @@ const GenZir = struct {
         try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
 
         const payload_index = try gz.astgen.addExtra(extra);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = tag,
             .data = .{ .pl_node = .{
@@ -11910,12 +11910,12 @@ const GenZir = struct {
         const payload_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Param{
             .name = name,
             .doc_comment = doc_comment_index,
-            .body_len = @intCast(u32, body_len),
+            .body_len = @as(u32, @intCast(body_len)),
         });
         gz.astgen.appendBodyWithFixups(param_body);
         param_gz.unstack();
 
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = tag,
             .data = .{ .pl_tok = .{
@@ -11943,7 +11943,7 @@ const GenZir = struct {
         try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
 
         const payload_index = try gz.astgen.addExtra(extra);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = .extended,
             .data = .{ .extended = .{
@@ -11975,12 +11975,12 @@ const GenZir = struct {
         const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.NodeMultiOp{
             .src_node = gz.nodeIndexToRelative(node),
         });
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         astgen.instructions.appendAssumeCapacity(.{
             .tag = .extended,
             .data = .{ .extended = .{
                 .opcode = opcode,
-                .small = @intCast(u16, operands.len),
+                .small = @as(u16, @intCast(operands.len)),
                 .operand = payload_index,
             } },
         });
@@ -12000,12 +12000,12 @@ const GenZir = struct {
 
         try gz.instructions.ensureUnusedCapacity(gpa, 1);
         try astgen.instructions.ensureUnusedCapacity(gpa, 1);
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         astgen.instructions.appendAssumeCapacity(.{
             .tag = .extended,
             .data = .{ .extended = .{
                 .opcode = opcode,
-                .small = @intCast(u16, trailing_len),
+                .small = @as(u16, @intCast(trailing_len)),
                 .operand = payload_index,
             } },
         });
@@ -12038,7 +12038,7 @@ const GenZir = struct {
         abs_tok_index: Ast.TokenIndex,
     ) !Zir.Inst.Index {
         const astgen = gz.astgen;
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         assert(operand != .none);
         try astgen.instructions.append(astgen.gpa, .{
             .tag = tag,
@@ -12121,7 +12121,7 @@ const GenZir = struct {
             .operand_src_node = Zir.Inst.Break.no_src_node,
         };
         const payload_index = try gz.astgen.addExtra(extra);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = tag,
             .data = .{ .@"break" = .{
@@ -12147,7 +12147,7 @@ const GenZir = struct {
             .operand_src_node = Zir.Inst.Break.no_src_node,
         };
         const payload_index = try gz.astgen.addExtra(extra);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = tag,
             .data = .{ .@"break" = .{
@@ -12174,7 +12174,7 @@ const GenZir = struct {
             .operand_src_node = gz.nodeIndexToRelative(operand_src_node),
         };
         const payload_index = try gz.astgen.addExtra(extra);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = tag,
             .data = .{ .@"break" = .{
@@ -12201,7 +12201,7 @@ const GenZir = struct {
             .operand_src_node = gz.nodeIndexToRelative(operand_src_node),
         };
         const payload_index = try gz.astgen.addExtra(extra);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(.{
             .tag = tag,
             .data = .{ .@"break" = .{
@@ -12293,7 +12293,7 @@ const GenZir = struct {
             .data = .{ .extended = .{
                 .opcode = opcode,
                 .small = undefined,
-                .operand = @bitCast(u32, gz.nodeIndexToRelative(src_node)),
+                .operand = @as(u32, @bitCast(gz.nodeIndexToRelative(src_node))),
             } },
         });
     }
@@ -12336,7 +12336,7 @@ const GenZir = struct {
         const is_comptime: u4 = @intFromBool(args.is_comptime);
         const small: u16 = has_type | (has_align << 1) | (is_const << 2) | (is_comptime << 3);
 
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         astgen.instructions.appendAssumeCapacity(.{
             .tag = .extended,
             .data = .{ .extended = .{
@@ -12390,12 +12390,12 @@ const GenZir = struct {
         //  * 0b000000XX_XXX00000 - `inputs_len`.
         //  * 0b0XXXXX00_00000000 - `clobbers_len`.
         //  * 0bX0000000_00000000 - is volatile
-        const small: u16 = @intCast(u16, args.outputs.len) |
-            @intCast(u16, args.inputs.len << 5) |
-            @intCast(u16, args.clobbers.len << 10) |
+        const small: u16 = @as(u16, @intCast(args.outputs.len)) |
+            @as(u16, @intCast(args.inputs.len << 5)) |
+            @as(u16, @intCast(args.clobbers.len << 10)) |
             (@as(u16, @intFromBool(args.is_volatile)) << 15);
 
-        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(astgen.instructions.len));
         astgen.instructions.appendAssumeCapacity(.{
             .tag = .extended,
             .data = .{ .extended = .{
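The bit-layout comment in this hunk describes how the asm instruction packs three lengths and a volatile flag into the `small: u16` field. A hedged sketch of that packing with the new cast syntax (it narrows before shifting rather than after, which is equivalent as long as each length fits its 5-bit slot):

    fn packAsmSmall(outputs: usize, inputs: usize, clobbers: usize, is_volatile: bool) u16 {
        return @as(u16, @intCast(outputs)) | // 0b00000000_000XXXXX
            (@as(u16, @intCast(inputs)) << 5) | // 0b000000XX_XXX00000
            (@as(u16, @intCast(clobbers)) << 10) | // 0b0XXXXX00_00000000
            (@as(u16, @intFromBool(is_volatile)) << 15); // volatile flag
    }
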
@@ -12412,7 +12412,7 @@ const GenZir = struct {
     /// Does *not* append the block instruction to the scope.
     /// Leaves the `payload_index` field undefined.
     fn makeBlockInst(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index {
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         const gpa = gz.astgen.gpa;
         try gz.astgen.instructions.append(gpa, .{
             .tag = tag,
@@ -12429,7 +12429,7 @@ const GenZir = struct {
     fn addCondBr(gz: *GenZir, tag: Zir.Inst.Tag, node: Ast.Node.Index) !Zir.Inst.Index {
         const gpa = gz.astgen.gpa;
         try gz.instructions.ensureUnusedCapacity(gpa, 1);
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         try gz.astgen.instructions.append(gpa, .{
             .tag = tag,
             .data = .{ .pl_node = .{
@@ -12456,11 +12456,11 @@ const GenZir = struct {
         const gpa = astgen.gpa;
 
         try astgen.extra.ensureUnusedCapacity(gpa, 6);
-        const payload_index = @intCast(u32, astgen.extra.items.len);
+        const payload_index = @as(u32, @intCast(astgen.extra.items.len));
 
         if (args.src_node != 0) {
             const node_offset = gz.nodeIndexToRelative(args.src_node);
-            astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset));
+            astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset)));
         }
         if (args.fields_len != 0) {
             astgen.extra.appendAssumeCapacity(args.fields_len);
@@ -12478,7 +12478,7 @@ const GenZir = struct {
             .tag = .extended,
             .data = .{ .extended = .{
                 .opcode = .struct_decl,
-                .small = @bitCast(u16, Zir.Inst.StructDecl.Small{
+                .small = @as(u16, @bitCast(Zir.Inst.StructDecl.Small{
                     .has_src_node = args.src_node != 0,
                     .has_fields_len = args.fields_len != 0,
                     .has_decls_len = args.decls_len != 0,
@@ -12488,7 +12488,7 @@ const GenZir = struct {
                     .is_tuple = args.is_tuple,
                     .name_strategy = gz.anon_name_strategy,
                     .layout = args.layout,
-                }),
+                })),
                 .operand = payload_index,
             } },
         });
@@ -12507,11 +12507,11 @@ const GenZir = struct {
         const gpa = astgen.gpa;
 
         try astgen.extra.ensureUnusedCapacity(gpa, 5);
-        const payload_index = @intCast(u32, astgen.extra.items.len);
+        const payload_index = @as(u32, @intCast(astgen.extra.items.len));
 
         if (args.src_node != 0) {
             const node_offset = gz.nodeIndexToRelative(args.src_node);
-            astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset));
+            astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset)));
         }
         if (args.tag_type != .none) {
             astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
@@ -12529,7 +12529,7 @@ const GenZir = struct {
             .tag = .extended,
             .data = .{ .extended = .{
                 .opcode = .union_decl,
-                .small = @bitCast(u16, Zir.Inst.UnionDecl.Small{
+                .small = @as(u16, @bitCast(Zir.Inst.UnionDecl.Small{
                     .has_src_node = args.src_node != 0,
                     .has_tag_type = args.tag_type != .none,
                     .has_body_len = args.body_len != 0,
@@ -12538,7 +12538,7 @@ const GenZir = struct {
                     .name_strategy = gz.anon_name_strategy,
                     .layout = args.layout,
                     .auto_enum_tag = args.auto_enum_tag,
-                }),
+                })),
                 .operand = payload_index,
             } },
         });
@@ -12556,11 +12556,11 @@ const GenZir = struct {
         const gpa = astgen.gpa;
 
         try astgen.extra.ensureUnusedCapacity(gpa, 5);
-        const payload_index = @intCast(u32, astgen.extra.items.len);
+        const payload_index = @as(u32, @intCast(astgen.extra.items.len));
 
         if (args.src_node != 0) {
             const node_offset = gz.nodeIndexToRelative(args.src_node);
-            astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset));
+            astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset)));
         }
         if (args.tag_type != .none) {
             astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
@@ -12578,7 +12578,7 @@ const GenZir = struct {
             .tag = .extended,
             .data = .{ .extended = .{
                 .opcode = .enum_decl,
-                .small = @bitCast(u16, Zir.Inst.EnumDecl.Small{
+                .small = @as(u16, @bitCast(Zir.Inst.EnumDecl.Small{
                     .has_src_node = args.src_node != 0,
                     .has_tag_type = args.tag_type != .none,
                     .has_body_len = args.body_len != 0,
@@ -12586,7 +12586,7 @@ const GenZir = struct {
                     .has_decls_len = args.decls_len != 0,
                     .name_strategy = gz.anon_name_strategy,
                     .nonexhaustive = args.nonexhaustive,
-                }),
+                })),
                 .operand = payload_index,
             } },
         });
@@ -12600,11 +12600,11 @@ const GenZir = struct {
         const gpa = astgen.gpa;
 
         try astgen.extra.ensureUnusedCapacity(gpa, 2);
-        const payload_index = @intCast(u32, astgen.extra.items.len);
+        const payload_index = @as(u32, @intCast(astgen.extra.items.len));
 
         if (args.src_node != 0) {
             const node_offset = gz.nodeIndexToRelative(args.src_node);
-            astgen.extra.appendAssumeCapacity(@bitCast(u32, node_offset));
+            astgen.extra.appendAssumeCapacity(@as(u32, @bitCast(node_offset)));
         }
         if (args.decls_len != 0) {
             astgen.extra.appendAssumeCapacity(args.decls_len);
@@ -12613,11 +12613,11 @@ const GenZir = struct {
             .tag = .extended,
             .data = .{ .extended = .{
                 .opcode = .opaque_decl,
-                .small = @bitCast(u16, Zir.Inst.OpaqueDecl.Small{
+                .small = @as(u16, @bitCast(Zir.Inst.OpaqueDecl.Small{
                     .has_src_node = args.src_node != 0,
                     .has_decls_len = args.decls_len != 0,
                     .name_strategy = gz.anon_name_strategy,
-                }),
+                })),
                 .operand = payload_index,
             } },
         });
@@ -12632,7 +12632,7 @@ const GenZir = struct {
         try gz.instructions.ensureUnusedCapacity(gpa, 1);
         try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
 
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.appendAssumeCapacity(inst);
         gz.instructions.appendAssumeCapacity(new_index);
         return new_index;
@@ -12643,7 +12643,7 @@ const GenZir = struct {
         try gz.instructions.ensureUnusedCapacity(gpa, 1);
         try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
 
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         gz.astgen.instructions.len += 1;
         gz.instructions.appendAssumeCapacity(new_index);
         return new_index;
@@ -12695,7 +12695,7 @@ const GenZir = struct {
             return;
         }
 
-        const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
+        const new_index = @as(Zir.Inst.Index, @intCast(gz.astgen.instructions.len));
         try gz.astgen.instructions.append(gpa, .{ .tag = .dbg_block_end, .data = undefined });
         try gz.instructions.append(gpa, new_index);
     }
@@ -12704,7 +12704,7 @@ const GenZir = struct {
 /// This can only be for short-lived references; the memory becomes invalidated
 /// when another string is added.
 fn nullTerminatedString(astgen: AstGen, index: usize) [*:0]const u8 {
-    return @ptrCast([*:0]const u8, astgen.string_bytes.items.ptr) + index;
+    return @as([*:0]const u8, @ptrCast(astgen.string_bytes.items.ptr)) + index;
 }
 
 /// Local variables shadowing detection, including function parameters.
@@ -12983,7 +12983,7 @@ fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool {
         .extended => {
             const zir_data = astgen.instructions.items(.data);
             if (zir_data[inst].extended.opcode != .alloc) return false;
-            const small = @bitCast(Zir.Inst.AllocExtended.Small, zir_data[inst].extended.small);
+            const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(zir_data[inst].extended.small));
             return !small.has_type;
         },
 
@@ -13027,7 +13027,7 @@ fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 {
             check_inst = ref_inst;
         }
     }
-    return @intCast(u32, count);
+    return @as(u32, @intCast(count));
 }
 
 fn emitDbgStmt(gz: *GenZir, lc: LineColumn) !void {
@@ -13059,7 +13059,7 @@ fn lowerAstErrors(astgen: *AstGen) !void {
 
     if (token_tags[parse_err.token + @intFromBool(parse_err.token_is_prev)] == .invalid) {
         const tok = parse_err.token + @intFromBool(parse_err.token_is_prev);
-        const bad_off = @intCast(u32, tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len);
+        const bad_off = @as(u32, @intCast(tree.tokenSlice(parse_err.token + @intFromBool(parse_err.token_is_prev)).len));
         const byte_abs = token_starts[parse_err.token + @intFromBool(parse_err.token_is_prev)] + bad_off;
         try notes.append(gpa, try astgen.errNoteTokOff(tok, bad_off, "invalid byte: '{'}'", .{
             std.zig.fmtEscapes(tree.source[byte_abs..][0..1]),
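
Nearly every hunk in this file is the same mechanical rewrite: the destination type moves out of the two-argument cast builtin and into a wrapping `@as` (or any other result location). A minimal sketch of the pattern, using a made-up `len` value rather than anything from this commit:

    const std = @import("std");

    test "two-argument @intCast becomes @as plus single-argument @intCast" {
        const len: usize = 12390;
        // was: @intCast(u32, len) -- the new builtin infers its result type
        // from the result location, so @as pins it explicitly.
        const index = @as(u32, @intCast(len));
        // A typed declaration works just as well: const index2: u32 = @intCast(len);
        try std.testing.expectEqual(@as(u32, 12390), index);
    }
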
src/Autodoc.zig
@@ -110,7 +110,7 @@ pub fn generateZirData(self: *Autodoc) !void {
         comptime std.debug.assert(@intFromEnum(InternPool.Index.first_type) == 0);
         var i: u32 = 0;
         while (i <= @intFromEnum(InternPool.Index.last_type)) : (i += 1) {
-            const ip_index = @enumFromInt(InternPool.Index, i);
+            const ip_index = @as(InternPool.Index, @enumFromInt(i));
             var tmpbuf = std.ArrayList(u8).init(self.arena);
             if (ip_index == .generic_poison_type) {
                 // Not a real type, doesn't have a normal name
@@ -1669,7 +1669,7 @@ fn walkInstruction(
             // present in json
             var sentinel: ?DocData.Expr = null;
             if (ptr.flags.has_sentinel) {
-                const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+                const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
                 const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false);
                 sentinel = ref_result.expr;
                 extra_index += 1;
@@ -1677,21 +1677,21 @@ fn walkInstruction(
 
             var @"align": ?DocData.Expr = null;
             if (ptr.flags.has_align) {
-                const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+                const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
                 const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false);
                 @"align" = ref_result.expr;
                 extra_index += 1;
             }
             var address_space: ?DocData.Expr = null;
             if (ptr.flags.has_addrspace) {
-                const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+                const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
                 const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false);
                 address_space = ref_result.expr;
                 extra_index += 1;
             }
             var bit_start: ?DocData.Expr = null;
             if (ptr.flags.has_bit_range) {
-                const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+                const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
                 const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false);
                bit_start = ref_result.expr;
                 extra_index += 1;
@@ -1699,7 +1699,7 @@ fn walkInstruction(
 
             var host_size: ?DocData.Expr = null;
             if (ptr.flags.has_bit_range) {
-                const ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+                const ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
                 const ref_result = try self.walkRef(file, parent_scope, parent_src, ref, false);
                 host_size = ref_result.expr;
             }
@@ -2549,11 +2549,11 @@ fn walkInstruction(
                         .enclosing_type = type_slot_index,
                     };
 
-                    const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small);
+                    const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
 
                     const src_node: ?i32 = if (small.has_src_node) blk: {
-                        const src_node = @bitCast(i32, file.zir.extra[extra_index]);
+                        const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
                         extra_index += 1;
                         break :blk src_node;
                     } else null;
@@ -2606,7 +2606,7 @@ fn walkInstruction(
                 .variable => {
                     const extra = file.zir.extraData(Zir.Inst.ExtendedVar, extended.operand);
 
-                    const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);
+                    const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small));
                     var extra_index: usize = extra.end;
                     if (small.has_lib_name) extra_index += 1;
                     if (small.has_align) extra_index += 1;
@@ -2619,7 +2619,7 @@ fn walkInstruction(
                     };
 
                     if (small.has_init) {
-                        const var_init_ref = @enumFromInt(Ref, file.zir.extra[extra_index]);
+                        const var_init_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index]));
                         const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type);
                         value.expr = var_init.expr;
                         value.typeRef = var_init.typeRef;
@@ -2636,11 +2636,11 @@ fn walkInstruction(
                         .enclosing_type = type_slot_index,
                     };
 
-                    const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
+                    const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
 
                     const src_node: ?i32 = if (small.has_src_node) blk: {
-                        const src_node = @bitCast(i32, file.zir.extra[extra_index]);
+                        const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
                         extra_index += 1;
                         break :blk src_node;
                     } else null;
@@ -2655,7 +2655,7 @@ fn walkInstruction(
                     const tag_type_ref: ?Ref = if (small.has_tag_type) blk: {
                         const tag_type = file.zir.extra[extra_index];
                         extra_index += 1;
-                        const tag_ref = @enumFromInt(Ref, tag_type);
+                        const tag_ref = @as(Ref, @enumFromInt(tag_type));
                         break :blk tag_ref;
                     } else null;
 
@@ -2763,11 +2763,11 @@ fn walkInstruction(
                         .enclosing_type = type_slot_index,
                     };
 
-                    const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small);
+                    const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
 
                     const src_node: ?i32 = if (small.has_src_node) blk: {
-                        const src_node = @bitCast(i32, file.zir.extra[extra_index]);
+                        const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
                         extra_index += 1;
                         break :blk src_node;
                     } else null;
@@ -2780,7 +2780,7 @@ fn walkInstruction(
                     const tag_type: ?DocData.Expr = if (small.has_tag_type) blk: {
                         const tag_type = file.zir.extra[extra_index];
                         extra_index += 1;
-                        const tag_ref = @enumFromInt(Ref, tag_type);
+                        const tag_ref = @as(Ref, @enumFromInt(tag_type));
                         const wr = try self.walkRef(file, parent_scope, parent_src, tag_ref, false);
                         break :blk wr.expr;
                     } else null;
@@ -2826,7 +2826,7 @@ fn walkInstruction(
                                 bit_bag_idx += 1;
                             }
 
-                            const has_value = @truncate(u1, cur_bit_bag) != 0;
+                            const has_value = @as(u1, @truncate(cur_bit_bag)) != 0;
                             cur_bit_bag >>= 1;
 
                             const field_name_index = file.zir.extra[extra_index];
@@ -2838,7 +2838,7 @@ fn walkInstruction(
                             const value_expr: ?DocData.Expr = if (has_value) blk: {
                                 const value_ref = file.zir.extra[extra_index];
                                 extra_index += 1;
-                                const value = try self.walkRef(file, &scope, src_info, @enumFromInt(Ref, value_ref), false);
+                                const value = try self.walkRef(file, &scope, src_info, @as(Ref, @enumFromInt(value_ref)), false);
                                 break :blk value.expr;
                             } else null;
                             try field_values.append(self.arena, value_expr);
@@ -2899,11 +2899,11 @@ fn walkInstruction(
                         .enclosing_type = type_slot_index,
                     };
 
-                    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+                    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
 
                     const src_node: ?i32 = if (small.has_src_node) blk: {
-                        const src_node = @bitCast(i32, file.zir.extra[extra_index]);
+                        const src_node = @as(i32, @bitCast(file.zir.extra[extra_index]));
                         extra_index += 1;
                         break :blk src_node;
                     } else null;
@@ -2927,7 +2927,7 @@ fn walkInstruction(
                         const backing_int_body_len = file.zir.extra[extra_index];
                         extra_index += 1; // backing_int_body_len
                         if (backing_int_body_len == 0) {
-                            const backing_int_ref = @enumFromInt(Ref, file.zir.extra[extra_index]);
+                            const backing_int_ref = @as(Ref, @enumFromInt(file.zir.extra[extra_index]));
                             const backing_int_res = try self.walkRef(file, &scope, src_info, backing_int_ref, true);
                             backing_int = backing_int_res.expr;
                             extra_index += 1; // backing_int_ref
@@ -3154,7 +3154,7 @@ fn analyzeAllDecls(
     priv_decl_indexes: *std.ArrayListUnmanaged(usize),
 ) AutodocErrors!usize {
     const first_decl_indexes_slot = decl_indexes.items.len;
-    const original_it = file.zir.declIterator(@intCast(u32, parent_inst_index));
+    const original_it = file.zir.declIterator(@as(u32, @intCast(parent_inst_index)));
 
     // First loop to discover decl names
     {
@@ -3180,7 +3180,7 @@ fn analyzeAllDecls(
             const decl_name_index = file.zir.extra[d.sub_index + 5];
             switch (decl_name_index) {
                 0 => {
-                    const is_exported = @truncate(u1, d.flags >> 1);
+                    const is_exported = @as(u1, @truncate(d.flags >> 1));
                     switch (is_exported) {
                         0 => continue, // comptime decl
                         1 => {
@@ -3255,10 +3255,10 @@ fn analyzeDecl(
     d: Zir.DeclIterator.Item,
 ) AutodocErrors!void {
     const data = file.zir.instructions.items(.data);
-    const is_pub = @truncate(u1, d.flags >> 0) != 0;
+    const is_pub = @as(u1, @truncate(d.flags >> 0)) != 0;
     // const is_exported = @truncate(u1, d.flags >> 1) != 0;
-    const has_align = @truncate(u1, d.flags >> 2) != 0;
-    const has_section_or_addrspace = @truncate(u1, d.flags >> 3) != 0;
+    const has_align = @as(u1, @truncate(d.flags >> 2)) != 0;
+    const has_section_or_addrspace = @as(u1, @truncate(d.flags >> 3)) != 0;
 
     var extra_index = d.sub_index;
     // const hash_u32s = file.zir.extra[extra_index..][0..4];
@@ -3277,21 +3277,21 @@ fn analyzeDecl(
 
     extra_index += 1;
     const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: {
-        const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+        const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         extra_index += 1;
         break :inst inst;
     };
     _ = align_inst;
 
     const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: {
-        const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+        const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         extra_index += 1;
         break :inst inst;
     };
     _ = section_inst;
 
     const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: {
-        const inst = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+        const inst = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         extra_index += 1;
         break :inst inst;
     };
@@ -3381,7 +3381,7 @@ fn analyzeUsingnamespaceDecl(
 ) AutodocErrors!void {
     const data = file.zir.instructions.items(.data);
 
-    const is_pub = @truncate(u1, d.flags) != 0;
+    const is_pub = @as(u1, @truncate(d.flags)) != 0;
     const value_index = file.zir.extra[d.sub_index + 6];
     const doc_comment_index = file.zir.extra[d.sub_index + 7];
 
@@ -4028,7 +4028,7 @@ fn analyzeFancyFunction(
 ) AutodocErrors!DocData.WalkResult {
     const tags = file.zir.instructions.items(.tag);
     const data = file.zir.instructions.items(.data);
-    const fn_info = file.zir.getFnInfo(@intCast(u32, inst_index));
+    const fn_info = file.zir.getFnInfo(@as(u32, @intCast(inst_index)));
 
     try self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len);
     var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity(
@@ -4108,7 +4108,7 @@ fn analyzeFancyFunction(
 
     var align_index: ?usize = null;
     if (extra.data.bits.has_align_ref) {
-        const align_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+        const align_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         align_index = self.exprs.items.len;
         _ = try self.walkRef(file, scope, parent_src, align_ref, false);
         extra_index += 1;
@@ -4125,7 +4125,7 @@ fn analyzeFancyFunction(
 
     var addrspace_index: ?usize = null;
     if (extra.data.bits.has_addrspace_ref) {
-        const addrspace_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+        const addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         addrspace_index = self.exprs.items.len;
         _ = try self.walkRef(file, scope, parent_src, addrspace_ref, false);
         extra_index += 1;
@@ -4142,7 +4142,7 @@ fn analyzeFancyFunction(
 
     var section_index: ?usize = null;
     if (extra.data.bits.has_section_ref) {
-        const section_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+        const section_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         section_index = self.exprs.items.len;
         _ = try self.walkRef(file, scope, parent_src, section_ref, false);
         extra_index += 1;
@@ -4159,7 +4159,7 @@ fn analyzeFancyFunction(
 
     var cc_index: ?usize = null;
     if (extra.data.bits.has_cc_ref and !extra.data.bits.has_cc_body) {
-        const cc_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+        const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         const cc_expr = try self.walkRef(file, scope, parent_src, cc_ref, false);
 
         cc_index = self.exprs.items.len;
@@ -4262,7 +4262,7 @@ fn analyzeFunction(
 ) AutodocErrors!DocData.WalkResult {
     const tags = file.zir.instructions.items(.tag);
     const data = file.zir.instructions.items(.data);
-    const fn_info = file.zir.getFnInfo(@intCast(u32, inst_index));
+    const fn_info = file.zir.getFnInfo(@as(u32, @intCast(inst_index)));
 
     try self.ast_nodes.ensureUnusedCapacity(self.arena, fn_info.total_params_len);
     var param_type_refs = try std.ArrayListUnmanaged(DocData.Expr).initCapacity(
@@ -4449,13 +4449,13 @@ fn collectUnionFieldInfo(
             cur_bit_bag = file.zir.extra[bit_bag_index];
             bit_bag_index += 1;
         }
-        const has_type = @truncate(u1, cur_bit_bag) != 0;
+        const has_type = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
-        const has_align = @truncate(u1, cur_bit_bag) != 0;
+        const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
-        const has_tag = @truncate(u1, cur_bit_bag) != 0;
+        const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
-        const unused = @truncate(u1, cur_bit_bag) != 0;
+        const unused = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
         _ = unused;
 
@@ -4464,7 +4464,7 @@ fn collectUnionFieldInfo(
         const doc_comment_index = file.zir.extra[extra_index];
         extra_index += 1;
         const field_type = if (has_type)
-            @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index])
+            @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]))
         else
             .void_type;
         if (has_type) extra_index += 1;
@@ -4532,13 +4532,13 @@ fn collectStructFieldInfo(
             cur_bit_bag = file.zir.extra[bit_bag_index];
             bit_bag_index += 1;
         }
-        const has_align = @truncate(u1, cur_bit_bag) != 0;
+        const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
-        const has_default = @truncate(u1, cur_bit_bag) != 0;
+        const has_default = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
         // const is_comptime = @truncate(u1, cur_bit_bag) != 0;
         cur_bit_bag >>= 1;
-        const has_type_body = @truncate(u1, cur_bit_bag) != 0;
+        const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
 
         const field_name: ?u32 = if (!is_tuple) blk: {
@@ -4558,7 +4558,7 @@ fn collectStructFieldInfo(
         if (has_type_body) {
             fields[field_i].type_body_len = file.zir.extra[extra_index];
         } else {
-            fields[field_i].type_ref = @enumFromInt(Zir.Inst.Ref, file.zir.extra[extra_index]);
+            fields[field_i].type_ref = @as(Zir.Inst.Ref, @enumFromInt(file.zir.extra[extra_index]));
         }
         extra_index += 1;
 
@@ -4855,9 +4855,9 @@ fn srcLocInfo(
     src_node: i32,
     parent_src: SrcLocInfo,
 ) !SrcLocInfo {
-    const sn = @intCast(u32, @intCast(i32, parent_src.src_node) + src_node);
+    const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node));
     const tree = try file.getTree(self.comp_module.gpa);
-    const node_idx = @bitCast(Ast.Node.Index, sn);
+    const node_idx = @as(Ast.Node.Index, @bitCast(sn));
     const tokens = tree.nodes.items(.main_token);
 
     const tok_idx = tokens[node_idx];
@@ -4876,9 +4876,9 @@ fn declIsVar(
     src_node: i32,
     parent_src: SrcLocInfo,
 ) !bool {
-    const sn = @intCast(u32, @intCast(i32, parent_src.src_node) + src_node);
+    const sn = @as(u32, @intCast(@as(i32, @intCast(parent_src.src_node)) + src_node));
     const tree = try file.getTree(self.comp_module.gpa);
-    const node_idx = @bitCast(Ast.Node.Index, sn);
+    const node_idx = @as(Ast.Node.Index, @bitCast(sn));
     const tokens = tree.nodes.items(.main_token);
     const tags = tree.tokens.items(.tag);
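
The Autodoc hunks above repeat two shapes: decoding a `u16` into a packed flags struct via `@bitCast`, and peeling single bits off a bit bag via `@truncate` to `u1`. A self-contained sketch under assumed types (`Flags` is a stand-in, not the real `Zir.Inst.StructDecl.Small`):

    const std = @import("std");

    const Flags = packed struct(u16) {
        has_src_node: bool,
        has_fields_len: bool,
        _padding: u14 = 0,
    };

    test "packed-struct @bitCast and bit-bag @truncate, new syntax" {
        // was: @bitCast(u16, flags) and @bitCast(Flags, raw)
        const raw = @as(u16, @bitCast(Flags{ .has_src_node = true, .has_fields_len = false }));
        const flags = @as(Flags, @bitCast(raw));
        try std.testing.expect(flags.has_src_node and !flags.has_fields_len);

        // was: @truncate(u1, bag) != 0
        var bag: u32 = 0b01;
        const has_value = @as(u1, @truncate(bag)) != 0;
        bag >>= 1;
        const has_align = @as(u1, @truncate(bag)) != 0;
        try std.testing.expect(has_value and !has_align);
    }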
 
src/clang.zig
@@ -117,7 +117,7 @@ pub const APFloatBaseSemantics = enum(c_int) {
 
 pub const APInt = opaque {
     pub fn getLimitedValue(self: *const APInt, comptime T: type) T {
-        return @truncate(T, ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T)));
+        return @as(T, @truncate(ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T))));
     }
     extern fn ZigClangAPInt_getLimitedValue(*const APInt, limit: u64) u64;
 };
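
The `getLimitedValue` rewrite above shows the generic case: the comptime type parameter now feeds `@as` (or the function's return type) instead of `@truncate` itself. A sketch of that shape only, not of the actual Clang binding:

    const std = @import("std");

    fn limitedValue(comptime T: type, value: u64) T {
        // was: @truncate(T, value); since the return type is T, a bare
        // @truncate(value) would also infer correctly here.
        return @as(T, @truncate(value));
    }

    test "comptime-typed truncate" {
        try std.testing.expectEqual(@as(u8, 0x34), limitedValue(u8, 0x1234));
    }
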
src/codegen.zig
@@ -108,7 +108,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian
     _ = target;
     const bits = @typeInfo(F).Float.bits;
     const Int = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = bits } });
-    const int = @bitCast(Int, f);
+    const int = @as(Int, @bitCast(f));
     mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian);
 }
 
@@ -143,18 +143,18 @@ pub fn generateLazySymbol(
     if (lazy_sym.ty.isAnyError(mod)) {
         alignment.* = 4;
         const err_names = mod.global_error_set.keys();
-        mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian);
+        mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(err_names.len)), endian);
         var offset = code.items.len;
         try code.resize((1 + err_names.len + 1) * 4);
         for (err_names) |err_name_nts| {
             const err_name = mod.intern_pool.stringToSlice(err_name_nts);
-            mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian);
+            mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian);
             offset += 4;
             try code.ensureUnusedCapacity(err_name.len + 1);
             code.appendSliceAssumeCapacity(err_name);
             code.appendAssumeCapacity(0);
         }
-        mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian);
+        mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian);
         return Result.ok;
     } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) {
         alignment.* = 1;
@@ -253,12 +253,12 @@ pub fn generateSymbol(
         },
         .err => |err| {
             const int = try mod.getErrorValue(err.name);
-            try code.writer().writeInt(u16, @intCast(u16, int), endian);
+            try code.writer().writeInt(u16, @as(u16, @intCast(int)), endian);
         },
         .error_union => |error_union| {
             const payload_ty = typed_value.ty.errorUnionPayload(mod);
             const err_val = switch (error_union.val) {
-                .err_name => |err_name| @intCast(u16, try mod.getErrorValue(err_name)),
+                .err_name => |err_name| @as(u16, @intCast(try mod.getErrorValue(err_name))),
                 .payload => @as(u16, 0),
             };
 
@@ -397,7 +397,7 @@ pub fn generateSymbol(
                             .ty = array_type.child.toType(),
                             .val = switch (aggregate.storage) {
                                 .bytes => unreachable,
-                                .elems => |elems| elems[@intCast(usize, index)],
+                                .elems => |elems| elems[@as(usize, @intCast(index))],
                                 .repeated_elem => |elem| elem,
                             }.toValue(),
                         }, code, debug_output, reloc_info)) {
@@ -417,7 +417,7 @@ pub fn generateSymbol(
                                 .ty = vector_type.child.toType(),
                                 .val = switch (aggregate.storage) {
                                     .bytes => unreachable,
-                                    .elems => |elems| elems[@intCast(usize, index)],
+                                    .elems => |elems| elems[@as(usize, @intCast(index))],
                                     .repeated_elem => |elem| elem,
                                 }.toValue(),
                             }, code, debug_output, reloc_info)) {
@@ -509,7 +509,7 @@ pub fn generateSymbol(
                         } else {
                             field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
                         }
-                        bits += @intCast(u16, field_ty.bitSize(mod));
+                        bits += @as(u16, @intCast(field_ty.bitSize(mod)));
                     }
                 } else {
                     const struct_begin = code.items.len;
@@ -642,10 +642,10 @@ fn lowerParentPtr(
             eu_payload,
             code,
             debug_output,
-            reloc_info.offset(@intCast(u32, errUnionPayloadOffset(
+            reloc_info.offset(@as(u32, @intCast(errUnionPayloadOffset(
                 mod.intern_pool.typeOf(eu_payload).toType(),
                 mod,
-            ))),
+            )))),
         ),
         .opt_payload => |opt_payload| try lowerParentPtr(
             bin_file,
@@ -661,8 +661,8 @@ fn lowerParentPtr(
             elem.base,
             code,
             debug_output,
-            reloc_info.offset(@intCast(u32, elem.index *
-                mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))),
+            reloc_info.offset(@as(u32, @intCast(elem.index *
+                mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod)))),
         ),
         .field => |field| {
             const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child;
@@ -684,10 +684,10 @@ fn lowerParentPtr(
                     .struct_type,
                     .anon_struct_type,
                     .union_type,
-                    => @intCast(u32, base_type.toType().structFieldOffset(
-                        @intCast(u32, field.index),
+                    => @as(u32, @intCast(base_type.toType().structFieldOffset(
+                        @as(u32, @intCast(field.index)),
                         mod,
-                    )),
+                    ))),
                     else => unreachable,
                 }),
             );
@@ -735,8 +735,8 @@ fn lowerDeclRef(
     });
     const endian = target.cpu.arch.endian();
     switch (ptr_width) {
-        16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian),
-        32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, vaddr), endian),
+        16 => mem.writeInt(u16, try code.addManyAsArray(2), @as(u16, @intCast(vaddr)), endian),
+        32 => mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(vaddr)), endian),
         64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
         else => unreachable,
     }
@@ -945,7 +945,7 @@ pub fn genTypedValue(
             const info = typed_value.ty.intInfo(mod);
             if (info.bits <= ptr_bits) {
                 const unsigned = switch (info.signedness) {
-                    .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)),
+                    .signed => @as(u64, @bitCast(typed_value.val.toSignedInt(mod))),
                     .unsigned => typed_value.val.toUnsignedInt(mod),
                 };
                 return GenResult.mcv(.{ .immediate = unsigned });
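
Two codegen patterns recur above: reading a float's bit pattern, and reinterpreting a signed integer as unsigned two's complement, both via `@bitCast` with the result type hoisted into `@as`. A small sketch:

    const std = @import("std");

    test "bit patterns via @as plus @bitCast" {
        // was: @bitCast(u64, f)
        const f: f64 = 1.0;
        try std.testing.expectEqual(@as(u64, 0x3ff0000000000000), @as(u64, @bitCast(f)));

        // was: @bitCast(u64, signed) for two's-complement reinterpretation
        const signed: i64 = -1;
        try std.testing.expectEqual(@as(u64, std.math.maxInt(u64)), @as(u64, @bitCast(signed)));
    }
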
src/Compilation.zig
@@ -1046,7 +1046,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
         const llvm_cpu_features: ?[*:0]const u8 = if (build_options.have_llvm and use_llvm) blk: {
             var buf = std.ArrayList(u8).init(arena);
             for (options.target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
-                const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize);
+                const index = @as(Target.Cpu.Feature.Set.Index, @intCast(index_usize));
                 const is_enabled = options.target.cpu.features.isEnabled(index);
 
                 if (feature.llvm_name) |llvm_name| {
@@ -2562,7 +2562,7 @@ pub fn totalErrorCount(self: *Compilation) u32 {
         }
     }
 
-    return @intCast(u32, total);
+    return @as(u32, @intCast(total));
 }
 
 /// This function is temporally single-threaded.
@@ -2596,7 +2596,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
     }
 
     for (self.lld_errors.items) |lld_error| {
-        const notes_len = @intCast(u32, lld_error.context_lines.len);
+        const notes_len = @as(u32, @intCast(lld_error.context_lines.len));
 
         try bundle.addRootErrorMessage(.{
             .msg = try bundle.addString(lld_error.msg),
@@ -2753,7 +2753,7 @@ pub const ErrorNoteHashContext = struct {
             std.hash.autoHash(&hasher, src.span_main);
         }
 
-        return @truncate(u32, hasher.final());
+        return @as(u32, @truncate(hasher.final()));
     }
 
     pub fn eql(
@@ -2830,8 +2830,8 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
                 .span_start = span.start,
                 .span_main = span.main,
                 .span_end = span.end,
-                .line = @intCast(u32, loc.line),
-                .column = @intCast(u32, loc.column),
+                .line = @as(u32, @intCast(loc.line)),
+                .column = @as(u32, @intCast(loc.column)),
                 .source_line = 0,
             }),
         });
@@ -2842,13 +2842,13 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
         .span_start = err_span.start,
         .span_main = err_span.main,
         .span_end = err_span.end,
-        .line = @intCast(u32, err_loc.line),
-        .column = @intCast(u32, err_loc.column),
+        .line = @as(u32, @intCast(err_loc.line)),
+        .column = @as(u32, @intCast(err_loc.column)),
         .source_line = if (module_err_msg.src_loc.lazy == .entire_file)
             0
         else
             try eb.addString(err_loc.source_line),
-        .reference_trace_len = @intCast(u32, ref_traces.items.len),
+        .reference_trace_len = @as(u32, @intCast(ref_traces.items.len)),
     });
 
     for (ref_traces.items) |rt| {
@@ -2874,8 +2874,8 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
                 .span_start = span.start,
                 .span_main = span.main,
                 .span_end = span.end,
-                .line = @intCast(u32, loc.line),
-                .column = @intCast(u32, loc.column),
+                .line = @as(u32, @intCast(loc.line)),
+                .column = @as(u32, @intCast(loc.column)),
                 .source_line = if (err_loc.eql(loc)) 0 else try eb.addString(loc.source_line),
             }),
         }, .{ .eb = eb });
@@ -2884,7 +2884,7 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
         }
     }
 
-    const notes_len = @intCast(u32, notes.entries.len);
+    const notes_len = @as(u32, @intCast(notes.entries.len));
 
     try eb.addRootErrorMessage(.{
         .msg = try eb.addString(module_err_msg.msg),
@@ -2919,7 +2919,7 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void {
             }
             const token_starts = file.tree.tokens.items(.start);
             const start = token_starts[item.data.token] + item.data.byte_offset;
-            const end = start + @intCast(u32, file.tree.tokenSlice(item.data.token).len) - item.data.byte_offset;
+            const end = start + @as(u32, @intCast(file.tree.tokenSlice(item.data.token).len)) - item.data.byte_offset;
             break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start };
         };
         const err_loc = std.zig.findLineColumn(file.source, err_span.main);
@@ -2935,8 +2935,8 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void {
                     .span_start = err_span.start,
                     .span_main = err_span.main,
                     .span_end = err_span.end,
-                    .line = @intCast(u32, err_loc.line),
-                    .column = @intCast(u32, err_loc.column),
+                    .line = @as(u32, @intCast(err_loc.line)),
+                    .column = @as(u32, @intCast(err_loc.column)),
                     .source_line = try eb.addString(err_loc.source_line),
                 }),
                 .notes_len = item.data.notesLen(file.zir),
@@ -2956,7 +2956,7 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void {
                     }
                     const token_starts = file.tree.tokens.items(.start);
                     const start = token_starts[note_item.data.token] + note_item.data.byte_offset;
-                    const end = start + @intCast(u32, file.tree.tokenSlice(note_item.data.token).len) - item.data.byte_offset;
+                    const end = start + @as(u32, @intCast(file.tree.tokenSlice(note_item.data.token).len)) - item.data.byte_offset;
                     break :blk Module.SrcLoc.Span{ .start = start, .end = end, .main = start };
                 };
                 const loc = std.zig.findLineColumn(file.source, span.main);
@@ -2970,8 +2970,8 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void {
                         .span_start = span.start,
                         .span_main = span.main,
                         .span_end = span.end,
-                        .line = @intCast(u32, loc.line),
-                        .column = @intCast(u32, loc.column),
+                        .line = @as(u32, @intCast(loc.line)),
+                        .column = @as(u32, @intCast(loc.column)),
                         .source_line = if (loc.eql(err_loc))
                             0
                         else
@@ -4302,7 +4302,7 @@ pub fn addCCArgs(
             const all_features_list = target.cpu.arch.allFeaturesList();
             try argv.ensureUnusedCapacity(all_features_list.len * 4);
             for (all_features_list, 0..) |feature, index_usize| {
-                const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
+                const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize));
                 const is_enabled = target.cpu.features.isEnabled(index);
 
                 if (feature.llvm_name) |llvm_name| {
@@ -5172,7 +5172,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
     });
 
     for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
-        const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
+        const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize));
         const is_enabled = target.cpu.features.isEnabled(index);
         if (is_enabled) {
             try buffer.writer().print("        .{},\n", .{std.zig.fmtId(feature.name)});
src/crash_report.zig
@@ -204,49 +204,49 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
 
     const stack_ctx: StackContext = switch (builtin.cpu.arch) {
         .x86 => ctx: {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
-            const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
-            const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+            const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP]));
+            const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP]));
             break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
         },
         .x86_64 => ctx: {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
             const ip = switch (builtin.os.tag) {
-                .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
-                .freebsd => @intCast(usize, ctx.mcontext.rip),
-                .openbsd => @intCast(usize, ctx.sc_rip),
-                .macos => @intCast(usize, ctx.mcontext.ss.rip),
+                .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.rip)),
+                .openbsd => @as(usize, @intCast(ctx.sc_rip)),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)),
                 else => unreachable,
             };
             const bp = switch (builtin.os.tag) {
-                .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
-                .openbsd => @intCast(usize, ctx.sc_rbp),
-                .freebsd => @intCast(usize, ctx.mcontext.rbp),
-                .macos => @intCast(usize, ctx.mcontext.ss.rbp),
+                .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])),
+                .openbsd => @as(usize, @intCast(ctx.sc_rbp)),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)),
                 else => unreachable,
             };
             break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
         },
         .arm => ctx: {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
-            const ip = @intCast(usize, ctx.mcontext.arm_pc);
-            const bp = @intCast(usize, ctx.mcontext.arm_fp);
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+            const ip = @as(usize, @intCast(ctx.mcontext.arm_pc));
+            const bp = @as(usize, @intCast(ctx.mcontext.arm_fp));
             break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
         },
         .aarch64 => ctx: {
-            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
             const ip = switch (native_os) {
-                .macos => @intCast(usize, ctx.mcontext.ss.pc),
-                .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]),
-                .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr),
-                else => @intCast(usize, ctx.mcontext.pc),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)),
+                .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)),
+                else => @as(usize, @intCast(ctx.mcontext.pc)),
             };
             // x29 is the ABI-designated frame pointer
             const bp = switch (native_os) {
-                .macos => @intCast(usize, ctx.mcontext.ss.fp),
-                .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]),
-                .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]),
-                else => @intCast(usize, ctx.mcontext.regs[29]),
+                .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)),
+                .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])),
+                .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])),
+                else => @as(usize, @intCast(ctx.mcontext.regs[29])),
             };
             break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
         },
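
These crash-handler hunks are among the manual fixups the commit message calls out: `@alignCast` no longer takes an alignment argument, so the old `@ptrCast(*const T, @alignCast(@alignOf(T), p))` chain becomes `@ptrCast(@alignCast(p))` with the result type supplied by the declaration. A sketch using a hypothetical `Regs` struct in place of `os.ucontext_t`:

    const std = @import("std");

    const Regs = struct { ip: usize, bp: usize };

    test "ptrCast/alignCast chain typed by the declaration" {
        const regs: Regs = .{ .ip = 0x1000, .bp = 0x2000 };
        const erased: ?*const anyopaque = &regs;
        // was: @ptrCast(*const Regs, @alignCast(@alignOf(Regs), erased))
        const ctx: *const Regs = @ptrCast(@alignCast(erased));
        try std.testing.expectEqual(@as(usize, 0x1000), ctx.ip);
    }
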
src/glibc.zig
@@ -779,13 +779,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !vo
             // Test whether the inclusion applies to our current library and target.
             const ok_lib_and_target =
                 (lib_index == lib_i) and
-                ((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0);
+                ((targets & (@as(u32, 1) << @as(u5, @intCast(target_targ_index)))) != 0);
 
             while (true) {
                 const byte = metadata.inclusions[inc_i];
                 inc_i += 1;
                 const last = (byte & 0b1000_0000) != 0;
-                const ver_i = @truncate(u7, byte);
+                const ver_i = @as(u7, @truncate(byte));
                 if (ok_lib_and_target and ver_i <= target_ver_index) {
                     versions_buffer[versions_len] = ver_i;
                     versions_len += 1;
@@ -913,13 +913,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !vo
             // Test whether the inclusion applies to our current library and target.
             const ok_lib_and_target =
                 (lib_index == lib_i) and
-                ((targets & (@as(u32, 1) << @intCast(u5, target_targ_index))) != 0);
+                ((targets & (@as(u32, 1) << @as(u5, @intCast(target_targ_index)))) != 0);
 
             while (true) {
                 const byte = metadata.inclusions[inc_i];
                 inc_i += 1;
                 const last = (byte & 0b1000_0000) != 0;
-                const ver_i = @truncate(u7, byte);
+                const ver_i = @as(u7, @truncate(byte));
                 if (ok_lib_and_target and ver_i <= target_ver_index) {
                     versions_buffer[versions_len] = ver_i;
                     versions_len += 1;
src/InternPool.zig
@@ -80,7 +80,7 @@ const KeyAdapter = struct {
 
     pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool {
         _ = b_void;
-        return ctx.intern_pool.indexToKey(@enumFromInt(Index, b_map_index)).eql(a, ctx.intern_pool);
+        return ctx.intern_pool.indexToKey(@as(Index, @enumFromInt(b_map_index))).eql(a, ctx.intern_pool);
     }
 
     pub fn hash(ctx: @This(), a: Key) u32 {
@@ -95,7 +95,7 @@ pub const OptionalMapIndex = enum(u32) {
 
     pub fn unwrap(oi: OptionalMapIndex) ?MapIndex {
         if (oi == .none) return null;
-        return @enumFromInt(MapIndex, @intFromEnum(oi));
+        return @as(MapIndex, @enumFromInt(@intFromEnum(oi)));
     }
 };
 
@@ -104,7 +104,7 @@ pub const MapIndex = enum(u32) {
     _,
 
     pub fn toOptional(i: MapIndex) OptionalMapIndex {
-        return @enumFromInt(OptionalMapIndex, @intFromEnum(i));
+        return @as(OptionalMapIndex, @enumFromInt(@intFromEnum(i)));
     }
 };
 
@@ -114,7 +114,7 @@ pub const RuntimeIndex = enum(u32) {
     _,
 
     pub fn increment(ri: *RuntimeIndex) void {
-        ri.* = @enumFromInt(RuntimeIndex, @intFromEnum(ri.*) + 1);
+        ri.* = @as(RuntimeIndex, @enumFromInt(@intFromEnum(ri.*) + 1));
     }
 };
 
@@ -130,11 +130,11 @@ pub const NullTerminatedString = enum(u32) {
     _,
 
     pub fn toString(self: NullTerminatedString) String {
-        return @enumFromInt(String, @intFromEnum(self));
+        return @as(String, @enumFromInt(@intFromEnum(self)));
     }
 
     pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString {
-        return @enumFromInt(OptionalNullTerminatedString, @intFromEnum(self));
+        return @as(OptionalNullTerminatedString, @enumFromInt(@intFromEnum(self)));
     }
 
     const Adapter = struct {
@@ -196,7 +196,7 @@ pub const OptionalNullTerminatedString = enum(u32) {
 
     pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString {
         if (oi == .none) return null;
-        return @enumFromInt(NullTerminatedString, @intFromEnum(oi));
+        return @as(NullTerminatedString, @enumFromInt(@intFromEnum(oi)));
     }
 };
 
@@ -282,7 +282,7 @@ pub const Key = union(enum) {
             const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
             const adapter: NullTerminatedString.Adapter = .{ .strings = self.names };
             const field_index = map.getIndexAdapted(name, adapter) orelse return null;
-            return @intCast(u32, field_index);
+            return @as(u32, @intCast(field_index));
         }
     };
 
@@ -420,7 +420,7 @@ pub const Key = union(enum) {
             const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
             const adapter: NullTerminatedString.Adapter = .{ .strings = self.names };
             const field_index = map.getIndexAdapted(name, adapter) orelse return null;
-            return @intCast(u32, field_index);
+            return @as(u32, @intCast(field_index));
         }
 
         /// Look up field index based on tag value.
@@ -440,7 +440,7 @@ pub const Key = union(enum) {
                 const map = &ip.maps.items[@intFromEnum(values_map)];
                 const adapter: Index.Adapter = .{ .indexes = self.values };
                 const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
-                return @intCast(u32, field_index);
+                return @as(u32, @intCast(field_index));
             }
             // Auto-numbered enum. Convert `int_tag_val` to field index.
             const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
@@ -511,12 +511,12 @@ pub const Key = union(enum) {
 
         pub fn paramIsComptime(self: @This(), i: u5) bool {
             assert(i < self.param_types.len);
-            return @truncate(u1, self.comptime_bits >> i) != 0;
+            return @as(u1, @truncate(self.comptime_bits >> i)) != 0;
         }
 
         pub fn paramIsNoalias(self: @This(), i: u5) bool {
             assert(i < self.param_types.len);
-            return @truncate(u1, self.noalias_bits >> i) != 0;
+            return @as(u1, @truncate(self.noalias_bits >> i)) != 0;
         }
     };
 
@@ -685,7 +685,7 @@ pub const Key = union(enum) {
     };
 
     pub fn hash32(key: Key, ip: *const InternPool) u32 {
-        return @truncate(u32, key.hash64(ip));
+        return @as(u32, @truncate(key.hash64(ip)));
     }
 
     pub fn hash64(key: Key, ip: *const InternPool) u64 {
@@ -767,7 +767,7 @@ pub const Key = union(enum) {
                 switch (float.storage) {
                     inline else => |val| std.hash.autoHash(
                         &hasher,
-                        @bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val),
+                        @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), @bitCast(val)),
                     ),
                 }
                 return hasher.final();
@@ -812,18 +812,18 @@ pub const Key = union(enum) {
 
                 if (child == .u8_type) {
                     switch (aggregate.storage) {
-                        .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| {
+                        .bytes => |bytes| for (bytes[0..@as(usize, @intCast(len))]) |byte| {
                             std.hash.autoHash(&hasher, KeyTag.int);
                             std.hash.autoHash(&hasher, byte);
                         },
-                        .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| {
+                        .elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| {
                             const elem_key = ip.indexToKey(elem);
                             std.hash.autoHash(&hasher, @as(KeyTag, elem_key));
                             switch (elem_key) {
                                 .undef => {},
                                 .int => |int| std.hash.autoHash(
                                     &hasher,
-                                    @intCast(u8, int.storage.u64),
+                                    @as(u8, @intCast(int.storage.u64)),
                                 ),
                                 else => unreachable,
                             }
@@ -837,7 +837,7 @@ pub const Key = union(enum) {
                                     .undef => {},
                                     .int => |int| std.hash.autoHash(
                                         &hasher,
-                                        @intCast(u8, int.storage.u64),
+                                        @as(u8, @intCast(int.storage.u64)),
                                     ),
                                     else => unreachable,
                                 }
@@ -849,7 +849,7 @@ pub const Key = union(enum) {
 
                 switch (aggregate.storage) {
                     .bytes => unreachable,
-                    .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem|
+                    .elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem|
                         std.hash.autoHash(&hasher, elem),
                     .repeated_elem => |elem| {
                         var remaining = len;
@@ -1061,10 +1061,10 @@ pub const Key = union(enum) {
                     // These are strange: we'll sometimes represent them as f128, even if the
                     // underlying type is smaller. f80 is an exception: see float_c_longdouble_f80.
                     const a_val = switch (a_info.storage) {
-                        inline else => |val| @floatCast(f128, val),
+                        inline else => |val| @as(f128, @floatCast(val)),
                     };
                     const b_val = switch (b_info.storage) {
-                        inline else => |val| @floatCast(f128, val),
+                        inline else => |val| @as(f128, @floatCast(val)),
                     };
                     return a_val == b_val;
                 }
@@ -1092,7 +1092,7 @@ pub const Key = union(enum) {
                 const len = ip.aggregateTypeLen(a_info.ty);
                 const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?;
                 if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) {
-                    for (0..@intCast(usize, len)) |elem_index| {
+                    for (0..@as(usize, @intCast(len))) |elem_index| {
                         const a_elem = switch (a_info.storage) {
                             .bytes => |bytes| ip.getIfExists(.{ .int = .{
                                 .ty = .u8_type,
@@ -1119,16 +1119,16 @@ pub const Key = union(enum) {
                         const b_bytes = b_info.storage.bytes;
                         return std.mem.eql(
                             u8,
-                            a_bytes[0..@intCast(usize, len)],
-                            b_bytes[0..@intCast(usize, len)],
+                            a_bytes[0..@as(usize, @intCast(len))],
+                            b_bytes[0..@as(usize, @intCast(len))],
                         );
                     },
                     .elems => |a_elems| {
                         const b_elems = b_info.storage.elems;
                         return std.mem.eql(
                             Index,
-                            a_elems[0..@intCast(usize, len)],
-                            b_elems[0..@intCast(usize, len)],
+                            a_elems[0..@as(usize, @intCast(len))],
+                            b_elems[0..@as(usize, @intCast(len))],
                         );
                     },
                     .repeated_elem => |a_elem| {
@@ -2291,7 +2291,7 @@ pub const Alignment = enum(u6) {
     pub fn fromByteUnits(n: u64) Alignment {
         if (n == 0) return .none;
         assert(std.math.isPowerOfTwo(n));
-        return @enumFromInt(Alignment, @ctz(n));
+        return @as(Alignment, @enumFromInt(@ctz(n)));
     }
 
     pub fn fromNonzeroByteUnits(n: u64) Alignment {
@@ -2368,11 +2368,11 @@ pub const PackedU64 = packed struct(u64) {
     b: u32,
 
     pub fn get(x: PackedU64) u64 {
-        return @bitCast(u64, x);
+        return @as(u64, @bitCast(x));
     }
 
     pub fn init(x: u64) PackedU64 {
-        return @bitCast(PackedU64, x);
+        return @as(PackedU64, @bitCast(x));
     }
 };
 
@@ -2435,14 +2435,14 @@ pub const Float64 = struct {
 
     pub fn get(self: Float64) f64 {
         const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32);
-        return @bitCast(f64, int_bits);
+        return @as(f64, @bitCast(int_bits));
     }
 
     fn pack(val: f64) Float64 {
-        const bits = @bitCast(u64, val);
+        const bits = @as(u64, @bitCast(val));
         return .{
-            .piece0 = @truncate(u32, bits),
-            .piece1 = @truncate(u32, bits >> 32),
+            .piece0 = @as(u32, @truncate(bits)),
+            .piece1 = @as(u32, @truncate(bits >> 32)),
         };
     }
 };
@@ -2457,15 +2457,15 @@ pub const Float80 = struct {
         const int_bits = @as(u80, self.piece0) |
             (@as(u80, self.piece1) << 32) |
             (@as(u80, self.piece2) << 64);
-        return @bitCast(f80, int_bits);
+        return @as(f80, @bitCast(int_bits));
     }
 
     fn pack(val: f80) Float80 {
-        const bits = @bitCast(u80, val);
+        const bits = @as(u80, @bitCast(val));
         return .{
-            .piece0 = @truncate(u32, bits),
-            .piece1 = @truncate(u32, bits >> 32),
-            .piece2 = @truncate(u16, bits >> 64),
+            .piece0 = @as(u32, @truncate(bits)),
+            .piece1 = @as(u32, @truncate(bits >> 32)),
+            .piece2 = @as(u16, @truncate(bits >> 64)),
         };
     }
 };
@@ -2482,16 +2482,16 @@ pub const Float128 = struct {
             (@as(u128, self.piece1) << 32) |
             (@as(u128, self.piece2) << 64) |
             (@as(u128, self.piece3) << 96);
-        return @bitCast(f128, int_bits);
+        return @as(f128, @bitCast(int_bits));
     }
 
     fn pack(val: f128) Float128 {
-        const bits = @bitCast(u128, val);
+        const bits = @as(u128, @bitCast(val));
         return .{
-            .piece0 = @truncate(u32, bits),
-            .piece1 = @truncate(u32, bits >> 32),
-            .piece2 = @truncate(u32, bits >> 64),
-            .piece3 = @truncate(u32, bits >> 96),
+            .piece0 = @as(u32, @truncate(bits)),
+            .piece1 = @as(u32, @truncate(bits >> 32)),
+            .piece2 = @as(u32, @truncate(bits >> 64)),
+            .piece3 = @as(u32, @truncate(bits >> 96)),
         };
     }
 };
@@ -2575,13 +2575,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         .type_int_signed => .{
             .int_type = .{
                 .signedness = .signed,
-                .bits = @intCast(u16, data),
+                .bits = @as(u16, @intCast(data)),
             },
         },
         .type_int_unsigned => .{
             .int_type = .{
                 .signedness = .unsigned,
-                .bits = @intCast(u16, data),
+                .bits = @as(u16, @intCast(data)),
             },
         },
         .type_array_big => {
@@ -2600,8 +2600,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
                 .sentinel = .none,
             } };
         },
-        .simple_type => .{ .simple_type = @enumFromInt(SimpleType, data) },
-        .simple_value => .{ .simple_value = @enumFromInt(SimpleValue, data) },
+        .simple_type => .{ .simple_type = @as(SimpleType, @enumFromInt(data)) },
+        .simple_value => .{ .simple_value = @as(SimpleValue, @enumFromInt(data)) },
 
         .type_vector => {
             const vector_info = ip.extraData(Vector, data);
@@ -2620,8 +2620,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             return .{ .ptr_type = ptr_info };
         },
 
-        .type_optional => .{ .opt_type = @enumFromInt(Index, data) },
-        .type_anyframe => .{ .anyframe_type = @enumFromInt(Index, data) },
+        .type_optional => .{ .opt_type = @as(Index, @enumFromInt(data)) },
+        .type_anyframe => .{ .anyframe_type = @as(Index, @enumFromInt(data)) },
 
         .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) },
         .type_error_set => {
@@ -2629,17 +2629,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             const names_len = error_set.data.names_len;
             const names = ip.extra.items[error_set.end..][0..names_len];
             return .{ .error_set_type = .{
-                .names = @ptrCast([]const NullTerminatedString, names),
+                .names = @as([]const NullTerminatedString, @ptrCast(names)),
                 .names_map = error_set.data.names_map.toOptional(),
             } };
         },
         .type_inferred_error_set => .{
-            .inferred_error_set_type = @enumFromInt(Module.Fn.InferredErrorSet.Index, data),
+            .inferred_error_set_type = @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(data)),
         },
 
         .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
         .type_struct => {
-            const struct_index = @enumFromInt(Module.Struct.OptionalIndex, data);
+            const struct_index = @as(Module.Struct.OptionalIndex, @enumFromInt(data));
             const namespace = if (struct_index.unwrap()) |i|
                 ip.structPtrConst(i).namespace.toOptional()
             else
@@ -2651,7 +2651,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .type_struct_ns => .{ .struct_type = .{
             .index = .none,
-            .namespace = @enumFromInt(Module.Namespace.Index, data).toOptional(),
+            .namespace = @as(Module.Namespace.Index, @enumFromInt(data)).toOptional(),
         } },
 
         .type_struct_anon => {
@@ -2661,9 +2661,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
             const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len];
             return .{ .anon_struct_type = .{
-                .types = @ptrCast([]const Index, types),
-                .values = @ptrCast([]const Index, values),
-                .names = @ptrCast([]const NullTerminatedString, names),
+                .types = @as([]const Index, @ptrCast(types)),
+                .values = @as([]const Index, @ptrCast(values)),
+                .names = @as([]const NullTerminatedString, @ptrCast(names)),
             } };
         },
         .type_tuple_anon => {
@@ -2672,30 +2672,30 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
             const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
             return .{ .anon_struct_type = .{
-                .types = @ptrCast([]const Index, types),
-                .values = @ptrCast([]const Index, values),
+                .types = @as([]const Index, @ptrCast(types)),
+                .values = @as([]const Index, @ptrCast(values)),
                 .names = &.{},
             } };
         },
 
         .type_union_untagged => .{ .union_type = .{
-            .index = @enumFromInt(Module.Union.Index, data),
+            .index = @as(Module.Union.Index, @enumFromInt(data)),
             .runtime_tag = .none,
         } },
         .type_union_tagged => .{ .union_type = .{
-            .index = @enumFromInt(Module.Union.Index, data),
+            .index = @as(Module.Union.Index, @enumFromInt(data)),
             .runtime_tag = .tagged,
         } },
         .type_union_safety => .{ .union_type = .{
-            .index = @enumFromInt(Module.Union.Index, data),
+            .index = @as(Module.Union.Index, @enumFromInt(data)),
             .runtime_tag = .safety,
         } },
 
         .type_enum_auto => {
             const enum_auto = ip.extraDataTrail(EnumAuto, data);
-            const names = @ptrCast(
+            const names = @as(
                 []const NullTerminatedString,
-                ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len],
+                @ptrCast(ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len]),
             );
             return .{ .enum_type = .{
                 .decl = enum_auto.data.decl,
@@ -2712,10 +2712,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive),
         .type_function => .{ .func_type = ip.indexToKeyFuncType(data) },
 
-        .undef => .{ .undef = @enumFromInt(Index, data) },
+        .undef => .{ .undef = @as(Index, @enumFromInt(data)) },
         .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) },
         .opt_null => .{ .opt = .{
-            .ty = @enumFromInt(Index, data),
+            .ty = @as(Index, @enumFromInt(data)),
             .val = .none,
         } },
         .opt_payload => {
@@ -2877,7 +2877,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         } },
         .int_i32 => .{ .int = .{
             .ty = .i32_type,
-            .storage = .{ .i64 = @bitCast(i32, data) },
+            .storage = .{ .i64 = @as(i32, @bitCast(data)) },
         } },
         .int_usize => .{ .int = .{
             .ty = .usize_type,
@@ -2889,7 +2889,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         } },
         .int_comptime_int_i32 => .{ .int = .{
             .ty = .comptime_int_type,
-            .storage = .{ .i64 = @bitCast(i32, data) },
+            .storage = .{ .i64 = @as(i32, @bitCast(data)) },
         } },
         .int_positive => ip.indexToKeyBigInt(data, true),
         .int_negative => ip.indexToKeyBigInt(data, false),
@@ -2913,11 +2913,11 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .float_f16 => .{ .float = .{
             .ty = .f16_type,
-            .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) },
+            .storage = .{ .f16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) },
         } },
         .float_f32 => .{ .float = .{
             .ty = .f32_type,
-            .storage = .{ .f32 = @bitCast(f32, data) },
+            .storage = .{ .f32 = @as(f32, @bitCast(data)) },
         } },
         .float_f64 => .{ .float = .{
             .ty = .f64_type,
@@ -2959,13 +2959,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) },
         .func => .{ .func = ip.extraData(Tag.Func, data) },
         .only_possible_value => {
-            const ty = @enumFromInt(Index, data);
+            const ty = @as(Index, @enumFromInt(data));
             const ty_item = ip.items.get(@intFromEnum(ty));
             return switch (ty_item.tag) {
                 .type_array_big => {
-                    const sentinel = @ptrCast(
+                    const sentinel = @as(
                         *const [1]Index,
-                        &ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?],
+                        @ptrCast(&ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]),
                     );
                     return .{ .aggregate = .{
                         .ty = ty,
@@ -2994,7 +2994,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
                     const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
                     return .{ .aggregate = .{
                         .ty = ty,
-                        .storage = .{ .elems = @ptrCast([]const Index, values) },
+                        .storage = .{ .elems = @as([]const Index, @ptrCast(values)) },
                     } };
                 },
 
@@ -3010,7 +3010,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .bytes => {
             const extra = ip.extraData(Bytes, data);
-            const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty));
+            const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty)));
             return .{ .aggregate = .{
                 .ty = extra.ty,
                 .storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(extra.bytes)..][0..len] },
@@ -3018,8 +3018,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
         },
         .aggregate => {
             const extra = ip.extraDataTrail(Tag.Aggregate, data);
-            const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty));
-            const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]);
+            const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty)));
+            const fields = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..len]));
             return .{ .aggregate = .{
                 .ty = extra.data.ty,
                 .storage = .{ .elems = fields },
@@ -3048,14 +3048,14 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
                 .val = .{ .payload = extra.val },
             } };
         },
-        .enum_literal => .{ .enum_literal = @enumFromInt(NullTerminatedString, data) },
+        .enum_literal => .{ .enum_literal = @as(NullTerminatedString, @enumFromInt(data)) },
         .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) },
 
         .memoized_call => {
             const extra = ip.extraDataTrail(MemoizedCall, data);
             return .{ .memoized_call = .{
                 .func = extra.data.func,
-                .arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]),
+                .arg_values = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len])),
                 .result = extra.data.result,
             } };
         },
@@ -3064,9 +3064,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
 
 fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType {
     const type_function = ip.extraDataTrail(TypeFunction, data);
-    const param_types = @ptrCast(
+    const param_types = @as(
         []Index,
-        ip.extra.items[type_function.end..][0..type_function.data.params_len],
+        @ptrCast(ip.extra.items[type_function.end..][0..type_function.data.params_len]),
     );
     return .{
         .param_types = param_types,
@@ -3087,13 +3087,13 @@ fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType {
 
 fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
     const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
-    const names = @ptrCast(
+    const names = @as(
         []const NullTerminatedString,
-        ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len],
+        @ptrCast(ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len]),
     );
-    const values = if (enum_explicit.data.values_map != .none) @ptrCast(
+    const values = if (enum_explicit.data.values_map != .none) @as(
         []const Index,
-        ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len],
+        @ptrCast(ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len]),
     ) else &[0]Index{};
 
     return .{ .enum_type = .{
@@ -3122,7 +3122,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key
 pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
     const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
-    if (gop.found_existing) return @enumFromInt(Index, gop.index);
+    if (gop.found_existing) return @as(Index, @enumFromInt(gop.index));
     try ip.items.ensureUnusedCapacity(gpa, 1);
     switch (key) {
         .int_type => |int_type| {
@@ -3150,7 +3150,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     .tag = .type_slice,
                     .data = @intFromEnum(ptr_type_index),
                 });
-                return @enumFromInt(Index, ip.items.len - 1);
+                return @as(Index, @enumFromInt(ip.items.len - 1));
             }
 
             var ptr_type_adjusted = ptr_type;
@@ -3174,7 +3174,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                             .child = array_type.child,
                         }),
                     });
-                    return @enumFromInt(Index, ip.items.len - 1);
+                    return @as(Index, @enumFromInt(ip.items.len - 1));
                 }
             }
 
@@ -3223,7 +3223,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan));
             const names_map = try ip.addMap(gpa);
             try addStringsToMap(ip, gpa, names_map, error_set_type.names);
-            const names_len = @intCast(u32, error_set_type.names.len);
+            const names_len = @as(u32, @intCast(error_set_type.names.len));
             try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len);
             ip.items.appendAssumeCapacity(.{
                 .tag = .type_error_set,
@@ -3232,7 +3232,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     .names_map = names_map,
                 }),
             });
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names));
+            ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(error_set_type.names)));
         },
         .inferred_error_set_type => |ies_index| {
             ip.items.appendAssumeCapacity(.{
@@ -3284,7 +3284,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             assert(anon_struct_type.types.len == anon_struct_type.values.len);
             for (anon_struct_type.types) |elem| assert(elem != .none);
 
-            const fields_len = @intCast(u32, anon_struct_type.types.len);
+            const fields_len = @as(u32, @intCast(anon_struct_type.types.len));
             if (anon_struct_type.names.len == 0) {
                 try ip.extra.ensureUnusedCapacity(
                     gpa,
@@ -3296,9 +3296,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         .fields_len = fields_len,
                     }),
                 });
-                ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types));
-                ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values));
-                return @enumFromInt(Index, ip.items.len - 1);
+                ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.types)));
+                ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.values)));
+                return @as(Index, @enumFromInt(ip.items.len - 1));
             }
 
             assert(anon_struct_type.names.len == anon_struct_type.types.len);
@@ -3313,10 +3313,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     .fields_len = fields_len,
                 }),
             });
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types));
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values));
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names));
-            return @enumFromInt(Index, ip.items.len - 1);
+            ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.types)));
+            ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.values)));
+            ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(anon_struct_type.names)));
+            return @as(Index, @enumFromInt(ip.items.len - 1));
         },
 
         .union_type => |union_type| {
@@ -3348,7 +3348,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     const names_map = try ip.addMap(gpa);
                     try addStringsToMap(ip, gpa, names_map, enum_type.names);
 
-                    const fields_len = @intCast(u32, enum_type.names.len);
+                    const fields_len = @as(u32, @intCast(enum_type.names.len));
                     try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
                         fields_len);
                     ip.items.appendAssumeCapacity(.{
@@ -3361,8 +3361,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                             .fields_len = fields_len,
                         }),
                     });
-                    ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names));
-                    return @enumFromInt(Index, ip.items.len - 1);
+                    ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names)));
+                    return @as(Index, @enumFromInt(ip.items.len - 1));
                 },
                 .explicit => return finishGetEnum(ip, gpa, enum_type, .type_enum_explicit),
                 .nonexhaustive => return finishGetEnum(ip, gpa, enum_type, .type_enum_nonexhaustive),
@@ -3373,7 +3373,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             assert(func_type.return_type != .none);
             for (func_type.param_types) |param_type| assert(param_type != .none);
 
-            const params_len = @intCast(u32, func_type.param_types.len);
+            const params_len = @as(u32, @intCast(func_type.param_types.len));
 
             try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len +
                 params_len);
@@ -3397,7 +3397,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     },
                 }),
             });
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types));
+            ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(func_type.param_types)));
         },
 
         .variable => |variable| {
@@ -3559,7 +3559,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     });
                 },
             }
-            assert(ptr.ty == ip.indexToKey(@enumFromInt(Index, ip.items.len - 1)).ptr.ty);
+            assert(ptr.ty == ip.indexToKey(@as(Index, @enumFromInt(ip.items.len - 1))).ptr.ty);
         },
 
         .opt => |opt| {
@@ -3593,7 +3593,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                             .lazy_ty = lazy_ty,
                         }),
                     });
-                    return @enumFromInt(Index, ip.items.len - 1);
+                    return @as(Index, @enumFromInt(ip.items.len - 1));
                 },
             }
             switch (int.ty) {
@@ -3608,7 +3608,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     inline .u64, .i64 => |x| {
                         ip.items.appendAssumeCapacity(.{
                             .tag = .int_u8,
-                            .data = @intCast(u8, x),
+                            .data = @as(u8, @intCast(x)),
                         });
                         break :b;
                     },
@@ -3625,7 +3625,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     inline .u64, .i64 => |x| {
                         ip.items.appendAssumeCapacity(.{
                             .tag = .int_u16,
-                            .data = @intCast(u16, x),
+                            .data = @as(u16, @intCast(x)),
                         });
                         break :b;
                     },
@@ -3642,7 +3642,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     inline .u64, .i64 => |x| {
                         ip.items.appendAssumeCapacity(.{
                             .tag = .int_u32,
-                            .data = @intCast(u32, x),
+                            .data = @as(u32, @intCast(x)),
                         });
                         break :b;
                     },
@@ -3653,14 +3653,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         const casted = big_int.to(i32) catch unreachable;
                         ip.items.appendAssumeCapacity(.{
                             .tag = .int_i32,
-                            .data = @bitCast(u32, casted),
+                            .data = @as(u32, @bitCast(casted)),
                         });
                         break :b;
                     },
                     inline .u64, .i64 => |x| {
                         ip.items.appendAssumeCapacity(.{
                             .tag = .int_i32,
-                            .data = @bitCast(u32, @intCast(i32, x)),
+                            .data = @as(u32, @bitCast(@as(i32, @intCast(x)))),
                         });
                         break :b;
                     },
@@ -3699,7 +3699,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         if (big_int.to(i32)) |casted| {
                             ip.items.appendAssumeCapacity(.{
                                 .tag = .int_comptime_int_i32,
-                                .data = @bitCast(u32, casted),
+                                .data = @as(u32, @bitCast(casted)),
                             });
                             break :b;
                         } else |_| {}
@@ -3715,7 +3715,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         if (std.math.cast(i32, x)) |casted| {
                             ip.items.appendAssumeCapacity(.{
                                 .tag = .int_comptime_int_i32,
-                                .data = @bitCast(u32, casted),
+                                .data = @as(u32, @bitCast(casted)),
                             });
                             break :b;
                         }
@@ -3734,7 +3734,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                                 .value = casted,
                             }),
                         });
-                        return @enumFromInt(Index, ip.items.len - 1);
+                        return @as(Index, @enumFromInt(ip.items.len - 1));
                     } else |_| {}
 
                     const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
@@ -3749,7 +3749,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                                 .value = casted,
                             }),
                         });
-                        return @enumFromInt(Index, ip.items.len - 1);
+                        return @as(Index, @enumFromInt(ip.items.len - 1));
                     }
 
                     var buf: [2]Limb = undefined;
@@ -3816,11 +3816,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             switch (float.ty) {
                 .f16_type => ip.items.appendAssumeCapacity(.{
                     .tag = .float_f16,
-                    .data = @bitCast(u16, float.storage.f16),
+                    .data = @as(u16, @bitCast(float.storage.f16)),
                 }),
                 .f32_type => ip.items.appendAssumeCapacity(.{
                     .tag = .float_f32,
-                    .data = @bitCast(u32, float.storage.f32),
+                    .data = @as(u32, @bitCast(float.storage.f32)),
                 }),
                 .f64_type => ip.items.appendAssumeCapacity(.{
                     .tag = .float_f64,
@@ -3872,13 +3872,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     assert(child == .u8_type);
                     if (bytes.len != len) {
                         assert(bytes.len == len_including_sentinel);
-                        assert(bytes[@intCast(usize, len)] == ip.indexToKey(sentinel).int.storage.u64);
+                        assert(bytes[@as(usize, @intCast(len))] == ip.indexToKey(sentinel).int.storage.u64);
                     }
                 },
                 .elems => |elems| {
                     if (elems.len != len) {
                         assert(elems.len == len_including_sentinel);
-                        assert(elems[@intCast(usize, len)] == sentinel);
+                        assert(elems[@as(usize, @intCast(len))] == sentinel);
                     }
                 },
                 .repeated_elem => |elem| {
@@ -3912,7 +3912,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     .tag = .only_possible_value,
                     .data = @intFromEnum(aggregate.ty),
                 });
-                return @enumFromInt(Index, ip.items.len - 1);
+                return @as(Index, @enumFromInt(ip.items.len - 1));
             }
 
             switch (ty_key) {
@@ -3940,16 +3940,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         .tag = .only_possible_value,
                         .data = @intFromEnum(aggregate.ty),
                     });
-                    return @enumFromInt(Index, ip.items.len - 1);
+                    return @as(Index, @enumFromInt(ip.items.len - 1));
                 },
                 else => {},
             }
 
             repeated: {
                 switch (aggregate.storage) {
-                    .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte|
+                    .bytes => |bytes| for (bytes[1..@as(usize, @intCast(len))]) |byte|
                         if (byte != bytes[0]) break :repeated,
-                    .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem|
+                    .elems => |elems| for (elems[1..@as(usize, @intCast(len))]) |elem|
                         if (elem != elems[0]) break :repeated,
                     .repeated_elem => {},
                 }
@@ -3979,12 +3979,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         .elem_val = elem,
                     }),
                 });
-                return @enumFromInt(Index, ip.items.len - 1);
+                return @as(Index, @enumFromInt(ip.items.len - 1));
             }
 
             if (child == .u8_type) bytes: {
                 const string_bytes_index = ip.string_bytes.items.len;
-                try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(usize, len_including_sentinel + 1));
+                try ip.string_bytes.ensureUnusedCapacity(gpa, @as(usize, @intCast(len_including_sentinel + 1)));
                 try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
                 switch (aggregate.storage) {
                     .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes),
@@ -3994,15 +3994,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                             break :bytes;
                         },
                         .int => |int| ip.string_bytes.appendAssumeCapacity(
-                            @intCast(u8, int.storage.u64),
+                            @as(u8, @intCast(int.storage.u64)),
                         ),
                         else => unreachable,
                     },
                     .repeated_elem => |elem| switch (ip.indexToKey(elem)) {
                         .undef => break :bytes,
                         .int => |int| @memset(
-                            ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(usize, len)),
-                            @intCast(u8, int.storage.u64),
+                            ip.string_bytes.addManyAsSliceAssumeCapacity(@as(usize, @intCast(len))),
+                            @as(u8, @intCast(int.storage.u64)),
                         ),
                         else => unreachable,
                     },
@@ -4010,12 +4010,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                 const has_internal_null =
                     std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null;
                 if (sentinel != .none) ip.string_bytes.appendAssumeCapacity(
-                    @intCast(u8, ip.indexToKey(sentinel).int.storage.u64),
+                    @as(u8, @intCast(ip.indexToKey(sentinel).int.storage.u64)),
                 );
                 const string = if (has_internal_null)
-                    @enumFromInt(String, string_bytes_index)
+                    @as(String, @enumFromInt(string_bytes_index))
                 else
-                    (try ip.getOrPutTrailingString(gpa, @intCast(usize, len_including_sentinel))).toString();
+                    (try ip.getOrPutTrailingString(gpa, @as(usize, @intCast(len_including_sentinel)))).toString();
                 ip.items.appendAssumeCapacity(.{
                     .tag = .bytes,
                     .data = ip.addExtraAssumeCapacity(Bytes{
@@ -4023,12 +4023,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                         .bytes = string,
                     }),
                 });
-                return @enumFromInt(Index, ip.items.len - 1);
+                return @as(Index, @enumFromInt(ip.items.len - 1));
             }
 
             try ip.extra.ensureUnusedCapacity(
                 gpa,
-                @typeInfo(Tag.Aggregate).Struct.fields.len + @intCast(usize, len_including_sentinel),
+                @typeInfo(Tag.Aggregate).Struct.fields.len + @as(usize, @intCast(len_including_sentinel)),
             );
             ip.items.appendAssumeCapacity(.{
                 .tag = .aggregate,
@@ -4036,7 +4036,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     .ty = aggregate.ty,
                 }),
             });
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems));
+            ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(aggregate.storage.elems)));
             if (sentinel != .none) ip.extra.appendAssumeCapacity(@intFromEnum(sentinel));
         },
 
@@ -4058,14 +4058,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                 .tag = .memoized_call,
                 .data = ip.addExtraAssumeCapacity(MemoizedCall{
                     .func = memoized_call.func,
-                    .args_len = @intCast(u32, memoized_call.arg_values.len),
+                    .args_len = @as(u32, @intCast(memoized_call.arg_values.len)),
                     .result = memoized_call.result,
                 }),
             });
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values));
+            ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(memoized_call.arg_values)));
         },
     }
-    return @enumFromInt(Index, ip.items.len - 1);
+    return @as(Index, @enumFromInt(ip.items.len - 1));
 }
 
 /// Provides API for completing an enum type after calling `getIncompleteEnum`.
@@ -4093,10 +4093,10 @@ pub const IncompleteEnumType = struct {
         const field_index = map.count();
         const strings = ip.extra.items[self.names_start..][0..field_index];
         const adapter: NullTerminatedString.Adapter = .{
-            .strings = @ptrCast([]const NullTerminatedString, strings),
+            .strings = @as([]const NullTerminatedString, @ptrCast(strings)),
         };
         const gop = try map.getOrPutAdapted(gpa, name, adapter);
-        if (gop.found_existing) return @intCast(u32, gop.index);
+        if (gop.found_existing) return @as(u32, @intCast(gop.index));
         ip.extra.items[self.names_start + field_index] = @intFromEnum(name);
         return null;
     }
@@ -4109,15 +4109,15 @@ pub const IncompleteEnumType = struct {
         gpa: Allocator,
         value: Index,
     ) Allocator.Error!?u32 {
-        assert(ip.typeOf(value) == @enumFromInt(Index, ip.extra.items[self.tag_ty_index]));
+        assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index])));
         const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)];
         const field_index = map.count();
         const indexes = ip.extra.items[self.values_start..][0..field_index];
         const adapter: Index.Adapter = .{
-            .indexes = @ptrCast([]const Index, indexes),
+            .indexes = @as([]const Index, @ptrCast(indexes)),
         };
         const gop = try map.getOrPutAdapted(gpa, value, adapter);
-        if (gop.found_existing) return @intCast(u32, gop.index);
+        if (gop.found_existing) return @as(u32, @intCast(gop.index));
         ip.extra.items[self.values_start + field_index] = @intFromEnum(value);
         return null;
     }
@@ -4177,7 +4177,7 @@ fn getIncompleteEnumAuto(
     });
     ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len);
     return .{
-        .index = @enumFromInt(Index, ip.items.len - 1),
+        .index = @as(Index, @enumFromInt(ip.items.len - 1)),
         .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
         .names_map = names_map,
         .names_start = extra_index + extra_fields_len,
@@ -4228,7 +4228,7 @@ fn getIncompleteEnumExplicit(
     // This is both fields and values (if present).
     ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len);
     return .{
-        .index = @enumFromInt(Index, ip.items.len - 1),
+        .index = @as(Index, @enumFromInt(ip.items.len - 1)),
         .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
         .names_map = names_map,
         .names_start = extra_index + extra_fields_len,
@@ -4251,7 +4251,7 @@ pub fn finishGetEnum(
         try addIndexesToMap(ip, gpa, values_map, enum_type.values);
         break :m values_map.toOptional();
     };
-    const fields_len = @intCast(u32, enum_type.names.len);
+    const fields_len = @as(u32, @intCast(enum_type.names.len));
     try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
         fields_len);
     ip.items.appendAssumeCapacity(.{
@@ -4265,15 +4265,15 @@ pub fn finishGetEnum(
             .values_map = values_map,
         }),
     });
-    ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names));
-    ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values));
-    return @enumFromInt(Index, ip.items.len - 1);
+    ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names)));
+    ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.values)));
+    return @as(Index, @enumFromInt(ip.items.len - 1));
 }
 
 pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
     const adapter: KeyAdapter = .{ .intern_pool = ip };
     const index = ip.map.getIndexAdapted(key, adapter) orelse return null;
-    return @enumFromInt(Index, index);
+    return @as(Index, @enumFromInt(index));
 }
 
 pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
@@ -4311,7 +4311,7 @@ fn addIndexesToMap(
 fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex {
     const ptr = try ip.maps.addOne(gpa);
     ptr.* = .{};
-    return @enumFromInt(MapIndex, ip.maps.items.len - 1);
+    return @as(MapIndex, @enumFromInt(ip.maps.items.len - 1));
 }
 
 /// This operation only happens under compile error conditions.
@@ -4320,7 +4320,7 @@ fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex {
 pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead");
 
 fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void {
-    const limbs_len = @intCast(u32, limbs.len);
+    const limbs_len = @as(u32, @intCast(limbs.len));
     try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len);
     ip.items.appendAssumeCapacity(.{
         .tag = tag,
@@ -4339,7 +4339,7 @@ fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32
 }
 
 fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
-    const result = @intCast(u32, ip.extra.items.len);
+    const result = @as(u32, @intCast(ip.extra.items.len));
     inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
         ip.extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
@@ -4354,12 +4354,12 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
             String => @intFromEnum(@field(extra, field.name)),
             NullTerminatedString => @intFromEnum(@field(extra, field.name)),
             OptionalNullTerminatedString => @intFromEnum(@field(extra, field.name)),
-            i32 => @bitCast(u32, @field(extra, field.name)),
-            Tag.TypePointer.Flags => @bitCast(u32, @field(extra, field.name)),
-            TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)),
-            Tag.TypePointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
+            i32 => @as(u32, @bitCast(@field(extra, field.name))),
+            Tag.TypePointer.Flags => @as(u32, @bitCast(@field(extra, field.name))),
+            TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))),
+            Tag.TypePointer.PackedOffset => @as(u32, @bitCast(@field(extra, field.name))),
             Tag.TypePointer.VectorIndex => @intFromEnum(@field(extra, field.name)),
-            Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)),
+            Tag.Variable.Flags => @as(u32, @bitCast(@field(extra, field.name))),
             else => @compileError("bad field type: " ++ @typeName(field.type)),
         });
     }
@@ -4380,7 +4380,7 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
         @sizeOf(u64) => {},
         else => @compileError("unsupported host"),
     }
-    const result = @intCast(u32, ip.limbs.items.len);
+    const result = @as(u32, @intCast(ip.limbs.items.len));
     inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) |field, i| {
         const new: u32 = switch (field.type) {
             u32 => @field(extra, field.name),
@@ -4411,23 +4411,23 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
         const int32 = ip.extra.items[i + index];
         @field(result, field.name) = switch (field.type) {
             u32 => int32,
-            Index => @enumFromInt(Index, int32),
-            Module.Decl.Index => @enumFromInt(Module.Decl.Index, int32),
-            Module.Namespace.Index => @enumFromInt(Module.Namespace.Index, int32),
-            Module.Namespace.OptionalIndex => @enumFromInt(Module.Namespace.OptionalIndex, int32),
-            Module.Fn.Index => @enumFromInt(Module.Fn.Index, int32),
-            MapIndex => @enumFromInt(MapIndex, int32),
-            OptionalMapIndex => @enumFromInt(OptionalMapIndex, int32),
-            RuntimeIndex => @enumFromInt(RuntimeIndex, int32),
-            String => @enumFromInt(String, int32),
-            NullTerminatedString => @enumFromInt(NullTerminatedString, int32),
-            OptionalNullTerminatedString => @enumFromInt(OptionalNullTerminatedString, int32),
-            i32 => @bitCast(i32, int32),
-            Tag.TypePointer.Flags => @bitCast(Tag.TypePointer.Flags, int32),
-            TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32),
-            Tag.TypePointer.PackedOffset => @bitCast(Tag.TypePointer.PackedOffset, int32),
-            Tag.TypePointer.VectorIndex => @enumFromInt(Tag.TypePointer.VectorIndex, int32),
-            Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32),
+            Index => @as(Index, @enumFromInt(int32)),
+            Module.Decl.Index => @as(Module.Decl.Index, @enumFromInt(int32)),
+            Module.Namespace.Index => @as(Module.Namespace.Index, @enumFromInt(int32)),
+            Module.Namespace.OptionalIndex => @as(Module.Namespace.OptionalIndex, @enumFromInt(int32)),
+            Module.Fn.Index => @as(Module.Fn.Index, @enumFromInt(int32)),
+            MapIndex => @as(MapIndex, @enumFromInt(int32)),
+            OptionalMapIndex => @as(OptionalMapIndex, @enumFromInt(int32)),
+            RuntimeIndex => @as(RuntimeIndex, @enumFromInt(int32)),
+            String => @as(String, @enumFromInt(int32)),
+            NullTerminatedString => @as(NullTerminatedString, @enumFromInt(int32)),
+            OptionalNullTerminatedString => @as(OptionalNullTerminatedString, @enumFromInt(int32)),
+            i32 => @as(i32, @bitCast(int32)),
+            Tag.TypePointer.Flags => @as(Tag.TypePointer.Flags, @bitCast(int32)),
+            TypeFunction.Flags => @as(TypeFunction.Flags, @bitCast(int32)),
+            Tag.TypePointer.PackedOffset => @as(Tag.TypePointer.PackedOffset, @bitCast(int32)),
+            Tag.TypePointer.VectorIndex => @as(Tag.TypePointer.VectorIndex, @enumFromInt(int32)),
+            Tag.Variable.Flags => @as(Tag.Variable.Flags, @bitCast(int32)),
             else => @compileError("bad field type: " ++ @typeName(field.type)),
         };
     }
@@ -4452,13 +4452,13 @@ fn limbData(ip: *const InternPool, comptime T: type, index: usize) T {
     inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
         const host_int = ip.limbs.items[index + i / 2];
         const int32 = if (i % 2 == 0)
-            @truncate(u32, host_int)
+            @as(u32, @truncate(host_int))
         else
-            @truncate(u32, host_int >> 32);
+            @as(u32, @truncate(host_int >> 32));
 
         @field(result, field.name) = switch (field.type) {
             u32 => int32,
-            Index => @enumFromInt(Index, int32),
+            Index => @as(Index, @enumFromInt(int32)),
             else => @compileError("bad field type: " ++ @typeName(field.type)),
         };
     }
@@ -4494,8 +4494,8 @@ fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes
     };
     // TODO: https://github.com/ziglang/zig/issues/1738
     return .{
-        .start = @intCast(u32, @divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb))),
-        .len = @intCast(u32, limbs.len),
+        .start = @as(u32, @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb)))),
+        .len = @as(u32, @intCast(limbs.len)),
     };
 }
 
@@ -4557,7 +4557,7 @@ pub fn slicePtrType(ip: *const InternPool, i: Index) Index {
     }
     const item = ip.items.get(@intFromEnum(i));
     switch (item.tag) {
-        .type_slice => return @enumFromInt(Index, item.data),
+        .type_slice => return @as(Index, @enumFromInt(item.data)),
         else => unreachable, // not a slice type
     }
 }
@@ -4727,7 +4727,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
                     .val = error_union.val,
                 } }),
             .aggregate => |aggregate| {
-                const new_len = @intCast(usize, ip.aggregateTypeLen(new_ty));
+                const new_len = @as(usize, @intCast(ip.aggregateTypeLen(new_ty)));
                 direct: {
                     const old_ty_child = switch (ip.indexToKey(old_ty)) {
                         inline .array_type, .vector_type => |seq_type| seq_type.child,
@@ -4862,7 +4862,7 @@ pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.Option
     const tags = ip.items.items(.tag);
     if (tags[@intFromEnum(val)] != .type_struct) return .none;
     const datas = ip.items.items(.data);
-    return @enumFromInt(Module.Struct.Index, datas[@intFromEnum(val)]).toOptional();
+    return @as(Module.Struct.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
 }
 
 pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex {
@@ -4873,7 +4873,7 @@ pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.Optional
         else => return .none,
     }
     const datas = ip.items.items(.data);
-    return @enumFromInt(Module.Union.Index, datas[@intFromEnum(val)]).toOptional();
+    return @as(Module.Union.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
 }
 
 pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
@@ -4899,7 +4899,7 @@ pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.
     const tags = ip.items.items(.tag);
     if (tags[@intFromEnum(val)] != .type_inferred_error_set) return .none;
     const datas = ip.items.items(.data);
-    return @enumFromInt(Module.Fn.InferredErrorSet.Index, datas[@intFromEnum(val)]).toOptional();
+    return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
 }
 
 /// includes .comptime_int_type
@@ -5057,7 +5057,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
             .type_enum_auto => @sizeOf(EnumAuto),
             .type_opaque => @sizeOf(Key.OpaqueType),
             .type_struct => b: {
-                const struct_index = @enumFromInt(Module.Struct.Index, data);
+                const struct_index = @as(Module.Struct.Index, @enumFromInt(data));
                 const struct_obj = ip.structPtrConst(struct_index);
                 break :b @sizeOf(Module.Struct) +
                     @sizeOf(Module.Namespace) +
@@ -5124,13 +5124,13 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
 
             .bytes => b: {
                 const info = ip.extraData(Bytes, data);
-                const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty));
+                const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)));
                 break :b @sizeOf(Bytes) + len +
                     @intFromBool(ip.string_bytes.items[@intFromEnum(info.bytes) + len - 1] != 0);
             },
             .aggregate => b: {
                 const info = ip.extraData(Tag.Aggregate, data);
-                const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty));
+                const fields_len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)));
                 break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len);
             },
             .repeated => @sizeOf(Repeated),
@@ -5181,8 +5181,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
     for (tags, datas, 0..) |tag, data, i| {
         try w.print("${d} = {s}(", .{ i, @tagName(tag) });
         switch (tag) {
-            .simple_type => try w.print("{s}", .{@tagName(@enumFromInt(SimpleType, data))}),
-            .simple_value => try w.print("{s}", .{@tagName(@enumFromInt(SimpleValue, data))}),
+            .simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(data)))}),
+            .simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(data)))}),
 
             .type_int_signed,
             .type_int_unsigned,
@@ -5311,7 +5311,7 @@ pub fn createStruct(
     }
     const ptr = try ip.allocated_structs.addOne(gpa);
     ptr.* = initialization;
-    return @enumFromInt(Module.Struct.Index, ip.allocated_structs.len - 1);
+    return @as(Module.Struct.Index, @enumFromInt(ip.allocated_structs.len - 1));
 }
 
 pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void {
@@ -5333,7 +5333,7 @@ pub fn createUnion(
     }
     const ptr = try ip.allocated_unions.addOne(gpa);
     ptr.* = initialization;
-    return @enumFromInt(Module.Union.Index, ip.allocated_unions.len - 1);
+    return @as(Module.Union.Index, @enumFromInt(ip.allocated_unions.len - 1));
 }
 
 pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) void {
@@ -5355,7 +5355,7 @@ pub fn createFunc(
     }
     const ptr = try ip.allocated_funcs.addOne(gpa);
     ptr.* = initialization;
-    return @enumFromInt(Module.Fn.Index, ip.allocated_funcs.len - 1);
+    return @as(Module.Fn.Index, @enumFromInt(ip.allocated_funcs.len - 1));
 }
 
 pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void {
@@ -5377,7 +5377,7 @@ pub fn createInferredErrorSet(
     }
     const ptr = try ip.allocated_inferred_error_sets.addOne(gpa);
     ptr.* = initialization;
-    return @enumFromInt(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1);
+    return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(ip.allocated_inferred_error_sets.len - 1));
 }
 
 pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void {
@@ -5406,7 +5406,7 @@ pub fn getOrPutStringFmt(
     args: anytype,
 ) Allocator.Error!NullTerminatedString {
     // ensure that references to string_bytes in args do not get invalidated
-    const len = @intCast(usize, std.fmt.count(format, args) + 1);
+    const len = @as(usize, @intCast(std.fmt.count(format, args) + 1));
     try ip.string_bytes.ensureUnusedCapacity(gpa, len);
     ip.string_bytes.writer(undefined).print(format, args) catch unreachable;
     ip.string_bytes.appendAssumeCapacity(0);
@@ -5430,7 +5430,7 @@ pub fn getOrPutTrailingString(
     len: usize,
 ) Allocator.Error!NullTerminatedString {
     const string_bytes = &ip.string_bytes;
-    const str_index = @intCast(u32, string_bytes.items.len - len);
+    const str_index = @as(u32, @intCast(string_bytes.items.len - len));
     if (len > 0 and string_bytes.getLast() == 0) {
         _ = string_bytes.pop();
     } else {
@@ -5444,11 +5444,11 @@ pub fn getOrPutTrailingString(
     });
     if (gop.found_existing) {
         string_bytes.shrinkRetainingCapacity(str_index);
-        return @enumFromInt(NullTerminatedString, gop.key_ptr.*);
+        return @as(NullTerminatedString, @enumFromInt(gop.key_ptr.*));
     } else {
         gop.key_ptr.* = str_index;
         string_bytes.appendAssumeCapacity(0);
-        return @enumFromInt(NullTerminatedString, str_index);
+        return @as(NullTerminatedString, @enumFromInt(str_index));
     }
 }
 
@@ -5456,7 +5456,7 @@ pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString {
     if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{
         .bytes = &ip.string_bytes,
     })) |index| {
-        return @enumFromInt(NullTerminatedString, index).toOptional();
+        return @as(NullTerminatedString, @enumFromInt(index)).toOptional();
     } else {
         return .none;
     }
@@ -5596,7 +5596,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
             .undef,
             .opt_null,
             .only_possible_value,
-            => @enumFromInt(Index, ip.items.items(.data)[@intFromEnum(index)]),
+            => @as(Index, @enumFromInt(ip.items.items(.data)[@intFromEnum(index)])),
 
             .simple_value => unreachable, // handled via Index above
 
@@ -5628,7 +5628,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
             => |t| {
                 const extra_index = ip.items.items(.data)[@intFromEnum(index)];
                 const field_index = std.meta.fieldIndex(t.Payload(), "ty").?;
-                return @enumFromInt(Index, ip.extra.items[extra_index + field_index]);
+                return @as(Index, @enumFromInt(ip.extra.items[extra_index + field_index]));
             },
 
             .int_u8 => .u8_type,
@@ -5670,7 +5670,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
 /// Assumes that the enum's field indexes equal its value tags.
 pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
     const int = ip.indexToKey(i).enum_tag.int;
-    return @enumFromInt(E, ip.indexToKey(int).int.storage.u64);
+    return @as(E, @enumFromInt(ip.indexToKey(int).int.storage.u64));
 }
 
 pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
@@ -5703,9 +5703,9 @@ pub fn funcReturnType(ip: *const InternPool, ty: Index) Index {
         else => unreachable,
     };
     assert(child_item.tag == .type_function);
-    return @enumFromInt(Index, ip.extra.items[
+    return @as(Index, @enumFromInt(ip.extra.items[
         child_item.data + std.meta.fieldIndex(TypeFunction, "return_type").?
-    ]);
+    ]));
 }
 
 pub fn isNoReturn(ip: *const InternPool, ty: Index) bool {
@@ -5736,9 +5736,9 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) Module.Decl.OptionalInd
         switch (ip.items.items(.tag)[base]) {
             inline .ptr_decl,
             .ptr_mut_decl,
-            => |tag| return @enumFromInt(Module.Decl.OptionalIndex, ip.extra.items[
+            => |tag| return @as(Module.Decl.OptionalIndex, @enumFromInt(ip.extra.items[
                 ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "decl").?
-            ]),
+            ])),
             inline .ptr_eu_payload,
             .ptr_opt_payload,
             .ptr_elem,
src/Liveness.zig
@@ -178,14 +178,14 @@ pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocat
 
 pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi {
     const usize_index = (inst * bpi) / @bitSizeOf(usize);
-    return @truncate(Bpi, l.tomb_bits[usize_index] >>
-        @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi));
+    return @as(Bpi, @truncate(l.tomb_bits[usize_index] >>
+        @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi))));
 }
 
 pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool {
     const usize_index = (inst * bpi) / @bitSizeOf(usize);
     const mask = @as(usize, 1) <<
-        @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1));
+        @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)));
     return (l.tomb_bits[usize_index] & mask) != 0;
 }
 
@@ -193,7 +193,7 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool
     assert(operand < bpi - 1);
     const usize_index = (inst * bpi) / @bitSizeOf(usize);
     const mask = @as(usize, 1) <<
-        @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
+        @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand));
     return (l.tomb_bits[usize_index] & mask) != 0;
 }
 
@@ -201,7 +201,7 @@ pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt)
     assert(operand < bpi - 1);
     const usize_index = (inst * bpi) / @bitSizeOf(usize);
     const mask = @as(usize, 1) <<
-        @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand);
+        @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand));
     l.tomb_bits[usize_index] &= ~mask;
 }
 
@@ -484,11 +484,11 @@ pub fn categorizeOperand(
             const inst_data = air_datas[inst].pl_op;
             const callee = inst_data.operand;
             const extra = air.extraData(Air.Call, inst_data.payload);
-            const args = @ptrCast([]const Air.Inst.Ref, air.extra[extra.end..][0..extra.data.args_len]);
+            const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra[extra.end..][0..extra.data.args_len]));
             if (args.len + 1 <= bpi - 1) {
                 if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
                 for (args, 0..) |arg, i| {
-                    if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i + 1), .write);
+                    if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i + 1)), .write);
                 }
                 return .write;
             }
@@ -535,12 +535,12 @@ pub fn categorizeOperand(
         .aggregate_init => {
             const ty_pl = air_datas[inst].ty_pl;
             const aggregate_ty = air.getRefType(ty_pl.ty);
-            const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
-            const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]);
+            const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
+            const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra[ty_pl.payload..][0..len]));
 
             if (elements.len <= bpi - 1) {
                 for (elements, 0..) |elem, i| {
-                    if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i), .none);
+                    if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i)), .none);
                 }
                 return .none;
             }
@@ -808,20 +808,20 @@ pub const BigTomb = struct {
 
         const small_tombs = bpi - 1;
         if (this_bit_index < small_tombs) {
-            const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0;
+            const dies = @as(u1, @truncate(bt.tomb_bits >> @as(Liveness.OperandInt, @intCast(this_bit_index)))) != 0;
             return dies;
         }
 
         const big_bit_index = this_bit_index - small_tombs;
         while (big_bit_index - bt.extra_offset * 31 >= 31) {
-            if (@truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >> 31) != 0) {
+            if (@as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >> 31)) != 0) {
                 bt.reached_end = true;
                 return false;
             }
             bt.extra_offset += 1;
         }
-        const dies = @truncate(u1, bt.extra[bt.extra_start + bt.extra_offset] >>
-            @intCast(u5, big_bit_index - bt.extra_offset * 31)) != 0;
+        const dies = @as(u1, @truncate(bt.extra[bt.extra_start + bt.extra_offset] >>
+            @as(u5, @intCast(big_bit_index - bt.extra_offset * 31)))) != 0;
         return dies;
     }
 };
@@ -838,7 +838,7 @@ const Analysis = struct {
     fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void {
         const usize_index = (inst * bpi) / @bitSizeOf(usize);
         a.tomb_bits[usize_index] |= @as(usize, tomb_bits) <<
-            @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
+            @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi));
     }
 
     fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 {
@@ -849,7 +849,7 @@ const Analysis = struct {
 
     fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 {
         const fields = std.meta.fields(@TypeOf(extra));
-        const result = @intCast(u32, a.extra.items.len);
+        const result = @as(u32, @intCast(a.extra.items.len));
         inline for (fields) |field| {
             a.extra.appendAssumeCapacity(switch (field.type) {
                 u32 => @field(extra, field.name),
@@ -1108,7 +1108,7 @@ fn analyzeInst(
             const inst_data = inst_datas[inst].pl_op;
             const callee = inst_data.operand;
             const extra = a.air.extraData(Air.Call, inst_data.payload);
-            const args = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]);
+            const args = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra.end..][0..extra.data.args_len]));
             if (args.len + 1 <= bpi - 1) {
                 var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
                 buf[0] = callee;
@@ -1146,8 +1146,8 @@ fn analyzeInst(
         .aggregate_init => {
             const ty_pl = inst_datas[inst].ty_pl;
             const aggregate_ty = a.air.getRefType(ty_pl.ty);
-            const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
-            const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
+            const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
+            const elements = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[ty_pl.payload..][0..len]));
 
             if (elements.len <= bpi - 1) {
                 var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
@@ -1200,9 +1200,9 @@ fn analyzeInst(
         .assembly => {
             const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload);
             var extra_i: usize = extra.end;
-            const outputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.outputs_len]);
+            const outputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.outputs_len]));
             extra_i += outputs.len;
-            const inputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]);
+            const inputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.inputs_len]));
             extra_i += inputs.len;
 
             const num_operands = simple: {
@@ -1310,7 +1310,7 @@ fn analyzeOperands(
                     // Don't compute any liveness for constants
                     if (inst_tags[operand] == .interned) continue;
 
-                    const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
+                    const mask = @as(Bpi, 1) << @as(OperandInt, @intCast(i));
 
                     if ((try data.live_set.fetchPut(gpa, operand, {})) == null) {
                         log.debug("[{}] %{}: added %{} to live set (operand dies here)", .{ pass, inst, operand });
@@ -1320,7 +1320,7 @@ fn analyzeOperands(
             }
 
             a.tomb_bits[usize_index] |= @as(usize, tomb_bits) <<
-                @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
+                @as(Log2Int(usize), @intCast((inst % (@bitSizeOf(usize) / bpi)) * bpi));
         },
     }
 }
@@ -1472,7 +1472,7 @@ fn analyzeInstLoop(
             const num_breaks = data.breaks.count();
             try a.extra.ensureUnusedCapacity(gpa, 1 + num_breaks);
 
-            const extra_index = @intCast(u32, a.extra.items.len);
+            const extra_index = @as(u32, @intCast(a.extra.items.len));
             a.extra.appendAssumeCapacity(num_breaks);
 
             var it = data.breaks.keyIterator();
@@ -1523,7 +1523,7 @@ fn analyzeInstLoop(
             // This is necessarily not in the same control flow branch, because loops are noreturn
             data.live_set.clearRetainingCapacity();
 
-            try data.live_set.ensureUnusedCapacity(gpa, @intCast(u32, loop_live.len));
+            try data.live_set.ensureUnusedCapacity(gpa, @as(u32, @intCast(loop_live.len)));
             for (loop_live) |alive| {
                 data.live_set.putAssumeCapacity(alive, {});
             }
@@ -1647,8 +1647,8 @@ fn analyzeInstCondBr(
             log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
 
             // Write the mirrored deaths to `extra`
-            const then_death_count = @intCast(u32, then_mirrored_deaths.items.len);
-            const else_death_count = @intCast(u32, else_mirrored_deaths.items.len);
+            const then_death_count = @as(u32, @intCast(then_mirrored_deaths.items.len));
+            const else_death_count = @as(u32, @intCast(else_mirrored_deaths.items.len));
             try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(CondBr).len + then_death_count + else_death_count);
             const extra_index = a.addExtraAssumeCapacity(CondBr{
                 .then_death_count = then_death_count,
@@ -1758,12 +1758,12 @@ fn analyzeInstSwitchBr(
                 log.debug("[{}] %{}: new live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) });
             }
 
-            const else_death_count = @intCast(u32, mirrored_deaths[ncases].items.len);
+            const else_death_count = @as(u32, @intCast(mirrored_deaths[ncases].items.len));
             const extra_index = try a.addExtra(SwitchBr{
                 .else_death_count = else_death_count,
             });
             for (mirrored_deaths[0..ncases]) |mirrored| {
-                const num = @intCast(u32, mirrored.items.len);
+                const num = @as(u32, @intCast(mirrored.items.len));
                 try a.extra.ensureUnusedCapacity(gpa, num + 1);
                 a.extra.appendAssumeCapacity(num);
                 a.extra.appendSliceAssumeCapacity(mirrored.items);
@@ -1798,7 +1798,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
             inst: Air.Inst.Index,
             total_operands: usize,
         ) !Self {
-            const extra_operands = @intCast(u32, total_operands) -| (bpi - 1);
+            const extra_operands = @as(u32, @intCast(total_operands)) -| (bpi - 1);
             const max_extra_tombs = (extra_operands + 30) / 31;
 
             const extra_tombs: []u32 = switch (pass) {
@@ -1818,7 +1818,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
                 .a = a,
                 .data = data,
                 .inst = inst,
-                .operands_remaining = @intCast(u32, total_operands),
+                .operands_remaining = @as(u32, @intCast(total_operands)),
                 .extra_tombs = extra_tombs,
                 .will_die_immediately = will_die_immediately,
             };
@@ -1847,7 +1847,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
             if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return;
 
             const extra_byte = (big.operands_remaining - (bpi - 1)) / 31;
-            const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31);
+            const extra_bit = @as(u5, @intCast(big.operands_remaining - (bpi - 1) - extra_byte * 31));
 
             const gpa = big.a.gpa;
 
@@ -1881,7 +1881,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
                     // keep at least one.
                     var num: usize = big.extra_tombs.len;
                     while (num > 1) {
-                        if (@truncate(u31, big.extra_tombs[num - 1]) != 0) {
+                        if (@as(u31, @truncate(big.extra_tombs[num - 1])) != 0) {
                             // Some operand dies here
                             break;
                         }
@@ -1892,7 +1892,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
 
                     const extra_tombs = big.extra_tombs[0..num];
 
-                    const extra_index = @intCast(u32, big.a.extra.items.len);
+                    const extra_index = @as(u32, @intCast(big.a.extra.items.len));
                     try big.a.extra.appendSlice(gpa, extra_tombs);
                     try big.a.special.put(gpa, big.inst, extra_index);
                 },
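
Liveness packs per-instruction death bits into `usize` words, so most of its casts compute shift amounts. A standalone sketch of the rewritten shift pattern (simplified to one bit per index, not the real `bpi` packing):

    const std = @import("std");

    fn bitAt(bits: usize, index: usize) u1 {
        // Old syntax: @intCast(Log2Int(usize), index) and @truncate(u1, ...)
        const shift = @as(std.math.Log2Int(usize), @intCast(index));
        return @as(u1, @truncate(bits >> shift));
    }

    test "bitAt" {
        try std.testing.expectEqual(@as(u1, 1), bitAt(0b100, 2));
        try std.testing.expectEqual(@as(u1, 0), bitAt(0b100, 1));
    }
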
src/main.zig
@@ -3523,7 +3523,7 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th
 
         server.serveMessage(.{
             .tag = .progress,
-            .bytes_len = @intCast(u32, progress_string.len),
+            .bytes_len = @as(u32, @intCast(progress_string.len)),
         }, &.{
             progress_string,
         }) catch |err| {
@@ -5020,8 +5020,8 @@ pub fn clangMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!
 
     // Convert the args to the null-terminated format Clang expects.
     const argv = try argsCopyZ(arena, args);
-    const exit_code = ZigClang_main(@intCast(c_int, argv.len), argv.ptr);
-    return @bitCast(u8, @truncate(i8, exit_code));
+    const exit_code = ZigClang_main(@as(c_int, @intCast(argv.len)), argv.ptr);
+    return @as(u8, @bitCast(@as(i8, @truncate(exit_code))));
 }
 
 pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}!u8 {
@@ -5035,8 +5035,8 @@ pub fn llvmArMain(alloc: Allocator, args: []const []const u8) error{OutOfMemory}
     // Convert the args to the format llvm-ar expects.
     // We intentionally shave off the zig binary at args[0].
     const argv = try argsCopyZ(arena, args[1..]);
-    const exit_code = ZigLlvmAr_main(@intCast(c_int, argv.len), argv.ptr);
-    return @bitCast(u8, @truncate(i8, exit_code));
+    const exit_code = ZigLlvmAr_main(@as(c_int, @intCast(argv.len)), argv.ptr);
+    return @as(u8, @bitCast(@as(i8, @truncate(exit_code))));
 }
 
 /// The first argument determines which backend is invoked. The options are:
@@ -5072,7 +5072,7 @@ pub fn lldMain(
     // "If an error occurs, false will be returned."
     const ok = rc: {
         const llvm = @import("codegen/llvm/bindings.zig");
-        const argc = @intCast(c_int, argv.len);
+        const argc = @as(c_int, @intCast(argv.len));
         if (mem.eql(u8, args[1], "ld.lld")) {
             break :rc llvm.LinkELF(argc, argv.ptr, can_exit_early, false);
         } else if (mem.eql(u8, args[1], "lld-link")) {
@@ -5507,7 +5507,7 @@ pub fn cmdAstCheck(
         if (stat.size > max_src_size)
             return error.FileTooBig;
 
-        const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0);
+        const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
         const amt = try f.readAll(source);
         if (amt != stat.size)
             return error.UnexpectedEndOfFile;
@@ -5703,7 +5703,7 @@ pub fn cmdChangelist(
     file.pkg = try Package.create(gpa, null, file.sub_file_path);
     defer file.pkg.destroy(gpa);
 
-    const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0);
+    const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
     const amt = try f.readAll(source);
     if (amt != stat.size)
         return error.UnexpectedEndOfFile;
@@ -5739,7 +5739,7 @@ pub fn cmdChangelist(
     if (new_stat.size > max_src_size)
         return error.FileTooBig;
 
-    const new_source = try arena.allocSentinel(u8, @intCast(usize, new_stat.size), 0);
+    const new_source = try arena.allocSentinel(u8, @as(usize, @intCast(new_stat.size)), 0);
     const new_amt = try new_f.readAll(new_source);
     if (new_amt != new_stat.size)
         return error.UnexpectedEndOfFile;
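
The exit-code conversions in `clangMain` and `llvmArMain` chain two casts; the rewrite preserves the nesting order, with `@as` supplying each intermediate result type. A self-contained sketch of that chain (the helper name is illustrative, not from the source):

    const std = @import("std");

    fn exitCodeFromC(code: c_int) u8 {
        // Old syntax: @bitCast(u8, @truncate(i8, code))
        return @as(u8, @bitCast(@as(i8, @truncate(code))));
    }

    test "exitCodeFromC" {
        try std.testing.expectEqual(@as(u8, 255), exitCodeFromC(-1)); // wraps like C
        try std.testing.expectEqual(@as(u8, 1), exitCodeFromC(257));
    }
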
src/Manifest.zig
@@ -102,7 +102,7 @@ pub fn hex64(x: u64) [16]u8 {
     var result: [16]u8 = undefined;
     var i: usize = 0;
     while (i < 8) : (i += 1) {
-        const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
+        const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i))));
         result[i * 2 + 0] = hex_charset[byte >> 4];
         result[i * 2 + 1] = hex_charset[byte & 15];
     }
@@ -284,7 +284,7 @@ const Parse = struct {
                     @errorName(err),
                 });
             };
-            if (@enumFromInt(MultihashFunction, their_multihash_func) != multihash_function) {
+            if (@as(MultihashFunction, @enumFromInt(their_multihash_func)) != multihash_function) {
                 return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{});
             }
         }
@@ -345,7 +345,7 @@ const Parse = struct {
             .invalid_escape_character => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "invalid escape character: '{c}'",
                     .{raw_string[bad_index]},
                 );
@@ -353,7 +353,7 @@ const Parse = struct {
             .expected_hex_digit => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "expected hex digit, found '{c}'",
                     .{raw_string[bad_index]},
                 );
@@ -361,7 +361,7 @@ const Parse = struct {
             .empty_unicode_escape_sequence => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "empty unicode escape sequence",
                     .{},
                 );
@@ -369,7 +369,7 @@ const Parse = struct {
             .expected_hex_digit_or_rbrace => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "expected hex digit or '}}', found '{c}'",
                     .{raw_string[bad_index]},
                 );
@@ -377,7 +377,7 @@ const Parse = struct {
             .invalid_unicode_codepoint => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "unicode escape does not correspond to a valid codepoint",
                     .{},
                 );
@@ -385,7 +385,7 @@ const Parse = struct {
             .expected_lbrace => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "expected '{{', found '{c}",
                     .{raw_string[bad_index]},
                 );
@@ -393,7 +393,7 @@ const Parse = struct {
             .expected_rbrace => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "expected '}}', found '{c}",
                     .{raw_string[bad_index]},
                 );
@@ -401,7 +401,7 @@ const Parse = struct {
             .expected_single_quote => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "expected single quote ('), found '{c}",
                     .{raw_string[bad_index]},
                 );
@@ -409,7 +409,7 @@ const Parse = struct {
             .invalid_character => |bad_index| {
                 try p.appendErrorOff(
                     token,
-                    offset + @intCast(u32, bad_index),
+                    offset + @as(u32, @intCast(bad_index)),
                     "invalid byte in string or character literal: '{c}'",
                     .{raw_string[bad_index]},
                 );
src/Module.zig
@@ -554,7 +554,7 @@ pub const Decl = struct {
         _,
 
         pub fn toOptional(i: Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(i));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
         }
     };
 
@@ -563,12 +563,12 @@ pub const Decl = struct {
         _,
 
         pub fn init(oi: ?Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
         }
 
         pub fn unwrap(oi: OptionalIndex) ?Index {
             if (oi == .none) return null;
-            return @enumFromInt(Index, @intFromEnum(oi));
+            return @as(Index, @enumFromInt(@intFromEnum(oi)));
         }
     };
 
@@ -619,7 +619,7 @@ pub const Decl = struct {
     pub fn contentsHashZir(decl: Decl, zir: Zir) std.zig.SrcHash {
         assert(decl.zir_decl_index != 0);
         const hash_u32s = zir.extra[decl.zir_decl_index..][0..4];
-        const contents_hash = @bitCast(std.zig.SrcHash, hash_u32s.*);
+        const contents_hash = @as(std.zig.SrcHash, @bitCast(hash_u32s.*));
         return contents_hash;
     }
 
@@ -633,7 +633,7 @@ pub const Decl = struct {
         if (!decl.has_align) return .none;
         assert(decl.zir_decl_index != 0);
         const zir = decl.getFileScope(mod).zir;
-        return @enumFromInt(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]);
+        return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[decl.zir_decl_index + 8]));
     }
 
     pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
@@ -641,7 +641,7 @@ pub const Decl = struct {
         assert(decl.zir_decl_index != 0);
         const zir = decl.getFileScope(mod).zir;
         const extra_index = decl.zir_decl_index + 8 + @intFromBool(decl.has_align);
-        return @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+        return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
     }
 
     pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
@@ -649,7 +649,7 @@ pub const Decl = struct {
         assert(decl.zir_decl_index != 0);
         const zir = decl.getFileScope(mod).zir;
         const extra_index = decl.zir_decl_index + 8 + @intFromBool(decl.has_align) + 1;
-        return @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+        return @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
     }
 
     pub fn relativeToLine(decl: Decl, offset: u32) u32 {
@@ -657,11 +657,11 @@ pub const Decl = struct {
     }
 
     pub fn relativeToNodeIndex(decl: Decl, offset: i32) Ast.Node.Index {
-        return @bitCast(Ast.Node.Index, offset + @bitCast(i32, decl.src_node));
+        return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(decl.src_node))));
     }
 
     pub fn nodeIndexToRelative(decl: Decl, node_index: Ast.Node.Index) i32 {
-        return @bitCast(i32, node_index) - @bitCast(i32, decl.src_node);
+        return @as(i32, @bitCast(node_index)) - @as(i32, @bitCast(decl.src_node));
     }
 
     pub fn tokSrcLoc(decl: Decl, token_index: Ast.TokenIndex) LazySrcLoc {
@@ -864,7 +864,7 @@ pub const Decl = struct {
 
     pub fn getAlignment(decl: Decl, mod: *Module) u32 {
         assert(decl.has_tv);
-        return @intCast(u32, decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod));
+        return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)));
     }
 
     pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
@@ -922,7 +922,7 @@ pub const Struct = struct {
         _,
 
         pub fn toOptional(i: Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(i));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
         }
     };
 
@@ -931,12 +931,12 @@ pub const Struct = struct {
         _,
 
         pub fn init(oi: ?Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
         }
 
         pub fn unwrap(oi: OptionalIndex) ?Index {
             if (oi == .none) return null;
-            return @enumFromInt(Index, @intFromEnum(oi));
+            return @as(Index, @enumFromInt(@intFromEnum(oi)));
         }
     };
 
@@ -964,7 +964,7 @@ pub const Struct = struct {
         ) u32 {
             if (field.abi_align.toByteUnitsOptional()) |abi_align| {
                 assert(layout != .Packed);
-                return @intCast(u32, abi_align);
+                return @as(u32, @intCast(abi_align));
             }
 
             const target = mod.getTarget();
@@ -1042,7 +1042,7 @@ pub const Struct = struct {
         var bit_sum: u64 = 0;
         for (s.fields.values(), 0..) |field, i| {
             if (i == index) {
-                return @intCast(u16, bit_sum);
+                return @as(u16, @intCast(bit_sum));
             }
             bit_sum += field.ty.bitSize(mod);
         }
@@ -1123,7 +1123,7 @@ pub const Union = struct {
         _,
 
         pub fn toOptional(i: Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(i));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
         }
     };
 
@@ -1132,12 +1132,12 @@ pub const Union = struct {
         _,
 
         pub fn init(oi: ?Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
         }
 
         pub fn unwrap(oi: OptionalIndex) ?Index {
             if (oi == .none) return null;
-            return @enumFromInt(Index, @intFromEnum(oi));
+            return @as(Index, @enumFromInt(@intFromEnum(oi)));
         }
     };
 
@@ -1151,7 +1151,7 @@ pub const Union = struct {
         /// Keep implementation in sync with `Sema.unionFieldAlignment`.
         /// Prefer to call that function instead of this one during Sema.
         pub fn normalAlignment(field: Field, mod: *Module) u32 {
-            return @intCast(u32, field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod));
+            return @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse field.ty.abiAlignment(mod)));
         }
     };
 
@@ -1205,7 +1205,7 @@ pub const Union = struct {
                 most_index = i;
             }
         }
-        return @intCast(u32, most_index);
+        return @as(u32, @intCast(most_index));
     }
 
     /// Returns 0 if the union is represented with 0 bits at runtime.
@@ -1267,11 +1267,11 @@ pub const Union = struct {
             const field_size = field.ty.abiSize(mod);
             if (field_size > payload_size) {
                 payload_size = field_size;
-                biggest_field = @intCast(u32, i);
+                biggest_field = @as(u32, @intCast(i));
             }
             if (field_align > payload_align) {
-                payload_align = @intCast(u32, field_align);
-                most_aligned_field = @intCast(u32, i);
+                payload_align = @as(u32, @intCast(field_align));
+                most_aligned_field = @as(u32, @intCast(i));
                 most_aligned_field_size = field_size;
             }
         }
@@ -1303,7 +1303,7 @@ pub const Union = struct {
             size += payload_size;
             const prev_size = size;
             size = std.mem.alignForward(u64, size, tag_align);
-            padding = @intCast(u32, size - prev_size);
+            padding = @as(u32, @intCast(size - prev_size));
         } else {
             // {Payload, Tag}
             size += payload_size;
@@ -1311,7 +1311,7 @@ pub const Union = struct {
             size += tag_size;
             const prev_size = size;
             size = std.mem.alignForward(u64, size, payload_align);
-            padding = @intCast(u32, size - prev_size);
+            padding = @as(u32, @intCast(size - prev_size));
         }
         return .{
             .abi_size = size,
@@ -1409,7 +1409,7 @@ pub const Fn = struct {
         _,
 
         pub fn toOptional(i: Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(i));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
         }
     };
 
@@ -1418,12 +1418,12 @@ pub const Fn = struct {
         _,
 
         pub fn init(oi: ?Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
         }
 
         pub fn unwrap(oi: OptionalIndex) ?Index {
             if (oi == .none) return null;
-            return @enumFromInt(Index, @intFromEnum(oi));
+            return @as(Index, @enumFromInt(@intFromEnum(oi)));
         }
     };
 
@@ -1477,7 +1477,7 @@ pub const Fn = struct {
             _,
 
             pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
-                return @enumFromInt(InferredErrorSet.OptionalIndex, @intFromEnum(i));
+                return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i)));
             }
         };
 
@@ -1486,12 +1486,12 @@ pub const Fn = struct {
             _,
 
             pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
-                return @enumFromInt(InferredErrorSet.OptionalIndex, @intFromEnum(oi orelse return .none));
+                return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
             }
 
             pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
                 if (oi == .none) return null;
-                return @enumFromInt(InferredErrorSet.Index, @intFromEnum(oi));
+                return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi)));
             }
         };
 
@@ -1613,7 +1613,7 @@ pub const Namespace = struct {
         _,
 
         pub fn toOptional(i: Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(i));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
         }
     };
 
@@ -1622,12 +1622,12 @@ pub const Namespace = struct {
         _,
 
         pub fn init(oi: ?Index) OptionalIndex {
-            return @enumFromInt(OptionalIndex, @intFromEnum(oi orelse return .none));
+            return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
         }
 
         pub fn unwrap(oi: OptionalIndex) ?Index {
             if (oi == .none) return null;
-            return @enumFromInt(Index, @intFromEnum(oi));
+            return @as(Index, @enumFromInt(@intFromEnum(oi)));
         }
     };
 
@@ -1867,7 +1867,7 @@ pub const File = struct {
         if (stat.size > std.math.maxInt(u32))
             return error.FileTooBig;
 
-        const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0);
+        const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
         defer if (!file.source_loaded) gpa.free(source);
         const amt = try f.readAll(source);
         if (amt != stat.size)
@@ -2116,7 +2116,7 @@ pub const SrcLoc = struct {
     }
 
     pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.TokenIndex {
-        return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node));
+        return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node))));
     }
 
     pub const Span = struct {
@@ -2135,7 +2135,7 @@ pub const SrcLoc = struct {
             .token_abs => |tok_index| {
                 const tree = try src_loc.file_scope.getTree(gpa);
                 const start = tree.tokens.items(.start)[tok_index];
-                const end = start + @intCast(u32, tree.tokenSlice(tok_index).len);
+                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                 return Span{ .start = start, .end = end, .main = start };
             },
             .node_abs => |node| {
@@ -2146,14 +2146,14 @@ pub const SrcLoc = struct {
                 const tree = try src_loc.file_scope.getTree(gpa);
                 const tok_index = src_loc.declSrcToken();
                 const start = tree.tokens.items(.start)[tok_index] + byte_off;
-                const end = start + @intCast(u32, tree.tokenSlice(tok_index).len);
+                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                 return Span{ .start = start, .end = end, .main = start };
             },
             .token_offset => |tok_off| {
                 const tree = try src_loc.file_scope.getTree(gpa);
                 const tok_index = src_loc.declSrcToken() + tok_off;
                 const start = tree.tokens.items(.start)[tok_index];
-                const end = start + @intCast(u32, tree.tokenSlice(tok_index).len);
+                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                 return Span{ .start = start, .end = end, .main = start };
             },
             .node_offset => |traced_off| {
@@ -2206,7 +2206,7 @@ pub const SrcLoc = struct {
                 }
                 const tok_index = full.ast.mut_token + 1; // the name token
                 const start = tree.tokens.items(.start)[tok_index];
-                const end = start + @intCast(u32, tree.tokenSlice(tok_index).len);
+                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                 return Span{ .start = start, .end = end, .main = start };
             },
             .node_offset_var_decl_align => |node_off| {
@@ -2292,7 +2292,7 @@ pub const SrcLoc = struct {
                     else => tree.firstToken(node) - 2,
                 };
                 const start = tree.tokens.items(.start)[tok_index];
-                const end = start + @intCast(u32, tree.tokenSlice(tok_index).len);
+                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                 return Span{ .start = start, .end = end, .main = start };
             },
             .node_offset_deref_ptr => |node_off| {
@@ -2359,7 +2359,7 @@ pub const SrcLoc = struct {
                 // that contains this input.
                 const node_tags = tree.nodes.items(.tag);
                 for (node_tags, 0..) |node_tag, node_usize| {
-                    const node = @intCast(Ast.Node.Index, node_usize);
+                    const node = @as(Ast.Node.Index, @intCast(node_usize));
                     switch (node_tag) {
                         .for_simple, .@"for" => {
                             const for_full = tree.fullFor(node).?;
@@ -2479,7 +2479,7 @@ pub const SrcLoc = struct {
                 };
                 const start = tree.tokens.items(.start)[start_tok];
                 const end_start = tree.tokens.items(.start)[end_tok];
-                const end = end_start + @intCast(u32, tree.tokenSlice(end_tok).len);
+                const end = end_start + @as(u32, @intCast(tree.tokenSlice(end_tok).len));
                 return Span{ .start = start, .end = end, .main = start };
             },
             .node_offset_fn_type_align => |node_off| {
@@ -2539,7 +2539,7 @@ pub const SrcLoc = struct {
                 const tree = try src_loc.file_scope.getTree(gpa);
                 const token_tags = tree.tokens.items(.tag);
                 const main_token = tree.nodes.items(.main_token)[src_loc.parent_decl_node];
-                const tok_index = @bitCast(Ast.TokenIndex, token_off + @bitCast(i32, main_token));
+                const tok_index = @as(Ast.TokenIndex, @bitCast(token_off + @as(i32, @bitCast(main_token))));
 
                 var first_tok = tok_index;
                 while (true) switch (token_tags[first_tok - 1]) {
@@ -2568,7 +2568,7 @@ pub const SrcLoc = struct {
                 const full = tree.fullFnProto(&buf, parent_node).?;
                 const tok_index = full.lib_name.?;
                 const start = tree.tokens.items(.start)[tok_index];
-                const end = start + @intCast(u32, tree.tokenSlice(tok_index).len);
+                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                 return Span{ .start = start, .end = end, .main = start };
             },
 
@@ -2761,7 +2761,7 @@ pub const SrcLoc = struct {
             end_tok = main;
         }
         const start_off = token_starts[start_tok];
-        const end_off = token_starts[end_tok] + @intCast(u32, tree.tokenSlice(end_tok).len);
+        const end_off = token_starts[end_tok] + @as(u32, @intCast(tree.tokenSlice(end_tok).len));
         return Span{ .start = start_off, .end = end_off, .main = token_starts[main] };
     }
 };
@@ -3577,7 +3577,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
     if (stat.size > std.math.maxInt(u32))
         return error.FileTooBig;
 
-    const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0);
+    const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
     defer if (!file.source_loaded) gpa.free(source);
     const amt = try source_file.readAll(source);
     if (amt != stat.size)
@@ -3609,21 +3609,21 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
         if (file.zir.instructions.len == 0)
             @as([*]const u8, undefined)
         else
-            @ptrCast([*]const u8, safety_buffer.ptr)
+            @as([*]const u8, @ptrCast(safety_buffer.ptr))
     else
-        @ptrCast([*]const u8, file.zir.instructions.items(.data).ptr);
+        @as([*]const u8, @ptrCast(file.zir.instructions.items(.data).ptr));
     if (data_has_safety_tag) {
         // The `Data` union has a safety tag but in the file format we store it without.
         for (file.zir.instructions.items(.data), 0..) |*data, i| {
-            const as_struct = @ptrCast(*const HackDataLayout, data);
+            const as_struct = @as(*const HackDataLayout, @ptrCast(data));
             safety_buffer[i] = as_struct.data;
         }
     }
 
     const header: Zir.Header = .{
-        .instructions_len = @intCast(u32, file.zir.instructions.len),
-        .string_bytes_len = @intCast(u32, file.zir.string_bytes.len),
-        .extra_len = @intCast(u32, file.zir.extra.len),
+        .instructions_len = @as(u32, @intCast(file.zir.instructions.len)),
+        .string_bytes_len = @as(u32, @intCast(file.zir.string_bytes.len)),
+        .extra_len = @as(u32, @intCast(file.zir.extra.len)),
 
         .stat_size = stat.size,
         .stat_inode = stat.inode,
@@ -3631,11 +3631,11 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
     };
     var iovecs = [_]std.os.iovec_const{
         .{
-            .iov_base = @ptrCast([*]const u8, &header),
+            .iov_base = @as([*]const u8, @ptrCast(&header)),
             .iov_len = @sizeOf(Zir.Header),
         },
         .{
-            .iov_base = @ptrCast([*]const u8, file.zir.instructions.items(.tag).ptr),
+            .iov_base = @as([*]const u8, @ptrCast(file.zir.instructions.items(.tag).ptr)),
             .iov_len = file.zir.instructions.len,
         },
         .{
@@ -3647,7 +3647,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
             .iov_len = file.zir.string_bytes.len,
         },
         .{
-            .iov_base = @ptrCast([*]const u8, file.zir.extra.ptr),
+            .iov_base = @as([*]const u8, @ptrCast(file.zir.extra.ptr)),
             .iov_len = file.zir.extra.len * 4,
         },
     };
@@ -3722,13 +3722,13 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File)
     defer if (data_has_safety_tag) gpa.free(safety_buffer);
 
     const data_ptr = if (data_has_safety_tag)
-        @ptrCast([*]u8, safety_buffer.ptr)
+        @as([*]u8, @ptrCast(safety_buffer.ptr))
     else
-        @ptrCast([*]u8, zir.instructions.items(.data).ptr);
+        @as([*]u8, @ptrCast(zir.instructions.items(.data).ptr));
 
     var iovecs = [_]std.os.iovec{
         .{
-            .iov_base = @ptrCast([*]u8, zir.instructions.items(.tag).ptr),
+            .iov_base = @as([*]u8, @ptrCast(zir.instructions.items(.tag).ptr)),
             .iov_len = header.instructions_len,
         },
         .{
@@ -3740,7 +3740,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File)
             .iov_len = header.string_bytes_len,
         },
         .{
-            .iov_base = @ptrCast([*]u8, zir.extra.ptr),
+            .iov_base = @as([*]u8, @ptrCast(zir.extra.ptr)),
             .iov_len = header.extra_len * 4,
         },
     };
@@ -3753,7 +3753,7 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File)
         const tags = zir.instructions.items(.tag);
         for (zir.instructions.items(.data), 0..) |*data, i| {
             const union_tag = Zir.Inst.Tag.data_tags[@intFromEnum(tags[i])];
-            const as_struct = @ptrCast(*HackDataLayout, data);
+            const as_struct = @as(*HackDataLayout, @ptrCast(data));
             as_struct.* = .{
                 .safety_tag = @intFromEnum(union_tag),
                 .data = safety_buffer[i],
@@ -4394,7 +4394,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
         const struct_obj = mod.structPtr(struct_index);
         struct_obj.zir_index = main_struct_inst;
         const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
-        const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+        const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
         struct_obj.is_tuple = small.is_tuple;
 
         var sema_arena = std.heap.ArenaAllocator.init(gpa);
@@ -5051,13 +5051,13 @@ pub fn scanNamespace(
             cur_bit_bag = zir.extra[bit_bag_index];
             bit_bag_index += 1;
         }
-        const flags = @truncate(u4, cur_bit_bag);
+        const flags = @as(u4, @truncate(cur_bit_bag));
         cur_bit_bag >>= 4;
 
         const decl_sub_index = extra_index;
         extra_index += 8; // src_hash(4) + line(1) + name(1) + value(1) + doc_comment(1)
-        extra_index += @truncate(u1, flags >> 2); // Align
-        extra_index += @as(u2, @truncate(u1, flags >> 3)) * 2; // Link section or address space, consists of 2 Refs
+        extra_index += @as(u1, @truncate(flags >> 2)); // Align
+        extra_index += @as(u2, @as(u1, @truncate(flags >> 3))) * 2; // Link section or address space, consists of 2 Refs
 
         try scanDecl(&scan_decl_iter, decl_sub_index, flags);
     }
@@ -5195,7 +5195,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
         new_decl.is_exported = is_exported;
         new_decl.has_align = has_align;
         new_decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
-        new_decl.zir_decl_index = @intCast(u32, decl_sub_index);
+        new_decl.zir_decl_index = @as(u32, @intCast(decl_sub_index));
         new_decl.alive = true; // This Decl corresponds to an AST node and therefore always alive.
         return;
     }
@@ -5229,7 +5229,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
     decl.kind = kind;
     decl.has_align = has_align;
     decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
-    decl.zir_decl_index = @intCast(u32, decl_sub_index);
+    decl.zir_decl_index = @as(u32, @intCast(decl_sub_index));
     if (decl.getOwnedFunctionIndex(mod) != .none) {
         switch (comp.bin_file.tag) {
             .coff, .elf, .macho, .plan9 => {
@@ -5481,7 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
     // This could be a generic function instantiation, however, in which case we need to
     // map the comptime parameters to constant values and only emit arg AIR instructions
     // for the runtime ones.
-    const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len);
+    const runtime_params_len = @as(u32, @intCast(mod.typeToFunc(fn_ty).?.param_types.len));
     try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
     try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType`
     try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
@@ -5524,13 +5524,13 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
             continue;
         }
         const air_ty = try sema.addType(param_ty);
-        const arg_index = @intCast(u32, sema.air_instructions.len);
+        const arg_index = @as(u32, @intCast(sema.air_instructions.len));
         inner_block.instructions.appendAssumeCapacity(arg_index);
         sema.air_instructions.appendAssumeCapacity(.{
             .tag = .arg,
             .data = .{ .arg = .{
                 .ty = air_ty,
-                .src_index = @intCast(u32, total_param_index),
+                .src_index = @as(u32, @intCast(total_param_index)),
             } },
         });
         sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index));
@@ -5593,7 +5593,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE
     try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
         inner_block.instructions.items.len);
     const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
-        .body_len = @intCast(u32, inner_block.instructions.items.len),
+        .body_len = @as(u32, @intCast(inner_block.instructions.items.len)),
     });
     sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items);
     sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
@@ -5671,7 +5671,7 @@ pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index
     }
     const ptr = try mod.allocated_namespaces.addOne(mod.gpa);
     ptr.* = initialization;
-    return @enumFromInt(Namespace.Index, mod.allocated_namespaces.len - 1);
+    return @as(Namespace.Index, @enumFromInt(mod.allocated_namespaces.len - 1));
 }
 
 pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
@@ -5729,7 +5729,7 @@ pub fn allocateNewDecl(
         }
         break :d .{
             .new_decl = decl,
-            .decl_index = @enumFromInt(Decl.Index, mod.allocated_decls.len - 1),
+            .decl_index = @as(Decl.Index, @enumFromInt(mod.allocated_decls.len - 1)),
         };
     };
 
@@ -5767,7 +5767,7 @@ pub fn getErrorValue(
     name: InternPool.NullTerminatedString,
 ) Allocator.Error!ErrorInt {
     const gop = try mod.global_error_set.getOrPut(mod.gpa, name);
-    return @intCast(ErrorInt, gop.index);
+    return @as(ErrorInt, @intCast(gop.index));
 }
 
 pub fn getErrorValueFromSlice(
@@ -6139,7 +6139,7 @@ pub fn paramSrc(
         if (i == param_i) {
             if (param.anytype_ellipsis3) |some| {
                 const main_token = tree.nodes.items(.main_token)[decl.src_node];
-                return .{ .token_offset_param = @bitCast(i32, some) - @bitCast(i32, main_token) };
+                return .{ .token_offset_param = @as(i32, @bitCast(some)) - @as(i32, @bitCast(main_token)) };
             }
             return .{ .node_offset_param = decl.nodeIndexToRelative(param.type_expr) };
         }
@@ -6892,11 +6892,11 @@ pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocato
 /// losing data if the representation wasn't correct.
 pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
     const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) {
-        16 => .{ .f16 = @floatCast(f16, x) },
-        32 => .{ .f32 = @floatCast(f32, x) },
-        64 => .{ .f64 = @floatCast(f64, x) },
-        80 => .{ .f80 = @floatCast(f80, x) },
-        128 => .{ .f128 = @floatCast(f128, x) },
+        16 => .{ .f16 = @as(f16, @floatCast(x)) },
+        32 => .{ .f32 = @as(f32, @floatCast(x)) },
+        64 => .{ .f64 = @as(f64, @floatCast(x)) },
+        80 => .{ .f80 = @as(f80, @floatCast(x)) },
+        128 => .{ .f128 = @as(f128, @floatCast(x)) },
         else => unreachable,
     };
     const i = try intern(mod, .{ .float = .{
@@ -6956,18 +6956,18 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
             assert(sign);
             // Protect against overflow in the following negation.
             if (x == std.math.minInt(i64)) return 64;
-            return Type.smallestUnsignedBits(@intCast(u64, -(x + 1))) + 1;
+            return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1;
         },
         .u64 => |x| {
             return Type.smallestUnsignedBits(x) + @intFromBool(sign);
         },
         .big_int => |big| {
-            if (big.positive) return @intCast(u16, big.bitCountAbs() + @intFromBool(sign));
+            if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign)));
 
             // Zero is still a possibility, in which case unsigned is fine
             if (big.eqZero()) return 0;
 
-            return @intCast(u16, big.bitCountTwosComp());
+            return @as(u16, @intCast(big.bitCountTwosComp()));
         },
         .lazy_align => |lazy_ty| {
             return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @intFromBool(sign);
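
The many `Index`/`OptionalIndex` pairs in this file share one unwrap shape: `.none` is a sentinel tag, and everything else round-trips through the integer value. A hedged sketch with simplified stand-in enums (not the real Module types):

    const std = @import("std");

    const Index = enum(u32) { _ };

    const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,

        fn unwrap(oi: OptionalIndex) ?Index {
            if (oi == .none) return null;
            // Old syntax: @enumFromInt(Index, @intFromEnum(oi))
            return @as(Index, @enumFromInt(@intFromEnum(oi)));
        }
    };

    test "unwrap" {
        const some = @as(OptionalIndex, @enumFromInt(7));
        try std.testing.expectEqual(@as(u32, 7), @intFromEnum(some.unwrap().?));
        try std.testing.expect(OptionalIndex.none.unwrap() == null);
    }
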
src/objcopy.zig
@@ -345,7 +345,7 @@ const BinaryElfOutput = struct {
 
             const shstrtab_shdr = (try section_headers.next()).?;
 
-            const buffer = try allocator.alloc(u8, @intCast(usize, shstrtab_shdr.sh_size));
+            const buffer = try allocator.alloc(u8, @as(usize, @intCast(shstrtab_shdr.sh_size)));
             errdefer allocator.free(buffer);
 
             const num_read = try elf_file.preadAll(buffer, shstrtab_shdr.sh_offset);
@@ -363,11 +363,11 @@ const BinaryElfOutput = struct {
 
                 newSection.binaryOffset = 0;
                 newSection.elfOffset = section.sh_offset;
-                newSection.fileSize = @intCast(usize, section.sh_size);
+                newSection.fileSize = @as(usize, @intCast(section.sh_size));
                 newSection.segment = null;
 
                 newSection.name = if (self.shstrtab) |shstrtab|
-                    std.mem.span(@ptrCast([*:0]const u8, &shstrtab[section.sh_name]))
+                    std.mem.span(@as([*:0]const u8, @ptrCast(&shstrtab[section.sh_name])))
                 else
                     null;
 
@@ -382,7 +382,7 @@ const BinaryElfOutput = struct {
 
                 newSegment.physicalAddress = if (phdr.p_paddr != 0) phdr.p_paddr else phdr.p_vaddr;
                 newSegment.virtualAddress = phdr.p_vaddr;
-                newSegment.fileSize = @intCast(usize, phdr.p_filesz);
+                newSegment.fileSize = @as(usize, @intCast(phdr.p_filesz));
                 newSegment.elfOffset = phdr.p_offset;
                 newSegment.binaryOffset = 0;
                 newSegment.firstSection = null;
@@ -478,8 +478,8 @@ const HexWriter = struct {
     const MAX_PAYLOAD_LEN: u8 = 16;
 
     fn addressParts(address: u16) [2]u8 {
-        const msb = @truncate(u8, address >> 8);
-        const lsb = @truncate(u8, address);
+        const msb = @as(u8, @truncate(address >> 8));
+        const lsb = @as(u8, @truncate(address));
         return [2]u8{ msb, lsb };
     }
 
@@ -508,14 +508,14 @@ const HexWriter = struct {
 
         fn Data(address: u32, data: []const u8) Record {
             return Record{
-                .address = @intCast(u16, address % 0x10000),
+                .address = @as(u16, @intCast(address % 0x10000)),
                 .payload = .{ .Data = data },
             };
         }
 
         fn Address(address: u32) Record {
             assert(address > 0xFFFF);
-            const segment = @intCast(u16, address / 0x10000);
+            const segment = @as(u16, @intCast(address / 0x10000));
             if (address > 0xFFFFF) {
                 return Record{
                     .address = 0,
@@ -540,7 +540,7 @@ const HexWriter = struct {
         fn checksum(self: Record) u8 {
             const payload_bytes = self.getPayloadBytes();
 
-            var sum: u8 = @intCast(u8, payload_bytes.len);
+            var sum: u8 = @as(u8, @intCast(payload_bytes.len));
             const parts = addressParts(self.address);
             sum +%= parts[0];
             sum +%= parts[1];
@@ -560,7 +560,7 @@ const HexWriter = struct {
             assert(payload_bytes.len <= MAX_PAYLOAD_LEN);
 
             const line = try std.fmt.bufPrint(&outbuf, ":{0X:0>2}{1X:0>4}{2X:0>2}{3s}{4X:0>2}" ++ linesep, .{
-                @intCast(u8, payload_bytes.len),
+                @as(u8, @intCast(payload_bytes.len)),
                 self.address,
                 @intFromEnum(self.payload),
                 std.fmt.fmtSliceHexUpper(payload_bytes),
@@ -574,10 +574,10 @@ const HexWriter = struct {
         var buf: [MAX_PAYLOAD_LEN]u8 = undefined;
         var bytes_read: usize = 0;
         while (bytes_read < segment.fileSize) {
-            const row_address = @intCast(u32, segment.physicalAddress + bytes_read);
+            const row_address = @as(u32, @intCast(segment.physicalAddress + bytes_read));
 
             const remaining = segment.fileSize - bytes_read;
-            const to_read = @intCast(usize, @min(remaining, MAX_PAYLOAD_LEN));
+            const to_read = @as(usize, @intCast(@min(remaining, MAX_PAYLOAD_LEN)));
             const did_read = try elf_file.preadAll(buf[0..to_read], segment.elfOffset + bytes_read);
             if (did_read < to_read) return error.UnexpectedEOF;
 
@@ -593,7 +593,7 @@ const HexWriter = struct {
             try Record.Address(address).write(self.out_file);
         }
         try record.write(self.out_file);
-        self.prev_addr = @intCast(u32, record.address + data.len);
+        self.prev_addr = @as(u32, @intCast(record.address + data.len));
     }
 
     fn writeEOF(self: HexWriter) File.WriteError!void {
@@ -814,7 +814,7 @@ fn ElfFile(comptime is_64: bool) type {
                 const need_strings = (idx == header.shstrndx);
 
                 if (need_data or need_strings) {
-                    const buffer = try allocator.alignedAlloc(u8, section_memory_align, @intCast(usize, section.section.sh_size));
+                    const buffer = try allocator.alignedAlloc(u8, section_memory_align, @as(usize, @intCast(section.section.sh_size)));
                     const bytes_read = try in_file.preadAll(buffer, section.section.sh_offset);
                     if (bytes_read != section.section.sh_size) return error.TRUNCATED_ELF;
                     section.payload = buffer;
@@ -831,7 +831,7 @@ fn ElfFile(comptime is_64: bool) type {
                 } else null;
 
                 if (section.section.sh_name != 0 and header.shstrndx != elf.SHN_UNDEF)
-                    section.name = std.mem.span(@ptrCast([*:0]const u8, &sections[header.shstrndx].payload.?[section.section.sh_name]));
+                    section.name = std.mem.span(@as([*:0]const u8, @ptrCast(&sections[header.shstrndx].payload.?[section.section.sh_name])));
 
                 const category_from_program: SectionCategory = if (section.segment != null) .exe else .debug;
                 section.category = switch (section.section.sh_type) {
@@ -935,7 +935,7 @@ fn ElfFile(comptime is_64: bool) type {
                 const update = &sections_update[self.raw_elf_header.e_shstrndx];
 
                 const name: []const u8 = ".gnu_debuglink";
-                const new_offset = @intCast(u32, strtab.payload.?.len);
+                const new_offset = @as(u32, @intCast(strtab.payload.?.len));
                 const buf = try allocator.alignedAlloc(u8, section_memory_align, new_offset + name.len + 1);
                 @memcpy(buf[0..new_offset], strtab.payload.?);
                 @memcpy(buf[new_offset..][0..name.len], name);
@@ -965,7 +965,7 @@ fn ElfFile(comptime is_64: bool) type {
                         update.payload = payload;
                         update.section = section.section;
                         update.section.?.sh_addralign = @alignOf(Elf_Chdr);
-                        update.section.?.sh_size = @intCast(Elf_OffSize, payload.len);
+                        update.section.?.sh_size = @as(Elf_OffSize, @intCast(payload.len));
                         update.section.?.sh_flags |= elf.SHF_COMPRESSED;
                     }
                 }
@@ -991,7 +991,7 @@ fn ElfFile(comptime is_64: bool) type {
                 const data = std.mem.sliceAsBytes(self.program_segments);
                 assert(data.len == @as(usize, updated_elf_header.e_phentsize) * updated_elf_header.e_phnum);
                 cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = data, .out_offset = updated_elf_header.e_phoff } });
-                eof_offset = updated_elf_header.e_phoff + @intCast(Elf_OffSize, data.len);
+                eof_offset = updated_elf_header.e_phoff + @as(Elf_OffSize, @intCast(data.len));
             }
 
             // update sections and queue payload writes
@@ -1032,7 +1032,7 @@ fn ElfFile(comptime is_64: bool) type {
                         dest.sh_info = sections_update[src.sh_info].remap_idx;
 
                     if (payload) |data|
-                        dest.sh_size = @intCast(Elf_OffSize, data.len);
+                        dest.sh_size = @as(Elf_OffSize, @intCast(data.len));
 
                     const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign;
                     dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign);
@@ -1056,7 +1056,7 @@ fn ElfFile(comptime is_64: bool) type {
                                     const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len);
                                     @memcpy(data, src_data);
 
-                                    const defs = @ptrCast([*]Elf_Verdef, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Verdef)];
+                                    const defs = @as([*]Elf_Verdef, @ptrCast(data))[0 .. @as(usize, @intCast(src.sh_size)) / @sizeOf(Elf_Verdef)];
                                     for (defs) |*def| {
                                         if (def.vd_ndx != elf.SHN_UNDEF)
                                             def.vd_ndx = sections_update[src.sh_info].remap_idx;
@@ -1068,7 +1068,7 @@ fn ElfFile(comptime is_64: bool) type {
                                     const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len);
                                     @memcpy(data, src_data);
 
-                                    const syms = @ptrCast([*]Elf_Sym, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Sym)];
+                                    const syms = @as([*]Elf_Sym, @ptrCast(data))[0 .. @as(usize, @intCast(src.sh_size)) / @sizeOf(Elf_Sym)];
                                     for (syms) |*sym| {
                                         if (sym.st_shndx != elf.SHN_UNDEF and sym.st_shndx < elf.SHN_LORESERVE)
                                             sym.st_shndx = sections_update[sym.st_shndx].remap_idx;
@@ -1110,7 +1110,7 @@ fn ElfFile(comptime is_64: bool) type {
                         .sh_flags = 0,
                         .sh_addr = 0,
                         .sh_offset = eof_offset,
-                        .sh_size = @intCast(Elf_OffSize, payload.len),
+                        .sh_size = @as(Elf_OffSize, @intCast(payload.len)),
                         .sh_link = elf.SHN_UNDEF,
                         .sh_info = elf.SHN_UNDEF,
                         .sh_addralign = 4,
@@ -1119,7 +1119,7 @@ fn ElfFile(comptime is_64: bool) type {
                     dest_section_idx += 1;
 
                     cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = payload, .out_offset = eof_offset } });
-                    eof_offset += @intCast(Elf_OffSize, payload.len);
+                    eof_offset += @as(Elf_OffSize, @intCast(payload.len));
                 }
 
                 assert(dest_section_idx == new_shnum);
@@ -1232,7 +1232,7 @@ const ElfFileHelper = struct {
                         fused_cmd = null;
                     }
                     if (data.out_offset > offset) {
-                        consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(usize, data.out_offset - offset)], .out_offset = offset } });
+                        consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@as(usize, @intCast(data.out_offset - offset))], .out_offset = offset } });
                     }
                     consolidated.appendAssumeCapacity(cmd);
                     offset = data.out_offset + data.data.len;
@@ -1249,7 +1249,7 @@ const ElfFileHelper = struct {
                         } else {
                             consolidated.appendAssumeCapacity(prev);
                             if (range.out_offset > offset) {
-                                consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@intCast(usize, range.out_offset - offset)], .out_offset = offset } });
+                                consolidated.appendAssumeCapacity(.{ .write_data = .{ .data = zeroes[0..@as(usize, @intCast(range.out_offset - offset))], .out_offset = offset } });
                             }
                             fused_cmd = cmd;
                         }
@@ -1286,7 +1286,7 @@ const ElfFileHelper = struct {
         var section_reader = std.io.limitedReader(in_file.reader(), size);
 
         // Allocate a buffer as large as the decompressed data. If the compressed form doesn't fit, keep the data uncompressed.
-        const compressed_data = try allocator.alignedAlloc(u8, 8, @intCast(usize, size));
+        const compressed_data = try allocator.alignedAlloc(u8, 8, @as(usize, @intCast(size)));
         var compressed_stream = std.io.fixedBufferStream(compressed_data);
 
         try compressed_stream.writer().writeAll(prefix);
@@ -1317,7 +1317,7 @@ const ElfFileHelper = struct {
             };
         }
 
-        const compressed_len = @intCast(usize, compressed_stream.getPos() catch unreachable);
+        const compressed_len = @as(usize, @intCast(compressed_stream.getPos() catch unreachable));
         const data = allocator.realloc(compressed_data, compressed_len) catch compressed_data;
         return data[0..compressed_len];
     }
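
The hunks above are all instances of one mechanical rewrite: the destination type moves out of the cast builtin and into an @as wrapper, so each expression keeps its old, explicit result type while the builtin now infers its result from context. A reduced, self-contained sketch of the two shapes that dominate this file (fileSize, nameAt, and their parameters are illustrative stand-ins, not code from this diff):

const std = @import("std");

// Narrowing integer cast: previously `@intCast(usize, sh_size)`.
fn fileSize(sh_size: u64) usize {
    return @as(usize, @intCast(sh_size)); // still safety-checked in safe build modes
}

// Pointer reinterpretation: previously `@ptrCast([*:0]const u8, ptr)`.
// Reads a NUL-terminated name out of a string-table slice.
fn nameAt(shstrtab: []const u8, off: usize) []const u8 {
    return std.mem.span(@as([*:0]const u8, @ptrCast(&shstrtab[off])));
}

test "fileSize narrows a u64 that fits" {
    try std.testing.expectEqual(@as(usize, 42), fileSize(42));
}
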
src/Package.zig
@@ -390,10 +390,10 @@ const Report = struct {
             .src_loc = try eb.addSourceLocation(.{
                 .src_path = try eb.addString(file_path),
                 .span_start = token_starts[msg.tok],
-                .span_end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
+                .span_end = @as(u32, @intCast(token_starts[msg.tok] + ast.tokenSlice(msg.tok).len)),
                 .span_main = token_starts[msg.tok] + msg.off,
-                .line = @intCast(u32, start_loc.line),
-                .column = @intCast(u32, start_loc.column),
+                .line = @as(u32, @intCast(start_loc.line)),
+                .column = @as(u32, @intCast(start_loc.column)),
                 .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]),
             }),
             .notes_len = notes_len,
src/print_air.zig
@@ -91,7 +91,7 @@ const Writer = struct {
     fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
         for (w.air.instructions.items(.tag), 0..) |tag, i| {
             if (tag != .interned) continue;
-            const inst = @intCast(Air.Inst.Index, i);
+            const inst = @as(Air.Inst.Index, @intCast(i));
             try w.writeInst(s, inst);
             try s.writeByte('\n');
         }
@@ -424,8 +424,8 @@ const Writer = struct {
         const mod = w.module;
         const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
         const vector_ty = w.air.getRefType(ty_pl.ty);
-        const len = @intCast(usize, vector_ty.arrayLen(mod));
-        const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
+        const len = @as(usize, @intCast(vector_ty.arrayLen(mod)));
+        const elements = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[ty_pl.payload..][0..len]));
 
         try w.writeType(s, vector_ty);
         try s.writeAll(", [");
@@ -607,8 +607,8 @@ const Writer = struct {
     fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
         const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
         const extra = w.air.extraData(Air.Asm, ty_pl.payload);
-        const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
-        const clobbers_len = @truncate(u31, extra.data.flags);
+        const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
+        const clobbers_len = @as(u31, @truncate(extra.data.flags));
         var extra_i: usize = extra.end;
         var op_index: usize = 0;
 
@@ -619,9 +619,9 @@ const Writer = struct {
             try s.writeAll(", volatile");
         }
 
-        const outputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.outputs_len]);
+        const outputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.outputs_len]));
         extra_i += outputs.len;
-        const inputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.inputs_len]);
+        const inputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.inputs_len]));
         extra_i += inputs.len;
 
         for (outputs) |output| {
@@ -699,7 +699,7 @@ const Writer = struct {
     fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
         const pl_op = w.air.instructions.items(.data)[inst].pl_op;
         const extra = w.air.extraData(Air.Call, pl_op.payload);
-        const args = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]);
+        const args = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra.end..][0..extra.data.args_len]));
         try w.writeOperand(s, inst, 0, pl_op.operand);
         try s.writeAll(", [");
         for (args, 0..) |arg, i| {
@@ -855,7 +855,7 @@ const Writer = struct {
 
         while (case_i < switch_br.data.cases_len) : (case_i += 1) {
             const case = w.air.extraData(Air.SwitchBr.Case, extra_index);
-            const items = @ptrCast([]const Air.Inst.Ref, w.air.extra[case.end..][0..case.data.items_len]);
+            const items = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[case.end..][0..case.data.items_len]));
             const case_body = w.air.extra[case.end + items.len ..][0..case.data.body_len];
             extra_index = case.end + case.data.items_len + case_body.len;
 
@@ -934,13 +934,13 @@ const Writer = struct {
         const small_tomb_bits = Liveness.bpi - 1;
         const dies = if (w.liveness) |liveness| blk: {
             if (op_index < small_tomb_bits)
-                break :blk liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index));
+                break :blk liveness.operandDies(inst, @as(Liveness.OperandInt, @intCast(op_index)));
             var extra_index = liveness.special.get(inst).?;
             var tomb_op_index: usize = small_tomb_bits;
             while (true) {
                 const bits = liveness.extra[extra_index];
                 if (op_index < tomb_op_index + 31) {
-                    break :blk @truncate(u1, bits >> @intCast(u5, op_index - tomb_op_index)) != 0;
+                    break :blk @as(u1, @truncate(bits >> @as(u5, @intCast(op_index - tomb_op_index)))) != 0;
                 }
                 if ((bits >> 31) != 0) break :blk false;
                 extra_index += 1;
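
The flag unpacking in writeAssembly above shows the @truncate variant of the same rewrite: @truncate now takes only the operand, and the @as wrapper supplies the narrow result type. A minimal sketch, assuming a 1-bit volatile flag in the top bit and a 31-bit clobber count in the rest (AsmFlags and unpackFlags are hypothetical, not the real Air encoding):

const AsmFlags = struct { is_volatile: bool, clobbers_len: u31 };

fn unpackFlags(flags: u32) AsmFlags {
    return .{
        .is_volatile = @as(u1, @truncate(flags >> 31)) != 0, // top bit
        .clobbers_len = @as(u31, @truncate(flags)), // low 31 bits
    };
}
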
src/print_targets.zig
@@ -100,7 +100,7 @@ pub fn cmdTargets(
             try jws.objectField(model.name);
             try jws.beginArray();
             for (arch.allFeaturesList(), 0..) |feature, i_usize| {
-                const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize);
+                const index = @as(Target.Cpu.Feature.Set.Index, @intCast(i_usize));
                 if (model.features.isEnabled(index)) {
                     try jws.arrayElem();
                     try jws.emitString(feature.name);
@@ -147,7 +147,7 @@ pub fn cmdTargets(
             try jws.objectField("features");
             try jws.beginArray();
             for (native_target.cpu.arch.allFeaturesList(), 0..) |feature, i_usize| {
-                const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize);
+                const index = @as(Target.Cpu.Feature.Set.Index, @intCast(i_usize));
                 if (cpu.features.isEnabled(index)) {
                     try jws.arrayElem();
                     try jws.emitString(feature.name);
src/print_zir.zig
@@ -131,7 +131,7 @@ const Writer = struct {
     recurse_blocks: bool,
 
     fn relativeToNodeIndex(self: *Writer, offset: i32) Ast.Node.Index {
-        return @bitCast(Ast.Node.Index, offset + @bitCast(i32, self.parent_decl_node));
+        return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(self.parent_decl_node))));
     }
 
     fn writeInstToStream(
@@ -542,7 +542,7 @@ const Writer = struct {
     }
 
     fn writeExtNode(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
-        const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
+        const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
         try stream.writeAll(")) ");
         try self.writeSrc(stream, src);
     }
@@ -631,25 +631,25 @@ const Writer = struct {
         var extra_index = extra.end;
         if (inst_data.flags.has_sentinel) {
             try stream.writeAll(", ");
-            try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]));
+            try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])));
             extra_index += 1;
         }
         if (inst_data.flags.has_align) {
             try stream.writeAll(", align(");
-            try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]));
+            try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])));
             extra_index += 1;
             if (inst_data.flags.has_bit_range) {
                 const bit_start = extra_index + @intFromBool(inst_data.flags.has_addrspace);
                 try stream.writeAll(":");
-                try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[bit_start]));
+                try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[bit_start])));
                 try stream.writeAll(":");
-                try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[bit_start + 1]));
+                try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[bit_start + 1])));
             }
             try stream.writeAll(")");
         }
         if (inst_data.flags.has_addrspace) {
             try stream.writeAll(", addrspace(");
-            try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]));
+            try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index])));
             try stream.writeAll(")");
         }
         try stream.writeAll(") ");
@@ -691,7 +691,7 @@ const Writer = struct {
         const src = inst_data.src();
         const number = extra.get();
         // TODO improve std.format to be able to print f128 values
-        try stream.print("{d}) ", .{@floatCast(f64, number)});
+        try stream.print("{d}) ", .{@as(f64, @floatCast(number))});
         try self.writeSrc(stream, src);
     }
 
@@ -964,7 +964,7 @@ const Writer = struct {
     }
 
     fn writePtrCastFull(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
-        const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small));
+        const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small))));
         const extra = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
         const src = LazySrcLoc.nodeOffset(extra.node);
         if (flags.ptr_cast) try stream.writeAll("ptr_cast, ");
@@ -980,7 +980,7 @@ const Writer = struct {
     }
 
     fn writePtrCastNoDest(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
-        const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small));
+        const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small))));
         const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
         const src = LazySrcLoc.nodeOffset(extra.node);
         if (flags.const_cast) try stream.writeAll("const_cast, ");
@@ -1103,14 +1103,14 @@ const Writer = struct {
     ) !void {
         const extra = self.code.extraData(Zir.Inst.Asm, extended.operand);
         const src = LazySrcLoc.nodeOffset(extra.data.src_node);
-        const outputs_len = @truncate(u5, extended.small);
-        const inputs_len = @truncate(u5, extended.small >> 5);
-        const clobbers_len = @truncate(u5, extended.small >> 10);
-        const is_volatile = @truncate(u1, extended.small >> 15) != 0;
+        const outputs_len = @as(u5, @truncate(extended.small));
+        const inputs_len = @as(u5, @truncate(extended.small >> 5));
+        const clobbers_len = @as(u5, @truncate(extended.small >> 10));
+        const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0;
 
         try self.writeFlag(stream, "volatile, ", is_volatile);
         if (tmpl_is_expr) {
-            try self.writeInstRef(stream, @enumFromInt(Zir.Inst.Ref, extra.data.asm_source));
+            try self.writeInstRef(stream, @as(Zir.Inst.Ref, @enumFromInt(extra.data.asm_source)));
             try stream.writeAll(", ");
         } else {
             const asm_source = self.code.nullTerminatedString(extra.data.asm_source);
@@ -1126,7 +1126,7 @@ const Writer = struct {
                 const output = self.code.extraData(Zir.Inst.Asm.Output, extra_i);
                 extra_i = output.end;
 
-                const is_type = @truncate(u1, output_type_bits) != 0;
+                const is_type = @as(u1, @truncate(output_type_bits)) != 0;
                 output_type_bits >>= 1;
 
                 const name = self.code.nullTerminatedString(output.data.name);
@@ -1205,7 +1205,7 @@ const Writer = struct {
         if (extra.data.flags.ensure_result_used) {
             try stream.writeAll("nodiscard ");
         }
-        try stream.print(".{s}, ", .{@tagName(@enumFromInt(std.builtin.CallModifier, extra.data.flags.packed_modifier))});
+        try stream.print(".{s}, ", .{@tagName(@as(std.builtin.CallModifier, @enumFromInt(extra.data.flags.packed_modifier)))});
         switch (kind) {
             .direct => try self.writeInstRef(stream, extra.data.callee),
             .field => {
@@ -1280,12 +1280,12 @@ const Writer = struct {
     }
 
     fn writeStructDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
-        const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+        const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
 
         var extra_index: usize = extended.operand;
 
         const src_node: ?i32 = if (small.has_src_node) blk: {
-            const src_node = @bitCast(i32, self.code.extra[extra_index]);
+            const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk src_node;
         } else null;
@@ -1313,7 +1313,7 @@ const Writer = struct {
             extra_index += 1;
             try stream.writeAll("Packed(");
             if (backing_int_body_len == 0) {
-                const backing_int_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
                 try self.writeInstRef(stream, backing_int_ref);
             } else {
@@ -1369,13 +1369,13 @@ const Writer = struct {
                         cur_bit_bag = self.code.extra[bit_bag_index];
                         bit_bag_index += 1;
                     }
-                    const has_align = @truncate(u1, cur_bit_bag) != 0;
+                    const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
                     cur_bit_bag >>= 1;
-                    const has_default = @truncate(u1, cur_bit_bag) != 0;
+                    const has_default = @as(u1, @truncate(cur_bit_bag)) != 0;
                     cur_bit_bag >>= 1;
-                    const is_comptime = @truncate(u1, cur_bit_bag) != 0;
+                    const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0;
                     cur_bit_bag >>= 1;
-                    const has_type_body = @truncate(u1, cur_bit_bag) != 0;
+                    const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
                     cur_bit_bag >>= 1;
 
                     var field_name: u32 = 0;
@@ -1395,7 +1395,7 @@ const Writer = struct {
                     if (has_type_body) {
                         fields[field_i].type_len = self.code.extra[extra_index];
                     } else {
-                        fields[field_i].type = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                        fields[field_i].type = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                     }
                     extra_index += 1;
 
@@ -1469,18 +1469,18 @@ const Writer = struct {
     }
 
     fn writeUnionDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
-        const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
+        const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
 
         var extra_index: usize = extended.operand;
 
         const src_node: ?i32 = if (small.has_src_node) blk: {
-            const src_node = @bitCast(i32, self.code.extra[extra_index]);
+            const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk src_node;
         } else null;
 
         const tag_type_ref = if (small.has_tag_type) blk: {
-            const tag_type_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk tag_type_ref;
         } else .none;
@@ -1557,13 +1557,13 @@ const Writer = struct {
                 cur_bit_bag = self.code.extra[bit_bag_index];
                 bit_bag_index += 1;
             }
-            const has_type = @truncate(u1, cur_bit_bag) != 0;
+            const has_type = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const has_align = @truncate(u1, cur_bit_bag) != 0;
+            const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const has_value = @truncate(u1, cur_bit_bag) != 0;
+            const has_value = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const unused = @truncate(u1, cur_bit_bag) != 0;
+            const unused = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
 
             _ = unused;
@@ -1578,14 +1578,14 @@ const Writer = struct {
             try stream.print("{}", .{std.zig.fmtId(field_name)});
 
             if (has_type) {
-                const field_type = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const field_type = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
 
                 try stream.writeAll(": ");
                 try self.writeInstRef(stream, field_type);
             }
             if (has_align) {
-                const align_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const align_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
 
                 try stream.writeAll(" align(");
@@ -1593,7 +1593,7 @@ const Writer = struct {
                 try stream.writeAll(")");
             }
             if (has_value) {
-                const default_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const default_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
 
                 try stream.writeAll(" = ");
@@ -1621,13 +1621,13 @@ const Writer = struct {
                 cur_bit_bag = self.code.extra[bit_bag_index];
                 bit_bag_index += 1;
             }
-            const is_pub = @truncate(u1, cur_bit_bag) != 0;
+            const is_pub = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const is_exported = @truncate(u1, cur_bit_bag) != 0;
+            const is_exported = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const has_align = @truncate(u1, cur_bit_bag) != 0;
+            const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const has_section_or_addrspace = @truncate(u1, cur_bit_bag) != 0;
+            const has_section_or_addrspace = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
 
             const sub_index = extra_index;
@@ -1644,23 +1644,23 @@ const Writer = struct {
             extra_index += 1;
 
             const align_inst: Zir.Inst.Ref = if (!has_align) .none else inst: {
-                const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
                 break :inst inst;
             };
             const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: {
-                const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
                 break :inst inst;
             };
             const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: {
-                const inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
                 break :inst inst;
             };
 
             const pub_str = if (is_pub) "pub " else "";
-            const hash_bytes = @bitCast([16]u8, hash_u32s.*);
+            const hash_bytes = @as([16]u8, @bitCast(hash_u32s.*));
             if (decl_name_index == 0) {
                 try stream.writeByteNTimes(' ', self.indent);
                 const name = if (is_exported) "usingnamespace" else "comptime";
@@ -1728,17 +1728,17 @@ const Writer = struct {
     }
 
     fn writeEnumDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
-        const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small);
+        const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small));
         var extra_index: usize = extended.operand;
 
         const src_node: ?i32 = if (small.has_src_node) blk: {
-            const src_node = @bitCast(i32, self.code.extra[extra_index]);
+            const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk src_node;
         } else null;
 
         const tag_type_ref = if (small.has_tag_type) blk: {
-            const tag_type_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk tag_type_ref;
         } else .none;
@@ -1808,7 +1808,7 @@ const Writer = struct {
                     cur_bit_bag = self.code.extra[bit_bag_index];
                     bit_bag_index += 1;
                 }
-                const has_tag_value = @truncate(u1, cur_bit_bag) != 0;
+                const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
                 cur_bit_bag >>= 1;
 
                 const field_name = self.code.nullTerminatedString(self.code.extra[extra_index]);
@@ -1823,7 +1823,7 @@ const Writer = struct {
                 try stream.print("{}", .{std.zig.fmtId(field_name)});
 
                 if (has_tag_value) {
-                    const tag_value_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                    const tag_value_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                     extra_index += 1;
 
                     try stream.writeAll(" = ");
@@ -1844,11 +1844,11 @@ const Writer = struct {
         stream: anytype,
         extended: Zir.Inst.Extended.InstData,
     ) !void {
-        const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small);
+        const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small));
         var extra_index: usize = extended.operand;
 
         const src_node: ?i32 = if (small.has_src_node) blk: {
-            const src_node = @bitCast(i32, self.code.extra[extra_index]);
+            const src_node = @as(i32, @bitCast(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk src_node;
         } else null;
@@ -1892,7 +1892,7 @@ const Writer = struct {
         try stream.writeAll("{\n");
         self.indent += 2;
 
-        var extra_index = @intCast(u32, extra.end);
+        var extra_index = @as(u32, @intCast(extra.end));
         const extra_index_end = extra_index + (extra.data.fields_len * 2);
         while (extra_index < extra_index_end) : (extra_index += 2) {
             const str_index = self.code.extra[extra_index];
@@ -1945,7 +1945,7 @@ const Writer = struct {
                 else => break :else_prong,
             };
 
-            const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]);
+            const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
             const capture_text = switch (info.capture) {
                 .none => "",
                 .by_val => "by_val ",
@@ -1966,9 +1966,9 @@ const Writer = struct {
             const scalar_cases_len = extra.data.bits.scalar_cases_len;
             var scalar_i: usize = 0;
             while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
-                const item_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                const item_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
-                const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]);
+                const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
                 extra_index += 1;
                 const body = self.code.extra[extra_index..][0..info.body_len];
                 extra_index += info.body_len;
@@ -1993,7 +1993,7 @@ const Writer = struct {
                 extra_index += 1;
                 const ranges_len = self.code.extra[extra_index];
                 extra_index += 1;
-                const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, self.code.extra[extra_index]);
+                const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
                 extra_index += 1;
                 const items = self.code.refSlice(extra_index, items_len);
                 extra_index += items_len;
@@ -2014,9 +2014,9 @@ const Writer = struct {
 
                 var range_i: usize = 0;
                 while (range_i < ranges_len) : (range_i += 1) {
-                    const item_first = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                    const item_first = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                     extra_index += 1;
-                    const item_last = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                    const item_last = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                     extra_index += 1;
 
                     if (range_i != 0 or items.len != 0) {
@@ -2117,7 +2117,7 @@ const Writer = struct {
                 ret_ty_ref = .void_type;
             },
             1 => {
-                ret_ty_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+                ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
                 extra_index += 1;
             },
             else => {
@@ -2188,7 +2188,7 @@ const Writer = struct {
             align_body = self.code.extra[extra_index..][0..body_len];
             extra_index += align_body.len;
         } else if (extra.data.bits.has_align_ref) {
-            align_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            align_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
         }
         if (extra.data.bits.has_addrspace_body) {
@@ -2197,7 +2197,7 @@ const Writer = struct {
             addrspace_body = self.code.extra[extra_index..][0..body_len];
             extra_index += addrspace_body.len;
         } else if (extra.data.bits.has_addrspace_ref) {
-            addrspace_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
         }
         if (extra.data.bits.has_section_body) {
@@ -2206,7 +2206,7 @@ const Writer = struct {
             section_body = self.code.extra[extra_index..][0..body_len];
             extra_index += section_body.len;
         } else if (extra.data.bits.has_section_ref) {
-            section_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            section_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
         }
         if (extra.data.bits.has_cc_body) {
@@ -2215,7 +2215,7 @@ const Writer = struct {
             cc_body = self.code.extra[extra_index..][0..body_len];
             extra_index += cc_body.len;
         } else if (extra.data.bits.has_cc_ref) {
-            cc_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            cc_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
         }
         if (extra.data.bits.has_ret_ty_body) {
@@ -2224,7 +2224,7 @@ const Writer = struct {
             ret_ty_body = self.code.extra[extra_index..][0..body_len];
             extra_index += ret_ty_body.len;
         } else if (extra.data.bits.has_ret_ty_ref) {
-            ret_ty_ref = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
         }
 
@@ -2266,7 +2266,7 @@ const Writer = struct {
 
     fn writeVarExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
         const extra = self.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
-        const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);
+        const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small));
 
         try self.writeInstRef(stream, extra.data.var_type);
 
@@ -2277,12 +2277,12 @@ const Writer = struct {
             try stream.print(", lib_name=\"{}\"", .{std.zig.fmtEscapes(lib_name)});
         }
         const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: {
-            const align_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            const align_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk align_inst;
         };
         const init_inst: Zir.Inst.Ref = if (!small.has_init) .none else blk: {
-            const init_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            const init_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk init_inst;
         };
@@ -2295,17 +2295,17 @@ const Writer = struct {
 
     fn writeAllocExtended(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
         const extra = self.code.extraData(Zir.Inst.AllocExtended, extended.operand);
-        const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
+        const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small));
         const src = LazySrcLoc.nodeOffset(extra.data.src_node);
 
         var extra_index: usize = extra.end;
         const type_inst: Zir.Inst.Ref = if (!small.has_type) .none else blk: {
-            const type_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            const type_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk type_inst;
         };
         const align_inst: Zir.Inst.Ref = if (!small.has_align) .none else blk: {
-            const align_inst = @enumFromInt(Zir.Inst.Ref, self.code.extra[extra_index]);
+            const align_inst = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
             extra_index += 1;
             break :blk align_inst;
         };
@@ -2473,8 +2473,8 @@ const Writer = struct {
         try stream.writeAll(") ");
         if (body.len != 0) {
             try stream.print("(lbrace={d}:{d},rbrace={d}:{d}) ", .{
-                src_locs.lbrace_line + 1, @truncate(u16, src_locs.columns) + 1,
-                src_locs.rbrace_line + 1, @truncate(u16, src_locs.columns >> 16) + 1,
+                src_locs.lbrace_line + 1, @as(u16, @truncate(src_locs.columns)) + 1,
+                src_locs.rbrace_line + 1, @as(u16, @truncate(src_locs.columns >> 16)) + 1,
             });
         }
         try self.writeSrc(stream, src);
@@ -2507,7 +2507,7 @@ const Writer = struct {
 
     fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void {
         const i = @intFromEnum(ref);
-        if (i < InternPool.static_len) return stream.print("@{}", .{@enumFromInt(InternPool.Index, i)});
+        if (i < InternPool.static_len) return stream.print("@{}", .{@as(InternPool.Index, @enumFromInt(i))});
         return self.writeInstIndex(stream, i - InternPool.static_len);
     }
 
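print_zir.zig leans on two further cast shapes: @bitCast between a packed struct and its backing integer, and @enumFromInt for turning raw u32 extra-array words into typed references. A self-contained sketch, where Small and Ref are hypothetical stand-ins for the real Zir types:

const Small = packed struct {
    has_src_node: bool,
    has_tag_type: bool,
    _padding: u14 = 0,
};

const Ref = enum(u32) { none, _ };

fn decodeSmall(bits: u16) Small {
    // previously `@bitCast(Small, bits)`
    return @as(Small, @bitCast(bits));
}

fn refFromRaw(raw: u32) Ref {
    // previously `@enumFromInt(Ref, raw)`
    return @as(Ref, @enumFromInt(raw));
}
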
src/register_manager.zig
@@ -427,13 +427,13 @@ const MockRegister3 = enum(u3) {
 
     pub fn id(reg: MockRegister3) u3 {
         return switch (@intFromEnum(reg)) {
-            0...3 => @as(u3, @truncate(u2, @intFromEnum(reg))),
+            0...3 => @as(u3, @as(u2, @truncate(@intFromEnum(reg)))),
             4...7 => @intFromEnum(reg),
         };
     }
 
     pub fn enc(reg: MockRegister3) u2 {
-        return @truncate(u2, @intFromEnum(reg));
+        return @as(u2, @truncate(@intFromEnum(reg)));
     }
 
     const gp_regs = [_]MockRegister3{ .r0, .r1, .r2, .r3 };
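
One subtlety in `id` above: the inner @as(u2, ...) is load-bearing. @truncate takes its width from the result location, so without that wrapper it would infer u3 from the outer @as and discard nothing; the explicit u2 step drops the high bit, and the u2 result then widens back to u3 by ordinary coercion. A reduced sketch (low2 is illustrative, not from this diff):

fn low2(x: u3) u3 {
    // keeps only the low two bits of x
    return @as(u3, @as(u2, @truncate(x)));
}

test "low2 masks the high bit" {
    const std = @import("std");
    try std.testing.expectEqual(@as(u3, 3), low2(7));
}
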
src/Sema.zig
@@ -212,7 +212,7 @@ pub const InstMap = struct {
         while (true) {
             const extra_capacity = better_capacity / 2 + 16;
             better_capacity += extra_capacity;
-            better_start -|= @intCast(Zir.Inst.Index, extra_capacity / 2);
+            better_start -|= @as(Zir.Inst.Index, @intCast(extra_capacity / 2));
             if (better_start <= start and end < better_capacity + better_start)
                 break;
         }
@@ -225,7 +225,7 @@ pub const InstMap = struct {
 
         allocator.free(map.items);
         map.items = new_items;
-        map.start = @intCast(Zir.Inst.Index, better_start);
+        map.start = @as(Zir.Inst.Index, @intCast(better_start));
     }
 };
 
@@ -619,7 +619,7 @@ pub const Block = struct {
         const sema = block.sema;
         const ty_ref = try sema.addType(aggregate_ty);
         try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements.len);
-        const extra_index = @intCast(u32, sema.air_extra.items.len);
+        const extra_index = @as(u32, @intCast(sema.air_extra.items.len));
         sema.appendRefsAssumeCapacity(elements);
 
         return block.addInst(.{
@@ -660,7 +660,7 @@ pub const Block = struct {
         try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
         try block.instructions.ensureUnusedCapacity(gpa, 1);
 
-        const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
+        const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
         sema.air_instructions.appendAssumeCapacity(inst);
         block.instructions.appendAssumeCapacity(result_index);
         return result_index;
@@ -678,7 +678,7 @@ pub const Block = struct {
 
         try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
 
-        const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
+        const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
         sema.air_instructions.appendAssumeCapacity(inst);
 
         try block.instructions.insert(gpa, index, result_index);
@@ -1763,7 +1763,7 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
     const i = @intFromEnum(zir_ref);
     // The first section of indexes corresponds to a fixed set of constant values.
     // We intentionally map the same indexes to the same values between ZIR and AIR.
-    if (i < InternPool.static_len) return @enumFromInt(Air.Inst.Ref, i);
+    if (i < InternPool.static_len) return @as(Air.Inst.Ref, @enumFromInt(i));
     // The last section of indexes refers to the map of ZIR => AIR.
     const inst = sema.inst_map.get(i - InternPool.static_len).?;
     if (inst == .generic_poison) return error.GenericPoison;
@@ -2041,7 +2041,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
     // The first section of indexes corresponds to a fixed set of constant values.
     const int = @intFromEnum(inst);
     if (int < InternPool.static_len) {
-        return @enumFromInt(InternPool.Index, int).toValue();
+        return @as(InternPool.Index, @enumFromInt(int)).toValue();
     }
 
     const i = int - InternPool.static_len;
@@ -2430,7 +2430,7 @@ fn analyzeAsAlign(
     air_ref: Air.Inst.Ref,
 ) !Alignment {
     const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, "alignment must be comptime-known");
-    const alignment = @intCast(u32, alignment_big); // We coerce to u29 in the prev line.
+    const alignment = @as(u32, @intCast(alignment_big)); // We coerced to u29 in the previous line, so this cast cannot fail.
     try sema.validateAlign(block, src, alignment);
     return Alignment.fromNonzeroByteUnits(alignment);
 }
@@ -2737,7 +2737,7 @@ pub fn analyzeStructDecl(
     const struct_obj = mod.structPtr(struct_index);
     const extended = sema.code.instructions.items(.data)[inst].extended;
     assert(extended.opcode == .struct_decl);
-    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
 
     struct_obj.known_non_opv = small.known_non_opv;
     if (small.known_comptime_only) {
@@ -2774,9 +2774,9 @@ fn zirStructDecl(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const gpa = sema.gpa;
-    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
     const src: LazySrcLoc = if (small.has_src_node) blk: {
-        const node_offset = @bitCast(i32, sema.code.extra[extended.operand]);
+        const node_offset = @as(i32, @bitCast(sema.code.extra[extended.operand]));
         break :blk LazySrcLoc.nodeOffset(node_offset);
     } else sema.src;
 
@@ -2937,18 +2937,18 @@ fn zirEnumDecl(
 
     const mod = sema.mod;
     const gpa = sema.gpa;
-    const small = @bitCast(Zir.Inst.EnumDecl.Small, extended.small);
+    const small = @as(Zir.Inst.EnumDecl.Small, @bitCast(extended.small));
     var extra_index: usize = extended.operand;
 
     const src: LazySrcLoc = if (small.has_src_node) blk: {
-        const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
+        const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index]));
         extra_index += 1;
         break :blk LazySrcLoc.nodeOffset(node_offset);
     } else sema.src;
     const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x };
 
     const tag_type_ref = if (small.has_tag_type) blk: {
-        const tag_type_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const tag_type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         break :blk tag_type_ref;
     } else .none;
@@ -3108,7 +3108,7 @@ fn zirEnumDecl(
             cur_bit_bag = sema.code.extra[bit_bag_index];
             bit_bag_index += 1;
         }
-        const has_tag_value = @truncate(u1, cur_bit_bag) != 0;
+        const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
 
         const field_name_zir = sema.code.nullTerminatedString(sema.code.extra[extra_index]);
@@ -3131,7 +3131,7 @@ fn zirEnumDecl(
         }
 
         const tag_overflow = if (has_tag_value) overflow: {
-            const tag_val_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+            const tag_val_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
             extra_index += 1;
             const tag_inst = try sema.resolveInst(tag_val_ref);
             last_tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) {
@@ -3213,11 +3213,11 @@ fn zirUnionDecl(
 
     const mod = sema.mod;
     const gpa = sema.gpa;
-    const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
+    const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
     var extra_index: usize = extended.operand;
 
     const src: LazySrcLoc = if (small.has_src_node) blk: {
-        const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
+        const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index]));
         extra_index += 1;
         break :blk LazySrcLoc.nodeOffset(node_offset);
     } else sema.src;
@@ -3298,11 +3298,11 @@ fn zirOpaqueDecl(
     defer tracy.end();
 
     const mod = sema.mod;
-    const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small);
+    const small = @as(Zir.Inst.OpaqueDecl.Small, @bitCast(extended.small));
     var extra_index: usize = extended.operand;
 
     const src: LazySrcLoc = if (small.has_src_node) blk: {
-        const node_offset = @bitCast(i32, sema.code.extra[extra_index]);
+        const node_offset = @as(i32, @bitCast(sema.code.extra[extra_index]));
         extra_index += 1;
         break :blk LazySrcLoc.nodeOffset(node_offset);
     } else sema.src;
@@ -3369,7 +3369,7 @@ fn zirErrorSetDecl(
     var names: Module.Fn.InferredErrorSet.NameMap = .{};
     try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);
 
-    var extra_index = @intCast(u32, extra.end);
+    var extra_index = @as(u32, @intCast(extra.end));
     const extra_index_end = extra_index + (extra.data.fields_len * 2);
     while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string
         const str_index = sema.code.extra[extra_index];
@@ -3569,18 +3569,18 @@ fn zirAllocExtended(
     const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
     const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node };
     const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node };
-    const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
+    const small = @as(Zir.Inst.AllocExtended.Small, @bitCast(extended.small));
 
     var extra_index: usize = extra.end;
 
     const var_ty: Type = if (small.has_type) blk: {
-        const type_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const type_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         break :blk try sema.resolveType(block, ty_src, type_ref);
     } else undefined;
 
     const alignment = if (small.has_align) blk: {
-        const align_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         const alignment = try sema.resolveAlign(block, align_src, align_ref);
         break :blk alignment;
@@ -3598,7 +3598,7 @@ fn zirAllocExtended(
                     .is_const = small.is_const,
                 } },
             });
-            return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+            return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
         }
     }
 
@@ -3730,7 +3730,7 @@ fn zirAllocInferredComptime(
             .is_const = is_const,
         } },
     });
-    return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+    return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
 }
 
 fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -3795,7 +3795,7 @@ fn zirAllocInferred(
                 .is_const = is_const,
             } },
         });
-        return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+        return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
     }
 
     const result_index = try block.addInstAsIndex(.{
@@ -4037,7 +4037,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
                     .data = .{ .ty_pl = .{
                         .ty = ty_inst,
                         .payload = sema.addExtraAssumeCapacity(Air.Block{
-                            .body_len = @intCast(u32, replacement_block.instructions.items.len),
+                            .body_len = @as(u32, @intCast(replacement_block.instructions.items.len)),
                         }),
                     } },
                 });
@@ -4121,7 +4121,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 
     // First pass to look for comptime values.
     for (args, 0..) |zir_arg, i_usize| {
-        const i = @intCast(u32, i_usize);
+        const i = @as(u32, @intCast(i_usize));
         runtime_arg_lens[i] = .none;
         if (zir_arg == .none) continue;
         const object = try sema.resolveInst(zir_arg);
@@ -4192,7 +4192,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             const msg = try sema.errMsg(block, src, "unbounded for loop", .{});
             errdefer msg.destroy(gpa);
             for (args, 0..) |zir_arg, i_usize| {
-                const i = @intCast(u32, i_usize);
+                const i = @as(u32, @intCast(i_usize));
                 if (zir_arg == .none) continue;
                 const object = try sema.resolveInst(zir_arg);
                 const object_ty = sema.typeOf(object);
@@ -4435,7 +4435,7 @@ fn validateUnionInit(
     }
 
     const tag_ty = union_ty.unionTagTypeHypothetical(mod);
-    const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
+    const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?));
     const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
 
     if (init_val) |val| {
@@ -4547,9 +4547,9 @@ fn validateStructInit(
 
             const field_src = init_src; // TODO better source location
             const default_field_ptr = if (struct_ty.isTuple(mod))
-                try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
+                try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true)
             else
-                try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
+                try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true);
             const init = try sema.addConstant(default_val);
             try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
         }
@@ -4729,9 +4729,9 @@ fn validateStructInit(
 
         const field_src = init_src; // TODO better source location
         const default_field_ptr = if (struct_ty.isTuple(mod))
-            try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
+            try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @as(u32, @intCast(i)), true)
         else
-            try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
+            try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @as(u32, @intCast(i)), field_src, struct_ty, true);
         const init = try sema.addConstant(field_values[i].toValue());
         try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
     }
@@ -5165,7 +5165,7 @@ fn storeToInferredAllocComptime(
 fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
-    const quota = @intCast(u32, try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known"));
+    const quota = @as(u32, @intCast(try sema.resolveInt(block, src, inst_data.operand, Type.u32, "eval branch quota must be comptime-known")));
     sema.branch_quota = @max(sema.branch_quota, quota);
 }
 
@@ -5388,7 +5388,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError
     // Reserve space for a Loop instruction so that generated Break instructions can
     // point to it, even if it doesn't end up getting used because the code ends up being
     // comptime evaluated.
-    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
     const loop_inst = block_inst + 1;
     try sema.air_instructions.ensureUnusedCapacity(gpa, 2);
     sema.air_instructions.appendAssumeCapacity(.{
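Note: the comment in this hunk describes reserving an instruction index before the instruction's body exists, so later code can refer to it. A generic sketch of that idiom, using std.ArrayList purely for illustration (the compiler's own instruction lists differ in detail):

    const std = @import("std");
    test "reserve an index before appending" {
        var insts = std.ArrayList(u32).init(std.testing.allocator);
        defer insts.deinit();
        const block_inst = @as(u32, @intCast(insts.items.len)); // index the entry will occupy
        try insts.append(undefined); // reserve the slot now
        // ... build the body, handing block_inst to anything that must refer to it ...
        insts.items[block_inst] = 123; // patch the reserved slot once its payload is known
        try std.testing.expectEqual(@as(u32, 123), insts.items[0]);
    }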
@@ -5436,7 +5436,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError
 
         try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + loop_block_len);
         sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
-            Air.Block{ .body_len = @intCast(u32, loop_block_len) },
+            Air.Block{ .body_len = @as(u32, @intCast(loop_block_len)) },
         );
         sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items);
     }
@@ -5586,7 +5586,7 @@ fn zirBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index, force_compt
     // Reserve space for a Block instruction so that generated Break instructions can
     // point to it, even if it doesn't end up getting used because the code ends up being
     // comptime evaluated or is an unlabeled block.
-    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
     try sema.air_instructions.append(gpa, .{
         .tag = .block,
         .data = undefined,
@@ -5733,7 +5733,7 @@ fn analyzeBlockBody(
     sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{
         .ty = ty_inst,
         .payload = sema.addExtraAssumeCapacity(Air.Block{
-            .body_len = @intCast(u32, child_block.instructions.items.len),
+            .body_len = @as(u32, @intCast(child_block.instructions.items.len)),
         }),
     } };
     sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items);
@@ -5761,11 +5761,11 @@ fn analyzeBlockBody(
 
         // Convert the br instruction to a block instruction that has the coercion
         // and then a new br inside that returns the coerced instruction.
-        const sub_block_len = @intCast(u32, coerce_block.instructions.items.len + 1);
+        const sub_block_len = @as(u32, @intCast(coerce_block.instructions.items.len + 1));
         try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
             sub_block_len);
         try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
-        const sub_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+        const sub_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
 
         sema.air_instructions.items(.tag)[br] = .block;
         sema.air_instructions.items(.data)[br] = .{ .ty_pl = .{
@@ -6114,7 +6114,7 @@ fn addDbgVar(
     try sema.queueFullTypeResolution(operand_ty);
 
     // Add the name to the AIR.
-    const name_extra_index = @intCast(u32, sema.air_extra.items.len);
+    const name_extra_index = @as(u32, @intCast(sema.air_extra.items.len));
     const elements_used = name.len / 4 + 1;
     try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used);
     const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@@ -6314,7 +6314,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
         .tag = .save_err_return_trace_index,
         .data = .{ .ty_pl = .{
             .ty = try sema.addType(stack_trace_ty),
-            .payload = @intCast(u32, field_index),
+            .payload = @as(u32, @intCast(field_index)),
         } },
     });
 }
@@ -6386,12 +6386,12 @@ fn popErrorReturnTrace(
             then_block.instructions.items.len + else_block.instructions.items.len +
             @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block
 
-        const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+        const cond_br_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
         try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{
             .operand = is_non_error_inst,
             .payload = sema.addExtraAssumeCapacity(Air.CondBr{
-                .then_body_len = @intCast(u32, then_block.instructions.items.len),
-                .else_body_len = @intCast(u32, else_block.instructions.items.len),
+                .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)),
+                .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)),
             }),
         } } });
         sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
@@ -6422,7 +6422,7 @@ fn zirCall(
     const extra = sema.code.extraData(ExtraType, inst_data.payload_index);
     const args_len = extra.data.flags.args_len;
 
-    const modifier = @enumFromInt(std.builtin.CallModifier, extra.data.flags.packed_modifier);
+    const modifier = @as(std.builtin.CallModifier, @enumFromInt(extra.data.flags.packed_modifier));
     const ensure_result_used = extra.data.flags.ensure_result_used;
     const pop_error_return_trace = extra.data.flags.pop_error_return_trace;
 
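Note: @enumFromInt migrates with the same shape as the integer casts — the enum type is supplied by @as or by a result location. A sketch with a small stand-in enum (the real std.builtin.CallModifier has more variants):

    const std = @import("std");
    test "new @enumFromInt syntax" {
        const Modifier = enum(u2) { auto, never_inline, always_inline }; // illustrative stand-in
        const packed_modifier: u2 = 1;
        // old: @enumFromInt(Modifier, packed_modifier)
        const modifier = @as(Modifier, @enumFromInt(packed_modifier));
        try std.testing.expectEqual(Modifier.never_inline, modifier);
    }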
@@ -6460,7 +6460,7 @@ fn zirCall(
     const args_body = sema.code.extra[extra.end..];
 
     var input_is_error = false;
-    const block_index = @intCast(Air.Inst.Index, block.instructions.items.len);
+    const block_index = @as(Air.Inst.Index, @intCast(block.instructions.items.len));
 
     const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len;
     const parent_comptime = block.is_comptime;
@@ -6477,7 +6477,7 @@ fn zirCall(
 
         // Generate args to comptime params in comptime block.
         defer block.is_comptime = parent_comptime;
-        if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) {
+        if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@as(u5, @intCast(arg_index)))) {
             block.is_comptime = true;
             // TODO set comptime_reason
         }
@@ -6533,7 +6533,7 @@ fn zirCall(
                 .tag = .save_err_return_trace_index,
                 .data = .{ .ty_pl = .{
                     .ty = try sema.addType(stack_trace_ty),
-                    .payload = @intCast(u32, field_index),
+                    .payload = @as(u32, @intCast(field_index)),
                 } },
             });
 
@@ -6809,7 +6809,7 @@ fn analyzeCall(
         // set to in the `Block`.
         // This block instruction will be used to capture the return value from the
         // inlined function.
-        const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+        const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
         try sema.air_instructions.append(gpa, .{
             .tag = .block,
             .data = undefined,
@@ -7077,7 +7077,7 @@ fn analyzeCall(
             if (i < fn_params_len) {
                 const opts: CoerceOpts = .{ .param_src = .{
                     .func_inst = func,
-                    .param_i = @intCast(u32, i),
+                    .param_i = @as(u32, @intCast(i)),
                 } };
                 const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType();
                 args[i] = sema.analyzeCallArg(
@@ -7136,7 +7136,7 @@ fn analyzeCall(
             .data = .{ .pl_op = .{
                 .operand = func,
                 .payload = sema.addExtraAssumeCapacity(Air.Call{
-                    .args_len = @intCast(u32, args.len),
+                    .args_len = @as(u32, @intCast(args.len)),
                 }),
             } },
         });
@@ -7245,7 +7245,7 @@ fn analyzeInlineCallArg(
             }
             const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{
                 .func_inst = func_inst,
-                .param_i = @intCast(u32, arg_i.*),
+                .param_i = @as(u32, @intCast(arg_i.*)),
             } }) catch |err| switch (err) {
                 error.NotCoercible => unreachable,
                 else => |e| return e,
@@ -7419,14 +7419,14 @@ fn instantiateGenericCall(
             var is_anytype = false;
             switch (zir_tags[inst]) {
                 .param => {
-                    is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i));
+                    is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
                 },
                 .param_comptime => {
                     is_comptime = true;
                 },
                 .param_anytype => {
                     is_anytype = true;
-                    is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i));
+                    is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
                 },
                 .param_anytype_comptime => {
                     is_anytype = true;
@@ -7588,7 +7588,7 @@ fn instantiateGenericCall(
     // Make a runtime call to the new function, making sure to omit the comptime args.
     const comptime_args = callee.comptime_args.?;
     const func_ty = mod.declPtr(callee.owner_decl).ty;
-    const runtime_args_len = @intCast(u32, mod.typeToFunc(func_ty).?.param_types.len);
+    const runtime_args_len = @as(u32, @intCast(mod.typeToFunc(func_ty).?.param_types.len));
     const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len);
     {
         var runtime_i: u32 = 0;
@@ -7738,14 +7738,14 @@ fn resolveGenericInstantiationType(
         var is_anytype = false;
         switch (zir_tags[inst]) {
             .param => {
-                is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i));
+                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
             },
             .param_comptime => {
                 is_comptime = true;
             },
             .param_anytype => {
                 is_anytype = true;
-                is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i));
+                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
             },
             .param_anytype_comptime => {
                 is_anytype = true;
@@ -7779,7 +7779,7 @@ fn resolveGenericInstantiationType(
                     .tag = .arg,
                     .data = .{ .arg = .{
                         .ty = try child_sema.addType(arg_ty),
-                        .src_index = @intCast(u32, arg_i),
+                        .src_index = @as(u32, @intCast(arg_i)),
                     } },
                 });
                 child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
@@ -7799,7 +7799,7 @@ fn resolveGenericInstantiationType(
     const new_func = new_func_val.getFunctionIndex(mod).unwrap().?;
     assert(new_func == new_module_func);
 
-    const monomorphed_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len);
+    const monomorphed_args_index = @as(u32, @intCast(mod.monomorphed_func_keys.items.len));
     const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len);
     var monomorphed_arg_i: u32 = 0;
     try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod });
@@ -7811,14 +7811,14 @@ fn resolveGenericInstantiationType(
         var is_anytype = false;
         switch (zir_tags[inst]) {
             .param => {
-                is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i));
+                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
             },
             .param_comptime => {
                 is_comptime = true;
             },
             .param_anytype => {
                 is_anytype = true;
-                is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i));
+                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
             },
             .param_anytype_comptime => {
                 is_anytype = true;
@@ -7984,7 +7984,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
-    const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"));
+    const len = @as(u32, @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")));
     const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs);
     try sema.checkVectorElemType(block, elem_type_src, elem_type);
     const vector_type = try mod.vectorType(.{
@@ -8140,7 +8140,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
         switch (names.len) {
             0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)),
             1 => {
-                const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(names[0]).?);
+                const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(names[0]).?));
                 return sema.addIntUnsigned(Type.err_int, int);
             },
             else => {},
@@ -8727,7 +8727,7 @@ fn zirFunc(
     const ret_ty: Type = switch (extra.data.ret_body_len) {
         0 => Type.void,
         1 => blk: {
-            const ret_ty_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+            const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
             extra_index += 1;
             if (sema.resolveType(block, ret_ty_src, ret_ty_ref)) |ret_ty| {
                 break :blk ret_ty;
@@ -8964,7 +8964,7 @@ fn funcCommon(
         for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| {
             const is_noalias = blk: {
                 const index = std.math.cast(u5, i) orelse break :blk false;
-                break :blk @truncate(u1, noalias_bits >> index) != 0;
+                break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
             };
             dest_param_ty.* = param.ty.toIntern();
             sema.analyzeParameter(
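Note: the is_noalias computation above extracts a single bit with @truncate; under the new syntax the target width moves into @as. A sketch with made-up values:

    const std = @import("std");
    test "single-bit extraction with @truncate" {
        const noalias_bits: u32 = 0b1010;
        const index: u5 = 3;
        // old: @truncate(u1, noalias_bits >> index) != 0
        const is_noalias = @as(u1, @truncate(noalias_bits >> index)) != 0;
        try std.testing.expect(is_noalias); // bit 3 is set
    }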
@@ -9199,8 +9199,8 @@ fn funcCommon(
         .hash = hash,
         .lbrace_line = src_locs.lbrace_line,
         .rbrace_line = src_locs.rbrace_line,
-        .lbrace_column = @truncate(u16, src_locs.columns),
-        .rbrace_column = @truncate(u16, src_locs.columns >> 16),
+        .lbrace_column = @as(u16, @truncate(src_locs.columns)),
+        .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
         .branch_quota = default_branch_quota,
         .is_noinline = is_noinline,
     };
@@ -9225,7 +9225,7 @@ fn analyzeParameter(
     const mod = sema.mod;
     const requires_comptime = try sema.typeRequiresComptime(param.ty);
     if (param.is_comptime or requires_comptime) {
-        comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error
+        comptime_bits.* |= @as(u32, 1) << @as(u5, @intCast(i)); // TODO: handle cast error
     }
     const this_generic = param.ty.isGenericPoison();
     is_generic.* = is_generic.* or this_generic;
@@ -9411,7 +9411,7 @@ fn zirParam(
         sema.inst_map.putAssumeCapacityNoClobber(inst, result);
     } else {
         // Otherwise we need a dummy runtime instruction.
-        const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
+        const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
         try sema.air_instructions.append(sema.gpa, .{
             .tag = .alloc,
             .data = .{ .ty = param_ty },
@@ -10287,7 +10287,7 @@ const SwitchProngAnalysis = struct {
         if (inline_case_capture != .none) {
             const item_val = sema.resolveConstValue(block, .unneeded, inline_case_capture, "") catch unreachable;
             if (operand_ty.zigTypeTag(mod) == .Union) {
-                const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, mod).?);
+                const field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, mod).?));
                 const union_obj = mod.typeToUnion(operand_ty).?;
                 const field_ty = union_obj.fields.values()[field_index].ty;
                 if (capture_byref) {
@@ -10346,13 +10346,13 @@ const SwitchProngAnalysis = struct {
                 const union_obj = mod.typeToUnion(operand_ty).?;
                 const first_item_val = sema.resolveConstValue(block, .unneeded, case_vals[0], "") catch unreachable;
 
-                const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, mod).?);
+                const first_field_index = @as(u32, @intCast(operand_ty.unionTagFieldIndex(first_item_val, mod).?));
                 const first_field = union_obj.fields.values()[first_field_index];
 
                 const field_tys = try sema.arena.alloc(Type, case_vals.len);
                 for (case_vals, field_tys) |item, *field_ty| {
                     const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
-                    const field_idx = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?);
+                    const field_idx = @as(u32, @intCast(operand_ty.unionTagFieldIndex(item_val, sema.mod).?));
                     field_ty.* = union_obj.fields.values()[field_idx].ty;
                 }
 
@@ -10378,7 +10378,7 @@ const SwitchProngAnalysis = struct {
                             const multi_idx = raw_capture_src.multi_capture;
                             const src_decl_ptr = sema.mod.declPtr(block.src_decl);
                             for (case_srcs, 0..) |*case_src, i| {
-                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, i) } };
+                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } };
                                 case_src.* = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                             }
                             const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
@@ -10426,7 +10426,7 @@ const SwitchProngAnalysis = struct {
                                 const multi_idx = raw_capture_src.multi_capture;
                                 const src_decl_ptr = sema.mod.declPtr(block.src_decl);
                                 const capture_src = raw_capture_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
-                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, i) } };
+                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(i)) } };
                                 const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                                 const msg = msg: {
                                     const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{});
@@ -10529,12 +10529,12 @@ const SwitchProngAnalysis = struct {
                         var coerce_block = block.makeSubBlock();
                         defer coerce_block.instructions.deinit(sema.gpa);
 
-                        const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @intCast(u32, idx), field_tys[idx]);
+                        const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(idx)), field_tys[idx]);
                         const coerced = sema.coerce(&coerce_block, capture_ty, uncoerced, .unneeded) catch |err| switch (err) {
                             error.NeededSourceLocation => {
                                 const multi_idx = raw_capture_src.multi_capture;
                                 const src_decl_ptr = sema.mod.declPtr(block.src_decl);
-                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @intCast(u32, idx) } };
+                                const raw_case_src: Module.SwitchProngSrc = .{ .multi = .{ .prong = multi_idx, .item = @as(u32, @intCast(idx)) } };
                                 const case_src = raw_case_src.resolve(mod, src_decl_ptr, switch_node_offset, .none);
                                 _ = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
                                 unreachable;
@@ -10545,7 +10545,7 @@ const SwitchProngAnalysis = struct {
 
                         try cases_extra.ensureUnusedCapacity(3 + coerce_block.instructions.items.len);
                         cases_extra.appendAssumeCapacity(1); // items_len
-                        cases_extra.appendAssumeCapacity(@intCast(u32, coerce_block.instructions.items.len)); // body_len
+                        cases_extra.appendAssumeCapacity(@as(u32, @intCast(coerce_block.instructions.items.len))); // body_len
                         cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item
                         cases_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); // body
                     }
@@ -10556,7 +10556,7 @@ const SwitchProngAnalysis = struct {
                     defer coerce_block.instructions.deinit(sema.gpa);
 
                     const first_imc = in_mem_coercible.findFirstSet().?;
-                    const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @intCast(u32, first_imc), field_tys[first_imc]);
+                    const uncoerced = try coerce_block.addStructFieldVal(spa.operand, @as(u32, @intCast(first_imc)), field_tys[first_imc]);
                     const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
                     _ = try coerce_block.addBr(capture_block_inst, coerced);
 
@@ -10569,14 +10569,14 @@ const SwitchProngAnalysis = struct {
                     @typeInfo(Air.Block).Struct.fields.len +
                     1);
 
-                const switch_br_inst = @intCast(u32, sema.air_instructions.len);
+                const switch_br_inst = @as(u32, @intCast(sema.air_instructions.len));
                 try sema.air_instructions.append(sema.gpa, .{
                     .tag = .switch_br,
                     .data = .{ .pl_op = .{
                         .operand = spa.cond,
                         .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
-                            .cases_len = @intCast(u32, prong_count),
-                            .else_body_len = @intCast(u32, else_body_len),
+                            .cases_len = @as(u32, @intCast(prong_count)),
+                            .else_body_len = @as(u32, @intCast(else_body_len)),
                         }),
                     } },
                 });
@@ -10763,7 +10763,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             .has_tag_capture = false,
         },
         .under, .@"else" => blk: {
-            const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[header_extra_index]);
+            const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[header_extra_index]));
             const extra_body_start = header_extra_index + 1;
             break :blk .{
                 .body = sema.code.extra[extra_body_start..][0..info.body_len],
@@ -10833,9 +10833,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             {
                 var scalar_i: u32 = 0;
                 while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
-                    const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1 + info.body_len;
 
                     case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum(
@@ -10856,7 +10856,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     extra_index += 1;
                     const ranges_len = sema.code.extra[extra_index];
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1;
                     const items = sema.code.refSlice(extra_index, items_len);
                     extra_index += items_len + info.body_len;
@@ -10870,7 +10870,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                             item_ref,
                             operand_ty,
                             src_node_offset,
-                            .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
+                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                         ));
                     }
 
@@ -10932,9 +10932,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             {
                 var scalar_i: u32 = 0;
                 while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
-                    const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1 + info.body_len;
 
                     case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
@@ -10954,7 +10954,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     extra_index += 1;
                     const ranges_len = sema.code.extra[extra_index];
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1;
                     const items = sema.code.refSlice(extra_index, items_len);
                     extra_index += items_len + info.body_len;
@@ -10967,7 +10967,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                             item_ref,
                             operand_ty,
                             src_node_offset,
-                            .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
+                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                         ));
                     }
 
@@ -11073,9 +11073,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             {
                 var scalar_i: u32 = 0;
                 while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
-                    const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1 + info.body_len;
 
                     case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt(
@@ -11095,7 +11095,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     extra_index += 1;
                     const ranges_len = sema.code.extra[extra_index];
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1;
                     const items = sema.code.refSlice(extra_index, items_len);
                     extra_index += items_len;
@@ -11108,16 +11108,16 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                             item_ref,
                             operand_ty,
                             src_node_offset,
-                            .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
+                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                         ));
                     }
 
                     try case_vals.ensureUnusedCapacity(gpa, 2 * ranges_len);
                     var range_i: u32 = 0;
                     while (range_i < ranges_len) : (range_i += 1) {
-                        const item_first = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                        const item_first = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                         extra_index += 1;
-                        const item_last = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                        const item_last = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                         extra_index += 1;
 
                         const vals = try sema.validateSwitchRange(
@@ -11168,9 +11168,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             {
                 var scalar_i: u32 = 0;
                 while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
-                    const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1 + info.body_len;
 
                     case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool(
@@ -11190,7 +11190,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     extra_index += 1;
                     const ranges_len = sema.code.extra[extra_index];
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1;
                     const items = sema.code.refSlice(extra_index, items_len);
                     extra_index += items_len + info.body_len;
@@ -11203,7 +11203,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                             &false_count,
                             item_ref,
                             src_node_offset,
-                            .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
+                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                         ));
                     }
 
@@ -11250,9 +11250,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             {
                 var scalar_i: u32 = 0;
                 while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
-                    const item_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+                    const item_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1;
                     extra_index += info.body_len;
 
@@ -11273,7 +11273,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     extra_index += 1;
                     const ranges_len = sema.code.extra[extra_index];
                     extra_index += 1;
-                    const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                    const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                     extra_index += 1;
                     const items = sema.code.refSlice(extra_index, items_len);
                     extra_index += items_len + info.body_len;
@@ -11286,7 +11286,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                             item_ref,
                             operand_ty,
                             src_node_offset,
-                            .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
+                            .{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } },
                         ));
                     }
 
@@ -11324,7 +11324,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
         .tag_capture_inst = tag_capture_inst,
     };
 
-    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
     try sema.air_instructions.append(gpa, .{
         .tag = .block,
         .data = undefined,
@@ -11368,7 +11368,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             var scalar_i: usize = 0;
             while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
                 extra_index += 1;
-                const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                 extra_index += 1;
                 const body = sema.code.extra[extra_index..][0..info.body_len];
                 extra_index += info.body_len;
@@ -11382,7 +11382,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                         .normal,
                         body,
                         info.capture,
-                        .{ .scalar_capture = @intCast(u32, scalar_i) },
+                        .{ .scalar_capture = @as(u32, @intCast(scalar_i)) },
                         &.{item},
                         if (info.is_inline) operand else .none,
                         info.has_tag_capture,
@@ -11399,7 +11399,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                 extra_index += 1;
                 const ranges_len = sema.code.extra[extra_index];
                 extra_index += 1;
-                const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+                const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
                 extra_index += 1 + items_len;
                 const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..info.body_len];
 
@@ -11416,7 +11416,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                             .normal,
                             body,
                             info.capture,
-                            .{ .multi_capture = @intCast(u32, multi_i) },
+                            .{ .multi_capture = @as(u32, @intCast(multi_i)) },
                             items,
                             if (info.is_inline) operand else .none,
                             info.has_tag_capture,
@@ -11443,7 +11443,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                             .normal,
                             body,
                             info.capture,
-                            .{ .multi_capture = @intCast(u32, multi_i) },
+                            .{ .multi_capture = @as(u32, @intCast(multi_i)) },
                             undefined, // case_vals may be undefined for ranges
                             if (info.is_inline) operand else .none,
                             info.has_tag_capture,
@@ -11528,7 +11528,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
     var scalar_i: usize = 0;
     while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
         extra_index += 1;
-        const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+        const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
         extra_index += 1;
         const body = sema.code.extra[extra_index..][0..info.body_len];
         extra_index += info.body_len;
@@ -11556,7 +11556,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                 .normal,
                 body,
                 info.capture,
-                .{ .scalar_capture = @intCast(u32, scalar_i) },
+                .{ .scalar_capture = @as(u32, @intCast(scalar_i)) },
                 &.{item},
                 if (info.is_inline) item else .none,
                 info.has_tag_capture,
@@ -11569,7 +11569,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
         try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
         cases_extra.appendAssumeCapacity(1); // items_len
-        cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+        cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
         cases_extra.appendAssumeCapacity(@intFromEnum(item));
         cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
     }
@@ -11589,7 +11589,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
         extra_index += 1;
         const ranges_len = sema.code.extra[extra_index];
         extra_index += 1;
-        const info = @bitCast(Zir.Inst.SwitchBlock.ProngInfo, sema.code.extra[extra_index]);
+        const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(sema.code.extra[extra_index]));
         extra_index += 1 + items_len;
 
         const items = case_vals.items[case_val_idx..][0..items_len];
@@ -11654,7 +11654,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                     try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                     cases_extra.appendAssumeCapacity(1); // items_len
-                    cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                     cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                     cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
 
@@ -11676,7 +11676,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                 if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                     error.NeededSourceLocation => {
-                        const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } };
+                        const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @as(u32, @intCast(item_i)) } };
                         const decl = mod.declPtr(case_block.src_decl);
                         try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
                         unreachable;
@@ -11702,7 +11702,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                 try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                 cases_extra.appendAssumeCapacity(1); // items_len
-                cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+                cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                 cases_extra.appendAssumeCapacity(@intFromEnum(item));
                 cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
             }
@@ -11750,8 +11750,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
             try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len +
                 case_block.instructions.items.len);
 
-            cases_extra.appendAssumeCapacity(@intCast(u32, items.len));
-            cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+            cases_extra.appendAssumeCapacity(@as(u32, @intCast(items.len)));
+            cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
 
             for (items) |item| {
                 cases_extra.appendAssumeCapacity(@intFromEnum(item));
@@ -11846,8 +11846,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                 sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
                     sema.addExtraAssumeCapacity(Air.CondBr{
-                    .then_body_len = @intCast(u32, prev_then_body.len),
-                    .else_body_len = @intCast(u32, cond_body.len),
+                    .then_body_len = @as(u32, @intCast(prev_then_body.len)),
+                    .else_body_len = @as(u32, @intCast(cond_body.len)),
                 });
                 sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
                 sema.air_extra.appendSliceAssumeCapacity(cond_body);
@@ -11872,7 +11872,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                     if (f != null) continue;
                     cases_len += 1;
 
-                    const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i));
+                    const item_val = try mod.enumValueFieldIndex(operand_ty, @as(u32, @intCast(i)));
                     const item_ref = try sema.addConstant(item_val);
 
                     case_block.instructions.shrinkRetainingCapacity(0);
@@ -11903,7 +11903,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                     try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                     cases_extra.appendAssumeCapacity(1); // items_len
-                    cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                     cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                     cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                 }
@@ -11944,7 +11944,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                     try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                     cases_extra.appendAssumeCapacity(1); // items_len
-                    cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                     cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                     cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                 }
@@ -11975,7 +11975,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                     try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                     cases_extra.appendAssumeCapacity(1); // items_len
-                    cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                     cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                     cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                 }
@@ -12003,7 +12003,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                     try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                     cases_extra.appendAssumeCapacity(1); // items_len
-                    cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                     cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true));
                     cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                 }
@@ -12029,7 +12029,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
                     try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len);
                     cases_extra.appendAssumeCapacity(1); // items_len
-                    cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len));
+                    cases_extra.appendAssumeCapacity(@as(u32, @intCast(case_block.instructions.items.len)));
                     cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false));
                     cases_extra.appendSliceAssumeCapacity(case_block.instructions.items);
                 }
@@ -12098,8 +12098,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
 
             sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload =
                 sema.addExtraAssumeCapacity(Air.CondBr{
-                .then_body_len = @intCast(u32, prev_then_body.len),
-                .else_body_len = @intCast(u32, case_block.instructions.items.len),
+                .then_body_len = @as(u32, @intCast(prev_then_body.len)),
+                .else_body_len = @as(u32, @intCast(case_block.instructions.items.len)),
             });
             sema.air_extra.appendSliceAssumeCapacity(prev_then_body);
             sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items);
@@ -12113,8 +12113,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
     _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{
         .operand = operand,
         .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
-            .cases_len = @intCast(u32, cases_len),
-            .else_body_len = @intCast(u32, final_else_body.len),
+            .cases_len = @as(u32, @intCast(cases_len)),
+            .else_body_len = @as(u32, @intCast(final_else_body.len)),
         }),
     } } });
     sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
@@ -13527,7 +13527,7 @@ fn analyzeTupleMul(
     var i: u32 = 0;
     while (i < tuple_len) : (i += 1) {
         const operand_src = lhs_src; // TODO better source location
-        element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @intCast(u32, i), operand_ty);
+        element_refs[i] = try sema.tupleFieldValByIndex(block, operand_src, operand, @as(u32, @intCast(i)), operand_ty);
     }
     i = 1;
     while (i < factor) : (i += 1) {
@@ -15593,10 +15593,10 @@ fn analyzePtrArithmetic(
         // The resulting pointer is aligned to the lcd between the offset (an
         // arbitrary number) and the alignment factor (always a power of two,
         // non zero).
-        const new_align = @enumFromInt(Alignment, @min(
+        const new_align = @as(Alignment, @enumFromInt(@min(
             @ctz(addend),
             @intFromEnum(ptr_info.flags.alignment),
-        ));
+        )));
         assert(new_align != .none);
 
         break :t try mod.ptrType(.{
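Note: in the hunk above the entire @min expression moves inside @enumFromInt, so the closing parentheses gain a level. A standalone sketch using a stand-in alignment enum in log2 byte units (the compiler's real Alignment type differs):

    const std = @import("std");
    test "nested builtins inside @enumFromInt" {
        const Alignment = enum(u6) { none = 63, _ }; // stand-in; non-exhaustive
        const addend: u64 = 24; // @ctz(addend) == 3
        const ptr_align: Alignment = @enumFromInt(4); // 16-byte alignment
        const new_align = @as(Alignment, @enumFromInt(@min(
            @ctz(addend),
            @intFromEnum(ptr_align),
        )));
        try std.testing.expectEqual(@as(u6, 3), @intFromEnum(new_align)); // 8-byte alignment
    }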
@@ -15675,14 +15675,14 @@ fn zirAsm(
     const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand);
     const src = LazySrcLoc.nodeOffset(extra.data.src_node);
     const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node };
-    const outputs_len = @truncate(u5, extended.small);
-    const inputs_len = @truncate(u5, extended.small >> 5);
-    const clobbers_len = @truncate(u5, extended.small >> 10);
-    const is_volatile = @truncate(u1, extended.small >> 15) != 0;
+    const outputs_len = @as(u5, @truncate(extended.small));
+    const inputs_len = @as(u5, @truncate(extended.small >> 5));
+    const clobbers_len = @as(u5, @truncate(extended.small >> 10));
+    const is_volatile = @as(u1, @truncate(extended.small >> 15)) != 0;
     const is_global_assembly = sema.func_index == .none;
 
     const asm_source: []const u8 = if (tmpl_is_expr) blk: {
-        const tmpl = @enumFromInt(Zir.Inst.Ref, extra.data.asm_source);
+        const tmpl = @as(Zir.Inst.Ref, @enumFromInt(extra.data.asm_source));
         const s: []const u8 = try sema.resolveConstString(block, src, tmpl, "assembly code must be comptime-known");
         break :blk s;
     } else sema.code.nullTerminatedString(extra.data.asm_source);
@@ -15721,7 +15721,7 @@ fn zirAsm(
         const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
         extra_i = output.end;
 
-        const is_type = @truncate(u1, output_type_bits) != 0;
+        const is_type = @as(u1, @truncate(output_type_bits)) != 0;
         output_type_bits >>= 1;
 
         if (is_type) {
@@ -15783,10 +15783,10 @@ fn zirAsm(
         .data = .{ .ty_pl = .{
             .ty = expr_ty,
             .payload = sema.addExtraAssumeCapacity(Air.Asm{
-                .source_len = @intCast(u32, asm_source.len),
+                .source_len = @as(u32, @intCast(asm_source.len)),
                 .outputs_len = outputs_len,
-                .inputs_len = @intCast(u32, args.len),
-                .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @intCast(u32, clobbers.len),
+                .inputs_len = @as(u32, @intCast(args.len)),
+                .flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @as(u32, @intCast(clobbers.len)),
             }),
         } },
     });
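Note: the flags expression above packs a bool into the top bit next to a 31-bit length, combining two of the migrated builtins. Sketch with made-up values:

    const std = @import("std");
    test "packing a bool and a length into flags" {
        const is_volatile = true;
        const clobbers_len: usize = 3;
        const flags = (@as(u32, @intFromBool(is_volatile)) << 31) | @as(u32, @intCast(clobbers_len));
        try std.testing.expectEqual(@as(u32, 0x8000_0003), flags);
    }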
@@ -16192,7 +16192,7 @@ fn zirThis(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const this_decl_index = mod.namespaceDeclIndex(block.namespace);
-    const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
+    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
     return sema.analyzeDeclVal(block, src, this_decl_index);
 }
 
@@ -16329,7 +16329,7 @@ fn zirFrameAddress(
     block: *Block,
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
-    const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
+    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
     try sema.requireRuntimeBlock(block, src, null);
     return try block.addNoOp(.frame_addr);
 }
@@ -16482,7 +16482,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
                 const is_noalias = blk: {
                     const index = std.math.cast(u5, i) orelse break :blk false;
-                    break :blk @truncate(u1, info.noalias_bits >> index) != 0;
+                    break :blk @as(u1, @truncate(info.noalias_bits >> index)) != 0;
                 };
 
                 const param_fields = .{
@@ -16925,7 +16925,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 else
                     try mod.intern(.{ .int = .{
                         .ty = .comptime_int_type,
-                        .storage = .{ .u64 = @intCast(u64, i) },
+                        .storage = .{ .u64 = @as(u64, @intCast(i)) },
                     } });
                 // TODO: write something like getCoercedInts to avoid needing to dupe
                 const name = try sema.arena.dupe(u8, ip.stringToSlice(enum_type.names[i]));
@@ -17739,7 +17739,7 @@ fn zirBoolBr(
         return sema.resolveBody(parent_block, body, inst);
     }
 
-    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
     try sema.air_instructions.append(gpa, .{
         .tag = .block,
         .data = .{ .ty_pl = .{
@@ -17801,8 +17801,8 @@ fn finishCondBr(
         @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len + 1);
 
     const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
-        .then_body_len = @intCast(u32, then_block.instructions.items.len),
-        .else_body_len = @intCast(u32, else_block.instructions.items.len),
+        .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)),
+        .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)),
     });
     sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
     sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
@@ -17813,7 +17813,7 @@ fn finishCondBr(
     } } });
 
     sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
-        Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) },
+        Air.Block{ .body_len = @as(u32, @intCast(child_block.instructions.items.len)) },
     );
     sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items);
 
@@ -17976,8 +17976,8 @@ fn zirCondbr(
         .data = .{ .pl_op = .{
             .operand = cond,
             .payload = sema.addExtraAssumeCapacity(Air.CondBr{
-                .then_body_len = @intCast(u32, true_instructions.len),
-                .else_body_len = @intCast(u32, sub_block.instructions.items.len),
+                .then_body_len = @as(u32, @intCast(true_instructions.len)),
+                .else_body_len = @as(u32, @intCast(sub_block.instructions.items.len)),
             }),
         } },
     });
@@ -18024,7 +18024,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
         .data = .{ .pl_op = .{
             .operand = err_union,
             .payload = sema.addExtraAssumeCapacity(Air.Try{
-                .body_len = @intCast(u32, sub_block.instructions.items.len),
+                .body_len = @as(u32, @intCast(sub_block.instructions.items.len)),
             }),
         } },
     });
@@ -18084,7 +18084,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
             .ty = res_ty_ref,
             .payload = sema.addExtraAssumeCapacity(Air.TryPtr{
                 .ptr = operand,
-                .body_len = @intCast(u32, sub_block.instructions.items.len),
+                .body_len = @as(u32, @intCast(sub_block.instructions.items.len)),
             }),
         } },
     });
@@ -18100,7 +18100,7 @@ fn addRuntimeBreak(sema: *Sema, child_block: *Block, break_data: BreakData) !voi
     const labeled_block = if (!gop.found_existing) blk: {
         try sema.post_hoc_blocks.ensureUnusedCapacity(sema.gpa, 1);
 
-        const new_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+        const new_block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
         gop.value_ptr.* = Air.indexToRef(new_block_inst);
         try sema.air_instructions.append(sema.gpa, .{
             .tag = .block,
@@ -18296,8 +18296,8 @@ fn retWithErrTracing(
         @typeInfo(Air.Block).Struct.fields.len + 1);
 
     const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
-        .then_body_len = @intCast(u32, then_block.instructions.items.len),
-        .else_body_len = @intCast(u32, else_block.instructions.items.len),
+        .then_body_len = @as(u32, @intCast(then_block.instructions.items.len)),
+        .else_body_len = @as(u32, @intCast(else_block.instructions.items.len)),
     });
     sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items);
     sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items);
@@ -18486,7 +18486,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     var extra_i = extra.end;
 
     const sentinel = if (inst_data.flags.has_sentinel) blk: {
-        const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]);
+        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
         extra_i += 1;
         const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src);
         const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known");
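`@enumFromInt` follows the same shape: the destination enum type moves out of the argument list and into the result location. A hedged sketch with a made-up enum standing in for `Zir.Inst.Ref`:

    const std = @import("std");

    const Color = enum(u8) { red, green, blue };

    test "enum casts take their type from the result location" {
        const raw: u8 = 1;
        // Old syntax: const c = @enumFromInt(Color, raw);
        const c = @as(Color, @enumFromInt(raw));
        try std.testing.expectEqual(Color.green, c);
    }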
@@ -18494,7 +18494,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     } else .none;
 
     const abi_align: Alignment = if (inst_data.flags.has_align) blk: {
-        const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]);
+        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
         extra_i += 1;
         const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src);
         const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known");
@@ -18507,29 +18507,29 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             },
             else => {},
         }
-        const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?);
+        const abi_align = @as(u32, @intCast((try val.getUnsignedIntAdvanced(mod, sema)).?));
         try sema.validateAlign(block, align_src, abi_align);
         break :blk Alignment.fromByteUnits(abi_align);
     } else .none;
 
     const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: {
-        const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]);
+        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
         extra_i += 1;
         break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer);
     } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic;
 
     const bit_offset = if (inst_data.flags.has_bit_range) blk: {
-        const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]);
+        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
         extra_i += 1;
         const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, "pointer bit-offset must be comptime-known");
-        break :blk @intCast(u16, bit_offset);
+        break :blk @as(u16, @intCast(bit_offset));
     } else 0;
 
     const host_size: u16 = if (inst_data.flags.has_bit_range) blk: {
-        const ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_i]);
+        const ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_i]));
         extra_i += 1;
         const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, "pointer host size must be comptime-known");
-        break :blk @intCast(u16, host_size);
+        break :blk @as(u16, @intCast(host_size));
     } else 0;
 
     if (host_size != 0 and bit_offset >= host_size * 8) {
@@ -18669,7 +18669,7 @@ fn unionInit(
 
     if (try sema.resolveMaybeUndefVal(init)) |init_val| {
         const tag_ty = union_ty.unionTagTypeHypothetical(mod);
-        const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
+        const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?));
         const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
         return sema.addConstant((try mod.intern(.{ .un = .{
             .ty = union_ty.toIntern(),
@@ -18771,7 +18771,7 @@ fn zirStructInit(
         const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start));
         const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
         const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
-        const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
+        const enum_field_index = @as(u32, @intCast(tag_ty.enumFieldIndex(field_name, mod).?));
         const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
 
         const init_inst = try sema.resolveInst(item.data.init);
@@ -18915,7 +18915,7 @@ fn finishStructInit(
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
         for (field_inits, 0..) |field_init, i_usize| {
-            const i = @intCast(u32, i_usize);
+            const i = @as(u32, @intCast(i_usize));
             const field_src = dest_src;
             const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true);
             try sema.storePtr(block, dest_src, field_ptr, field_init);
@@ -18958,7 +18958,7 @@ fn zirStructInitAnon(
         var runtime_index: ?usize = null;
         var extra_index = extra.end;
         for (types, 0..) |*field_ty, i_usize| {
-            const i = @intCast(u32, i_usize);
+            const i = @as(u32, @intCast(i_usize));
             const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
             extra_index = item.end;
 
@@ -19037,7 +19037,7 @@ fn zirStructInitAnon(
         const alloc = try block.addTy(.alloc, alloc_ty);
         var extra_index = extra.end;
         for (types, 0..) |field_ty, i_usize| {
-            const i = @intCast(u32, i_usize);
+            const i = @as(u32, @intCast(i_usize));
             const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
             extra_index = item.end;
 
@@ -19109,7 +19109,7 @@ fn zirArrayInit(
 
     const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
         const comptime_known = try sema.isComptimeKnown(arg);
-        if (!comptime_known) break @intCast(u32, i);
+        if (!comptime_known) break @as(u32, @intCast(i));
     } else null;
 
     const runtime_index = opt_runtime_index orelse {
@@ -19244,7 +19244,7 @@ fn zirArrayInitAnon(
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
         for (operands, 0..) |operand, i_usize| {
-            const i = @intCast(u32, i_usize);
+            const i = @as(u32, @intCast(i_usize));
             const field_ptr_ty = try mod.ptrType(.{
                 .child = types[i],
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
@@ -19395,7 +19395,7 @@ fn zirFrame(
     block: *Block,
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
-    const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
+    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
     return sema.failWithUseOfAsync(block, src);
 }
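`@bitCast` is a same-size reinterpretation, so converting the unsigned `extended.operand` into a signed node offset preserves all 32 bits. An illustrative sketch (the value is hypothetical):

    const std = @import("std");

    test "@bitCast reinterprets the same 32 bits as signed" {
        const operand: u32 = 0xffff_ffff;
        // Old syntax: const off = @bitCast(i32, operand);
        const off = @as(i32, @bitCast(operand));
        try std.testing.expectEqual(@as(i32, -1), off);
    }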
 
@@ -19588,7 +19588,7 @@ fn zirReify(
     const mod = sema.mod;
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
-    const name_strategy = @enumFromInt(Zir.Inst.NameStrategy, extended.small);
+    const name_strategy = @as(Zir.Inst.NameStrategy, @enumFromInt(extended.small));
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const type_info_ty = try sema.getBuiltinType("Type");
@@ -19600,7 +19600,7 @@ fn zirReify(
     const target = mod.getTarget();
     if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
     const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?;
-    switch (@enumFromInt(std.builtin.TypeId, tag_index)) {
+    switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) {
         .Type => return Air.Inst.Ref.type_type,
         .Void => return Air.Inst.Ref.void_type,
         .Bool => return Air.Inst.Ref.bool_type,
@@ -19623,7 +19623,7 @@ fn zirReify(
             );
 
             const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
-            const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
+            const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod)));
             const ty = try mod.intType(signedness, bits);
             return sema.addType(ty);
         },
@@ -19636,7 +19636,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "child"),
             ).?);
 
-            const len = @intCast(u32, len_val.toUnsignedInt(mod));
+            const len = @as(u32, @intCast(len_val.toUnsignedInt(mod)));
             const child_ty = child_val.toType();
 
             try sema.checkVectorElemType(block, src, child_ty);
@@ -19653,7 +19653,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "bits"),
             ).?);
 
-            const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
+            const bits = @as(u16, @intCast(bits_val.toUnsignedInt(mod)));
             const ty = switch (bits) {
                 16 => Type.f16,
                 32 => Type.f32,
@@ -19925,7 +19925,7 @@ fn zirReify(
             }
 
             // Define our empty enum decl
-            const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
+            const fields_len = @as(u32, @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod))));
             const incomplete_enum = try ip.getIncompleteEnum(gpa, .{
                 .decl = new_decl_index,
                 .namespace = .none,
@@ -20288,7 +20288,7 @@ fn zirReify(
                 if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
                     return sema.fail(block, src, "alignment must fit in 'u32'", .{});
                 }
-                const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod));
+                const alignment = @as(u29, @intCast(alignment_val.toUnsignedInt(mod)));
                 if (alignment == target_util.defaultFunctionAlignment(target)) {
                     break :alignment .none;
                 } else {
@@ -20565,7 +20565,7 @@ fn reifyStruct(
             try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
             struct_obj.backing_int_ty = backing_int_ty;
         } else {
-            struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
+            struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum)));
         }
 
         struct_obj.status = .have_layout;
@@ -20636,7 +20636,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
 }
 
 fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
-    const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
+    const src = LazySrcLoc.nodeOffset(@as(i32, @bitCast(extended.operand)));
 
     const va_list_ty = try sema.getBuiltinType("VaList");
     try sema.requireRuntimeBlock(block, src, null);
@@ -20903,7 +20903,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
 }
 
 fn zirPtrCastFull(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
-    const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small));
+    const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small))));
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
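When casts nest, each one gains its own result type, so the old `@bitCast(T, @truncate(u5, x))` becomes two `@as` wrappers. A sketch with a hypothetical packed struct standing in for `Zir.Inst.FullPtrCastFlags`:

    const std = @import("std");

    const Flags = packed struct(u5) { a: bool, b: bool, c: bool, d: bool, e: bool };

    test "nested casts each get their own result type" {
        const small: u16 = 0b10111;
        // Old syntax: const flags = @bitCast(Flags, @truncate(u5, small));
        const flags = @as(Flags, @bitCast(@as(u5, @truncate(small))));
        // Bit 0 maps to field `a`; 0b10111 sets a, b, c, e but not d.
        try std.testing.expect(flags.a and flags.e and !flags.d);
    }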
@@ -21310,7 +21310,7 @@ fn ptrCastFull(
 
 fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const flags = @bitCast(Zir.Inst.FullPtrCastFlags, @truncate(u5, extended.small));
+    const flags = @as(Zir.Inst.FullPtrCastFlags, @bitCast(@as(u5, @truncate(extended.small))));
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -22271,7 +22271,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
     const scalar_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
-    const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector splat destination length must be comptime-known"));
+    const len = @as(u32, @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector splat destination length must be comptime-known")));
     const scalar = try sema.resolveInst(extra.rhs);
     const scalar_ty = sema.typeOf(scalar);
     try sema.checkVectorElemType(block, scalar_src, scalar_ty);
@@ -22376,12 +22376,12 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
     };
     mask_ty = try mod.vectorType(.{
-        .len = @intCast(u32, mask_len),
+        .len = @as(u32, @intCast(mask_len)),
         .child = .i32_type,
     });
     mask = try sema.coerce(block, mask_ty, mask, mask_src);
     const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known");
-    return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @intCast(u32, mask_len));
+    return sema.analyzeShuffle(block, inst_data.src_node, elem_ty, a, b, mask_val, @as(u32, @intCast(mask_len)));
 }
 
 fn analyzeShuffle(
@@ -22425,8 +22425,8 @@ fn analyzeShuffle(
     if (maybe_a_len == null and maybe_b_len == null) {
         return sema.addConstUndef(res_ty);
     }
-    const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?);
-    const b_len = @intCast(u32, maybe_b_len orelse a_len);
+    const a_len = @as(u32, @intCast(maybe_a_len orelse maybe_b_len.?));
+    const b_len = @as(u32, @intCast(maybe_b_len orelse a_len));
 
     const a_ty = try mod.vectorType(.{
         .len = a_len,
@@ -22445,17 +22445,17 @@ fn analyzeShuffle(
         .{ b_len, b_src, b_ty },
     };
 
-    for (0..@intCast(usize, mask_len)) |i| {
+    for (0..@as(usize, @intCast(mask_len))) |i| {
         const elem = try mask.elemValue(sema.mod, i);
         if (elem.isUndef(mod)) continue;
         const int = elem.toSignedInt(mod);
         var unsigned: u32 = undefined;
         var chosen: u32 = undefined;
         if (int >= 0) {
-            unsigned = @intCast(u32, int);
+            unsigned = @as(u32, @intCast(int));
             chosen = 0;
         } else {
-            unsigned = @intCast(u32, ~int);
+            unsigned = @as(u32, @intCast(~int));
             chosen = 1;
         }
         if (unsigned >= operand_info[chosen][0]) {
@@ -22488,7 +22488,7 @@ fn analyzeShuffle(
                     continue;
                 }
                 const int = mask_elem_val.toSignedInt(mod);
-                const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
+                const unsigned = if (int >= 0) @as(u32, @intCast(int)) else @as(u32, @intCast(~int));
                 values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod);
             }
             return sema.addConstant((try mod.intern(.{ .aggregate = .{
@@ -22509,23 +22509,23 @@ fn analyzeShuffle(
         const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len));
 
         const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
-        for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| {
+        for (@as(usize, @intCast(0))..@as(usize, @intCast(min_len))) |i| {
             expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern();
         }
-        for (@intCast(usize, min_len)..@intCast(usize, max_len)) |i| {
+        for (@as(usize, @intCast(min_len))..@as(usize, @intCast(max_len))) |i| {
             expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern();
         }
         const expand_mask = try mod.intern(.{ .aggregate = .{
-            .ty = (try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(),
+            .ty = (try mod.vectorType(.{ .len = @as(u32, @intCast(max_len)), .child = .comptime_int_type })).toIntern(),
             .storage = .{ .elems = expand_mask_values },
         } });
 
         if (a_len < b_len) {
             const undef = try sema.addConstUndef(a_ty);
-            a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len));
+            a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @as(u32, @intCast(max_len)));
         } else {
             const undef = try sema.addConstUndef(b_ty);
-            b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len));
+            b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @as(u32, @intCast(max_len)));
         }
     }
 
@@ -22562,7 +22562,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
         .Vector, .Array => pred_ty.arrayLen(mod),
         else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}),
     };
-    const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64));
+    const vec_len = @as(u32, @intCast(try sema.usizeCast(block, pred_src, vec_len_u64)));
 
     const bool_vec_ty = try mod.vectorType(.{
         .len = vec_len,
@@ -22930,7 +22930,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 
     var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod));
     for (resolved_args, 0..) |*resolved, i| {
-        resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
+        resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @as(u32, @intCast(i)), args_ty);
     }
 
     const callee_ty = sema.typeOf(func);
@@ -23048,7 +23048,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
             .ty = try sema.addType(result_ptr),
             .payload = try block.sema.addExtra(Air.FieldParentPtr{
                 .field_ptr = casted_field_ptr,
-                .field_index = @intCast(u32, field_index),
+                .field_index = @as(u32, @intCast(field_index)),
             }),
         } },
     });
@@ -23684,7 +23684,7 @@ fn zirVarExtended(
     const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
     const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
     const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
-    const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);
+    const small = @as(Zir.Inst.ExtendedVar.Small, @bitCast(extended.small));
 
     var extra_index: usize = extra.end;
 
@@ -23699,7 +23699,7 @@ fn zirVarExtended(
     assert(!small.has_align);
 
     const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
-        const init_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const init_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         break :blk try sema.resolveInst(init_ref);
     } else .none;
@@ -23776,7 +23776,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (val.isGenericPoison()) {
             break :blk null;
         }
-        const alignment = @intCast(u32, val.toUnsignedInt(mod));
+        const alignment = @as(u32, @intCast(val.toUnsignedInt(mod)));
         try sema.validateAlign(block, align_src, alignment);
         if (alignment == target_util.defaultFunctionAlignment(target)) {
             break :blk .none;
@@ -23784,7 +23784,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             break :blk Alignment.fromNonzeroByteUnits(alignment);
         }
     } else if (extra.data.bits.has_align_ref) blk: {
-        const align_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const align_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         const align_tv = sema.resolveInstConst(block, align_src, align_ref, "alignment must be comptime-known") catch |err| switch (err) {
             error.GenericPoison => {
@@ -23792,7 +23792,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             },
             else => |e| return e,
         };
-        const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod));
+        const alignment = @as(u32, @intCast(align_tv.val.toUnsignedInt(mod)));
         try sema.validateAlign(block, align_src, alignment);
         if (alignment == target_util.defaultFunctionAlignment(target)) {
             break :blk .none;
@@ -23814,7 +23814,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         }
         break :blk mod.toEnum(std.builtin.AddressSpace, val);
     } else if (extra.data.bits.has_addrspace_ref) blk: {
-        const addrspace_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const addrspace_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         const addrspace_tv = sema.resolveInstConst(block, addrspace_src, addrspace_ref, "address space must be comptime-known") catch |err| switch (err) {
             error.GenericPoison => {
@@ -23838,7 +23838,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         }
         break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) };
     } else if (extra.data.bits.has_section_ref) blk: {
-        const section_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const section_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) {
             error.GenericPoison => {
@@ -23862,7 +23862,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         }
         break :blk mod.toEnum(std.builtin.CallingConvention, val);
     } else if (extra.data.bits.has_cc_ref) blk: {
-        const cc_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const cc_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         const cc_tv = sema.resolveInstConst(block, cc_src, cc_ref, "calling convention must be comptime-known") catch |err| switch (err) {
             error.GenericPoison => {
@@ -23886,7 +23886,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const ty = val.toType();
         break :blk ty;
     } else if (extra.data.bits.has_ret_ty_ref) blk: {
-        const ret_ty_ref = @enumFromInt(Zir.Inst.Ref, sema.code.extra[extra_index]);
+        const ret_ty_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) {
             error.GenericPoison => {
@@ -23995,7 +23995,7 @@ fn zirWasmMemorySize(
         return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
     }
 
-    const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known"));
+    const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, "wasm memory size index must be comptime-known")));
     try sema.requireRuntimeBlock(block, builtin_src, null);
     return block.addInst(.{
         .tag = .wasm_memory_size,
@@ -24020,7 +24020,7 @@ fn zirWasmMemoryGrow(
         return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
     }
 
-    const index = @intCast(u32, try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory size index must be comptime-known"));
+    const index = @as(u32, @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, "wasm memory size index must be comptime-known")));
     const delta = try sema.coerce(block, Type.u32, try sema.resolveInst(extra.rhs), delta_src);
 
     try sema.requireRuntimeBlock(block, builtin_src, null);
@@ -24060,7 +24060,7 @@ fn resolvePrefetchOptions(
 
     return std.builtin.PrefetchOptions{
         .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
-        .locality = @intCast(u2, locality_val.toUnsignedInt(mod)),
+        .locality = @as(u2, @intCast(locality_val.toUnsignedInt(mod))),
         .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
     };
 }
@@ -24259,7 +24259,7 @@ fn zirWorkItem(
         },
     }
 
-    const dimension = @intCast(u32, try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known"));
+    const dimension = @as(u32, @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, "dimension must be comptime-known")));
     try sema.requireRuntimeBlock(block, builtin_src, null);
 
     return block.addInst(.{
@@ -24814,7 +24814,7 @@ fn addSafetyCheckExtra(
         fail_block.instructions.items.len);
 
     try sema.air_instructions.ensureUnusedCapacity(gpa, 3);
-    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    const block_inst = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
     const cond_br_inst = block_inst + 1;
     const br_inst = cond_br_inst + 1;
     sema.air_instructions.appendAssumeCapacity(.{
@@ -24834,7 +24834,7 @@ fn addSafetyCheckExtra(
             .operand = ok,
             .payload = sema.addExtraAssumeCapacity(Air.CondBr{
                 .then_body_len = 1,
-                .else_body_len = @intCast(u32, fail_block.instructions.items.len),
+                .else_body_len = @as(u32, @intCast(fail_block.instructions.items.len)),
             }),
         } },
     });
@@ -25210,7 +25210,7 @@ fn fieldVal(
                     const union_ty = try sema.resolveTypeFields(child_type);
                     if (union_ty.unionTagType(mod)) |enum_ty| {
                         if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| {
-                            const field_index = @intCast(u32, field_index_usize);
+                            const field_index = @as(u32, @intCast(field_index_usize));
                             return sema.addConstant(
                                 try mod.enumValueFieldIndex(enum_ty, field_index),
                             );
@@ -25226,7 +25226,7 @@ fn fieldVal(
                     }
                     const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse
                         return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
-                    const field_index = @intCast(u32, field_index_usize);
+                    const field_index = @as(u32, @intCast(field_index_usize));
                     const enum_val = try mod.enumValueFieldIndex(child_type, field_index);
                     return sema.addConstant(enum_val);
                 },
@@ -25438,7 +25438,7 @@ fn fieldPtr(
                     const union_ty = try sema.resolveTypeFields(child_type);
                     if (union_ty.unionTagType(mod)) |enum_ty| {
                         if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| {
-                            const field_index_u32 = @intCast(u32, field_index);
+                            const field_index_u32 = @as(u32, @intCast(field_index));
                             var anon_decl = try block.startAnonDecl();
                             defer anon_decl.deinit();
                             return sema.analyzeDeclRef(try anon_decl.finish(
@@ -25459,7 +25459,7 @@ fn fieldPtr(
                     const field_index = child_type.enumFieldIndex(field_name, mod) orelse {
                         return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
                     };
-                    const field_index_u32 = @intCast(u32, field_index);
+                    const field_index_u32 = @as(u32, @intCast(field_index));
                     var anon_decl = try block.startAnonDecl();
                     defer anon_decl.deinit();
                     return sema.analyzeDeclRef(try anon_decl.finish(
@@ -25544,7 +25544,7 @@ fn fieldCallBind(
                 if (mod.typeToStruct(struct_ty)) |struct_obj| {
                     const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
                         break :find_field;
-                    const field_index = @intCast(u32, field_index_usize);
+                    const field_index = @as(u32, @intCast(field_index_usize));
                     const field = struct_obj.fields.values()[field_index];
 
                     return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
@@ -25559,7 +25559,7 @@ fn fieldCallBind(
                 } else {
                     const max = struct_ty.structFieldCount(mod);
                     for (0..max) |i_usize| {
-                        const i = @intCast(u32, i_usize);
+                        const i = @as(u32, @intCast(i_usize));
                         if (field_name == struct_ty.structFieldName(i, mod)) {
                             return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr);
                         }
@@ -25570,7 +25570,7 @@ fn fieldCallBind(
                 const union_ty = try sema.resolveTypeFields(concrete_ty);
                 const fields = union_ty.unionFields(mod);
                 const field_index_usize = fields.getIndex(field_name) orelse break :find_field;
-                const field_index = @intCast(u32, field_index_usize);
+                const field_index = @as(u32, @intCast(field_index_usize));
                 const field = fields.values()[field_index];
 
                 return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
@@ -25792,7 +25792,7 @@ fn structFieldPtr(
 
     const field_index_big = struct_obj.fields.getIndex(field_name) orelse
         return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
-    const field_index = @intCast(u32, field_index_big);
+    const field_index = @as(u32, @intCast(field_index_big));
 
     return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing);
 }
@@ -25838,7 +25838,7 @@ fn structFieldPtrByIndex(
             if (i == field_index) {
                 ptr_ty_data.packed_offset.bit_offset = running_bits;
             }
-            running_bits += @intCast(u16, f.ty.bitSize(mod));
+            running_bits += @as(u16, @intCast(f.ty.bitSize(mod)));
         }
         ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8;
 
@@ -25868,7 +25868,7 @@ fn structFieldPtrByIndex(
             const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod);
             if (elem_size_bytes * 8 == elem_size_bits) {
                 const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
-                const new_align = @enumFromInt(Alignment, @ctz(byte_offset | parent_align));
+                const new_align = @as(Alignment, @enumFromInt(@ctz(byte_offset | parent_align)));
                 assert(new_align != .none);
                 ptr_ty_data.flags.alignment = new_align;
                 ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
@@ -25923,7 +25923,7 @@ fn structFieldVal(
 
             const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
                 return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
-            const field_index = @intCast(u32, field_index_usize);
+            const field_index = @as(u32, @intCast(field_index_usize));
             const field = struct_obj.fields.values()[field_index];
 
             if (field.is_comptime) {
@@ -26058,7 +26058,7 @@ fn unionFieldPtr(
             .address_space = union_ptr_ty.ptrAddressSpace(mod),
         },
     });
-    const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?);
+    const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?));
 
     if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) {
         const msg = msg: {
@@ -26146,7 +26146,7 @@ fn unionFieldVal(
     const union_obj = mod.typeToUnion(union_ty).?;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
     const field = union_obj.fields.values()[field_index];
-    const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?);
+    const enum_field_index = @as(u32, @intCast(union_obj.tag_ty.enumFieldIndex(field_name, mod).?));
 
     if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| {
         if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty);
@@ -26226,7 +26226,7 @@ fn elemPtr(
         .Struct => {
             // Tuple field access.
             const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
-            const index = @intCast(u32, index_val.toUnsignedInt(mod));
+            const index = @as(u32, @intCast(index_val.toUnsignedInt(mod)));
             return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
         },
         else => {
@@ -26261,7 +26261,7 @@ fn elemPtrOneLayerOnly(
             const runtime_src = rs: {
                 const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
                 const index_val = maybe_index_val orelse break :rs elem_index_src;
-                const index = @intCast(usize, index_val.toUnsignedInt(mod));
+                const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
                 const result_ty = try sema.elemPtrType(indexable_ty, index);
                 const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod);
                 return sema.addConstant(elem_ptr);
@@ -26280,7 +26280,7 @@ fn elemPtrOneLayerOnly(
                 .Struct => {
                     assert(child_ty.isTuple(mod));
                     const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
-                    const index = @intCast(u32, index_val.toUnsignedInt(mod));
+                    const index = @as(u32, @intCast(index_val.toUnsignedInt(mod)));
                     return sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false);
                 },
                 else => unreachable, // Guaranteed by checkIndexable
@@ -26318,7 +26318,7 @@ fn elemVal(
                 const runtime_src = rs: {
                     const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
                     const index_val = maybe_index_val orelse break :rs elem_index_src;
-                    const index = @intCast(usize, index_val.toUnsignedInt(mod));
+                    const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
                     const elem_ty = indexable_ty.elemType2(mod);
                     const many_ptr_ty = try mod.manyConstPtrType(elem_ty);
                     const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty);
@@ -26355,7 +26355,7 @@ fn elemVal(
         .Struct => {
             // Tuple field access.
             const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
-            const index = @intCast(u32, index_val.toUnsignedInt(mod));
+            const index = @as(u32, @intCast(index_val.toUnsignedInt(mod)));
             return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
         },
         else => unreachable,
@@ -26516,7 +26516,7 @@ fn elemValArray(
     const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
 
     if (maybe_index_val) |index_val| {
-        const index = @intCast(usize, index_val.toUnsignedInt(mod));
+        const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
         if (array_sent) |s| {
             if (index == array_len) {
                 return sema.addConstant(s);
@@ -26532,7 +26532,7 @@ fn elemValArray(
             return sema.addConstUndef(elem_ty);
         }
         if (maybe_index_val) |index_val| {
-            const index = @intCast(usize, index_val.toUnsignedInt(mod));
+            const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
             const elem_val = try array_val.elemValue(mod, index);
             return sema.addConstant(elem_val);
         }
@@ -26644,7 +26644,7 @@ fn elemValSlice(
             return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
         }
         if (maybe_index_val) |index_val| {
-            const index = @intCast(usize, index_val.toUnsignedInt(mod));
+            const index = @as(usize, @intCast(index_val.toUnsignedInt(mod)));
             if (index >= slice_len_s) {
                 const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                 return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
@@ -27287,7 +27287,7 @@ fn coerceExtra(
                     return sema.failWithOwnedErrorMsg(msg);
                 };
                 return sema.addConstant(
-                    try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)),
+                    try mod.enumValueFieldIndex(dest_ty, @as(u32, @intCast(field_index))),
                 );
             },
             .Union => blk: {
@@ -27692,8 +27692,8 @@ const InMemoryCoercionResult = union(enum) {
                 var index: u6 = 0;
                 var actual_noalias = false;
                 while (true) : (index += 1) {
-                    const actual = @truncate(u1, param.actual >> index);
-                    const wanted = @truncate(u1, param.wanted >> index);
+                    const actual = @as(u1, @truncate(param.actual >> index));
+                    const wanted = @as(u1, @truncate(param.wanted >> index));
                     if (actual != wanted) {
                         actual_noalias = actual == 1;
                         break;
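The shift-and-truncate idiom for extracting one bit from a bit bag migrates the same way; `@truncate` keeps only the low bits that fit its result type. A small sketch:

    const std = @import("std");

    test "shift-and-truncate extracts a single bit" {
        const bag: u32 = 0b1010;
        const index: u5 = 1;
        // Old syntax: const bit = @truncate(u1, bag >> index);
        const bit = @as(u1, @truncate(bag >> index));
        try std.testing.expectEqual(@as(u1, 1), bit);
    }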
@@ -28218,7 +28218,7 @@ fn coerceInMemoryAllowedFns(
         const dest_param_ty = dest_info.param_types[param_i].toType();
         const src_param_ty = src_info.param_types[param_i].toType();
 
-        const param_i_small = @intCast(u5, param_i);
+        const param_i_small = @as(u5, @intCast(param_i));
         if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
             return InMemoryCoercionResult{ .fn_param_comptime = .{
                 .index = param_i,
@@ -28832,7 +28832,7 @@ fn beginComptimePtrMutation(
                                     // bytes.len may be one greater than dest_len because of the case when
                                     // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
                                     assert(bytes.len >= dest_len);
-                                    const elems = try arena.alloc(Value, @intCast(usize, dest_len));
+                                    const elems = try arena.alloc(Value, @as(usize, @intCast(dest_len)));
                                     for (elems, 0..) |*elem, i| {
                                         elem.* = try mod.intValue(elem_ty, bytes[i]);
                                     }
@@ -28844,7 +28844,7 @@ fn beginComptimePtrMutation(
                                         block,
                                         src,
                                         elem_ty,
-                                        &elems[@intCast(usize, elem_ptr.index)],
+                                        &elems[@as(usize, @intCast(elem_ptr.index))],
                                         ptr_elem_ty,
                                         parent.mut_decl,
                                     );
@@ -28872,7 +28872,7 @@ fn beginComptimePtrMutation(
                                         block,
                                         src,
                                         elem_ty,
-                                        &elems[@intCast(usize, elem_ptr.index)],
+                                        &elems[@as(usize, @intCast(elem_ptr.index))],
                                         ptr_elem_ty,
                                         parent.mut_decl,
                                     );
@@ -28883,7 +28883,7 @@ fn beginComptimePtrMutation(
                                     block,
                                     src,
                                     elem_ty,
-                                    &val_ptr.castTag(.aggregate).?.data[@intCast(usize, elem_ptr.index)],
+                                    &val_ptr.castTag(.aggregate).?.data[@as(usize, @intCast(elem_ptr.index))],
                                     ptr_elem_ty,
                                     parent.mut_decl,
                                 ),
@@ -28909,7 +28909,7 @@ fn beginComptimePtrMutation(
                                         block,
                                         src,
                                         elem_ty,
-                                        &elems[@intCast(usize, elem_ptr.index)],
+                                        &elems[@as(usize, @intCast(elem_ptr.index))],
                                         ptr_elem_ty,
                                         parent.mut_decl,
                                     );
@@ -28964,7 +28964,7 @@ fn beginComptimePtrMutation(
         },
         .field => |field_ptr| {
             const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
-            const field_index = @intCast(u32, field_ptr.index);
+            const field_index = @as(u32, @intCast(field_ptr.index));
 
             var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty);
             switch (parent.pointee) {
@@ -29401,12 +29401,12 @@ fn beginComptimePtrLoad(
                 }
                 deref.pointee = TypedValue{
                     .ty = elem_ty,
-                    .val = try array_tv.val.elemValue(mod, @intCast(usize, elem_ptr.index)),
+                    .val = try array_tv.val.elemValue(mod, @as(usize, @intCast(elem_ptr.index))),
                 };
                 break :blk deref;
             },
             .field => |field_ptr| blk: {
-                const field_index = @intCast(u32, field_ptr.index);
+                const field_index = @as(u32, @intCast(field_ptr.index));
                 const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
                 var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty);
 
@@ -29990,7 +29990,7 @@ fn coerceTupleToArray(
 
     var runtime_src: ?LazySrcLoc = null;
     for (element_vals, element_refs, 0..) |*val, *ref, i_usize| {
-        const i = @intCast(u32, i_usize);
+        const i = @as(u32, @intCast(i_usize));
         if (i_usize == inst_len) {
             const sentinel_val = dest_ty.sentinel(mod).?;
             val.* = sentinel_val.toIntern();
@@ -30101,7 +30101,7 @@ fn coerceTupleToStruct(
         else => unreachable,
     };
     for (0..field_count) |field_index_usize| {
-        const field_i = @intCast(u32, field_index_usize);
+        const field_i = @as(u32, @intCast(field_index_usize));
         const field_src = inst_src; // TODO better source location
         // https://github.com/ziglang/zig/issues/15709
         const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
@@ -30217,7 +30217,7 @@ fn coerceTupleToTuple(
 
     var runtime_src: ?LazySrcLoc = null;
     for (0..dest_field_count) |field_index_usize| {
-        const field_i = @intCast(u32, field_index_usize);
+        const field_i = @as(u32, @intCast(field_index_usize));
         const field_src = inst_src; // TODO better source location
         // https://github.com/ziglang/zig/issues/15709
         const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
@@ -31532,7 +31532,7 @@ fn compareIntsOnlyPossibleResult(
 
         const ty = try mod.intType(
             if (is_negative) .signed else .unsigned,
-            @intCast(u16, req_bits),
+            @as(u16, @intCast(req_bits)),
         );
         const pop_count = lhs_val.popCount(ty, mod);
 
@@ -32294,7 +32294,7 @@ fn resolvePeerTypesInner(
             };
 
             return .{ .success = try mod.vectorType(.{
-                .len = @intCast(u32, len.?),
+                .len = @as(u32, @intCast(len.?)),
                 .child = child_ty.toIntern(),
             }) };
         },
@@ -33402,7 +33402,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
 
             for (struct_obj.fields.values(), 0..) |field, i| {
                 optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
-                    @intCast(u32, i)
+                    @as(u32, @intCast(i))
                 else
                     Module.Struct.omitted_field;
             }
@@ -33443,7 +33443,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
     const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
     const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
     assert(extended.opcode == .struct_decl);
-    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
 
     if (small.has_backing_int) {
         var extra_index: usize = extended.operand;
@@ -33497,7 +33497,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
         const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
         const backing_int_ty = blk: {
             if (backing_int_body_len == 0) {
-                const backing_int_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+                const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
                 break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
             } else {
                 const body = zir.extra[extra_index..][0..backing_int_body_len];
@@ -33543,7 +33543,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
             };
             return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
         }
-        struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
+        struct_obj.backing_int_ty = try mod.intType(.unsigned, @as(u16, @intCast(fields_bit_sum)));
     }
 }
 
@@ -34178,7 +34178,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
     const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
     const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
     assert(extended.opcode == .struct_decl);
-    const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
+    const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
     var extra_index: usize = extended.operand;
 
     const src = LazySrcLoc.nodeOffset(0);
@@ -34288,13 +34288,13 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
                 cur_bit_bag = zir.extra[bit_bag_index];
                 bit_bag_index += 1;
             }
-            const has_align = @truncate(u1, cur_bit_bag) != 0;
+            const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const has_init = @truncate(u1, cur_bit_bag) != 0;
+            const has_init = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const is_comptime = @truncate(u1, cur_bit_bag) != 0;
+            const is_comptime = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
-            const has_type_body = @truncate(u1, cur_bit_bag) != 0;
+            const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
             cur_bit_bag >>= 1;
 
             var field_name_zir: ?[:0]const u8 = null;
@@ -34309,7 +34309,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
             if (has_type_body) {
                 fields[field_i].type_body_len = zir.extra[extra_index];
             } else {
-                fields[field_i].type_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+                fields[field_i].type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
             }
             extra_index += 1;
 
@@ -34529,14 +34529,14 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
     const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir;
     const extended = zir.instructions.items(.data)[union_obj.zir_index].extended;
     assert(extended.opcode == .union_decl);
-    const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
+    const small = @as(Zir.Inst.UnionDecl.Small, @bitCast(extended.small));
     var extra_index: usize = extended.operand;
 
     const src = LazySrcLoc.nodeOffset(0);
     extra_index += @intFromBool(small.has_src_node);
 
     const tag_type_ref: Zir.Inst.Ref = if (small.has_tag_type) blk: {
-        const ty_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+        const ty_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
         extra_index += 1;
         break :blk ty_ref;
     } else .none;
@@ -34684,13 +34684,13 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
             cur_bit_bag = zir.extra[bit_bag_index];
             bit_bag_index += 1;
         }
-        const has_type = @truncate(u1, cur_bit_bag) != 0;
+        const has_type = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
-        const has_align = @truncate(u1, cur_bit_bag) != 0;
+        const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
-        const has_tag = @truncate(u1, cur_bit_bag) != 0;
+        const has_tag = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
-        const unused = @truncate(u1, cur_bit_bag) != 0;
+        const unused = @as(u1, @truncate(cur_bit_bag)) != 0;
         cur_bit_bag >>= 1;
         _ = unused;
 
@@ -34701,19 +34701,19 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
         extra_index += 1;
 
         const field_type_ref: Zir.Inst.Ref = if (has_type) blk: {
-            const field_type_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+            const field_type_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
             extra_index += 1;
             break :blk field_type_ref;
         } else .none;
 
         const align_ref: Zir.Inst.Ref = if (has_align) blk: {
-            const align_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+            const align_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
             extra_index += 1;
             break :blk align_ref;
         } else .none;
 
         const tag_ref: Air.Inst.Ref = if (has_tag) blk: {
-            const tag_ref = @enumFromInt(Zir.Inst.Ref, zir.extra[extra_index]);
+            const tag_ref = @as(Zir.Inst.Ref, @enumFromInt(zir.extra[extra_index]));
             extra_index += 1;
             break :blk try sema.resolveInst(tag_ref);
         } else .none;
@@ -35427,12 +35427,12 @@ pub fn getTmpAir(sema: Sema) Air {
 
 pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
     if (@intFromEnum(ty.toIntern()) < Air.ref_start_index)
-        return @enumFromInt(Air.Inst.Ref, @intFromEnum(ty.toIntern()));
+        return @as(Air.Inst.Ref, @enumFromInt(@intFromEnum(ty.toIntern())));
     try sema.air_instructions.append(sema.gpa, .{
         .tag = .interned,
         .data = .{ .interned = ty.toIntern() },
     });
-    return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+    return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
 }
 
 fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref {
@@ -35446,12 +35446,12 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref {
 
 pub fn addConstant(sema: *Sema, val: Value) SemaError!Air.Inst.Ref {
     if (@intFromEnum(val.toIntern()) < Air.ref_start_index)
-        return @enumFromInt(Air.Inst.Ref, @intFromEnum(val.toIntern()));
+        return @as(Air.Inst.Ref, @enumFromInt(@intFromEnum(val.toIntern())));
     try sema.air_instructions.append(sema.gpa, .{
         .tag = .interned,
         .data = .{ .interned = val.toIntern() },
     });
-    return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+    return Air.indexToRef(@as(u32, @intCast(sema.air_instructions.len - 1)));
 }
 
 pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
@@ -35462,12 +35462,12 @@ pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
 
 pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, sema.air_extra.items.len);
+    const result = @as(u32, @intCast(sema.air_extra.items.len));
     inline for (fields) |field| {
         sema.air_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
             Air.Inst.Ref => @intFromEnum(@field(extra, field.name)),
-            i32 => @bitCast(u32, @field(extra, field.name)),
+            i32 => @as(u32, @bitCast(@field(extra, field.name))),
             InternPool.Index => @intFromEnum(@field(extra, field.name)),
             else => @compileError("bad field type: " ++ @typeName(field.type)),
         });
@@ -35476,7 +35476,7 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
 }
 
 fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void {
-    const coerced = @ptrCast([]const u32, refs);
+    const coerced = @as([]const u32, @ptrCast(refs));
     sema.air_extra.appendSliceAssumeCapacity(coerced);
 }
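`@ptrCast` between slices migrates identically; `Air.Inst.Ref` is a 32-bit enum here, so a `[]const Air.Inst.Ref` reinterprets cleanly as `[]const u32`. A sketch with a stand-in enum:

    const Ref = enum(u32) { none, _ };

    fn asWords(refs: []const Ref) []const u32 {
        // Old syntax: @ptrCast([]const u32, refs)
        const coerced: []const u32 = @ptrCast(refs);
        return coerced;
    }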
 
@@ -35916,10 +35916,10 @@ fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 {
 /// Not valid to call for packed unions.
 /// Keep implementation in sync with `Module.Union.Field.normalAlignment`.
 fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 {
-    return @intCast(u32, if (field.ty.isNoReturn(sema.mod))
+    return @as(u32, @intCast(if (field.ty.isNoReturn(sema.mod))
         0
     else
-        field.abi_align.toByteUnitsOptional() orelse try sema.typeAbiAlignment(field.ty));
+        field.abi_align.toByteUnitsOptional() orelse try sema.typeAbiAlignment(field.ty)));
 }
 
 /// Synchronize logic with `Type.isFnOrHasRuntimeBits`.
@@ -35951,7 +35951,7 @@ fn unionFieldIndex(
     const union_obj = mod.typeToUnion(union_ty).?;
     const field_index_usize = union_obj.fields.getIndex(field_name) orelse
         return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
-    return @intCast(u32, field_index_usize);
+    return @as(u32, @intCast(field_index_usize));
 }
 
 fn structFieldIndex(
@@ -35969,7 +35969,7 @@ fn structFieldIndex(
         const struct_obj = mod.typeToStruct(struct_ty).?;
         const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
             return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
-        return @intCast(u32, field_index_usize);
+        return @as(u32, @intCast(field_index_usize));
     }
 }
 
@@ -35983,12 +35983,12 @@ fn anonStructFieldIndex(
     const mod = sema.mod;
     switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
         .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| {
-            if (name == field_name) return @intCast(u32, i);
+            if (name == field_name) return @as(u32, @intCast(i));
         },
         .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
             for (struct_obj.fields.keys(), 0..) |name, i| {
                 if (name == field_name) {
-                    return @intCast(u32, i);
+                    return @as(u32, @intCast(i));
                 }
             }
         },
@@ -36586,9 +36586,9 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
         if (!is_packed) break :blk .{};
 
         break :blk .{
-            .host_size = @intCast(u16, parent_ty.arrayLen(mod)),
-            .alignment = @intCast(u32, parent_ty.abiAlignment(mod)),
-            .vector_index = if (offset) |some| @enumFromInt(VI, some) else .runtime,
+            .host_size = @as(u16, @intCast(parent_ty.arrayLen(mod))),
+            .alignment = @as(u32, @intCast(parent_ty.abiAlignment(mod))),
+            .vector_index = if (offset) |some| @as(VI, @enumFromInt(some)) else .runtime,
         };
     } else .{};
 
@@ -36607,10 +36607,10 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
         // The resulting pointer is aligned to the gcd of the offset (an
         // arbitrary number) and the alignment factor (always a power of two,
         // non-zero).
-        const new_align = @enumFromInt(Alignment, @min(
+        const new_align = @as(Alignment, @enumFromInt(@min(
             @ctz(addend),
             @intFromEnum(ptr_info.flags.alignment),
-        ));
+        )));
         assert(new_align != .none);
         break :a new_align;
     };
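// A minimal sketch (not part of this diff) of the rewrite pattern applied in
// the hunks above: the old cast builtins took the destination type as a first
// argument, while the new single-argument forms infer it from the result
// location, so call sites without a typed result location are wrapped in
// `@as`. The names `Color`, `raw`, `color`, and `byte` are hypothetical,
// chosen only for illustration.
const Color = enum(u32) { red, green, blue };

test "two-argument casts become @as-wrapped single-argument casts" {
    const raw: u32 = 2;
    // old: const color = @enumFromInt(Color, raw);
    // old: const byte = @intCast(u8, raw);
    const color = @as(Color, @enumFromInt(raw));
    const byte = @as(u8, @intCast(raw));
    _ = color;
    _ = byte;
}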
src/tracy.zig
@@ -132,7 +132,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
         }
 
         fn allocFn(ptr: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr));
+            const self: *Self = @ptrCast(@alignCast(ptr));
             const result = self.parent_allocator.rawAlloc(len, ptr_align, ret_addr);
             if (result) |data| {
                 if (len != 0) {
@@ -149,7 +149,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
         }
 
         fn resizeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr));
+            const self: *Self = @ptrCast(@alignCast(ptr));
             if (self.parent_allocator.rawResize(buf, buf_align, new_len, ret_addr)) {
                 if (name) |n| {
                     freeNamed(buf.ptr, n);
@@ -168,7 +168,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
         }
 
         fn freeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
-            const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr));
+            const self: *Self = @ptrCast(@alignCast(ptr));
             self.parent_allocator.rawFree(buf, buf_align, ret_addr);
             // This condition handles `free` being called on an empty slice that was never allocated.
             // Example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}`.
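// A minimal sketch (not part of this diff) of the `@alignCast` pattern used
// in the allocator hunks above: both builtins drop their type arguments, and
// the destination type moves into the declaration's (or return's) type
// annotation, from which `@ptrCast(@alignCast(ptr))` infers both the target
// type and its alignment. `Self` here is a hypothetical stand-in.
const Self = struct { count: u32 };

fn fromOpaque(ptr: *anyopaque) *Self {
    // old: return @ptrCast(*Self, @alignCast(@alignOf(Self), ptr));
    return @ptrCast(@alignCast(ptr));
}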
src/translate_c.zig
@@ -467,7 +467,7 @@ fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void {
         const entity = it.deref();
         switch (entity.getKind()) {
             .MacroDefinitionKind => {
-                const macro = @ptrCast(*clang.MacroDefinitionRecord, entity);
+                const macro = @as(*clang.MacroDefinitionRecord, @ptrCast(entity));
                 const raw_name = macro.getName_getNameStart();
                 const name = try c.str(raw_name);
 
@@ -481,13 +481,13 @@ fn prepopulateGlobalNameTable(ast_unit: *clang.ASTUnit, c: *Context) !void {
 }
 
 fn declVisitorNamesOnlyC(context: ?*anyopaque, decl: *const clang.Decl) callconv(.C) bool {
-    const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context));
+    const c: *Context = @ptrCast(@alignCast(context));
     declVisitorNamesOnly(c, decl) catch return false;
     return true;
 }
 
 fn declVisitorC(context: ?*anyopaque, decl: *const clang.Decl) callconv(.C) bool {
-    const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context));
+    const c: *Context = @ptrCast(@alignCast(context));
     declVisitor(c, decl) catch return false;
     return true;
 }
@@ -499,37 +499,37 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void {
 
         // Check for typedefs with unnamed enum/record child types.
         if (decl.getKind() == .Typedef) {
-            const typedef_decl = @ptrCast(*const clang.TypedefNameDecl, decl);
+            const typedef_decl = @as(*const clang.TypedefNameDecl, @ptrCast(decl));
             var child_ty = typedef_decl.getUnderlyingType().getTypePtr();
             const addr: usize = while (true) switch (child_ty.getTypeClass()) {
                 .Enum => {
-                    const enum_ty = @ptrCast(*const clang.EnumType, child_ty);
+                    const enum_ty = @as(*const clang.EnumType, @ptrCast(child_ty));
                     const enum_decl = enum_ty.getDecl();
                     // check if this decl is unnamed
-                    if (@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin()[0] != 0) return;
+                    if (@as(*const clang.NamedDecl, @ptrCast(enum_decl)).getName_bytes_begin()[0] != 0) return;
                     break @intFromPtr(enum_decl.getCanonicalDecl());
                 },
                 .Record => {
-                    const record_ty = @ptrCast(*const clang.RecordType, child_ty);
+                    const record_ty = @as(*const clang.RecordType, @ptrCast(child_ty));
                     const record_decl = record_ty.getDecl();
                     // check if this decl is unnamed
-                    if (@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin()[0] != 0) return;
+                    if (@as(*const clang.NamedDecl, @ptrCast(record_decl)).getName_bytes_begin()[0] != 0) return;
                     break @intFromPtr(record_decl.getCanonicalDecl());
                 },
                 .Elaborated => {
-                    const elaborated_ty = @ptrCast(*const clang.ElaboratedType, child_ty);
+                    const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(child_ty));
                     child_ty = elaborated_ty.getNamedType().getTypePtr();
                 },
                 .Decayed => {
-                    const decayed_ty = @ptrCast(*const clang.DecayedType, child_ty);
+                    const decayed_ty = @as(*const clang.DecayedType, @ptrCast(child_ty));
                     child_ty = decayed_ty.getDecayedType().getTypePtr();
                 },
                 .Attributed => {
-                    const attributed_ty = @ptrCast(*const clang.AttributedType, child_ty);
+                    const attributed_ty = @as(*const clang.AttributedType, @ptrCast(child_ty));
                     child_ty = attributed_ty.getEquivalentType().getTypePtr();
                 },
                 .MacroQualified => {
-                    const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, child_ty);
+                    const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(child_ty));
                     child_ty = macroqualified_ty.getModifiedType().getTypePtr();
                 },
                 else => return,
@@ -552,25 +552,25 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void {
 fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void {
     switch (decl.getKind()) {
         .Function => {
-            return visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl));
+            return visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl)));
         },
         .Typedef => {
-            try transTypeDef(c, &c.global_scope.base, @ptrCast(*const clang.TypedefNameDecl, decl));
+            try transTypeDef(c, &c.global_scope.base, @as(*const clang.TypedefNameDecl, @ptrCast(decl)));
         },
         .Enum => {
-            try transEnumDecl(c, &c.global_scope.base, @ptrCast(*const clang.EnumDecl, decl));
+            try transEnumDecl(c, &c.global_scope.base, @as(*const clang.EnumDecl, @ptrCast(decl)));
         },
         .Record => {
-            try transRecordDecl(c, &c.global_scope.base, @ptrCast(*const clang.RecordDecl, decl));
+            try transRecordDecl(c, &c.global_scope.base, @as(*const clang.RecordDecl, @ptrCast(decl)));
         },
         .Var => {
-            return visitVarDecl(c, @ptrCast(*const clang.VarDecl, decl), null);
+            return visitVarDecl(c, @as(*const clang.VarDecl, @ptrCast(decl)), null);
         },
         .Empty => {
             // Do nothing
         },
         .FileScopeAsm => {
-            try transFileScopeAsm(c, &c.global_scope.base, @ptrCast(*const clang.FileScopeAsmDecl, decl));
+            try transFileScopeAsm(c, &c.global_scope.base, @as(*const clang.FileScopeAsmDecl, @ptrCast(decl)));
         },
         else => {
             const decl_name = try c.str(decl.getDeclKindName());
@@ -595,7 +595,7 @@ fn transFileScopeAsm(c: *Context, scope: *Scope, file_scope_asm: *const clang.Fi
 }
 
 fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
-    const fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin());
+    const fn_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(fn_decl)).getName_bytes_begin());
     if (c.global_scope.sym_table.contains(fn_name))
         return; // Avoid processing this decl twice
 
@@ -630,22 +630,22 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
 
         switch (fn_type.getTypeClass()) {
             .Attributed => {
-                const attr_type = @ptrCast(*const clang.AttributedType, fn_type);
+                const attr_type = @as(*const clang.AttributedType, @ptrCast(fn_type));
                 fn_qt = attr_type.getEquivalentType();
             },
             .Paren => {
-                const paren_type = @ptrCast(*const clang.ParenType, fn_type);
+                const paren_type = @as(*const clang.ParenType, @ptrCast(fn_type));
                 fn_qt = paren_type.getInnerType();
             },
             else => break fn_type,
         }
     };
-    const fn_ty = @ptrCast(*const clang.FunctionType, fn_type);
+    const fn_ty = @as(*const clang.FunctionType, @ptrCast(fn_type));
     const return_qt = fn_ty.getReturnType();
 
     const proto_node = switch (fn_type.getTypeClass()) {
         .FunctionProto => blk: {
-            const fn_proto_type = @ptrCast(*const clang.FunctionProtoType, fn_type);
+            const fn_proto_type = @as(*const clang.FunctionProtoType, @ptrCast(fn_type));
             if (has_body and fn_proto_type.isVariadic()) {
                 decl_ctx.has_body = false;
                 decl_ctx.storage_class = .Extern;
@@ -661,7 +661,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
             };
         },
         .FunctionNoProto => blk: {
-            const fn_no_proto_type = @ptrCast(*const clang.FunctionType, fn_type);
+            const fn_no_proto_type = @as(*const clang.FunctionType, @ptrCast(fn_type));
             break :blk transFnNoProto(c, fn_no_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) {
                 error.UnsupportedType => {
                     return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function", .{});
@@ -714,7 +714,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
         param_id += 1;
     }
 
-    const casted_body = @ptrCast(*const clang.CompoundStmt, body_stmt);
+    const casted_body = @as(*const clang.CompoundStmt, @ptrCast(body_stmt));
     transCompoundStmtInline(c, casted_body, &block_scope) catch |err| switch (err) {
         error.OutOfMemory => |e| return e,
         error.UnsupportedTranslation,
@@ -788,7 +788,7 @@ fn stringLiteralToCharStar(c: *Context, str: Node) Error!Node {
 
 /// If mangled_name is not null, this var decl was declared in a block scope.
 fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]const u8) Error!void {
-    const var_name = mangled_name orelse try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin());
+    const var_name = mangled_name orelse try c.str(@as(*const clang.NamedDecl, @ptrCast(var_decl)).getName_bytes_begin());
     if (c.global_scope.sym_table.contains(var_name))
         return; // Avoid processing this decl twice
 
@@ -830,7 +830,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
     if (has_init) trans_init: {
         if (decl_init) |expr| {
             const node_or_error = if (expr.getStmtClass() == .StringLiteralClass)
-                transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node)
+                transStringLiteralInitializer(c, @as(*const clang.StringLiteral, @ptrCast(expr)), type_node)
             else
                 transExprCoercing(c, scope, expr, .used);
             init_node = node_or_error catch |err| switch (err) {
@@ -918,7 +918,7 @@ fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNa
     const toplevel = scope.id == .root;
     const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
 
-    var name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin());
+    var name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(typedef_decl)).getName_bytes_begin());
     try c.typedefs.put(c.gpa, name, {});
 
     if (builtin_typedef_map.get(name)) |builtin| {
@@ -981,7 +981,7 @@ fn buildFlexibleArrayFn(
         .is_noalias = false,
     };
 
-    const array_type = @ptrCast(*const clang.ArrayType, field_qt.getTypePtr());
+    const array_type = @as(*const clang.ArrayType, @ptrCast(field_qt.getTypePtr()));
     const element_qt = array_type.getElementType();
     const element_type = try transQualType(c, scope, element_qt, field_decl.getLocation());
 
@@ -1077,7 +1077,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
 
     var is_union = false;
     var container_kind_name: []const u8 = undefined;
-    var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, record_decl).getName_bytes_begin());
+    var bare_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(record_decl)).getName_bytes_begin());
 
     if (record_decl.isUnion()) {
         container_kind_name = "union";
@@ -1138,7 +1138,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
             }
 
             var is_anon = false;
-            var field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin());
+            var field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin());
             if (field_decl.isAnonymousStructOrUnion() or field_name.len == 0) {
                 // Context.getMangle() is not used here because doing so causes unpredictable field names for anonymous fields.
                 field_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{unnamed_field_count});
@@ -1167,7 +1167,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
             };
 
             const alignment = if (has_flexible_array and field_decl.getFieldIndex() == 0)
-                @intCast(c_uint, record_alignment)
+                @as(c_uint, @intCast(record_alignment))
             else
                 ClangAlignment.forField(c, field_decl, record_def).zigAlignment();
 
@@ -1224,7 +1224,7 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E
     const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
 
     var is_unnamed = false;
-    var bare_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_decl).getName_bytes_begin());
+    var bare_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(enum_decl)).getName_bytes_begin());
     var name = bare_name;
     if (c.unnamed_typedefs.get(@intFromPtr(enum_decl.getCanonicalDecl()))) |typedef_name| {
         bare_name = typedef_name;
@@ -1244,13 +1244,13 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E
         const end_it = enum_def.enumerator_end();
         while (it.neq(end_it)) : (it = it.next()) {
             const enum_const = it.deref();
-            var enum_val_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, enum_const).getName_bytes_begin());
+            var enum_val_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(enum_const)).getName_bytes_begin());
             if (!toplevel) {
                 enum_val_name = try bs.makeMangledName(c, enum_val_name);
             }
 
-            const enum_const_qt = @ptrCast(*const clang.ValueDecl, enum_const).getType();
-            const enum_const_loc = @ptrCast(*const clang.Decl, enum_const).getLocation();
+            const enum_const_qt = @as(*const clang.ValueDecl, @ptrCast(enum_const)).getType();
+            const enum_const_loc = @as(*const clang.Decl, @ptrCast(enum_const)).getLocation();
             const enum_const_type_node: ?Node = transQualType(c, scope, enum_const_qt, enum_const_loc) catch |err| switch (err) {
                 error.UnsupportedType => null,
                 else => |e| return e,
@@ -1325,77 +1325,77 @@ fn transStmt(
 ) TransError!Node {
     const sc = stmt.getStmtClass();
     switch (sc) {
-        .BinaryOperatorClass => return transBinaryOperator(c, scope, @ptrCast(*const clang.BinaryOperator, stmt), result_used),
-        .CompoundStmtClass => return transCompoundStmt(c, scope, @ptrCast(*const clang.CompoundStmt, stmt)),
-        .CStyleCastExprClass => return transCStyleCastExprClass(c, scope, @ptrCast(*const clang.CStyleCastExpr, stmt), result_used),
-        .DeclStmtClass => return transDeclStmt(c, scope, @ptrCast(*const clang.DeclStmt, stmt)),
-        .DeclRefExprClass => return transDeclRefExpr(c, scope, @ptrCast(*const clang.DeclRefExpr, stmt)),
-        .ImplicitCastExprClass => return transImplicitCastExpr(c, scope, @ptrCast(*const clang.ImplicitCastExpr, stmt), result_used),
-        .IntegerLiteralClass => return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, stmt), result_used, .with_as),
-        .ReturnStmtClass => return transReturnStmt(c, scope, @ptrCast(*const clang.ReturnStmt, stmt)),
-        .StringLiteralClass => return transStringLiteral(c, scope, @ptrCast(*const clang.StringLiteral, stmt), result_used),
+        .BinaryOperatorClass => return transBinaryOperator(c, scope, @as(*const clang.BinaryOperator, @ptrCast(stmt)), result_used),
+        .CompoundStmtClass => return transCompoundStmt(c, scope, @as(*const clang.CompoundStmt, @ptrCast(stmt))),
+        .CStyleCastExprClass => return transCStyleCastExprClass(c, scope, @as(*const clang.CStyleCastExpr, @ptrCast(stmt)), result_used),
+        .DeclStmtClass => return transDeclStmt(c, scope, @as(*const clang.DeclStmt, @ptrCast(stmt))),
+        .DeclRefExprClass => return transDeclRefExpr(c, scope, @as(*const clang.DeclRefExpr, @ptrCast(stmt))),
+        .ImplicitCastExprClass => return transImplicitCastExpr(c, scope, @as(*const clang.ImplicitCastExpr, @ptrCast(stmt)), result_used),
+        .IntegerLiteralClass => return transIntegerLiteral(c, scope, @as(*const clang.IntegerLiteral, @ptrCast(stmt)), result_used, .with_as),
+        .ReturnStmtClass => return transReturnStmt(c, scope, @as(*const clang.ReturnStmt, @ptrCast(stmt))),
+        .StringLiteralClass => return transStringLiteral(c, scope, @as(*const clang.StringLiteral, @ptrCast(stmt)), result_used),
         .ParenExprClass => {
-            const expr = try transExpr(c, scope, @ptrCast(*const clang.ParenExpr, stmt).getSubExpr(), .used);
+            const expr = try transExpr(c, scope, @as(*const clang.ParenExpr, @ptrCast(stmt)).getSubExpr(), .used);
             return maybeSuppressResult(c, result_used, expr);
         },
-        .InitListExprClass => return transInitListExpr(c, scope, @ptrCast(*const clang.InitListExpr, stmt), result_used),
-        .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @ptrCast(*const clang.Expr, stmt)),
-        .IfStmtClass => return transIfStmt(c, scope, @ptrCast(*const clang.IfStmt, stmt)),
-        .WhileStmtClass => return transWhileLoop(c, scope, @ptrCast(*const clang.WhileStmt, stmt)),
-        .DoStmtClass => return transDoWhileLoop(c, scope, @ptrCast(*const clang.DoStmt, stmt)),
+        .InitListExprClass => return transInitListExpr(c, scope, @as(*const clang.InitListExpr, @ptrCast(stmt)), result_used),
+        .ImplicitValueInitExprClass => return transImplicitValueInitExpr(c, scope, @as(*const clang.Expr, @ptrCast(stmt))),
+        .IfStmtClass => return transIfStmt(c, scope, @as(*const clang.IfStmt, @ptrCast(stmt))),
+        .WhileStmtClass => return transWhileLoop(c, scope, @as(*const clang.WhileStmt, @ptrCast(stmt))),
+        .DoStmtClass => return transDoWhileLoop(c, scope, @as(*const clang.DoStmt, @ptrCast(stmt))),
         .NullStmtClass => {
             return Tag.empty_block.init();
         },
         .ContinueStmtClass => return Tag.@"continue".init(),
         .BreakStmtClass => return Tag.@"break".init(),
-        .ForStmtClass => return transForLoop(c, scope, @ptrCast(*const clang.ForStmt, stmt)),
-        .FloatingLiteralClass => return transFloatingLiteral(c, @ptrCast(*const clang.FloatingLiteral, stmt), result_used),
+        .ForStmtClass => return transForLoop(c, scope, @as(*const clang.ForStmt, @ptrCast(stmt))),
+        .FloatingLiteralClass => return transFloatingLiteral(c, @as(*const clang.FloatingLiteral, @ptrCast(stmt)), result_used),
         .ConditionalOperatorClass => {
-            return transConditionalOperator(c, scope, @ptrCast(*const clang.ConditionalOperator, stmt), result_used);
+            return transConditionalOperator(c, scope, @as(*const clang.ConditionalOperator, @ptrCast(stmt)), result_used);
         },
         .BinaryConditionalOperatorClass => {
-            return transBinaryConditionalOperator(c, scope, @ptrCast(*const clang.BinaryConditionalOperator, stmt), result_used);
+            return transBinaryConditionalOperator(c, scope, @as(*const clang.BinaryConditionalOperator, @ptrCast(stmt)), result_used);
         },
-        .SwitchStmtClass => return transSwitch(c, scope, @ptrCast(*const clang.SwitchStmt, stmt)),
+        .SwitchStmtClass => return transSwitch(c, scope, @as(*const clang.SwitchStmt, @ptrCast(stmt))),
         .CaseStmtClass, .DefaultStmtClass => {
             return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "TODO complex switch", .{});
         },
-        .ConstantExprClass => return transConstantExpr(c, scope, @ptrCast(*const clang.Expr, stmt), result_used),
-        .PredefinedExprClass => return transPredefinedExpr(c, scope, @ptrCast(*const clang.PredefinedExpr, stmt), result_used),
-        .CharacterLiteralClass => return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, stmt), result_used, .with_as),
-        .StmtExprClass => return transStmtExpr(c, scope, @ptrCast(*const clang.StmtExpr, stmt), result_used),
-        .MemberExprClass => return transMemberExpr(c, scope, @ptrCast(*const clang.MemberExpr, stmt), result_used),
-        .ArraySubscriptExprClass => return transArrayAccess(c, scope, @ptrCast(*const clang.ArraySubscriptExpr, stmt), result_used),
-        .CallExprClass => return transCallExpr(c, scope, @ptrCast(*const clang.CallExpr, stmt), result_used),
-        .UnaryExprOrTypeTraitExprClass => return transUnaryExprOrTypeTraitExpr(c, scope, @ptrCast(*const clang.UnaryExprOrTypeTraitExpr, stmt), result_used),
-        .UnaryOperatorClass => return transUnaryOperator(c, scope, @ptrCast(*const clang.UnaryOperator, stmt), result_used),
-        .CompoundAssignOperatorClass => return transCompoundAssignOperator(c, scope, @ptrCast(*const clang.CompoundAssignOperator, stmt), result_used),
+        .ConstantExprClass => return transConstantExpr(c, scope, @as(*const clang.Expr, @ptrCast(stmt)), result_used),
+        .PredefinedExprClass => return transPredefinedExpr(c, scope, @as(*const clang.PredefinedExpr, @ptrCast(stmt)), result_used),
+        .CharacterLiteralClass => return transCharLiteral(c, scope, @as(*const clang.CharacterLiteral, @ptrCast(stmt)), result_used, .with_as),
+        .StmtExprClass => return transStmtExpr(c, scope, @as(*const clang.StmtExpr, @ptrCast(stmt)), result_used),
+        .MemberExprClass => return transMemberExpr(c, scope, @as(*const clang.MemberExpr, @ptrCast(stmt)), result_used),
+        .ArraySubscriptExprClass => return transArrayAccess(c, scope, @as(*const clang.ArraySubscriptExpr, @ptrCast(stmt)), result_used),
+        .CallExprClass => return transCallExpr(c, scope, @as(*const clang.CallExpr, @ptrCast(stmt)), result_used),
+        .UnaryExprOrTypeTraitExprClass => return transUnaryExprOrTypeTraitExpr(c, scope, @as(*const clang.UnaryExprOrTypeTraitExpr, @ptrCast(stmt)), result_used),
+        .UnaryOperatorClass => return transUnaryOperator(c, scope, @as(*const clang.UnaryOperator, @ptrCast(stmt)), result_used),
+        .CompoundAssignOperatorClass => return transCompoundAssignOperator(c, scope, @as(*const clang.CompoundAssignOperator, @ptrCast(stmt)), result_used),
         .OpaqueValueExprClass => {
-            const source_expr = @ptrCast(*const clang.OpaqueValueExpr, stmt).getSourceExpr().?;
+            const source_expr = @as(*const clang.OpaqueValueExpr, @ptrCast(stmt)).getSourceExpr().?;
             const expr = try transExpr(c, scope, source_expr, .used);
             return maybeSuppressResult(c, result_used, expr);
         },
-        .OffsetOfExprClass => return transOffsetOfExpr(c, @ptrCast(*const clang.OffsetOfExpr, stmt), result_used),
+        .OffsetOfExprClass => return transOffsetOfExpr(c, @as(*const clang.OffsetOfExpr, @ptrCast(stmt)), result_used),
         .CompoundLiteralExprClass => {
-            const compound_literal = @ptrCast(*const clang.CompoundLiteralExpr, stmt);
+            const compound_literal = @as(*const clang.CompoundLiteralExpr, @ptrCast(stmt));
             return transExpr(c, scope, compound_literal.getInitializer(), result_used);
         },
         .GenericSelectionExprClass => {
-            const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, stmt);
+            const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(stmt));
             return transExpr(c, scope, gen_sel.getResultExpr(), result_used);
         },
         .ConvertVectorExprClass => {
-            const conv_vec = @ptrCast(*const clang.ConvertVectorExpr, stmt);
+            const conv_vec = @as(*const clang.ConvertVectorExpr, @ptrCast(stmt));
             const conv_vec_node = try transConvertVectorExpr(c, scope, conv_vec);
             return maybeSuppressResult(c, result_used, conv_vec_node);
         },
         .ShuffleVectorExprClass => {
-            const shuffle_vec_expr = @ptrCast(*const clang.ShuffleVectorExpr, stmt);
+            const shuffle_vec_expr = @as(*const clang.ShuffleVectorExpr, @ptrCast(stmt));
             const shuffle_vec_node = try transShuffleVectorExpr(c, scope, shuffle_vec_expr);
             return maybeSuppressResult(c, result_used, shuffle_vec_node);
         },
         .ChooseExprClass => {
-            const choose_expr = @ptrCast(*const clang.ChooseExpr, stmt);
+            const choose_expr = @as(*const clang.ChooseExpr, @ptrCast(stmt));
             return transExpr(c, scope, choose_expr.getChosenSubExpr(), result_used);
         },
         // When adding new cases here, see comment for maybeBlockify()
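// A minimal sketch (not part of this diff) of the downcast pattern in the
// `transStmt` dispatch above: these pointer casts appear as call arguments,
// where no result location supplies a type, so each `@ptrCast` stays wrapped
// in `@as`. `Stmt` and `BinaryOperator` below are hypothetical opaque
// stand-ins for the `clang.*` handle types.
const Stmt = opaque {};
const BinaryOperator = opaque {};

fn transBinOp(op: *const BinaryOperator) void {
    _ = op;
}

fn dispatch(stmt: *const Stmt) void {
    // old: transBinOp(@ptrCast(*const BinaryOperator, stmt));
    transBinOp(@as(*const BinaryOperator, @ptrCast(stmt)));
}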
@@ -1421,21 +1421,21 @@ fn transConvertVectorExpr(
     scope: *Scope,
     expr: *const clang.ConvertVectorExpr,
 ) TransError!Node {
-    const base_stmt = @ptrCast(*const clang.Stmt, expr);
+    const base_stmt = @as(*const clang.Stmt, @ptrCast(expr));
 
     var block_scope = try Scope.Block.init(c, scope, true);
     defer block_scope.deinit();
 
     const src_expr = expr.getSrcExpr();
     const src_type = qualTypeCanon(src_expr.getType());
-    const src_vector_ty = @ptrCast(*const clang.VectorType, src_type);
+    const src_vector_ty = @as(*const clang.VectorType, @ptrCast(src_type));
     const src_element_qt = src_vector_ty.getElementType();
 
     const src_expr_node = try transExpr(c, &block_scope.base, src_expr, .used);
 
     const dst_qt = expr.getTypeSourceInfo_getType();
     const dst_type_node = try transQualType(c, &block_scope.base, dst_qt, base_stmt.getBeginLoc());
-    const dst_vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(dst_qt));
+    const dst_vector_ty = @as(*const clang.VectorType, @ptrCast(qualTypeCanon(dst_qt)));
     const num_elements = dst_vector_ty.getNumElements();
     const dst_element_qt = dst_vector_ty.getElementType();
 
@@ -1490,7 +1490,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
     const init_list = try c.arena.alloc(Node, mask_len);
 
     for (init_list, 0..) |*init, i| {
-        const index_expr = try transExprCoercing(c, scope, expr.getExpr(@intCast(c_uint, i + 2)), .used);
+        const index_expr = try transExprCoercing(c, scope, expr.getExpr(@as(c_uint, @intCast(i + 2))), .used);
         const converted_index = try Tag.helpers_shuffle_vector_index.create(c.arena, .{ .lhs = index_expr, .rhs = vector_len });
         init.* = converted_index;
     }
@@ -1514,7 +1514,7 @@ fn transShuffleVectorExpr(
     scope: *Scope,
     expr: *const clang.ShuffleVectorExpr,
 ) TransError!Node {
-    const base_expr = @ptrCast(*const clang.Expr, expr);
+    const base_expr = @as(*const clang.Expr, @ptrCast(expr));
     const num_subexprs = expr.getNumSubExprs();
     if (num_subexprs < 3) return fail(c, error.UnsupportedTranslation, base_expr.getBeginLoc(), "ShuffleVector needs at least 1 index", .{});
 
@@ -1545,7 +1545,7 @@ fn transSimpleOffsetOfExpr(c: *Context, expr: *const clang.OffsetOfExpr) TransEr
             if (c.decl_table.get(@intFromPtr(record_decl.getCanonicalDecl()))) |type_name| {
                 const type_node = try Tag.type.create(c.arena, type_name);
 
-                var raw_field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin());
+                var raw_field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin());
                 const quoted_field_name = try std.fmt.allocPrint(c.arena, "\"{s}\"", .{raw_field_name});
                 const field_name_node = try Tag.string_literal.create(c.arena, quoted_field_name);
 
@@ -1829,7 +1829,7 @@ fn transCStyleCastExprClass(
     stmt: *const clang.CStyleCastExpr,
     result_used: ResultUsed,
 ) TransError!Node {
-    const cast_expr = @ptrCast(*const clang.CastExpr, stmt);
+    const cast_expr = @as(*const clang.CastExpr, @ptrCast(stmt));
     const sub_expr = stmt.getSubExpr();
     const dst_type = stmt.getType();
     const src_type = sub_expr.getType();
@@ -1838,7 +1838,7 @@ fn transCStyleCastExprClass(
 
     const cast_node = if (cast_expr.getCastKind() == .ToUnion) blk: {
         const field_decl = cast_expr.getTargetFieldForToUnionCast(dst_type, src_type).?; // C syntax error if target field is null
-        const field_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin());
+        const field_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin());
 
         const union_ty = try transQualType(c, scope, dst_type, loc);
 
@@ -1923,12 +1923,12 @@ fn transDeclStmtOne(
 ) TransError!void {
     switch (decl.getKind()) {
         .Var => {
-            const var_decl = @ptrCast(*const clang.VarDecl, decl);
+            const var_decl = @as(*const clang.VarDecl, @ptrCast(decl));
             const decl_init = var_decl.getInit();
             const loc = decl.getLocation();
 
             const qual_type = var_decl.getTypeSourceInfo_getType();
-            const name = try c.str(@ptrCast(*const clang.NamedDecl, var_decl).getName_bytes_begin());
+            const name = try c.str(@as(*const clang.NamedDecl, @ptrCast(var_decl)).getName_bytes_begin());
             const mangled_name = try block_scope.makeMangledName(c, name);
 
             if (var_decl.getStorageClass() == .Extern) {
@@ -1945,7 +1945,7 @@ fn transDeclStmtOne(
 
             var init_node = if (decl_init) |expr|
                 if (expr.getStmtClass() == .StringLiteralClass)
-                    try transStringLiteralInitializer(c, @ptrCast(*const clang.StringLiteral, expr), type_node)
+                    try transStringLiteralInitializer(c, @as(*const clang.StringLiteral, @ptrCast(expr)), type_node)
                 else
                     try transExprCoercing(c, scope, expr, .used)
             else if (is_static_local)
@@ -1980,7 +1980,7 @@ fn transDeclStmtOne(
 
             const cleanup_attr = var_decl.getCleanupAttribute();
             if (cleanup_attr) |fn_decl| {
-                const cleanup_fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin());
+                const cleanup_fn_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(fn_decl)).getName_bytes_begin());
                 const fn_id = try Tag.identifier.create(c.arena, cleanup_fn_name);
 
                 const varname = try Tag.identifier.create(c.arena, mangled_name);
@@ -1995,16 +1995,16 @@ fn transDeclStmtOne(
             }
         },
         .Typedef => {
-            try transTypeDef(c, scope, @ptrCast(*const clang.TypedefNameDecl, decl));
+            try transTypeDef(c, scope, @as(*const clang.TypedefNameDecl, @ptrCast(decl)));
         },
         .Record => {
-            try transRecordDecl(c, scope, @ptrCast(*const clang.RecordDecl, decl));
+            try transRecordDecl(c, scope, @as(*const clang.RecordDecl, @ptrCast(decl)));
         },
         .Enum => {
-            try transEnumDecl(c, scope, @ptrCast(*const clang.EnumDecl, decl));
+            try transEnumDecl(c, scope, @as(*const clang.EnumDecl, @ptrCast(decl)));
         },
         .Function => {
-            try visitFnDecl(c, @ptrCast(*const clang.FunctionDecl, decl));
+            try visitFnDecl(c, @as(*const clang.FunctionDecl, @ptrCast(decl)));
         },
         else => {
             const decl_name = try c.str(decl.getDeclKindName());
@@ -2030,15 +2030,15 @@ fn transDeclRefExpr(
     expr: *const clang.DeclRefExpr,
 ) TransError!Node {
     const value_decl = expr.getDecl();
-    const name = try c.str(@ptrCast(*const clang.NamedDecl, value_decl).getName_bytes_begin());
+    const name = try c.str(@as(*const clang.NamedDecl, @ptrCast(value_decl)).getName_bytes_begin());
     const mangled_name = scope.getAlias(name);
-    var ref_expr = if (cIsFunctionDeclRef(@ptrCast(*const clang.Expr, expr)))
+    var ref_expr = if (cIsFunctionDeclRef(@as(*const clang.Expr, @ptrCast(expr))))
         try Tag.fn_identifier.create(c.arena, mangled_name)
     else
         try Tag.identifier.create(c.arena, mangled_name);
 
-    if (@ptrCast(*const clang.Decl, value_decl).getKind() == .Var) {
-        const var_decl = @ptrCast(*const clang.VarDecl, value_decl);
+    if (@as(*const clang.Decl, @ptrCast(value_decl)).getKind() == .Var) {
+        const var_decl = @as(*const clang.VarDecl, @ptrCast(value_decl));
         if (var_decl.isStaticLocal()) {
             ref_expr = try Tag.field_access.create(c.arena, .{
                 .lhs = ref_expr,
@@ -2057,7 +2057,7 @@ fn transImplicitCastExpr(
     result_used: ResultUsed,
 ) TransError!Node {
     const sub_expr = expr.getSubExpr();
-    const dest_type = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
+    const dest_type = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr)));
     const src_type = getExprQualType(c, sub_expr);
     switch (expr.getCastKind()) {
         .BitCast, .FloatingCast, .FloatingToIntegral, .IntegralToFloating, .IntegralCast, .PointerToIntegral, .IntegralToPointer => {
@@ -2111,7 +2111,7 @@ fn transImplicitCastExpr(
         else => |kind| return fail(
             c,
             error.UnsupportedTranslation,
-            @ptrCast(*const clang.Stmt, expr).getBeginLoc(),
+            @as(*const clang.Stmt, @ptrCast(expr)).getBeginLoc(),
             "unsupported CastKind {s}",
             .{@tagName(kind)},
         ),
@@ -2141,9 +2141,9 @@ fn transBoolExpr(
     expr: *const clang.Expr,
     used: ResultUsed,
 ) TransError!Node {
-    if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) {
+    if (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass() == .IntegerLiteralClass) {
         var signum: c_int = undefined;
-        if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) {
+        if (!(@as(*const clang.IntegerLiteral, @ptrCast(expr)).getSignum(&signum, c.clang_context))) {
             return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "invalid integer literal", .{});
         }
         const is_zero = signum == 0;
@@ -2168,20 +2168,20 @@ fn exprIsBooleanType(expr: *const clang.Expr) bool {
 fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool {
     switch (expr.getStmtClass()) {
         .StringLiteralClass => {
-            const string_lit = @ptrCast(*const clang.StringLiteral, expr);
+            const string_lit = @as(*const clang.StringLiteral, @ptrCast(expr));
             return string_lit.getCharByteWidth() == 1;
         },
         .PredefinedExprClass => return true,
         .UnaryOperatorClass => {
-            const op_expr = @ptrCast(*const clang.UnaryOperator, expr).getSubExpr();
+            const op_expr = @as(*const clang.UnaryOperator, @ptrCast(expr)).getSubExpr();
             return exprIsNarrowStringLiteral(op_expr);
         },
         .ParenExprClass => {
-            const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr();
+            const op_expr = @as(*const clang.ParenExpr, @ptrCast(expr)).getSubExpr();
             return exprIsNarrowStringLiteral(op_expr);
         },
         .GenericSelectionExprClass => {
-            const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr);
+            const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(expr));
             return exprIsNarrowStringLiteral(gen_sel.getResultExpr());
         },
         else => return false,
@@ -2190,11 +2190,11 @@ fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool {
 
 fn exprIsFlexibleArrayRef(c: *Context, expr: *const clang.Expr) bool {
     if (expr.getStmtClass() == .MemberExprClass) {
-        const member_expr = @ptrCast(*const clang.MemberExpr, expr);
+        const member_expr = @as(*const clang.MemberExpr, @ptrCast(expr));
         const member_decl = member_expr.getMemberDecl();
-        const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind();
+        const decl_kind = @as(*const clang.Decl, @ptrCast(member_decl)).getKind();
         if (decl_kind == .Field) {
-            const field_decl = @ptrCast(*const clang.FieldDecl, member_decl);
+            const field_decl = @as(*const clang.FieldDecl, @ptrCast(member_decl));
             return isFlexibleArrayFieldDecl(c, field_decl);
         }
     }
@@ -2229,7 +2229,7 @@ fn finishBoolExpr(
 ) TransError!Node {
     switch (ty.getTypeClass()) {
         .Builtin => {
-            const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
+            const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty));
 
             switch (builtin_ty.getKind()) {
                 .Bool => return node,
@@ -2273,7 +2273,7 @@ fn finishBoolExpr(
             return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.null_literal.init() });
         },
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
             const typedef_decl = typedef_ty.getDecl();
             const underlying_type = typedef_decl.getUnderlyingType();
             return finishBoolExpr(c, scope, loc, underlying_type.getTypePtr(), node, used);
@@ -2283,7 +2283,7 @@ fn finishBoolExpr(
             return Tag.not_equal.create(c.arena, .{ .lhs = node, .rhs = Tag.zero_literal.init() });
         },
         .Elaborated => {
-            const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
+            const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty));
             const named_type = elaborated_ty.getNamedType();
             return finishBoolExpr(c, scope, loc, named_type.getTypePtr(), node, used);
         },
@@ -2325,7 +2325,7 @@ fn transIntegerLiteral(
     // But the first step is to be correct, and the next step is to make the output more elegant.
 
     // @as(T, x)
-    const expr_base = @ptrCast(*const clang.Expr, expr);
+    const expr_base = @as(*const clang.Expr, @ptrCast(expr));
     const ty_node = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc());
     const rhs = try transCreateNodeAPInt(c, eval_result.Val.getInt());
     const as = try Tag.as.create(c.arena, .{ .lhs = ty_node, .rhs = rhs });
@@ -2374,7 +2374,7 @@ fn transStringLiteral(
             const str_type = @tagName(stmt.getKind());
             const name = try std.fmt.allocPrint(c.arena, "zig.{s}_string_{d}", .{ str_type, c.getMangle() });
 
-            const expr_base = @ptrCast(*const clang.Expr, stmt);
+            const expr_base = @as(*const clang.Expr, @ptrCast(stmt));
             const array_type = try transQualTypeInitialized(c, scope, expr_base.getType(), expr_base, expr_base.getBeginLoc());
             const lit_array = try transStringLiteralInitializer(c, stmt, array_type);
             const decl = try Tag.var_simple.create(c.arena, .{ .name = name, .init = lit_array });
@@ -2451,11 +2451,11 @@ fn transStringLiteralInitializer(
 /// both operands resolve to addresses. The C standard requires that both operands
 /// point to elements of the same array object, but we do not verify that here.
 fn cIsPointerDiffExpr(stmt: *const clang.BinaryOperator) bool {
-    const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS());
-    const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS());
+    const lhs = @as(*const clang.Stmt, @ptrCast(stmt.getLHS()));
+    const rhs = @as(*const clang.Stmt, @ptrCast(stmt.getRHS()));
     return stmt.getOpcode() == .Sub and
-        qualTypeIsPtr(@ptrCast(*const clang.Expr, lhs).getType()) and
-        qualTypeIsPtr(@ptrCast(*const clang.Expr, rhs).getType());
+        qualTypeIsPtr(@as(*const clang.Expr, @ptrCast(lhs)).getType()) and
+        qualTypeIsPtr(@as(*const clang.Expr, @ptrCast(rhs)).getType());
 }
 
 fn cIsEnum(qt: clang.QualType) bool {
@@ -2472,7 +2472,7 @@ fn cIsVector(qt: clang.QualType) bool {
 fn cIntTypeForEnum(enum_qt: clang.QualType) clang.QualType {
     assert(cIsEnum(enum_qt));
     const ty = enum_qt.getCanonicalType().getTypePtr();
-    const enum_ty = @ptrCast(*const clang.EnumType, ty);
+    const enum_ty = @as(*const clang.EnumType, @ptrCast(ty));
     const enum_decl = enum_ty.getDecl();
     return enum_decl.getIntegerType();
 }
@@ -2588,29 +2588,29 @@ fn transCCast(
 }
 
 fn transExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
-    return transStmt(c, scope, @ptrCast(*const clang.Stmt, expr), used);
+    return transStmt(c, scope, @as(*const clang.Stmt, @ptrCast(expr)), used);
 }
 
 /// Same as `transExpr`, but with the knowledge that the operand will be type
 /// coerced, making an `@as` wrapper redundant. Used to avoid such wrappers
 /// around integer literals.
 fn transExprCoercing(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
-    switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) {
+    switch (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass()) {
         .IntegerLiteralClass => {
-            return transIntegerLiteral(c, scope, @ptrCast(*const clang.IntegerLiteral, expr), .used, .no_as);
+            return transIntegerLiteral(c, scope, @as(*const clang.IntegerLiteral, @ptrCast(expr)), .used, .no_as);
         },
         .CharacterLiteralClass => {
-            return transCharLiteral(c, scope, @ptrCast(*const clang.CharacterLiteral, expr), .used, .no_as);
+            return transCharLiteral(c, scope, @as(*const clang.CharacterLiteral, @ptrCast(expr)), .used, .no_as);
         },
         .UnaryOperatorClass => {
-            const un_expr = @ptrCast(*const clang.UnaryOperator, expr);
+            const un_expr = @as(*const clang.UnaryOperator, @ptrCast(expr));
             if (un_expr.getOpcode() == .Extension) {
                 return transExprCoercing(c, scope, un_expr.getSubExpr(), used);
             }
         },
         .ImplicitCastExprClass => {
-            const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr);
+            const cast_expr = @as(*const clang.ImplicitCastExpr, @ptrCast(expr));
             const sub_expr = cast_expr.getSubExpr();
-            switch (@ptrCast(*const clang.Stmt, sub_expr).getStmtClass()) {
+            switch (@as(*const clang.Stmt, @ptrCast(sub_expr)).getStmtClass()) {
                 .IntegerLiteralClass, .CharacterLiteralClass => switch (cast_expr.getCastKind()) {
                     .IntegralToFloating => return transExprCoercing(c, scope, sub_expr, used),
                     .IntegralCast => {
@@ -2634,15 +2634,15 @@ fn literalFitsInType(c: *Context, expr: *const clang.Expr, qt: clang.QualType) b
     const is_signed = cIsSignedInteger(qt);
     const width_max_int = (@as(u64, 1) << math.lossyCast(u6, width - @intFromBool(is_signed))) - 1;
 
-    switch (@ptrCast(*const clang.Stmt, expr).getStmtClass()) {
+    switch (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass()) {
         .CharacterLiteralClass => {
-            const char_lit = @ptrCast(*const clang.CharacterLiteral, expr);
+            const char_lit = @as(*const clang.CharacterLiteral, @ptrCast(expr));
             const val = char_lit.getValue();
             // If the value does not exceed the max int, it fits.
             return val <= width_max_int;
         },
         .IntegerLiteralClass => {
-            const int_lit = @ptrCast(*const clang.IntegerLiteral, expr);
+            const int_lit = @as(*const clang.IntegerLiteral, @ptrCast(expr));
             var eval_result: clang.ExprEvalResult = undefined;
             if (!int_lit.EvaluateAsInt(&eval_result, c.clang_context)) {
                 return false;
@@ -2695,7 +2695,7 @@ fn transInitListExprRecord(
 
         // Generate the field assignment expression:
         //     .field_name = expr
-        var raw_name = try c.str(@ptrCast(*const clang.NamedDecl, field_decl).getName_bytes_begin());
+        var raw_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(field_decl)).getName_bytes_begin());
         if (field_decl.isAnonymousStructOrUnion()) {
             const name = c.decl_table.get(@intFromPtr(field_decl.getCanonicalDecl())).?;
             raw_name = try c.arena.dupe(u8, name);
@@ -2736,8 +2736,8 @@ fn transInitListExprArray(
     const child_qt = arr_type.getElementType();
     const child_type = try transQualType(c, scope, child_qt, loc);
     const init_count = expr.getNumInits();
-    assert(@ptrCast(*const clang.Type, arr_type).isConstantArrayType());
-    const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, arr_type);
+    assert(@as(*const clang.Type, @ptrCast(arr_type)).isConstantArrayType());
+    const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(arr_type));
     const size_ap_int = const_arr_ty.getSize();
     const all_count = size_ap_int.getLimitedValue(usize);
     const leftover_count = all_count - init_count;
@@ -2757,7 +2757,7 @@ fn transInitListExprArray(
         const init_list = try c.arena.alloc(Node, init_count);
 
         for (init_list, 0..) |*init, i| {
-            const elem_expr = expr.getInit(@intCast(c_uint, i));
+            const elem_expr = expr.getInit(@as(c_uint, @intCast(i)));
             init.* = try transExprCoercing(c, scope, elem_expr, .used);
         }
         const init_node = try Tag.array_init.create(c.arena, .{
@@ -2791,8 +2791,8 @@ fn transInitListExprVector(
     loc: clang.SourceLocation,
     expr: *const clang.InitListExpr,
 ) TransError!Node {
-    const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
-    const vector_ty = @ptrCast(*const clang.VectorType, qualTypeCanon(qt));
+    const qt = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr)));
+    const vector_ty = @as(*const clang.VectorType, @ptrCast(qualTypeCanon(qt)));
 
     const init_count = expr.getNumInits();
     const num_elements = vector_ty.getNumElements();
@@ -2822,7 +2822,7 @@ fn transInitListExprVector(
     var i: usize = 0;
     while (i < init_count) : (i += 1) {
         const mangled_name = try block_scope.makeMangledName(c, "tmp");
-        const init_expr = expr.getInit(@intCast(c_uint, i));
+        const init_expr = expr.getInit(@as(c_uint, @intCast(i)));
         const tmp_decl_node = try Tag.var_simple.create(c.arena, .{
             .name = mangled_name,
             .init = try transExpr(c, &block_scope.base, init_expr, .used),
@@ -2860,9 +2860,9 @@ fn transInitListExpr(
     expr: *const clang.InitListExpr,
     used: ResultUsed,
 ) TransError!Node {
-    const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
+    const qt = getExprQualType(c, @as(*const clang.Expr, @ptrCast(expr)));
     var qual_type = qt.getTypePtr();
-    const source_loc = @ptrCast(*const clang.Expr, expr).getBeginLoc();
+    const source_loc = @as(*const clang.Expr, @ptrCast(expr)).getBeginLoc();
 
     if (qualTypeWasDemotedToOpaque(c, qt)) {
         return fail(c, error.UnsupportedTranslation, source_loc, "cannot initialize opaque type", .{});
@@ -2900,7 +2900,7 @@ fn transZeroInitExpr(
 ) TransError!Node {
     switch (ty.getTypeClass()) {
         .Builtin => {
-            const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
+            const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty));
             switch (builtin_ty.getKind()) {
                 .Bool => return Tag.false_literal.init(),
                 .Char_U,
@@ -2929,7 +2929,7 @@ fn transZeroInitExpr(
         },
         .Pointer => return Tag.null_literal.init(),
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
             const typedef_decl = typedef_ty.getDecl();
             return transZeroInitExpr(
                 c,
@@ -2998,7 +2998,7 @@ fn transIfStmt(
         },
     };
     defer cond_scope.deinit();
-    const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
+    const cond_expr = @as(*const clang.Expr, @ptrCast(stmt.getCond()));
     const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
 
     const then_stmt = stmt.getThen();
@@ -3034,7 +3034,7 @@ fn transWhileLoop(
         },
     };
     defer cond_scope.deinit();
-    const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
+    const cond_expr = @as(*const clang.Expr, @ptrCast(stmt.getCond()));
     const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
 
     var loop_scope = Scope{
@@ -3063,7 +3063,7 @@ fn transDoWhileLoop(
         },
     };
     defer cond_scope.deinit();
-    const cond = try transBoolExpr(c, &cond_scope.base, @ptrCast(*const clang.Expr, stmt.getCond()), .used);
+    const cond = try transBoolExpr(c, &cond_scope.base, @as(*const clang.Expr, @ptrCast(stmt.getCond())), .used);
     const if_not_break = switch (cond.tag()) {
         .true_literal => {
             const body_node = try maybeBlockify(c, scope, stmt.getBody());
@@ -3184,7 +3184,7 @@ fn transSwitch(
 
     const body = stmt.getBody();
     assert(body.getStmtClass() == .CompoundStmtClass);
-    const compound_stmt = @ptrCast(*const clang.CompoundStmt, body);
+    const compound_stmt = @as(*const clang.CompoundStmt, @ptrCast(body));
     var it = compound_stmt.body_begin();
     const end_it = compound_stmt.body_end();
     // Iterate over switch body and collect all cases.
@@ -3211,12 +3211,12 @@ fn transSwitch(
             },
             .DefaultStmtClass => {
                 has_default = true;
-                const default_stmt = @ptrCast(*const clang.DefaultStmt, it[0]);
+                const default_stmt = @as(*const clang.DefaultStmt, @ptrCast(it[0]));
 
                 var sub = default_stmt.getSubStmt();
                 while (true) switch (sub.getStmtClass()) {
-                    .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(),
-                    .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(),
+                    .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(),
+                    .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(),
                     else => break,
                 };
 
@@ -3255,11 +3255,11 @@ fn transCaseStmt(c: *Context, scope: *Scope, stmt: *const clang.Stmt, items: *st
             .DefaultStmtClass => {
                 seen_default = true;
                 items.items.len = 0;
-                const default_stmt = @ptrCast(*const clang.DefaultStmt, sub);
+                const default_stmt = @as(*const clang.DefaultStmt, @ptrCast(sub));
                 sub = default_stmt.getSubStmt();
             },
             .CaseStmtClass => {
-                const case_stmt = @ptrCast(*const clang.CaseStmt, sub);
+                const case_stmt = @as(*const clang.CaseStmt, @ptrCast(sub));
 
                 if (seen_default) {
                     items.items.len = 0;
@@ -3326,10 +3326,10 @@ fn transSwitchProngStmtInline(
                 return;
             },
             .CaseStmtClass => {
-                var sub = @ptrCast(*const clang.CaseStmt, it[0]).getSubStmt();
+                var sub = @as(*const clang.CaseStmt, @ptrCast(it[0])).getSubStmt();
                 while (true) switch (sub.getStmtClass()) {
-                    .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(),
-                    .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(),
+                    .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(),
+                    .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(),
                     else => break,
                 };
                 const result = try transStmt(c, &block.base, sub, .unused);
@@ -3340,10 +3340,10 @@ fn transSwitchProngStmtInline(
                 }
             },
             .DefaultStmtClass => {
-                var sub = @ptrCast(*const clang.DefaultStmt, it[0]).getSubStmt();
+                var sub = @as(*const clang.DefaultStmt, @ptrCast(it[0])).getSubStmt();
                 while (true) switch (sub.getStmtClass()) {
-                    .CaseStmtClass => sub = @ptrCast(*const clang.CaseStmt, sub).getSubStmt(),
-                    .DefaultStmtClass => sub = @ptrCast(*const clang.DefaultStmt, sub).getSubStmt(),
+                    .CaseStmtClass => sub = @as(*const clang.CaseStmt, @ptrCast(sub)).getSubStmt(),
+                    .DefaultStmtClass => sub = @as(*const clang.DefaultStmt, @ptrCast(sub)).getSubStmt(),
                     else => break,
                 };
                 const result = try transStmt(c, &block.base, sub, .unused);
@@ -3354,7 +3354,7 @@ fn transSwitchProngStmtInline(
                 }
             },
             .CompoundStmtClass => {
-                const result = try transCompoundStmt(c, &block.base, @ptrCast(*const clang.CompoundStmt, it[0]));
+                const result = try transCompoundStmt(c, &block.base, @as(*const clang.CompoundStmt, @ptrCast(it[0])));
                 try block.statements.append(result);
                 if (result.isNoreturn(true)) {
                     return;
@@ -3381,7 +3381,7 @@ fn transConstantExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used:
         .Int => {
             // See comment in `transIntegerLiteral` for why this code is here.
             // @as(T, x)
-            const expr_base = @ptrCast(*const clang.Expr, expr);
+            const expr_base = @as(*const clang.Expr, @ptrCast(expr));
             const as_node = try Tag.as.create(c.arena, .{
                 .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()),
                 .rhs = try transCreateNodeAPInt(c, result.Val.getInt()),
@@ -3400,7 +3400,7 @@ fn transPredefinedExpr(c: *Context, scope: *Scope, expr: *const clang.Predefined
 
 fn transCreateCharLitNode(c: *Context, narrow: bool, val: u32) TransError!Node {
     return Tag.char_literal.create(c.arena, if (narrow)
-        try std.fmt.allocPrint(c.arena, "'{'}'", .{std.zig.fmtEscapes(&.{@intCast(u8, val)})})
+        try std.fmt.allocPrint(c.arena, "'{'}'", .{std.zig.fmtEscapes(&.{@as(u8, @intCast(val))})})
     else
         try std.fmt.allocPrint(c.arena, "'\\u{{{x}}}'", .{val}));
 }
@@ -3427,7 +3427,7 @@ fn transCharLiteral(
     }
     // See comment in `transIntegerLiteral` for why this code is here.
     // @as(T, x)
-    const expr_base = @ptrCast(*const clang.Expr, stmt);
+    const expr_base = @as(*const clang.Expr, @ptrCast(stmt));
     const as_node = try Tag.as.create(c.arena, .{
         .lhs = try transQualType(c, scope, expr_base.getType(), expr_base.getBeginLoc()),
         .rhs = int_lit_node,
@@ -3469,22 +3469,22 @@ fn transMemberExpr(c: *Context, scope: *Scope, stmt: *const clang.MemberExpr, re
 
     const member_decl = stmt.getMemberDecl();
     const name = blk: {
-        const decl_kind = @ptrCast(*const clang.Decl, member_decl).getKind();
+        const decl_kind = @as(*const clang.Decl, @ptrCast(member_decl)).getKind();
         // If we're referring to an anonymous struct/enum, find the bogus name
         // we've assigned to it during the RecordDecl translation
         if (decl_kind == .Field) {
-            const field_decl = @ptrCast(*const clang.FieldDecl, member_decl);
+            const field_decl = @as(*const clang.FieldDecl, @ptrCast(member_decl));
             if (field_decl.isAnonymousStructOrUnion()) {
                 const name = c.decl_table.get(@intFromPtr(field_decl.getCanonicalDecl())).?;
                 break :blk try c.arena.dupe(u8, name);
             }
         }
-        const decl = @ptrCast(*const clang.NamedDecl, member_decl);
+        const decl = @as(*const clang.NamedDecl, @ptrCast(member_decl));
         break :blk try c.str(decl.getName_bytes_begin());
     };
 
     var node = try Tag.field_access.create(c.arena, .{ .lhs = container_node, .field_name = name });
-    if (exprIsFlexibleArrayRef(c, @ptrCast(*const clang.Expr, stmt))) {
+    if (exprIsFlexibleArrayRef(c, @as(*const clang.Expr, @ptrCast(stmt)))) {
         node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &.{} });
     }
     return maybeSuppressResult(c, result_used, node);
@@ -3582,8 +3582,8 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip
     // Unwrap the base statement if it's an array decayed to a bare pointer type
     // so that we index the array itself
     var unwrapped_base = base_stmt;
-    if (@ptrCast(*const clang.Stmt, base_stmt).getStmtClass() == .ImplicitCastExprClass) {
-        const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, base_stmt);
+    if (@as(*const clang.Stmt, @ptrCast(base_stmt)).getStmtClass() == .ImplicitCastExprClass) {
+        const implicit_cast = @as(*const clang.ImplicitCastExpr, @ptrCast(base_stmt));
 
         if (implicit_cast.getCastKind() == .ArrayToPointerDecay) {
             unwrapped_base = implicit_cast.getSubExpr();
@@ -3620,17 +3620,17 @@ fn transArrayAccess(c: *Context, scope: *Scope, stmt: *const clang.ArraySubscrip
 fn cIsFunctionDeclRef(expr: *const clang.Expr) bool {
     switch (expr.getStmtClass()) {
         .ParenExprClass => {
-            const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr();
+            const op_expr = @as(*const clang.ParenExpr, @ptrCast(expr)).getSubExpr();
             return cIsFunctionDeclRef(op_expr);
         },
         .DeclRefExprClass => {
-            const decl_ref = @ptrCast(*const clang.DeclRefExpr, expr);
+            const decl_ref = @as(*const clang.DeclRefExpr, @ptrCast(expr));
             const value_decl = decl_ref.getDecl();
             const qt = value_decl.getType();
             return qualTypeChildIsFnProto(qt);
         },
         .ImplicitCastExprClass => {
-            const implicit_cast = @ptrCast(*const clang.ImplicitCastExpr, expr);
+            const implicit_cast = @as(*const clang.ImplicitCastExpr, @ptrCast(expr));
             const cast_kind = implicit_cast.getCastKind();
             if (cast_kind == .BuiltinFnToFnPtr) return true;
             if (cast_kind == .FunctionToPointerDecay) {
@@ -3639,12 +3639,12 @@ fn cIsFunctionDeclRef(expr: *const clang.Expr) bool {
             return false;
         },
         .UnaryOperatorClass => {
-            const un_op = @ptrCast(*const clang.UnaryOperator, expr);
+            const un_op = @as(*const clang.UnaryOperator, @ptrCast(expr));
             const opcode = un_op.getOpcode();
             return (opcode == .AddrOf or opcode == .Deref) and cIsFunctionDeclRef(un_op.getSubExpr());
         },
         .GenericSelectionExprClass => {
-            const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr);
+            const gen_sel = @as(*const clang.GenericSelectionExpr, @ptrCast(expr));
             return cIsFunctionDeclRef(gen_sel.getResultExpr());
         },
         else => return false,
@@ -3679,11 +3679,11 @@ fn transCallExpr(c: *Context, scope: *Scope, stmt: *const clang.CallExpr, result
                 .Proto => |fn_proto| {
                     const param_count = fn_proto.getNumParams();
                     if (i < param_count) {
-                        const param_qt = fn_proto.getParamType(@intCast(c_uint, i));
+                        const param_qt = fn_proto.getParamType(@as(c_uint, @intCast(i)));
                         if (isBoolRes(arg) and cIsNativeInt(param_qt)) {
                             arg = try Tag.int_from_bool.create(c.arena, arg);
                         } else if (arg.tag() == .string_literal and qualTypeIsCharStar(param_qt)) {
-                            const loc = @ptrCast(*const clang.Stmt, stmt).getBeginLoc();
+                            const loc = @as(*const clang.Stmt, @ptrCast(stmt)).getBeginLoc();
                             const dst_type_node = try transQualType(c, scope, param_qt, loc);
                             arg = try removeCVQualifiers(c, dst_type_node, arg);
                         }
@@ -3729,10 +3729,10 @@ fn qualTypeGetFnProto(qt: clang.QualType, is_ptr: *bool) ?ClangFunctionType {
         ty = child_qt.getTypePtr();
     }
     if (ty.getTypeClass() == .FunctionProto) {
-        return ClangFunctionType{ .Proto = @ptrCast(*const clang.FunctionProtoType, ty) };
+        return ClangFunctionType{ .Proto = @as(*const clang.FunctionProtoType, @ptrCast(ty)) };
     }
     if (ty.getTypeClass() == .FunctionNoProto) {
-        return ClangFunctionType{ .NoProto = @ptrCast(*const clang.FunctionType, ty) };
+        return ClangFunctionType{ .NoProto = @as(*const clang.FunctionType, @ptrCast(ty)) };
     }
     return null;
 }
@@ -4141,9 +4141,9 @@ fn transFloatingLiteral(c: *Context, expr: *const clang.FloatingLiteral, used: R
 fn transBinaryConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.BinaryConditionalOperator, used: ResultUsed) TransError!Node {
     // GNU extension of the ternary operator where the middle expression is
     // omitted; the condition itself is returned if it evaluates to true
-    const qt = @ptrCast(*const clang.Expr, stmt).getType();
+    const qt = @as(*const clang.Expr, @ptrCast(stmt)).getType();
     const res_is_bool = qualTypeIsBoolean(qt);
-    const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt);
+    const casted_stmt = @as(*const clang.AbstractConditionalOperator, @ptrCast(stmt));
     const cond_expr = casted_stmt.getCond();
     const false_expr = casted_stmt.getFalseExpr();
 
@@ -4203,9 +4203,9 @@ fn transConditionalOperator(c: *Context, scope: *Scope, stmt: *const clang.Condi
     };
     defer cond_scope.deinit();
 
-    const qt = @ptrCast(*const clang.Expr, stmt).getType();
+    const qt = @as(*const clang.Expr, @ptrCast(stmt)).getType();
     const res_is_bool = qualTypeIsBoolean(qt);
-    const casted_stmt = @ptrCast(*const clang.AbstractConditionalOperator, stmt);
+    const casted_stmt = @as(*const clang.AbstractConditionalOperator, @ptrCast(stmt));
     const cond_expr = casted_stmt.getCond();
     const true_expr = casted_stmt.getTrueExpr();
     const false_expr = casted_stmt.getFalseExpr();
@@ -4246,7 +4246,7 @@ fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: Node) !void {
 
 fn transQualTypeInitializedStringLiteral(c: *Context, elem_ty: Node, string_lit: *const clang.StringLiteral) TypeError!Node {
     const string_lit_size = string_lit.getLength();
-    const array_size = @intCast(usize, string_lit_size);
+    const array_size = @as(usize, @intCast(string_lit_size));
 
     // an incomplete array initialized with an empty string will be translated as [1]T{0}
     // see https://github.com/ziglang/zig/issues/8256
@@ -4266,16 +4266,16 @@ fn transQualTypeInitialized(
 ) TypeError!Node {
     const ty = qt.getTypePtr();
     if (ty.getTypeClass() == .IncompleteArray) {
-        const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty);
+        const incomplete_array_ty = @as(*const clang.IncompleteArrayType, @ptrCast(ty));
         const elem_ty = try transType(c, scope, incomplete_array_ty.getElementType().getTypePtr(), source_loc);
 
         switch (decl_init.getStmtClass()) {
             .StringLiteralClass => {
-                const string_lit = @ptrCast(*const clang.StringLiteral, decl_init);
+                const string_lit = @as(*const clang.StringLiteral, @ptrCast(decl_init));
                 return transQualTypeInitializedStringLiteral(c, elem_ty, string_lit);
             },
             .InitListExprClass => {
-                const init_expr = @ptrCast(*const clang.InitListExpr, decl_init);
+                const init_expr = @as(*const clang.InitListExpr, @ptrCast(decl_init));
                 const size = init_expr.getNumInits();
 
                 if (init_expr.isStringLiteralInit()) {
@@ -4306,7 +4306,7 @@ fn transQualTypeIntWidthOf(c: *Context, ty: clang.QualType, is_signed: bool) Typ
 /// Asserts the type is an integer.
 fn transTypeIntWidthOf(c: *Context, ty: *const clang.Type, is_signed: bool) TypeError!Node {
     assert(ty.getTypeClass() == .Builtin);
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty));
     return Tag.type.create(c.arena, switch (builtin_ty.getKind()) {
         .Char_U, .Char_S, .UChar, .SChar, .Char8 => if (is_signed) "i8" else "u8",
         .UShort, .Short => if (is_signed) "c_short" else "c_ushort",
@@ -4324,7 +4324,7 @@ fn isCBuiltinType(qt: clang.QualType, kind: clang.BuiltinTypeKind) bool {
     const c_type = qualTypeCanon(qt);
     if (c_type.getTypeClass() != .Builtin)
         return false;
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return builtin_ty.getKind() == kind;
 }
 
@@ -4341,7 +4341,7 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 {
 
     switch (ty.getTypeClass()) {
         .Builtin => {
-            const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
+            const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty));
 
             switch (builtin_ty.getKind()) {
                 .Char_U,
@@ -4358,9 +4358,9 @@ fn qualTypeIntBitWidth(c: *Context, qt: clang.QualType) !u32 {
             unreachable;
         },
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
             const typedef_decl = typedef_ty.getDecl();
-            const type_name = try c.str(@ptrCast(*const clang.NamedDecl, typedef_decl).getName_bytes_begin());
+            const type_name = try c.str(@as(*const clang.NamedDecl, @ptrCast(typedef_decl)).getName_bytes_begin());
 
             if (mem.eql(u8, type_name, "uint8_t") or mem.eql(u8, type_name, "int8_t")) {
                 return 8;
@@ -4396,12 +4396,12 @@ fn getExprQualType(c: *Context, expr: *const clang.Expr) clang.QualType {
     blk: {
         // If this is a C `char *`, turn it into a `const char *`
         if (expr.getStmtClass() != .ImplicitCastExprClass) break :blk;
-        const cast_expr = @ptrCast(*const clang.ImplicitCastExpr, expr);
+        const cast_expr = @as(*const clang.ImplicitCastExpr, @ptrCast(expr));
         if (cast_expr.getCastKind() != .ArrayToPointerDecay) break :blk;
         const sub_expr = cast_expr.getSubExpr();
         if (sub_expr.getStmtClass() != .StringLiteralClass) break :blk;
         const array_qt = sub_expr.getType();
-        const array_type = @ptrCast(*const clang.ArrayType, array_qt.getTypePtr());
+        const array_type = @as(*const clang.ArrayType, @ptrCast(array_qt.getTypePtr()));
         var pointee_qt = array_type.getElementType();
         pointee_qt.addConst();
         return c.clang_context.getPointerType(pointee_qt);
@@ -4412,11 +4412,11 @@ fn getExprQualType(c: *Context, expr: *const clang.Expr) clang.QualType {
 fn typeIsOpaque(c: *Context, ty: *const clang.Type, loc: clang.SourceLocation) bool {
     switch (ty.getTypeClass()) {
         .Builtin => {
-            const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
+            const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty));
             return builtin_ty.getKind() == .Void;
         },
         .Record => {
-            const record_ty = @ptrCast(*const clang.RecordType, ty);
+            const record_ty = @as(*const clang.RecordType, @ptrCast(ty));
             const record_decl = record_ty.getDecl();
             const record_def = record_decl.getDefinition() orelse
                 return true;
@@ -4432,12 +4432,12 @@ fn typeIsOpaque(c: *Context, ty: *const clang.Type, loc: clang.SourceLocation) b
             return false;
         },
         .Elaborated => {
-            const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
+            const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty));
             const qt = elaborated_ty.getNamedType();
             return typeIsOpaque(c, qt.getTypePtr(), loc);
         },
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
             const typedef_decl = typedef_ty.getDecl();
             const underlying_type = typedef_decl.getUnderlyingType();
             return typeIsOpaque(c, underlying_type.getTypePtr(), loc);
@@ -4459,7 +4459,7 @@ fn qualTypeIsCharStar(qt: clang.QualType) bool {
 fn cIsUnqualifiedChar(qt: clang.QualType) bool {
     const c_type = qualTypeCanon(qt);
     if (c_type.getTypeClass() != .Builtin) return false;
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return switch (builtin_ty.getKind()) {
         .Char_S, .Char_U => true,
         else => false,
@@ -4473,7 +4473,7 @@ fn cIsInteger(qt: clang.QualType) bool {
 fn cIsUnsignedInteger(qt: clang.QualType) bool {
     const c_type = qualTypeCanon(qt);
     if (c_type.getTypeClass() != .Builtin) return false;
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return switch (builtin_ty.getKind()) {
         .Char_U,
         .UChar,
@@ -4492,7 +4492,7 @@ fn cIsUnsignedInteger(qt: clang.QualType) bool {
 fn cIntTypeToIndex(qt: clang.QualType) u8 {
     const c_type = qualTypeCanon(qt);
     assert(c_type.getTypeClass() == .Builtin);
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return switch (builtin_ty.getKind()) {
         .Bool, .Char_U, .Char_S, .UChar, .SChar, .Char8 => 1,
         .WChar_U, .WChar_S => 2,
@@ -4513,9 +4513,9 @@ fn cIntTypeCmp(a: clang.QualType, b: clang.QualType) math.Order {
 
 /// Checks if expr is an integer literal >= 0
 fn cIsNonNegativeIntLiteral(c: *Context, expr: *const clang.Expr) bool {
-    if (@ptrCast(*const clang.Stmt, expr).getStmtClass() == .IntegerLiteralClass) {
+    if (@as(*const clang.Stmt, @ptrCast(expr)).getStmtClass() == .IntegerLiteralClass) {
         var signum: c_int = undefined;
-        if (!(@ptrCast(*const clang.IntegerLiteral, expr).getSignum(&signum, c.clang_context))) {
+        if (!(@as(*const clang.IntegerLiteral, @ptrCast(expr)).getSignum(&signum, c.clang_context))) {
             return false;
         }
         return signum >= 0;
@@ -4526,7 +4526,7 @@ fn cIsNonNegativeIntLiteral(c: *Context, expr: *const clang.Expr) bool {
 fn cIsSignedInteger(qt: clang.QualType) bool {
     const c_type = qualTypeCanon(qt);
     if (c_type.getTypeClass() != .Builtin) return false;
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return switch (builtin_ty.getKind()) {
         .SChar,
         .Short,
@@ -4543,14 +4543,14 @@ fn cIsSignedInteger(qt: clang.QualType) bool {
 fn cIsNativeInt(qt: clang.QualType) bool {
     const c_type = qualTypeCanon(qt);
     if (c_type.getTypeClass() != .Builtin) return false;
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return builtin_ty.getKind() == .Int;
 }
 
 fn cIsFloating(qt: clang.QualType) bool {
     const c_type = qualTypeCanon(qt);
     if (c_type.getTypeClass() != .Builtin) return false;
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return switch (builtin_ty.getKind()) {
         .Float,
         .Double,
@@ -4564,7 +4564,7 @@ fn cIsFloating(qt: clang.QualType) bool {
 fn cIsLongLongInteger(qt: clang.QualType) bool {
     const c_type = qualTypeCanon(qt);
     if (c_type.getTypeClass() != .Builtin) return false;
-    const builtin_ty = @ptrCast(*const clang.BuiltinType, c_type);
+    const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(c_type));
     return switch (builtin_ty.getKind()) {
         .LongLong, .ULongLong, .Int128, .UInt128 => true,
         else => false,
@@ -4681,8 +4681,8 @@ fn transCreateNodeAPInt(c: *Context, int: *const clang.APSInt) !Node {
                 limb_i += 2;
                 data_i += 1;
             }) {
-                limbs[limb_i] = @truncate(u32, data[data_i]);
-                limbs[limb_i + 1] = @truncate(u32, data[data_i] >> 32);
+                limbs[limb_i] = @as(u32, @truncate(data[data_i]));
+                limbs[limb_i + 1] = @as(u32, @truncate(data[data_i] >> 32));
             }
         },
         else => @compileError("unimplemented"),
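Each 64-bit word of clang's APSInt is split into two 32-bit big-int limbs: the plain `@truncate` keeps the low half, and the shift-then-truncate keeps the high half. A quick worked example with a made-up word:

    const word: u64 = 0x1122334455667788;
    const lo = @as(u32, @truncate(word)); // 0x55667788
    const hi = @as(u32, @truncate(word >> 32)); // 0x11223344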
@@ -4772,7 +4772,7 @@ fn transCreateNodeShiftOp(
 fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clang.SourceLocation) TypeError!Node {
     switch (ty.getTypeClass()) {
         .Builtin => {
-            const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
+            const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty));
             return Tag.type.create(c.arena, switch (builtin_ty.getKind()) {
                 .Void => "anyopaque",
                 .Bool => "bool",
@@ -4797,17 +4797,17 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             });
         },
         .FunctionProto => {
-            const fn_proto_ty = @ptrCast(*const clang.FunctionProtoType, ty);
+            const fn_proto_ty = @as(*const clang.FunctionProtoType, @ptrCast(ty));
             const fn_proto = try transFnProto(c, null, fn_proto_ty, source_loc, null, false);
             return Node.initPayload(&fn_proto.base);
         },
         .FunctionNoProto => {
-            const fn_no_proto_ty = @ptrCast(*const clang.FunctionType, ty);
+            const fn_no_proto_ty = @as(*const clang.FunctionType, @ptrCast(ty));
             const fn_proto = try transFnNoProto(c, fn_no_proto_ty, source_loc, null, false);
             return Node.initPayload(&fn_proto.base);
         },
         .Paren => {
-            const paren_ty = @ptrCast(*const clang.ParenType, ty);
+            const paren_ty = @as(*const clang.ParenType, @ptrCast(ty));
             return transQualType(c, scope, paren_ty.getInnerType(), source_loc);
         },
         .Pointer => {
@@ -4832,7 +4832,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             return Tag.c_pointer.create(c.arena, ptr_info);
         },
         .ConstantArray => {
-            const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, ty);
+            const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(ty));
 
             const size_ap_int = const_arr_ty.getSize();
             const size = size_ap_int.getLimitedValue(usize);
@@ -4841,7 +4841,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             return Tag.array_type.create(c.arena, .{ .len = size, .elem_type = elem_type });
         },
         .IncompleteArray => {
-            const incomplete_array_ty = @ptrCast(*const clang.IncompleteArrayType, ty);
+            const incomplete_array_ty = @as(*const clang.IncompleteArrayType, @ptrCast(ty));
 
             const child_qt = incomplete_array_ty.getElementType();
             const is_const = child_qt.isConstQualified();
@@ -4851,11 +4851,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             return Tag.c_pointer.create(c.arena, .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type });
         },
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
 
             const typedef_decl = typedef_ty.getDecl();
             var trans_scope = scope;
-            if (@ptrCast(*const clang.Decl, typedef_decl).castToNamedDecl()) |named_decl| {
+            if (@as(*const clang.Decl, @ptrCast(typedef_decl)).castToNamedDecl()) |named_decl| {
                 const decl_name = try c.str(named_decl.getName_bytes_begin());
                 if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base;
                 if (builtin_typedef_map.get(decl_name)) |builtin| return Tag.type.create(c.arena, builtin);
@@ -4865,11 +4865,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             return Tag.identifier.create(c.arena, name);
         },
         .Record => {
-            const record_ty = @ptrCast(*const clang.RecordType, ty);
+            const record_ty = @as(*const clang.RecordType, @ptrCast(ty));
 
             const record_decl = record_ty.getDecl();
             var trans_scope = scope;
-            if (@ptrCast(*const clang.Decl, record_decl).castToNamedDecl()) |named_decl| {
+            if (@as(*const clang.Decl, @ptrCast(record_decl)).castToNamedDecl()) |named_decl| {
                 const decl_name = try c.str(named_decl.getName_bytes_begin());
                 if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base;
             }
@@ -4878,11 +4878,11 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             return Tag.identifier.create(c.arena, name);
         },
         .Enum => {
-            const enum_ty = @ptrCast(*const clang.EnumType, ty);
+            const enum_ty = @as(*const clang.EnumType, @ptrCast(ty));
 
             const enum_decl = enum_ty.getDecl();
             var trans_scope = scope;
-            if (@ptrCast(*const clang.Decl, enum_decl).castToNamedDecl()) |named_decl| {
+            if (@as(*const clang.Decl, @ptrCast(enum_decl)).castToNamedDecl()) |named_decl| {
                 const decl_name = try c.str(named_decl.getName_bytes_begin());
                 if (c.global_names.get(decl_name)) |_| trans_scope = &c.global_scope.base;
             }
@@ -4891,27 +4891,27 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             return Tag.identifier.create(c.arena, name);
         },
         .Elaborated => {
-            const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
+            const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty));
             return transQualType(c, scope, elaborated_ty.getNamedType(), source_loc);
         },
         .Decayed => {
-            const decayed_ty = @ptrCast(*const clang.DecayedType, ty);
+            const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty));
             return transQualType(c, scope, decayed_ty.getDecayedType(), source_loc);
         },
         .Attributed => {
-            const attributed_ty = @ptrCast(*const clang.AttributedType, ty);
+            const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty));
             return transQualType(c, scope, attributed_ty.getEquivalentType(), source_loc);
         },
         .MacroQualified => {
-            const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty);
+            const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty));
             return transQualType(c, scope, macroqualified_ty.getModifiedType(), source_loc);
         },
         .TypeOf => {
-            const typeof_ty = @ptrCast(*const clang.TypeOfType, ty);
+            const typeof_ty = @as(*const clang.TypeOfType, @ptrCast(ty));
             return transQualType(c, scope, typeof_ty.getUnmodifiedType(), source_loc);
         },
         .TypeOfExpr => {
-            const typeofexpr_ty = @ptrCast(*const clang.TypeOfExprType, ty);
+            const typeofexpr_ty = @as(*const clang.TypeOfExprType, @ptrCast(ty));
             const underlying_expr = transExpr(c, scope, typeofexpr_ty.getUnderlyingExpr(), .used) catch |err| switch (err) {
                 error.UnsupportedTranslation => {
                     return fail(c, error.UnsupportedType, source_loc, "unsupported underlying expression for TypeOfExpr", .{});
@@ -4921,7 +4921,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
             return Tag.typeof.create(c.arena, underlying_expr);
         },
         .Vector => {
-            const vector_ty = @ptrCast(*const clang.VectorType, ty);
+            const vector_ty = @as(*const clang.VectorType, @ptrCast(ty));
             const num_elements = vector_ty.getNumElements();
             const element_qt = vector_ty.getElementType();
             return Tag.vector.create(c.arena, .{
@@ -4944,14 +4944,14 @@ fn qualTypeWasDemotedToOpaque(c: *Context, qt: clang.QualType) bool {
     const ty = qt.getTypePtr();
     switch (qt.getTypeClass()) {
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
 
             const typedef_decl = typedef_ty.getDecl();
             const underlying_type = typedef_decl.getUnderlyingType();
             return qualTypeWasDemotedToOpaque(c, underlying_type);
         },
         .Record => {
-            const record_ty = @ptrCast(*const clang.RecordType, ty);
+            const record_ty = @as(*const clang.RecordType, @ptrCast(ty));
 
             const record_decl = record_ty.getDecl();
             const canonical = @intFromPtr(record_decl.getCanonicalDecl());
@@ -4967,26 +4967,26 @@ fn qualTypeWasDemotedToOpaque(c: *Context, qt: clang.QualType) bool {
             return false;
         },
         .Enum => {
-            const enum_ty = @ptrCast(*const clang.EnumType, ty);
+            const enum_ty = @as(*const clang.EnumType, @ptrCast(ty));
 
             const enum_decl = enum_ty.getDecl();
             const canonical = @intFromPtr(enum_decl.getCanonicalDecl());
             return c.opaque_demotes.contains(canonical);
         },
         .Elaborated => {
-            const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
+            const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty));
             return qualTypeWasDemotedToOpaque(c, elaborated_ty.getNamedType());
         },
         .Decayed => {
-            const decayed_ty = @ptrCast(*const clang.DecayedType, ty);
+            const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty));
             return qualTypeWasDemotedToOpaque(c, decayed_ty.getDecayedType());
         },
         .Attributed => {
-            const attributed_ty = @ptrCast(*const clang.AttributedType, ty);
+            const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty));
             return qualTypeWasDemotedToOpaque(c, attributed_ty.getEquivalentType());
         },
         .MacroQualified => {
-            const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty);
+            const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty));
             return qualTypeWasDemotedToOpaque(c, macroqualified_ty.getModifiedType());
         },
         else => return false,
@@ -4997,28 +4997,28 @@ fn isAnyopaque(qt: clang.QualType) bool {
     const ty = qt.getTypePtr();
     switch (ty.getTypeClass()) {
         .Builtin => {
-            const builtin_ty = @ptrCast(*const clang.BuiltinType, ty);
+            const builtin_ty = @as(*const clang.BuiltinType, @ptrCast(ty));
             return builtin_ty.getKind() == .Void;
         },
         .Typedef => {
-            const typedef_ty = @ptrCast(*const clang.TypedefType, ty);
+            const typedef_ty = @as(*const clang.TypedefType, @ptrCast(ty));
             const typedef_decl = typedef_ty.getDecl();
             return isAnyopaque(typedef_decl.getUnderlyingType());
         },
         .Elaborated => {
-            const elaborated_ty = @ptrCast(*const clang.ElaboratedType, ty);
+            const elaborated_ty = @as(*const clang.ElaboratedType, @ptrCast(ty));
             return isAnyopaque(elaborated_ty.getNamedType().getCanonicalType());
         },
         .Decayed => {
-            const decayed_ty = @ptrCast(*const clang.DecayedType, ty);
+            const decayed_ty = @as(*const clang.DecayedType, @ptrCast(ty));
             return isAnyopaque(decayed_ty.getDecayedType().getCanonicalType());
         },
         .Attributed => {
-            const attributed_ty = @ptrCast(*const clang.AttributedType, ty);
+            const attributed_ty = @as(*const clang.AttributedType, @ptrCast(ty));
             return isAnyopaque(attributed_ty.getEquivalentType().getCanonicalType());
         },
         .MacroQualified => {
-            const macroqualified_ty = @ptrCast(*const clang.MacroQualifiedType, ty);
+            const macroqualified_ty = @as(*const clang.MacroQualifiedType, @ptrCast(ty));
             return isAnyopaque(macroqualified_ty.getModifiedType().getCanonicalType());
         },
         else => return false,
@@ -5066,7 +5066,7 @@ fn transFnProto(
     fn_decl_context: ?FnDeclContext,
     is_pub: bool,
 ) !*ast.Payload.Func {
-    const fn_ty = @ptrCast(*const clang.FunctionType, fn_proto_ty);
+    const fn_ty = @as(*const clang.FunctionType, @ptrCast(fn_proto_ty));
     const cc = try transCC(c, fn_ty, source_loc);
     const is_var_args = fn_proto_ty.isVariadic();
     return finishTransFnProto(c, fn_decl, fn_proto_ty, fn_ty, source_loc, fn_decl_context, is_var_args, cc, is_pub);
@@ -5108,14 +5108,14 @@ fn finishTransFnProto(
 
     var i: usize = 0;
     while (i < param_count) : (i += 1) {
-        const param_qt = fn_proto_ty.?.getParamType(@intCast(c_uint, i));
+        const param_qt = fn_proto_ty.?.getParamType(@as(c_uint, @intCast(i)));
         const is_noalias = param_qt.isRestrictQualified();
 
         const param_name: ?[]const u8 =
             if (fn_decl) |decl|
         blk: {
-            const param = decl.getParamDecl(@intCast(c_uint, i));
-            const param_name: []const u8 = try c.str(@ptrCast(*const clang.NamedDecl, param).getName_bytes_begin());
+            const param = decl.getParamDecl(@as(c_uint, @intCast(i)));
+            const param_name: []const u8 = try c.str(@as(*const clang.NamedDecl, @ptrCast(param)).getName_bytes_begin());
             if (param_name.len < 1)
                 break :blk null;
 
@@ -5576,7 +5576,7 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
         tok_list.items.len = 0;
         switch (entity.getKind()) {
             .MacroDefinitionKind => {
-                const macro = @ptrCast(*clang.MacroDefinitionRecord, entity);
+                const macro = @as(*clang.MacroDefinitionRecord, @ptrCast(entity));
                 const raw_name = macro.getName_getNameStart();
                 const begin_loc = macro.getSourceRange_getBegin();
 
@@ -6046,7 +6046,7 @@ fn escapeUnprintables(ctx: *Context, m: *MacroCtx) ![]const u8 {
     if (std.unicode.utf8ValidateSlice(zigified)) return zigified;
 
     const formatter = std.fmt.fmtSliceEscapeLower(zigified);
-    const encoded_size = @intCast(usize, std.fmt.count("{s}", .{formatter}));
+    const encoded_size = @as(usize, @intCast(std.fmt.count("{s}", .{formatter})));
     var output = try ctx.arena.alloc(u8, encoded_size);
     return std.fmt.bufPrint(output, "{s}", .{formatter}) catch |err| switch (err) {
         error.NoSpaceLeft => unreachable,
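`std.fmt.count` runs the formatter in a counting-only pass, so the buffer allocated from its result is exactly the required size and the `NoSpaceLeft` branch can never be taken. A minimal sketch of the same sizing idiom, where `allocator` and `value` are stand-ins:

    const n = std.fmt.count("{s}", .{value});
    const buf = try allocator.alloc(u8, @as(usize, @intCast(n)));
    _ = std.fmt.bufPrint(buf, "{s}", .{value}) catch unreachable;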
src/type.zig
@@ -807,7 +807,7 @@ pub const Type = struct {
         return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| {
                 if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| {
-                    return @intCast(u32, a);
+                    return @as(u32, @intCast(a));
                 } else if (opt_sema) |sema| {
                     const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
                     return res.scalar;
@@ -886,7 +886,7 @@ pub const Type = struct {
                 },
                 .vector_type => |vector_type| {
                     const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema);
-                    const bits = @intCast(u32, bits_u64);
+                    const bits = @as(u32, @intCast(bits_u64));
                     const bytes = ((bits * vector_type.len) + 7) / 8;
                     const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
                     return AbiAlignmentAdvanced{ .scalar = alignment };
@@ -901,7 +901,7 @@ pub const Type = struct {
                 // represents machine code; not a pointer
                 .func_type => |func_type| return AbiAlignmentAdvanced{
                     .scalar = if (func_type.alignment.toByteUnitsOptional()) |a|
-                        @intCast(u32, a)
+                        @as(u32, @intCast(a))
                     else
                         target_util.defaultFunctionAlignment(target),
                 },
@@ -1015,7 +1015,7 @@ pub const Type = struct {
                             else => |e| return e,
                         })) continue;
 
-                        const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse
+                        const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse
                             switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
                             .scalar => |a| a,
                             .val => switch (strat) {
@@ -1026,7 +1026,7 @@ pub const Type = struct {
                                     .storage = .{ .lazy_align = ty.toIntern() },
                                 } })).toValue() },
                             },
-                        });
+                        }));
                         big_align = @max(big_align, field_align);
 
                         // This logic is duplicated in Module.Struct.Field.alignment.
@@ -1221,7 +1221,7 @@ pub const Type = struct {
                 else => |e| return e,
             })) continue;
 
-            const field_align = @intCast(u32, field.abi_align.toByteUnitsOptional() orelse
+            const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse
                 switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
                 .scalar => |a| a,
                 .val => switch (strat) {
@@ -1232,7 +1232,7 @@ pub const Type = struct {
                         .storage = .{ .lazy_align = ty.toIntern() },
                     } })).toValue() },
                 },
-            });
+            }));
             max_align = @max(max_align, field_align);
         }
         return AbiAlignmentAdvanced{ .scalar = max_align };
@@ -1307,7 +1307,7 @@ pub const Type = struct {
                         } })).toValue() },
                     };
                     const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
-                    const elem_bits = @intCast(u32, elem_bits_u64);
+                    const elem_bits = @as(u32, @intCast(elem_bits_u64));
                     const total_bits = elem_bits * vector_type.len;
                     const total_bytes = (total_bits + 7) / 8;
                     const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
@@ -1573,12 +1573,12 @@ pub const Type = struct {
 
     fn intAbiSize(bits: u16, target: Target) u64 {
         const alignment = intAbiAlignment(bits, target);
-        return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment);
+        return std.mem.alignForward(u64, @as(u16, @intCast((@as(u17, bits) + 7) / 8)), alignment);
     }
 
     fn intAbiAlignment(bits: u16, target: Target) u32 {
         return @min(
-            std.math.ceilPowerOfTwoPromote(u16, @intCast(u16, (@as(u17, bits) + 7) / 8)),
+            std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
             target.maxIntAlignment(),
         );
     }
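In both helpers, `(@as(u17, bits) + 7) / 8` rounds the bit count up to whole bytes, widening to u17 first so the addition cannot overflow when `bits` is near the u16 maximum. As a worked example, a u12 occupies (12 + 7) / 8 = 2 bytes, `ceilPowerOfTwoPromote(u16, 2)` gives an alignment of 2, and `alignForward(u64, 2, 2)` leaves the ABI size at 2 bytes.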
@@ -2166,7 +2166,7 @@ pub const Type = struct {
     pub fn vectorLen(ty: Type, mod: *const Module) u32 {
         return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
             .vector_type => |vector_type| vector_type.len,
-            .anon_struct_type => |tuple| @intCast(u32, tuple.types.len),
+            .anon_struct_type => |tuple| @as(u32, @intCast(tuple.types.len)),
             else => unreachable,
         };
     }
@@ -3124,7 +3124,7 @@ pub const Type = struct {
         for (struct_obj.fields.values(), 0..) |f, i| {
             if (!f.ty.hasRuntimeBits(mod)) continue;
 
-            const field_bits = @intCast(u16, f.ty.bitSize(mod));
+            const field_bits = @as(u16, @intCast(f.ty.bitSize(mod)));
             if (i == field_index) {
                 bit_offset = running_bits;
                 elem_size_bits = field_bits;
@@ -3385,8 +3385,8 @@ pub const Type = struct {
     pub fn smallestUnsignedBits(max: u64) u16 {
         if (max == 0) return 0;
         const base = std.math.log2(max);
-        const upper = (@as(u64, 1) << @intCast(u6, base)) - 1;
-        return @intCast(u16, base + @intFromBool(upper < max));
+        const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1;
+        return @as(u16, @intCast(base + @intFromBool(upper < max)));
     }
 
     /// This is only used for comptime asserts. Bump this number when you make a change
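`smallestUnsignedBits` takes floor(log2(max)) as `base` and adds one more bit when `upper` = (1 << base) - 1, the largest value representable in `base` bits, is still smaller than `max`. For max = 5, `base` is 2, `upper` is 3, and 3 < 5, so the result is 3 bits, matching 5 = 0b101. The `@as(u6, @intCast(base))` on the shift amount is required because shifting a u64 takes a 6-bit shift count.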
src/TypedValue.zig
@@ -250,7 +250,7 @@ pub fn print(
             },
             .empty_enum_value => return writer.writeAll("(empty enum value)"),
             .float => |float| switch (float.storage) {
-                inline else => |x| return writer.print("{d}", .{@floatCast(f64, x)}),
+                inline else => |x| return writer.print("{d}", .{@as(f64, @floatCast(x))}),
             },
             .ptr => |ptr| {
                 if (ptr.addr == .int) {
@@ -273,7 +273,7 @@ pub fn print(
                         for (buf[0..max_len], 0..) |*c, i| {
                             const elem = try val.elemValue(mod, i);
                             if (elem.isUndef(mod)) break :str;
-                            c.* = @intCast(u8, elem.toUnsignedInt(mod));
+                            c.* = @as(u8, @intCast(elem.toUnsignedInt(mod)));
                         }
                         const truncated = if (len > max_string_len) " (truncated)" else "";
                         return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
@@ -352,11 +352,11 @@ pub fn print(
                                 if (container_ty.isTuple(mod)) {
                                     try writer.print("[{d}]", .{field.index});
                                 }
-                                const field_name = container_ty.structFieldName(@intCast(usize, field.index), mod);
+                                const field_name = container_ty.structFieldName(@as(usize, @intCast(field.index)), mod);
                                 try writer.print(".{i}", .{field_name.fmt(ip)});
                             },
                             .Union => {
-                                const field_name = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)];
+                                const field_name = container_ty.unionFields(mod).keys()[@as(usize, @intCast(field.index))];
                                 try writer.print(".{i}", .{field_name.fmt(ip)});
                             },
                             .Pointer => {
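In the `.float` prong of `print`, `inline else` unrolls the switch over every `float.storage` variant at comptime, so each instantiation of the prong sees `x` with a concrete float type (f16 through f128) and `@floatCast` to f64 is valid in all of them; printing at f64 precision presumably trades a little accuracy on f80/f128 values for a simple display path.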
src/value.zig
@@ -112,7 +112,7 @@ pub const Value = struct {
             return self.castTag(T.base_tag);
         }
         inline for (@typeInfo(Tag).Enum.fields) |field| {
-            const t = @enumFromInt(Tag, field.value);
+            const t = @as(Tag, @enumFromInt(field.value));
             if (self.legacy.ptr_otherwise.tag == t) {
                 if (T == t.Type()) {
                     return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise);
@@ -203,8 +203,8 @@ pub const Value = struct {
                 .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes),
                 .elems => try arrayToIpString(val, ty.arrayLen(mod), mod),
                 .repeated_elem => |elem| {
-                    const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod));
-                    const len = @intCast(usize, ty.arrayLen(mod));
+                    const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod)));
+                    const len = @as(usize, @intCast(ty.arrayLen(mod)));
                     try ip.string_bytes.appendNTimes(mod.gpa, byte, len);
                     return ip.getOrPutTrailingString(mod.gpa, len);
                 },
@@ -226,8 +226,8 @@ pub const Value = struct {
                 .bytes => |bytes| try allocator.dupe(u8, bytes),
                 .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
                 .repeated_elem => |elem| {
-                    const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod));
-                    const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod)));
+                    const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod)));
+                    const result = try allocator.alloc(u8, @as(usize, @intCast(ty.arrayLen(mod))));
                     @memset(result, byte);
                     return result;
                 },
@@ -237,10 +237,10 @@ pub const Value = struct {
     }
 
     fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
-        const result = try allocator.alloc(u8, @intCast(usize, len));
+        const result = try allocator.alloc(u8, @as(usize, @intCast(len)));
         for (result, 0..) |*elem, i| {
             const elem_val = try val.elemValue(mod, i);
-            elem.* = @intCast(u8, elem_val.toUnsignedInt(mod));
+            elem.* = @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
         }
         return result;
     }
@@ -248,7 +248,7 @@ pub const Value = struct {
     fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString {
         const gpa = mod.gpa;
         const ip = &mod.intern_pool;
-        const len = @intCast(usize, len_u64);
+        const len = @as(usize, @intCast(len_u64));
         try ip.string_bytes.ensureUnusedCapacity(gpa, len);
         for (0..len) |i| {
             // I don't think elemValue can affect ip.string_bytes. Let's
@@ -256,7 +256,7 @@ pub const Value = struct {
             const prev = ip.string_bytes.items.len;
             const elem_val = try val.elemValue(mod, i);
             assert(ip.string_bytes.items.len == prev);
-            const byte = @intCast(u8, elem_val.toUnsignedInt(mod));
+            const byte = @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
             ip.string_bytes.appendAssumeCapacity(byte);
         }
         return ip.getOrPutTrailingString(gpa, len);
@@ -303,7 +303,7 @@ pub const Value = struct {
                 } });
             },
             .aggregate => {
-                const len = @intCast(usize, ty.arrayLen(mod));
+                const len = @as(usize, @intCast(ty.arrayLen(mod)));
                 const old_elems = val.castTag(.aggregate).?.data[0..len];
                 const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len);
                 defer mod.gpa.free(new_elems);
@@ -534,7 +534,7 @@ pub const Value = struct {
                         const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
                         const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
                         if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty);
-                        return base_addr + struct_ty.structFieldOffset(@intCast(usize, field.index), mod);
+                        return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod);
                     },
                     else => null,
                 },
@@ -561,9 +561,9 @@ pub const Value = struct {
                 .int => |int| switch (int.storage) {
                     .big_int => |big_int| big_int.to(i64) catch unreachable,
                     .i64 => |x| x,
-                    .u64 => |x| @intCast(i64, x),
-                    .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)),
-                    .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)),
+                    .u64 => |x| @as(i64, @intCast(x)),
+                    .lazy_align => |ty| @as(i64, @intCast(ty.toType().abiAlignment(mod))),
+                    .lazy_size => |ty| @as(i64, @intCast(ty.toType().abiSize(mod))),
                 },
                 else => unreachable,
             },
@@ -604,7 +604,7 @@ pub const Value = struct {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         if (val.isUndef(mod)) {
-            const size = @intCast(usize, ty.abiSize(mod));
+            const size = @as(usize, @intCast(ty.abiSize(mod)));
             @memset(buffer[0..size], 0xaa);
             return;
         }
@@ -623,17 +623,17 @@ pub const Value = struct {
                 bigint.writeTwosComplement(buffer[0..byte_count], endian);
             },
             .Float => switch (ty.floatBits(target)) {
-                16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian),
-                32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32, mod)), endian),
-                64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64, mod)), endian),
-                80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80, mod)), endian),
-                128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128, mod)), endian),
+                16 => std.mem.writeInt(u16, buffer[0..2], @as(u16, @bitCast(val.toFloat(f16, mod))), endian),
+                32 => std.mem.writeInt(u32, buffer[0..4], @as(u32, @bitCast(val.toFloat(f32, mod))), endian),
+                64 => std.mem.writeInt(u64, buffer[0..8], @as(u64, @bitCast(val.toFloat(f64, mod))), endian),
+                80 => std.mem.writeInt(u80, buffer[0..10], @as(u80, @bitCast(val.toFloat(f80, mod))), endian),
+                128 => std.mem.writeInt(u128, buffer[0..16], @as(u128, @bitCast(val.toFloat(f128, mod))), endian),
                 else => unreachable,
             },
             .Array => {
                 const len = ty.arrayLen(mod);
                 const elem_ty = ty.childType(mod);
-                const elem_size = @intCast(usize, elem_ty.abiSize(mod));
+                const elem_size = @as(usize, @intCast(elem_ty.abiSize(mod)));
                 var elem_i: usize = 0;
                 var buf_off: usize = 0;
                 while (elem_i < len) : (elem_i += 1) {
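Because the new `@bitCast` no longer names a destination type, the `@as(u32, ...)`-style wrappers in the `.Float` branch are what pin the integer type to the float's exact bit width. A minimal self-contained sketch of the reinterpretation being performed:

    const std = @import("std");

    test "1.0 reinterpreted as its IEEE-754 binary32 bits" {
        const bits = @as(u32, @bitCast(@as(f32, 1.0)));
        try std.testing.expectEqual(@as(u32, 0x3F800000), bits);
    }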
@@ -645,13 +645,13 @@ pub const Value = struct {
             .Vector => {
                 // We use byte_count instead of abi_size here, so that any padding bytes
                 // follow the data bytes, on both big- and little-endian systems.
-                const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
+                const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
                 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
             },
             .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => return error.IllDefinedMemoryLayout,
                 .Extern => for (ty.structFields(mod).values(), 0..) |field, i| {
-                    const off = @intCast(usize, ty.structFieldOffset(i, mod));
+                    const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
                     const field_val = switch (val.ip_index) {
                         .none => switch (val.tag()) {
                             .bytes => {
@@ -674,7 +674,7 @@ pub const Value = struct {
                     try writeToMemory(field_val, field.ty, mod, buffer[off..]);
                 },
                 .Packed => {
-                    const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
+                    const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
                     return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
                 },
             },
@@ -686,14 +686,14 @@ pub const Value = struct {
                     .error_union => |error_union| error_union.val.err_name,
                     else => unreachable,
                 };
-                const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
-                std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
+                const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(name).?));
+                std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @as(Int, @intCast(int)), endian);
             },
             .Union => switch (ty.containerLayout(mod)) {
                 .Auto => return error.IllDefinedMemoryLayout,
                 .Extern => return error.Unimplemented,
                 .Packed => {
-                    const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
+                    const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
                     return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
                 },
             },
@@ -730,7 +730,7 @@ pub const Value = struct {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         if (val.isUndef(mod)) {
-            const bit_size = @intCast(usize, ty.bitSize(mod));
+            const bit_size = @as(usize, @intCast(ty.bitSize(mod)));
             std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
             return;
         }
@@ -742,9 +742,9 @@ pub const Value = struct {
                     .Big => buffer.len - bit_offset / 8 - 1,
                 };
                 if (val.toBool()) {
-                    buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8));
+                    buffer[byte_index] |= (@as(u8, 1) << @as(u3, @intCast(bit_offset % 8)));
                 } else {
-                    buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8));
+                    buffer[byte_index] &= ~(@as(u8, 1) << @as(u3, @intCast(bit_offset % 8)));
                 }
             },
             .Int, .Enum => {
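For the packed `.Bool` case, `bit_offset` picks both the byte and the bit within it: with `bit_offset` = 10 on a little-endian target, the write lands in `buffer[1]` (10 / 8) and the mask is 1 << 2 (10 % 8), i.e. 0b0000_0100; the `@as(u3, @intCast(...))` is required because a u8 shift amount must fit in 3 bits.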
@@ -759,17 +759,17 @@ pub const Value = struct {
                 }
             },
             .Float => switch (ty.floatBits(target)) {
-                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16, mod)), endian),
-                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32, mod)), endian),
-                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64, mod)), endian),
-                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80, mod)), endian),
-                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128, mod)), endian),
+                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @as(u16, @bitCast(val.toFloat(f16, mod))), endian),
+                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @as(u32, @bitCast(val.toFloat(f32, mod))), endian),
+                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @as(u64, @bitCast(val.toFloat(f64, mod))), endian),
+                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @as(u80, @bitCast(val.toFloat(f80, mod))), endian),
+                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @as(u128, @bitCast(val.toFloat(f128, mod))), endian),
                 else => unreachable,
             },
             .Vector => {
                 const elem_ty = ty.childType(mod);
-                const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
-                const len = @intCast(usize, ty.arrayLen(mod));
+                const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod)));
+                const len = @as(usize, @intCast(ty.arrayLen(mod)));
 
                 var bits: u16 = 0;
                 var elem_i: usize = 0;
@@ -789,7 +789,7 @@ pub const Value = struct {
                     const fields = ty.structFields(mod).values();
                     const storage = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage;
                     for (fields, 0..) |field, i| {
-                        const field_bits = @intCast(u16, field.ty.bitSize(mod));
+                        const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
                         const field_val = switch (storage) {
                             .bytes => unreachable,
                             .elems => |elems| elems[i],
@@ -865,12 +865,12 @@ pub const Value = struct {
                 if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                     .signed => {
                         const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
-                        const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                        const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
                         return mod.getCoerced(try mod.intValue(int_ty, result), ty);
                     },
                     .unsigned => {
                         const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
-                        const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                        const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
                         return mod.getCoerced(try mod.intValue(int_ty, result), ty);
                     },
                 } else { // Slow path, we have to construct a big-int
@@ -886,22 +886,22 @@ pub const Value = struct {
             .Float => return (try mod.intern(.{ .float = .{
                 .ty = ty.toIntern(),
                 .storage = switch (ty.floatBits(target)) {
-                    16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) },
-                    32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) },
-                    64 => .{ .f64 = @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian)) },
-                    80 => .{ .f80 = @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian)) },
-                    128 => .{ .f128 = @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian)) },
+                    16 => .{ .f16 = @as(f16, @bitCast(std.mem.readInt(u16, buffer[0..2], endian))) },
+                    32 => .{ .f32 = @as(f32, @bitCast(std.mem.readInt(u32, buffer[0..4], endian))) },
+                    64 => .{ .f64 = @as(f64, @bitCast(std.mem.readInt(u64, buffer[0..8], endian))) },
+                    80 => .{ .f80 = @as(f80, @bitCast(std.mem.readInt(u80, buffer[0..10], endian))) },
+                    128 => .{ .f128 = @as(f128, @bitCast(std.mem.readInt(u128, buffer[0..16], endian))) },
                     else => unreachable,
                 },
             } })).toValue(),
             .Array => {
                 const elem_ty = ty.childType(mod);
                 const elem_size = elem_ty.abiSize(mod);
-                const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod)));
+                const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod))));
                 var offset: usize = 0;
                 for (elems) |*elem| {
                     elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod);
-                    offset += @intCast(usize, elem_size);
+                    offset += @as(usize, @intCast(elem_size));
                 }
                 return (try mod.intern(.{ .aggregate = .{
                     .ty = ty.toIntern(),
@@ -911,7 +911,7 @@ pub const Value = struct {
             .Vector => {
                 // We use byte_count instead of abi_size here, so that any padding bytes
                 // follow the data bytes, on both big- and little-endian systems.
-                const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
+                const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
                 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
             },
             .Struct => switch (ty.containerLayout(mod)) {
@@ -920,8 +920,8 @@ pub const Value = struct {
                     const fields = ty.structFields(mod).values();
                     const field_vals = try arena.alloc(InternPool.Index, fields.len);
                     for (field_vals, fields, 0..) |*field_val, field, i| {
-                        const off = @intCast(usize, ty.structFieldOffset(i, mod));
-                        const sz = @intCast(usize, field.ty.abiSize(mod));
+                        const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
+                        const sz = @as(usize, @intCast(field.ty.abiSize(mod)));
                         field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod);
                     }
                     return (try mod.intern(.{ .aggregate = .{
@@ -930,7 +930,7 @@ pub const Value = struct {
                     } })).toValue();
                 },
                 .Packed => {
-                    const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
+                    const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
                     return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
                 },
             },
@@ -938,7 +938,7 @@ pub const Value = struct {
                 // TODO revisit this when we have the concept of the error tag type
                 const Int = u16;
                 const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian);
-                const name = mod.global_error_set.keys()[@intCast(usize, int)];
+                const name = mod.global_error_set.keys()[@as(usize, @intCast(int))];
                 return (try mod.intern(.{ .err = .{
                     .ty = ty.toIntern(),
                     .name = name,
@@ -977,7 +977,7 @@ pub const Value = struct {
                     .Big => buffer[buffer.len - bit_offset / 8 - 1],
                     .Little => buffer[bit_offset / 8],
                 };
-                if (((byte >> @intCast(u3, bit_offset % 8)) & 1) == 0) {
+                if (((byte >> @as(u3, @intCast(bit_offset % 8))) & 1) == 0) {
                     return Value.false;
                 } else {
                     return Value.true;
@@ -1009,7 +1009,7 @@ pub const Value = struct {
                 }
 
                 // Slow path, we have to construct a big-int
-                const abi_size = @intCast(usize, ty.abiSize(mod));
+                const abi_size = @as(usize, @intCast(ty.abiSize(mod)));
                 const Limb = std.math.big.Limb;
                 const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
                 const limbs_buffer = try arena.alloc(Limb, limb_count);
@@ -1021,20 +1021,20 @@ pub const Value = struct {
             .Float => return (try mod.intern(.{ .float = .{
                 .ty = ty.toIntern(),
                 .storage = switch (ty.floatBits(target)) {
-                    16 => .{ .f16 = @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian)) },
-                    32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) },
-                    64 => .{ .f64 = @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian)) },
-                    80 => .{ .f80 = @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian)) },
-                    128 => .{ .f128 = @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian)) },
+                    16 => .{ .f16 = @as(f16, @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian))) },
+                    32 => .{ .f32 = @as(f32, @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian))) },
+                    64 => .{ .f64 = @as(f64, @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian))) },
+                    80 => .{ .f80 = @as(f80, @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian))) },
+                    128 => .{ .f128 = @as(f128, @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian))) },
                     else => unreachable,
                 },
             } })).toValue(),
             .Vector => {
                 const elem_ty = ty.childType(mod);
-                const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod)));
+                const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod))));
 
                 var bits: u16 = 0;
-                const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
+                const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod)));
                 for (elems, 0..) |_, i| {
                     // On big-endian systems, LLVM reverses the element order of vectors by default
                     const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
@@ -1054,7 +1054,7 @@ pub const Value = struct {
                     const fields = ty.structFields(mod).values();
                     const field_vals = try arena.alloc(InternPool.Index, fields.len);
                     for (fields, 0..) |field, i| {
-                        const field_bits = @intCast(u16, field.ty.bitSize(mod));
+                        const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
                         field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod);
                         bits += field_bits;
                     }
@@ -1081,18 +1081,18 @@ pub const Value = struct {
     pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
         return switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .int => |int| switch (int.storage) {
-                .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)),
+                .big_int => |big_int| @as(T, @floatCast(bigIntToFloat(big_int.limbs, big_int.positive))),
                 inline .u64, .i64 => |x| {
                     if (T == f80) {
                         @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
                     }
-                    return @floatFromInt(T, x);
+                    return @as(T, @floatFromInt(x));
                 },
-                .lazy_align => |ty| @floatFromInt(T, ty.toType().abiAlignment(mod)),
-                .lazy_size => |ty| @floatFromInt(T, ty.toType().abiSize(mod)),
+                .lazy_align => |ty| @as(T, @floatFromInt(ty.toType().abiAlignment(mod))),
+                .lazy_size => |ty| @as(T, @floatFromInt(ty.toType().abiSize(mod))),
             },
             .float => |float| switch (float.storage) {
-                inline else => |x| @floatCast(T, x),
+                inline else => |x| @as(T, @floatCast(x)),
             },
             else => unreachable,
         };
@@ -1107,7 +1107,7 @@ pub const Value = struct {
         var i: usize = limbs.len;
         while (i != 0) {
             i -= 1;
-            const limb: f128 = @floatFromInt(f128, limbs[i]);
+            const limb: f128 = @as(f128, @floatFromInt(limbs[i]));
             result = @mulAdd(f128, base, result, limb);
         }
         if (positive) {
@@ -1132,7 +1132,7 @@ pub const Value = struct {
     pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
         var bigint_buf: BigIntSpace = undefined;
         const bigint = val.toBigInt(&bigint_buf, mod);
-        return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits));
+        return @as(u64, @intCast(bigint.popCount(ty.intInfo(mod).bits)));
     }
 
     pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
@@ -1505,10 +1505,10 @@ pub const Value = struct {
                     .int, .eu_payload => unreachable,
                     .opt_payload => |base| base.toValue().elemValue(mod, index),
                     .comptime_field => |field_val| field_val.toValue().elemValue(mod, index),
-                    .elem => |elem| elem.base.toValue().elemValue(mod, index + @intCast(usize, elem.index)),
+                    .elem => |elem| elem.base.toValue().elemValue(mod, index + @as(usize, @intCast(elem.index))),
                     .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| {
                         const base_decl = mod.declPtr(decl_index);
-                        const field_val = try base_decl.val.fieldValue(mod, @intCast(usize, field.index));
+                        const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index)));
                         return field_val.elemValue(mod, index);
                     } else unreachable,
                 },
@@ -1604,18 +1604,18 @@ pub const Value = struct {
                     .comptime_field => |comptime_field| comptime_field.toValue()
                         .sliceArray(mod, arena, start, end),
                     .elem => |elem| elem.base.toValue()
-                        .sliceArray(mod, arena, start + @intCast(usize, elem.index), end + @intCast(usize, elem.index)),
+                        .sliceArray(mod, arena, start + @as(usize, @intCast(elem.index)), end + @as(usize, @intCast(elem.index))),
                     else => unreachable,
                 },
                 .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{
                     .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
                         .array_type => |array_type| try mod.arrayType(.{
-                            .len = @intCast(u32, end - start),
+                            .len = @as(u32, @intCast(end - start)),
                             .child = array_type.child,
                             .sentinel = if (end == array_type.len) array_type.sentinel else .none,
                         }),
                         .vector_type => |vector_type| try mod.vectorType(.{
-                            .len = @intCast(u32, end - start),
+                            .len = @as(u32, @intCast(end - start)),
                             .child = vector_type.child,
                         }),
                         else => unreachable,
@@ -1734,7 +1734,7 @@ pub const Value = struct {
                 .simple_value => |v| v == .undefined,
                 .ptr => |ptr| switch (ptr.len) {
                     .none => false,
-                    else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| {
+                    else => for (0..@as(usize, @intCast(ptr.len.toValue().toUnsignedInt(mod)))) |index| {
                         if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true;
                     } else false,
                 },
@@ -1783,7 +1783,7 @@ pub const Value = struct {
 
     pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt {
         return if (getErrorName(val, mod).unwrap()) |err_name|
-            @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err_name).?)
+            @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err_name).?))
         else
             0;
     }
@@ -1868,11 +1868,11 @@ pub const Value = struct {
     fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
         const target = mod.getTarget();
         const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) {
-            16 => .{ .f16 = @floatFromInt(f16, x) },
-            32 => .{ .f32 = @floatFromInt(f32, x) },
-            64 => .{ .f64 = @floatFromInt(f64, x) },
-            80 => .{ .f80 = @floatFromInt(f80, x) },
-            128 => .{ .f128 = @floatFromInt(f128, x) },
+            16 => .{ .f16 = @as(f16, @floatFromInt(x)) },
+            32 => .{ .f32 = @as(f32, @floatFromInt(x)) },
+            64 => .{ .f64 = @as(f64, @floatFromInt(x)) },
+            80 => .{ .f80 = @as(f80, @floatFromInt(x)) },
+            128 => .{ .f128 = @as(f128, @floatFromInt(x)) },
             else => unreachable,
         };
         return (try mod.intern(.{ .float = .{
@@ -1887,7 +1887,7 @@ pub const Value = struct {
         }
 
         const w_value = @fabs(scalar);
-        return @divFloor(@intFromFloat(std.math.big.Limb, std.math.log2(w_value)), @typeInfo(std.math.big.Limb).Int.bits) + 1;
+        return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).Int.bits) + 1;
     }
 
     pub const OverflowArithmeticResult = struct {
@@ -2738,14 +2738,14 @@ pub const Value = struct {
             for (result_data, 0..) |*scalar, i| {
                 const elem_val = try val.elemValue(mod, i);
                 const bits_elem = try bits.elemValue(mod, i);
-                scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod)).intern(scalar_ty, mod);
+                scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).intern(scalar_ty, mod);
             }
             return (try mod.intern(.{ .aggregate = .{
                 .ty = ty.toIntern(),
                 .storage = .{ .elems = result_data },
             } })).toValue();
         }
-        return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod);
+        return intTruncScalar(val, ty, allocator, signedness, @as(u16, @intCast(bits.toUnsignedInt(mod))), mod);
     }
 
     pub fn intTruncScalar(
@@ -2793,7 +2793,7 @@ pub const Value = struct {
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
         const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
-        const shift = @intCast(usize, rhs.toUnsignedInt(mod));
+        const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
         const limbs = try allocator.alloc(
             std.math.big.Limb,
             lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2855,7 +2855,7 @@ pub const Value = struct {
         const info = ty.intInfo(mod);
         var lhs_space: Value.BigIntSpace = undefined;
         const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
-        const shift = @intCast(usize, rhs.toUnsignedInt(mod));
+        const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
         const limbs = try allocator.alloc(
             std.math.big.Limb,
             lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2912,7 +2912,7 @@ pub const Value = struct {
 
         var lhs_space: Value.BigIntSpace = undefined;
         const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
-        const shift = @intCast(usize, rhs.toUnsignedInt(mod));
+        const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
         const limbs = try arena.alloc(
             std.math.big.Limb,
             std.math.big.int.calcTwosCompLimbCount(info.bits) + 1,
@@ -2984,7 +2984,7 @@ pub const Value = struct {
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
         const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
-        const shift = @intCast(usize, rhs.toUnsignedInt(mod));
+        const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
 
         const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
         if (result_limbs == 0) {
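
Every hunk above follows one mechanical rewrite: a two-argument cast builtin such as `@intCast(usize, x)` becomes a single-argument builtin whose destination type is supplied by the result location, spelled `@as(usize, @intCast(x))`. A minimal sketch of the pattern — `demo` and its types are hypothetical, not part of this commit:

    const std = @import("std");

    fn demo(x: u64) usize {
        // Old: @intCast(usize, x) -- the destination type was the first argument.
        // New: @intCast takes only the operand; @as supplies the result type.
        return @as(usize, @intCast(x));
    }

    test "two-argument casts become result-located casts" {
        try std.testing.expect(demo(7) == 7);
    }
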
src/Zir.zig
@@ -74,12 +74,12 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
     inline for (fields) |field| {
         @field(result, field.name) = switch (field.type) {
             u32 => code.extra[i],
-            Inst.Ref => @enumFromInt(Inst.Ref, code.extra[i]),
-            i32 => @bitCast(i32, code.extra[i]),
-            Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]),
-            Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, code.extra[i]),
-            Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]),
-            Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, code.extra[i]),
+            Inst.Ref => @as(Inst.Ref, @enumFromInt(code.extra[i])),
+            i32 => @as(i32, @bitCast(code.extra[i])),
+            Inst.Call.Flags => @as(Inst.Call.Flags, @bitCast(code.extra[i])),
+            Inst.BuiltinCall.Flags => @as(Inst.BuiltinCall.Flags, @bitCast(code.extra[i])),
+            Inst.SwitchBlock.Bits => @as(Inst.SwitchBlock.Bits, @bitCast(code.extra[i])),
+            Inst.FuncFancy.Bits => @as(Inst.FuncFancy.Bits, @bitCast(code.extra[i])),
             else => @compileError("bad field type"),
         };
         i += 1;
@@ -101,7 +101,7 @@ pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 {
 
 pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref {
     const raw_slice = code.extra[start..][0..len];
-    return @ptrCast([]Inst.Ref, raw_slice);
+    return @as([]Inst.Ref, @ptrCast(raw_slice));
 }
 
 pub fn hasCompileErrors(code: Zir) bool {
@@ -2992,7 +2992,7 @@ pub const Inst = struct {
                 (@as(u128, self.piece1) << 32) |
                 (@as(u128, self.piece2) << 64) |
                 (@as(u128, self.piece3) << 96);
-            return @bitCast(f128, int_bits);
+            return @as(f128, @bitCast(int_bits));
         }
     };
 
@@ -3228,15 +3228,15 @@ pub const DeclIterator = struct {
         }
         it.decl_i += 1;
 
-        const flags = @truncate(u4, it.cur_bit_bag);
+        const flags = @as(u4, @truncate(it.cur_bit_bag));
         it.cur_bit_bag >>= 4;
 
-        const sub_index = @intCast(u32, it.extra_index);
+        const sub_index = @as(u32, @intCast(it.extra_index));
         it.extra_index += 5; // src_hash(4) + line(1)
         const name = it.zir.nullTerminatedString(it.zir.extra[it.extra_index]);
         it.extra_index += 3; // name(1) + value(1) + doc_comment(1)
-        it.extra_index += @truncate(u1, flags >> 2);
-        it.extra_index += @truncate(u1, flags >> 3);
+        it.extra_index += @as(u1, @truncate(flags >> 2));
+        it.extra_index += @as(u1, @truncate(flags >> 3));
 
         return Item{
             .sub_index = sub_index,
@@ -3258,7 +3258,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
             const extended = datas[decl_inst].extended;
             switch (extended.opcode) {
                 .struct_decl => {
-                    const small = @bitCast(Inst.StructDecl.Small, extended.small);
+                    const small = @as(Inst.StructDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
                     extra_index += @intFromBool(small.has_src_node);
                     extra_index += @intFromBool(small.has_fields_len);
@@ -3281,7 +3281,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
                     return declIteratorInner(zir, extra_index, decls_len);
                 },
                 .enum_decl => {
-                    const small = @bitCast(Inst.EnumDecl.Small, extended.small);
+                    const small = @as(Inst.EnumDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
                     extra_index += @intFromBool(small.has_src_node);
                     extra_index += @intFromBool(small.has_tag_type);
@@ -3296,7 +3296,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
                     return declIteratorInner(zir, extra_index, decls_len);
                 },
                 .union_decl => {
-                    const small = @bitCast(Inst.UnionDecl.Small, extended.small);
+                    const small = @as(Inst.UnionDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
                     extra_index += @intFromBool(small.has_src_node);
                     extra_index += @intFromBool(small.has_tag_type);
@@ -3311,7 +3311,7 @@ pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
                     return declIteratorInner(zir, extra_index, decls_len);
                 },
                 .opaque_decl => {
-                    const small = @bitCast(Inst.OpaqueDecl.Small, extended.small);
+                    const small = @as(Inst.OpaqueDecl.Small, @bitCast(extended.small));
                     var extra_index: usize = extended.operand;
                     extra_index += @intFromBool(small.has_src_node);
                     const decls_len = if (small.has_decls_len) decls_len: {
@@ -3507,7 +3507,7 @@ fn findDeclsSwitch(
 
     const special_prong = extra.data.bits.specialProng();
     if (special_prong != .none) {
-        const body_len = @truncate(u31, zir.extra[extra_index]);
+        const body_len = @as(u31, @truncate(zir.extra[extra_index]));
         extra_index += 1;
         const body = zir.extra[extra_index..][0..body_len];
         extra_index += body.len;
@@ -3520,7 +3520,7 @@ fn findDeclsSwitch(
         var scalar_i: usize = 0;
         while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
             extra_index += 1;
-            const body_len = @truncate(u31, zir.extra[extra_index]);
+            const body_len = @as(u31, @truncate(zir.extra[extra_index]));
             extra_index += 1;
             const body = zir.extra[extra_index..][0..body_len];
             extra_index += body_len;
@@ -3535,7 +3535,7 @@ fn findDeclsSwitch(
             extra_index += 1;
             const ranges_len = zir.extra[extra_index];
             extra_index += 1;
-            const body_len = @truncate(u31, zir.extra[extra_index]);
+            const body_len = @as(u31, @truncate(zir.extra[extra_index]));
             extra_index += 1;
             const items = zir.refSlice(extra_index, items_len);
             extra_index += items_len;
@@ -3617,7 +3617,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
                     ret_ty_ref = .void_type;
                 },
                 1 => {
-                    ret_ty_ref = @enumFromInt(Inst.Ref, zir.extra[extra_index]);
+                    ret_ty_ref = @as(Inst.Ref, @enumFromInt(zir.extra[extra_index]));
                     extra_index += 1;
                 },
                 else => {
@@ -3671,7 +3671,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
                 ret_ty_body = zir.extra[extra_index..][0..body_len];
                 extra_index += ret_ty_body.len;
             } else if (extra.data.bits.has_ret_ty_ref) {
-                ret_ty_ref = @enumFromInt(Inst.Ref, zir.extra[extra_index]);
+                ret_ty_ref = @as(Inst.Ref, @enumFromInt(zir.extra[extra_index]));
                 extra_index += 1;
             }
 
@@ -3715,7 +3715,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
 pub const ref_start_index: u32 = InternPool.static_len;
 
 pub fn indexToRef(inst: Inst.Index) Inst.Ref {
-    return @enumFromInt(Inst.Ref, ref_start_index + inst);
+    return @as(Inst.Ref, @enumFromInt(ref_start_index + inst));
 }
 
 pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
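
The `@enumFromInt` and `@truncate` rewrites in this file migrate the same way: only the destination type moves out of the argument list into the result location. A hedged sketch with a made-up enum, not taken from the ZIR code above:

    const std = @import("std");

    const Color = enum(u8) { red, green, blue };

    test "enumFromInt and truncate under the new syntax" {
        // Old: @enumFromInt(Color, 1). New: the annotated constant is the
        // result location, so the builtin needs no type argument.
        const c: Color = @enumFromInt(1);
        try std.testing.expect(c == .green);

        // Old: @truncate(u4, x). New: @as(u4, @truncate(x)).
        const x: u32 = 0xabcd;
        try std.testing.expect(@as(u4, @truncate(x)) == 0xd);
    }
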
test/behavior/bugs/11995.zig
@@ -25,7 +25,7 @@ test {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var string: [5]u8 = "hello".*;
-    const arg_data = wuffs_base__slice_u8{ .ptr = @ptrCast([*c]u8, &string), .len = string.len };
+    const arg_data = wuffs_base__slice_u8{ .ptr = @as([*c]u8, @ptrCast(&string)), .len = string.len };
     var arg_meta = wuffs_base__io_buffer_meta{ .wi = 1, .ri = 2, .pos = 3, .closed = true };
     wuffs_base__make_io_buffer(arg_data, &arg_meta);
     try std.testing.expectEqualStrings("wello", arg_data.ptr[0..arg_data.len]);
test/behavior/bugs/12051.zig
@@ -30,8 +30,8 @@ const Y = struct {
         return .{
             .a = 0,
             .b = false,
-            .c = @bitCast(Z, @as(u32, 0)),
-            .d = @bitCast(Z, @as(u32, 0)),
+            .c = @as(Z, @bitCast(@as(u32, 0))),
+            .d = @as(Z, @bitCast(@as(u32, 0))),
         };
     }
 };
test/behavior/bugs/12119.zig
@@ -12,6 +12,6 @@ test {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const zerox32: u8x32 = [_]u8{0} ** 32;
-    const bigsum: u32x8 = @bitCast(u32x8, zerox32);
+    const bigsum: u32x8 = @as(u32x8, @bitCast(zerox32));
     try std.testing.expectEqual(0, @reduce(.Add, bigsum));
 }
test/behavior/bugs/12450.zig
@@ -16,7 +16,7 @@ test {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
-    var f1: *align(16) Foo = @alignCast(16, @ptrCast(*align(1) Foo, &buffer[0]));
+    var f1: *align(16) Foo = @alignCast(@as(*align(1) Foo, @ptrCast(&buffer[0])));
     try expect(@typeInfo(@TypeOf(f1)).Pointer.alignment == 16);
     try expect(@intFromPtr(f1) == @intFromPtr(&f1.a));
     try expect(@typeInfo(@TypeOf(&f1.a)).Pointer.alignment == 16);
test/behavior/bugs/12723.zig
@@ -3,6 +3,6 @@ const expect = @import("std").testing.expect;
 test "Non-exhaustive enum backed by comptime_int" {
     const E = enum(comptime_int) { a, b, c, _ };
     comptime var e: E = .a;
-    e = @enumFromInt(E, 378089457309184723749);
+    e = @as(E, @enumFromInt(378089457309184723749));
     try expect(@intFromEnum(e) == 378089457309184723749);
 }
test/behavior/bugs/13664.zig
@@ -21,7 +21,7 @@ test {
 
     const timestamp: i64 = value();
     const id = ID{ .fields = Fields{
-        .timestamp = @intCast(u50, timestamp),
+        .timestamp = @as(u50, @intCast(timestamp)),
         .random_bits = 420,
     } };
     try std.testing.expect((ID{ .value = id.value }).fields.timestamp == timestamp);
test/behavior/bugs/421.zig
@@ -16,6 +16,6 @@ fn testBitCastArray() !void {
 }
 
 fn extractOne64(a: u128) u64 {
-    const x = @bitCast([2]u64, a);
+    const x = @as([2]u64, @bitCast(a));
     return x[1];
 }
test/behavior/bugs/6781.zig
@@ -23,7 +23,7 @@ pub const JournalHeader = packed struct {
 
         var target: [32]u8 = undefined;
         std.crypto.hash.Blake3.hash(entry[checksum_offset + checksum_size ..], target[0..], .{});
-        return @bitCast(u128, target[0..checksum_size].*);
+        return @as(u128, @bitCast(target[0..checksum_size].*));
     }
 
     pub fn calculate_hash_chain_root(self: *const JournalHeader) u128 {
@@ -42,16 +42,16 @@ pub const JournalHeader = packed struct {
 
         assert(prev_hash_chain_root_offset + prev_hash_chain_root_size == checksum_offset);
 
-        const header = @bitCast([@sizeOf(JournalHeader)]u8, self.*);
+        const header = @as([@sizeOf(JournalHeader)]u8, @bitCast(self.*));
         const source = header[prev_hash_chain_root_offset .. checksum_offset + checksum_size];
         assert(source.len == prev_hash_chain_root_size + checksum_size);
         var target: [32]u8 = undefined;
         std.crypto.hash.Blake3.hash(source, target[0..], .{});
         if (segfault) {
-            return @bitCast(u128, target[0..hash_chain_root_size].*);
+            return @as(u128, @bitCast(target[0..hash_chain_root_size].*));
         } else {
             var array = target[0..hash_chain_root_size].*;
-            return @bitCast(u128, array);
+            return @as(u128, @bitCast(array));
         }
     }
 
test/behavior/bugs/718.zig
@@ -15,7 +15,7 @@ test "zero keys with @memset" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
-    @memset(@ptrCast([*]u8, &keys)[0..@sizeOf(@TypeOf(keys))], 0);
+    @memset(@as([*]u8, @ptrCast(&keys))[0..@sizeOf(@TypeOf(keys))], 0);
     try expect(!keys.up);
     try expect(!keys.down);
     try expect(!keys.left);
test/behavior/bugs/726.zig
@@ -8,7 +8,7 @@ test "@ptrCast from const to nullable" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const c: u8 = 4;
-    var x: ?*const u8 = @ptrCast(?*const u8, &c);
+    var x: ?*const u8 = @as(?*const u8, @ptrCast(&c));
     try expect(x.?.* == 4);
 }
 
@@ -21,6 +21,6 @@ test "@ptrCast from var in empty struct to nullable" {
     const container = struct {
         var c: u8 = 4;
     };
-    var x: ?*const u8 = @ptrCast(?*const u8, &container.c);
+    var x: ?*const u8 = @as(?*const u8, @ptrCast(&container.c));
     try expect(x.?.* == 4);
 }
test/behavior/align.zig
@@ -24,7 +24,7 @@ test "slicing array of length 1 can not assume runtime index is always zero" {
     const slice = @as(*align(4) [1]u8, &foo)[runtime_index..];
     try expect(@TypeOf(slice) == []u8);
     try expect(slice.len == 0);
-    try expect(@truncate(u2, @intFromPtr(slice.ptr) - 1) == 0);
+    try expect(@as(u2, @truncate(@intFromPtr(slice.ptr) - 1)) == 0);
 }
 
 test "default alignment allows unspecified in type syntax" {
@@ -47,7 +47,7 @@ test "@alignCast pointers" {
     try expect(x == 2);
 }
 fn expectsOnly1(x: *align(1) u32) void {
-    expects4(@alignCast(4, x));
+    expects4(@alignCast(x));
 }
 fn expects4(x: *align(4) u32) void {
     x.* += 1;
@@ -213,12 +213,6 @@ test "alignment and size of structs with 128-bit fields" {
     }
 }
 
-test "@ptrCast preserves alignment of bigger source" {
-    var x: u32 align(16) = 1234;
-    const ptr = @ptrCast(*u8, &x);
-    try expect(@TypeOf(ptr) == *align(16) u8);
-}
-
 test "alignstack" {
     try expect(fnWithAlignedStack() == 1234);
 }
@@ -249,7 +243,7 @@ test "specifying alignment allows pointer cast" {
 }
 fn testBytesAlign(b: u8) !void {
     var bytes align(4) = [_]u8{ b, b, b, b };
-    const ptr = @ptrCast(*u32, &bytes[0]);
+    const ptr = @as(*u32, @ptrCast(&bytes[0]));
     try expect(ptr.* == 0x33333333);
 }
 
@@ -265,7 +259,7 @@ test "@alignCast slices" {
     try expect(slice[0] == 2);
 }
 fn sliceExpectsOnly1(slice: []align(1) u32) void {
-    sliceExpects4(@alignCast(4, slice));
+    sliceExpects4(@alignCast(slice));
 }
 fn sliceExpects4(slice: []align(4) u32) void {
     slice[0] += 1;
@@ -302,8 +296,8 @@ test "page aligned array on stack" {
     try expect(@intFromPtr(&array[0]) & 0xFFF == 0);
     try expect(array[3] == 4);
 
-    try expect(@truncate(u4, @intFromPtr(&number1)) == 0);
-    try expect(@truncate(u4, @intFromPtr(&number2)) == 0);
+    try expect(@as(u4, @truncate(@intFromPtr(&number1))) == 0);
+    try expect(@as(u4, @truncate(@intFromPtr(&number2))) == 0);
     try expect(number1 == 42);
     try expect(number2 == 43);
 }
@@ -366,7 +360,7 @@ test "@alignCast functions" {
     try expect(fnExpectsOnly1(simple4) == 0x19);
 }
 fn fnExpectsOnly1(ptr: *const fn () align(1) i32) i32 {
-    return fnExpects4(@alignCast(4, ptr));
+    return fnExpects4(@alignCast(ptr));
 }
 fn fnExpects4(ptr: *const fn () align(4) i32) i32 {
     return ptr();
@@ -461,9 +455,11 @@ fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void {
 test "alignment of function with c calling convention" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
+    const a = @alignOf(@TypeOf(nothing));
+
     var runtime_nothing = &nothing;
-    const casted1 = @ptrCast(*const u8, runtime_nothing);
-    const casted2 = @ptrCast(*const fn () callconv(.C) void, casted1);
+    const casted1: *align(a) const u8 = @ptrCast(runtime_nothing);
+    const casted2: *const fn () callconv(.C) void = @ptrCast(casted1);
     casted2();
 }
 
@@ -588,7 +584,7 @@ test "@alignCast null" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var ptr: ?*anyopaque = null;
-    const aligned: ?*anyopaque = @alignCast(@alignOf(?*anyopaque), ptr);
+    const aligned: ?*anyopaque = @alignCast(ptr);
     try expect(aligned == null);
 }
 
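
As the hunks in this file show, `@alignCast` is the odd one out: its alignment argument disappears entirely rather than moving into `@as`, because the target alignment is now inferred from the result location. A minimal sketch of that inference, with hypothetical names:

    const std = @import("std");

    fn wantsAligned(p: *align(4) const u32) u32 {
        return p.*;
    }

    test "alignCast takes its alignment from the result location" {
        const x: u32 align(4) = 123;
        const loose: *align(1) const u32 = &x;
        // Old: wantsAligned(@alignCast(4, loose));
        // New: no alignment argument; the parameter type supplies align(4).
        try std.testing.expect(wantsAligned(@alignCast(loose)) == 123);
    }
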
test/behavior/array.zig
@@ -170,7 +170,7 @@ test "array with sentinels" {
             {
                 var zero_sized: [0:0xde]u8 = [_:0xde]u8{};
                 try expect(zero_sized[0] == 0xde);
-                var reinterpreted = @ptrCast(*[1]u8, &zero_sized);
+                var reinterpreted = @as(*[1]u8, @ptrCast(&zero_sized));
                 try expect(reinterpreted[0] == 0xde);
             }
             var arr: [3:0x55]u8 = undefined;
@@ -694,7 +694,7 @@ test "array init of container level array variable" {
 test "runtime initialized sentinel-terminated array literal" {
     var c: u16 = 300;
     const f = &[_:0x9999]u16{c};
-    const g = @ptrCast(*const [4]u8, f);
+    const g = @as(*const [4]u8, @ptrCast(f));
     try std.testing.expect(g[2] == 0x99);
     try std.testing.expect(g[3] == 0x99);
 }
test/behavior/async_fn.zig
@@ -136,12 +136,12 @@ test "@frameSize" {
     const S = struct {
         fn doTheTest() !void {
             {
-                var ptr = @ptrCast(fn (i32) callconv(.Async) void, other);
+                var ptr = @as(fn (i32) callconv(.Async) void, @ptrCast(other));
                 const size = @frameSize(ptr);
                 try expect(size == @sizeOf(@Frame(other)));
             }
             {
-                var ptr = @ptrCast(fn () callconv(.Async) void, first);
+                var ptr = @as(fn () callconv(.Async) void, @ptrCast(first));
                 const size = @frameSize(ptr);
                 try expect(size == @sizeOf(@Frame(first)));
             }
@@ -1184,7 +1184,7 @@ test "using @TypeOf on a generic function call" {
                 global_frame = @frame();
             }
             const F = @TypeOf(async amain(x - 1));
-            const frame = @ptrFromInt(*F, @intFromPtr(&buf));
+            const frame = @as(*F, @ptrFromInt(@intFromPtr(&buf)));
             return await @asyncCall(frame, {}, amain, .{x - 1});
         }
     };
@@ -1212,7 +1212,7 @@ test "recursive call of await @asyncCall with struct return type" {
                 global_frame = @frame();
             }
             const F = @TypeOf(async amain(x - 1));
-            const frame = @ptrFromInt(*F, @intFromPtr(&buf));
+            const frame = @as(*F, @ptrFromInt(@intFromPtr(&buf)));
             return await @asyncCall(frame, {}, amain, .{x - 1});
         }
 
@@ -1833,7 +1833,7 @@ test "avoid forcing frame alignment resolution implicit cast to *anyopaque" {
         }
     };
     var frame = async S.foo();
-    resume @ptrCast(anyframe->bool, @alignCast(@alignOf(@Frame(S.foo)), S.x));
+    resume @as(anyframe->bool, @ptrCast(@alignCast(S.x)));
     try expect(nosuspend await frame);
 }
 
test/behavior/atomics.zig
@@ -326,7 +326,7 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
     const uint = std.meta.Int(.unsigned, 128);
     const int = std.meta.Int(signedness, 128);
 
-    const initial: int = @bitCast(int, @as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd));
+    const initial: int = @as(int, @bitCast(@as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd)));
     const replacement: int = 0x00000000_00000005_00000000_00000003;
 
     var x: int align(16) = initial;
test/behavior/basic.zig
@@ -20,7 +20,7 @@ test "truncate" {
     try comptime expect(testTruncate(0x10fd) == 0xfd);
 }
 fn testTruncate(x: u32) u8 {
-    return @truncate(u8, x);
+    return @as(u8, @truncate(x));
 }
 
 test "truncate to non-power-of-two integers" {
@@ -56,7 +56,7 @@ test "truncate to non-power-of-two integers from 128-bit" {
 }
 
 fn testTrunc(comptime Big: type, comptime Little: type, big: Big, little: Little) !void {
-    try expect(@truncate(Little, big) == little);
+    try expect(@as(Little, @truncate(big)) == little);
 }
 
 const g1: i32 = 1233 + 1;
@@ -229,9 +229,9 @@ test "opaque types" {
 
 const global_a: i32 = 1234;
 const global_b: *const i32 = &global_a;
-const global_c: *const f32 = @ptrCast(*const f32, global_b);
+const global_c: *const f32 = @as(*const f32, @ptrCast(global_b));
 test "compile time global reinterpret" {
-    const d = @ptrCast(*const i32, global_c);
+    const d = @as(*const i32, @ptrCast(global_c));
     try expect(d.* == 1234);
 }
 
@@ -362,7 +362,7 @@ test "variable is allowed to be a pointer to an opaque type" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var x: i32 = 1234;
-    _ = hereIsAnOpaqueType(@ptrCast(*OpaqueA, &x));
+    _ = hereIsAnOpaqueType(@as(*OpaqueA, @ptrCast(&x)));
 }
 fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
     var a = ptr;
@@ -442,7 +442,7 @@ test "array 3D const double ptr with offset" {
 }
 
 fn testArray2DConstDoublePtr(ptr: *const f32) !void {
-    const ptr2 = @ptrCast([*]const f32, ptr);
+    const ptr2 = @as([*]const f32, @ptrCast(ptr));
     try expect(ptr2[0] == 1.0);
     try expect(ptr2[1] == 2.0);
 }
@@ -574,9 +574,9 @@ test "constant equal function pointers" {
 
 fn emptyFn() void {}
 
-const addr1 = @ptrCast(*const u8, &emptyFn);
+const addr1 = @as(*const u8, @ptrCast(&emptyFn));
 test "comptime cast fn to ptr" {
-    const addr2 = @ptrCast(*const u8, &emptyFn);
+    const addr2 = @as(*const u8, @ptrCast(&emptyFn));
     try comptime expect(addr1 == addr2);
 }
 
@@ -667,7 +667,7 @@ test "string escapes" {
 
 test "explicit cast optional pointers" {
     const a: ?*i32 = undefined;
-    const b: ?*f32 = @ptrCast(?*f32, a);
+    const b: ?*f32 = @as(?*f32, @ptrCast(a));
     _ = b;
 }
 
@@ -752,7 +752,7 @@ test "auto created variables have correct alignment" {
 
     const S = struct {
         fn foo(str: [*]const u8) u32 {
-            for (@ptrCast([*]align(1) const u32, str)[0..1]) |v| {
+            for (@as([*]align(1) const u32, @ptrCast(str))[0..1]) |v| {
                 return v;
             }
             return 0;
@@ -772,7 +772,7 @@ test "extern variable with non-pointer opaque type" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     @export(var_to_export, .{ .name = "opaque_extern_var" });
-    try expect(@ptrCast(*align(1) u32, &opaque_extern_var).* == 42);
+    try expect(@as(*align(1) u32, @ptrCast(&opaque_extern_var)).* == 42);
 }
 extern var opaque_extern_var: opaque {};
 var var_to_export: u32 = 42;
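
Most rewrites in this file wrap the cast in `@as`, but any result location works; the manual fix in align.zig above uses an annotated `const` instead, which avoids repeating a long type. An illustrative sketch of the two equivalent spellings, with made-up names:

    const std = @import("std");

    test "ptrCast accepts any result location, not just @as" {
        const x: u32 = 0x01020304;
        // Both forms give @ptrCast the same result type.
        const a = @as(*const u8, @ptrCast(&x));
        const b: *const u8 = @ptrCast(&x);
        try std.testing.expect(a == b);
    }
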
test/behavior/bit_shifting.zig
@@ -28,7 +28,7 @@ fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, compt
             // TODO: https://github.com/ziglang/zig/issues/1544
             // This cast could be implicit if we teach the compiler that
             // u32 >> 30 -> u2
-            return @intCast(ShardKey, shard_key);
+            return @as(ShardKey, @intCast(shard_key));
         }
 
         pub fn put(self: *Self, node: *Node) void {
@@ -85,14 +85,14 @@ fn testShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, c
     var table = Table.create();
     var node_buffer: [node_count]Table.Node = undefined;
     for (&node_buffer, 0..) |*node, i| {
-        const key = @intCast(Key, i);
+        const key = @as(Key, @intCast(i));
         try expect(table.get(key) == null);
         node.init(key, {});
         table.put(node);
     }
 
     for (&node_buffer, 0..) |*node, i| {
-        try expect(table.get(@intCast(Key, i)) == node);
+        try expect(table.get(@as(Key, @intCast(i))) == node);
     }
 }
 
test/behavior/bitcast.zig
@@ -71,11 +71,11 @@ fn testBitCast(comptime N: usize) !void {
 }
 
 fn conv_iN(comptime N: usize, x: std.meta.Int(.signed, N)) std.meta.Int(.unsigned, N) {
-    return @bitCast(std.meta.Int(.unsigned, N), x);
+    return @as(std.meta.Int(.unsigned, N), @bitCast(x));
 }
 
 fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signed, N) {
-    return @bitCast(std.meta.Int(.signed, N), x);
+    return @as(std.meta.Int(.signed, N), @bitCast(x));
 }
 
 test "bitcast uX to bytes" {
@@ -114,14 +114,14 @@ fn testBitCastuXToBytes(comptime N: usize) !void {
                 while (byte_i < (byte_count - 1)) : (byte_i += 1) {
                     try expect(bytes[byte_i] == 0xff);
                 }
-                try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0);
+                try expect(((bytes[byte_i] ^ 0xff) << -%@as(u3, @truncate(N))) == 0);
             },
             .Big => {
                 var byte_i = byte_count - 1;
                 while (byte_i > 0) : (byte_i -= 1) {
                     try expect(bytes[byte_i] == 0xff);
                 }
-                try expect(((bytes[byte_i] ^ 0xff) << -%@truncate(u3, N)) == 0);
+                try expect(((bytes[byte_i] ^ 0xff) << -%@as(u3, @truncate(N))) == 0);
             },
         }
     }
@@ -130,12 +130,12 @@ fn testBitCastuXToBytes(comptime N: usize) !void {
 test "nested bitcast" {
     const S = struct {
         fn moo(x: isize) !void {
-            try expect(@intCast(isize, 42) == x);
+            try expect(@as(isize, @intCast(42)) == x);
         }
 
         fn foo(x: isize) !void {
             try @This().moo(
-                @bitCast(isize, if (x != 0) @bitCast(usize, x) else @bitCast(usize, x)),
+                @as(isize, @bitCast(if (x != 0) @as(usize, @bitCast(x)) else @as(usize, @bitCast(x)))),
             );
         }
     };
@@ -146,7 +146,7 @@ test "nested bitcast" {
 
 // issue #3010: compiler segfault
 test "bitcast literal [4]u8 param to u32" {
-    const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
+    const ip = @as(u32, @bitCast([_]u8{ 255, 255, 255, 255 }));
     try expect(ip == maxInt(u32));
 }
 
@@ -154,7 +154,7 @@ test "bitcast generates a temporary value" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var y = @as(u16, 0x55AA);
-    const x = @bitCast(u16, @bitCast([2]u8, y));
+    const x = @as(u16, @bitCast(@as([2]u8, @bitCast(y))));
     try expect(y == x);
 }
 
@@ -175,7 +175,7 @@ test "@bitCast packed structs at runtime and comptime" {
     const S = struct {
         fn doTheTest() !void {
             var full = Full{ .number = 0x1234 };
-            var two_halves = @bitCast(Divided, full);
+            var two_halves = @as(Divided, @bitCast(full));
             try expect(two_halves.half1 == 0x34);
             try expect(two_halves.quarter3 == 0x2);
             try expect(two_halves.quarter4 == 0x1);
@@ -200,7 +200,7 @@ test "@bitCast extern structs at runtime and comptime" {
     const S = struct {
         fn doTheTest() !void {
             var full = Full{ .number = 0x1234 };
-            var two_halves = @bitCast(TwoHalves, full);
+            var two_halves = @as(TwoHalves, @bitCast(full));
             switch (native_endian) {
                 .Big => {
                     try expect(two_halves.half1 == 0x12);
@@ -230,8 +230,8 @@ test "bitcast packed struct to integer and back" {
     const S = struct {
         fn doTheTest() !void {
             var move = LevelUpMove{ .move_id = 1, .level = 2 };
-            var v = @bitCast(u16, move);
-            var back_to_a_move = @bitCast(LevelUpMove, v);
+            var v = @as(u16, @bitCast(move));
+            var back_to_a_move = @as(LevelUpMove, @bitCast(v));
             try expect(back_to_a_move.move_id == 1);
             try expect(back_to_a_move.level == 2);
         }
@@ -250,7 +250,7 @@ test "implicit cast to error union by returning" {
             try expect((func(-1) catch unreachable) == maxInt(u64));
         }
         pub fn func(sz: i64) anyerror!u64 {
-            return @bitCast(u64, sz);
+            return @as(u64, @bitCast(sz));
         }
     };
     try S.entry();
@@ -261,7 +261,7 @@ test "bitcast packed struct literal to byte" {
     const Foo = packed struct {
         value: u8,
     };
-    const casted = @bitCast(u8, Foo{ .value = 0xF });
+    const casted = @as(u8, @bitCast(Foo{ .value = 0xF }));
     try expect(casted == 0xf);
 }
 
@@ -269,7 +269,7 @@ test "comptime bitcast used in expression has the correct type" {
     const Foo = packed struct {
         value: u8,
     };
-    try expect(@bitCast(u8, Foo{ .value = 0xF }) == 0xf);
+    try expect(@as(u8, @bitCast(Foo{ .value = 0xF })) == 0xf);
 }
 
 test "bitcast passed as tuple element" {
@@ -279,7 +279,7 @@ test "bitcast passed as tuple element" {
             try expect(args[0] == 12.34);
         }
     };
-    try S.foo(.{@bitCast(f32, @as(u32, 0x414570A4))});
+    try S.foo(.{@as(f32, @bitCast(@as(u32, 0x414570A4)))});
 }
 
 test "triple level result location with bitcast sandwich passed as tuple element" {
@@ -289,7 +289,7 @@ test "triple level result location with bitcast sandwich passed as tuple element
             try expect(args[0] > 12.33 and args[0] < 12.35);
         }
     };
-    try S.foo(.{@as(f64, @bitCast(f32, @as(u32, 0x414570A4)))});
+    try S.foo(.{@as(f64, @as(f32, @bitCast(@as(u32, 0x414570A4))))});
 }
 
 test "@bitCast packed struct of floats" {
@@ -318,7 +318,7 @@ test "@bitCast packed struct of floats" {
     const S = struct {
         fn doTheTest() !void {
             var foo = Foo{};
-            var v = @bitCast(Foo2, foo);
+            var v = @as(Foo2, @bitCast(foo));
             try expect(v.a == foo.a);
             try expect(v.b == foo.b);
             try expect(v.c == foo.c);
@@ -360,12 +360,12 @@ test "comptime @bitCast packed struct to int and back" {
 
     // S -> Int
     var s: S = .{};
-    try expectEqual(@bitCast(Int, s), comptime @bitCast(Int, S{}));
+    try expectEqual(@as(Int, @bitCast(s)), comptime @as(Int, @bitCast(S{})));
 
     // Int -> S
     var i: Int = 0;
-    const rt_cast = @bitCast(S, i);
-    const ct_cast = comptime @bitCast(S, @as(Int, 0));
+    const rt_cast = @as(S, @bitCast(i));
+    const ct_cast = comptime @as(S, @bitCast(@as(Int, 0)));
     inline for (@typeInfo(S).Struct.fields) |field| {
         try expectEqual(@field(rt_cast, field.name), @field(ct_cast, field.name));
     }
@@ -381,10 +381,10 @@ test "comptime bitcast with fields following f80" {
 
     const FloatT = extern struct { f: f80, x: u128 align(16) };
     const x: FloatT = .{ .f = 0.5, .x = 123 };
-    var x_as_uint: u256 = comptime @bitCast(u256, x);
+    var x_as_uint: u256 = comptime @as(u256, @bitCast(x));
 
-    try expect(x.f == @bitCast(FloatT, x_as_uint).f);
-    try expect(x.x == @bitCast(FloatT, x_as_uint).x);
+    try expect(x.f == @as(FloatT, @bitCast(x_as_uint)).f);
+    try expect(x.x == @as(FloatT, @bitCast(x_as_uint)).x);
 }
 
 test "bitcast vector to integer and back" {
@@ -398,20 +398,20 @@ test "bitcast vector to integer and back" {
     const arr: [16]bool = [_]bool{ true, false } ++ [_]bool{true} ** 14;
     var x = @splat(16, true);
     x[1] = false;
-    try expect(@bitCast(u16, x) == comptime @bitCast(u16, @as(@Vector(16, bool), arr)));
+    try expect(@as(u16, @bitCast(x)) == comptime @as(u16, @bitCast(@as(@Vector(16, bool), arr))));
 }
 
 fn bitCastWrapper16(x: f16) u16 {
-    return @bitCast(u16, x);
+    return @as(u16, @bitCast(x));
 }
 fn bitCastWrapper32(x: f32) u32 {
-    return @bitCast(u32, x);
+    return @as(u32, @bitCast(x));
 }
 fn bitCastWrapper64(x: f64) u64 {
-    return @bitCast(u64, x);
+    return @as(u64, @bitCast(x));
 }
 fn bitCastWrapper128(x: f128) u128 {
-    return @bitCast(u128, x);
+    return @as(u128, @bitCast(x));
 }
 test "bitcast nan float does modify signaling bit" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -425,37 +425,37 @@ test "bitcast nan float does modify signaling bit" {
 
     // 16 bit
     const snan_f16_const = math.nan_f16;
-    try expectEqual(math.nan_u16, @bitCast(u16, snan_f16_const));
+    try expectEqual(math.nan_u16, @as(u16, @bitCast(snan_f16_const)));
     try expectEqual(math.nan_u16, bitCastWrapper16(snan_f16_const));
 
     var snan_f16_var = math.nan_f16;
-    try expectEqual(math.nan_u16, @bitCast(u16, snan_f16_var));
+    try expectEqual(math.nan_u16, @as(u16, @bitCast(snan_f16_var)));
     try expectEqual(math.nan_u16, bitCastWrapper16(snan_f16_var));
 
     // 32 bit
     const snan_f32_const = math.nan_f32;
-    try expectEqual(math.nan_u32, @bitCast(u32, snan_f32_const));
+    try expectEqual(math.nan_u32, @as(u32, @bitCast(snan_f32_const)));
     try expectEqual(math.nan_u32, bitCastWrapper32(snan_f32_const));
 
     var snan_f32_var = math.nan_f32;
-    try expectEqual(math.nan_u32, @bitCast(u32, snan_f32_var));
+    try expectEqual(math.nan_u32, @as(u32, @bitCast(snan_f32_var)));
     try expectEqual(math.nan_u32, bitCastWrapper32(snan_f32_var));
 
     // 64 bit
     const snan_f64_const = math.nan_f64;
-    try expectEqual(math.nan_u64, @bitCast(u64, snan_f64_const));
+    try expectEqual(math.nan_u64, @as(u64, @bitCast(snan_f64_const)));
     try expectEqual(math.nan_u64, bitCastWrapper64(snan_f64_const));
 
     var snan_f64_var = math.nan_f64;
-    try expectEqual(math.nan_u64, @bitCast(u64, snan_f64_var));
+    try expectEqual(math.nan_u64, @as(u64, @bitCast(snan_f64_var)));
     try expectEqual(math.nan_u64, bitCastWrapper64(snan_f64_var));
 
     // 128 bit
     const snan_f128_const = math.nan_f128;
-    try expectEqual(math.nan_u128, @bitCast(u128, snan_f128_const));
+    try expectEqual(math.nan_u128, @as(u128, @bitCast(snan_f128_const)));
     try expectEqual(math.nan_u128, bitCastWrapper128(snan_f128_const));
 
     var snan_f128_var = math.nan_f128;
-    try expectEqual(math.nan_u128, @bitCast(u128, snan_f128_var));
+    try expectEqual(math.nan_u128, @as(u128, @bitCast(snan_f128_var)));
     try expectEqual(math.nan_u128, bitCastWrapper128(snan_f128_var));
 }
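
The "bitcast generates a temporary value" hunk above shows how the new syntax composes: each `@bitCast` names only its operand, and an `@as` supplies each result type, so casts nest inside-out without repeating operands. A short sketch of that round trip, mirroring the test above:

    const std = @import("std");

    test "nested casts read inside-out" {
        const y: u16 = 0x55AA;
        // Old: @bitCast(u16, @bitCast([2]u8, y)).
        const x = @as(u16, @bitCast(@as([2]u8, @bitCast(y))));
        try std.testing.expect(x == y);
    }
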
test/behavior/bitreverse.zig
@@ -62,20 +62,20 @@ fn testBitReverse() !void {
 
     // using comptime_ints, signed, positive
     try expect(@bitReverse(@as(u8, 0)) == 0);
-    try expect(@bitReverse(@bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49)));
-    try expect(@bitReverse(@bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48)));
-    try expect(@bitReverse(@bitCast(i24, @as(u24, 0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48)));
-    try expect(@bitReverse(@bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48)));
-    try expect(@bitReverse(@bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, @as(u24, 0x6a2c4f)));
-    try expect(@bitReverse(@bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48)));
-    try expect(@bitReverse(@bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f)));
-    try expect(@bitReverse(@bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48)));
-    try expect(@bitReverse(@bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48)));
-    try expect(@bitReverse(@bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48)));
-    try expect(@bitReverse(@bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48)));
-    try expect(@bitReverse(@bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48)));
-    try expect(@bitReverse(@bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48)));
-    try expect(@bitReverse(@bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48)));
+    try expect(@bitReverse(@as(i8, @bitCast(@as(u8, 0x92)))) == @as(i8, @bitCast(@as(u8, 0x49))));
+    try expect(@bitReverse(@as(i16, @bitCast(@as(u16, 0x1234)))) == @as(i16, @bitCast(@as(u16, 0x2c48))));
+    try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0x123456)))) == @as(i24, @bitCast(@as(u24, 0x6a2c48))));
+    try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0x12345f)))) == @as(i24, @bitCast(@as(u24, 0xfa2c48))));
+    try expect(@bitReverse(@as(i24, @bitCast(@as(u24, 0xf23456)))) == @as(i24, @bitCast(@as(u24, 0x6a2c4f))));
+    try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0x12345678)))) == @as(i32, @bitCast(@as(u32, 0x1e6a2c48))));
+    try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0xf2345678)))) == @as(i32, @bitCast(@as(u32, 0x1e6a2c4f))));
+    try expect(@bitReverse(@as(i32, @bitCast(@as(u32, 0x1234567f)))) == @as(i32, @bitCast(@as(u32, 0xfe6a2c48))));
+    try expect(@bitReverse(@as(i40, @bitCast(@as(u40, 0x123456789a)))) == @as(i40, @bitCast(@as(u40, 0x591e6a2c48))));
+    try expect(@bitReverse(@as(i48, @bitCast(@as(u48, 0x123456789abc)))) == @as(i48, @bitCast(@as(u48, 0x3d591e6a2c48))));
+    try expect(@bitReverse(@as(i56, @bitCast(@as(u56, 0x123456789abcde)))) == @as(i56, @bitCast(@as(u56, 0x7b3d591e6a2c48))));
+    try expect(@bitReverse(@as(i64, @bitCast(@as(u64, 0x123456789abcdef1)))) == @as(i64, @bitCast(@as(u64, 0x8f7b3d591e6a2c48))));
+    try expect(@bitReverse(@as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141)))) == @as(i96, @bitCast(@as(u96, 0x828c84888f7b3d591e6a2c48))));
+    try expect(@bitReverse(@as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181)))) == @as(i128, @bitCast(@as(u128, 0x818e868a828c84888f7b3d591e6a2c48))));
 
     // using signed, negative. Compare to runtime ints returned from llvm.
     var neg8: i8 = -18;
test/behavior/bool.zig
@@ -15,8 +15,8 @@ test "cast bool to int" {
     const f = false;
     try expectEqual(@as(u32, 1), @intFromBool(t));
     try expectEqual(@as(u32, 0), @intFromBool(f));
-    try expectEqual(-1, @bitCast(i1, @intFromBool(t)));
-    try expectEqual(0, @bitCast(i1, @intFromBool(f)));
+    try expectEqual(-1, @as(i1, @bitCast(@intFromBool(t))));
+    try expectEqual(0, @as(i1, @bitCast(@intFromBool(f))));
     try expectEqual(u1, @TypeOf(@intFromBool(t)));
     try expectEqual(u1, @TypeOf(@intFromBool(f)));
     try nonConstCastIntFromBool(t, f);
@@ -25,8 +25,8 @@ test "cast bool to int" {
 fn nonConstCastIntFromBool(t: bool, f: bool) !void {
     try expectEqual(@as(u32, 1), @intFromBool(t));
     try expectEqual(@as(u32, 0), @intFromBool(f));
-    try expectEqual(@as(i1, -1), @bitCast(i1, @intFromBool(t)));
-    try expectEqual(@as(i1, 0), @bitCast(i1, @intFromBool(f)));
+    try expectEqual(@as(i1, -1), @as(i1, @bitCast(@intFromBool(t))));
+    try expectEqual(@as(i1, 0), @as(i1, @bitCast(@intFromBool(f))));
     try expectEqual(u1, @TypeOf(@intFromBool(t)));
     try expectEqual(u1, @TypeOf(@intFromBool(f)));
 }
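
As a usage note, `@intFromBool` always returns a `u1`, so these tests pin down the signed reading with `@bitCast`. A minimal sketch of the same idea:

const std = @import("std");

test "u1 from @intFromBool read as i1" {
    const one = @intFromBool(true);
    try std.testing.expect(@TypeOf(one) == u1);
    // The single set bit of a 1-bit signed integer is its sign bit, i.e. -1.
    try std.testing.expect(@as(i1, @bitCast(one)) == -1);
}
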
test/behavior/builtin_functions_returning_void_or_noreturn.zig
@@ -17,8 +17,8 @@ test {
     try testing.expectEqual(void, @TypeOf(@breakpoint()));
     try testing.expectEqual({}, @export(x, .{ .name = "x" }));
     try testing.expectEqual({}, @fence(.Acquire));
-    try testing.expectEqual({}, @memcpy(@ptrFromInt([*]u8, 1)[0..0], @ptrFromInt([*]u8, 1)[0..0]));
-    try testing.expectEqual({}, @memset(@ptrFromInt([*]u8, 1)[0..0], undefined));
+    try testing.expectEqual({}, @memcpy(@as([*]u8, @ptrFromInt(1))[0..0], @as([*]u8, @ptrFromInt(1))[0..0]));
+    try testing.expectEqual({}, @memset(@as([*]u8, @ptrFromInt(1))[0..0], undefined));
     try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
     try testing.expectEqual({}, @prefetch(&val, .{}));
     try testing.expectEqual({}, @setAlignStack(16));
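
`@ptrFromInt` follows the same shape. A sketch assuming a hypothetical address 0x1000, which is never dereferenced because the slice has length zero:

test "zero-length slice from a raw address is safe to @memset" {
    const dest = @as([*]u8, @ptrFromInt(0x1000))[0..0];
    @memset(dest, undefined); // writes nothing; the pointer is never touched
}
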
test/behavior/byteswap.zig
@@ -16,13 +16,13 @@ test "@byteSwap integers" {
             try t(u8, 0x12, 0x12);
             try t(u16, 0x1234, 0x3412);
             try t(u24, 0x123456, 0x563412);
-            try t(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2);
-            try t(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412)));
+            try t(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2);
+            try t(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412))));
             try t(u32, 0x12345678, 0x78563412);
-            try t(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2);
-            try t(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412)));
+            try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
+            try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
             try t(u40, 0x123456789a, 0x9a78563412);
-            try t(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412)));
+            try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
             try t(u56, 0x123456789abcde, 0xdebc9a78563412);
             try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412);
             try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412);
@@ -31,19 +31,19 @@ test "@byteSwap integers" {
 
             try t(u0, @as(u0, 0), 0);
             try t(i8, @as(i8, -50), -50);
-            try t(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412)));
-            try t(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412)));
-            try t(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412)));
-            try t(u40, @bitCast(i40, @as(u40, 0x123456789a)), @as(u40, 0x9a78563412));
-            try t(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412)));
-            try t(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412)));
-            try t(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412)));
-            try t(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412)));
-            try t(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412)));
+            try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
+            try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
+            try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
+            try t(u40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(u40, 0x9a78563412));
+            try t(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
+            try t(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412))));
+            try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
+            try t(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412))));
+            try t(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412))));
             try t(
                 i128,
-                @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)),
-                @bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412)),
+                @as(i128, @bitCast(@as(u128, 0x123456789abcdef11121314151617181))),
+                @as(i128, @bitCast(@as(u128, 0x8171615141312111f1debc9a78563412))),
             );
         }
         fn t(comptime I: type, input: I, expected_output: I) !void {
test/behavior/call.zig
@@ -368,7 +368,7 @@ test "Enum constructed by @Type passed as generic argument" {
         }
     };
     inline for (@typeInfo(S.E).Enum.fields, 0..) |_, i| {
-        try S.foo(@enumFromInt(S.E, i), i);
+        try S.foo(@as(S.E, @enumFromInt(i)), i);
     }
 }
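
`@enumFromInt` likewise drops its type parameter; the enum type now comes from the result location. A standalone sketch:

const std = @import("std");

test "@enumFromInt with the enum type inferred" {
    const E = enum { a, b, c };
    const e: E = @enumFromInt(2); // was: @enumFromInt(E, 2)
    try std.testing.expect(e == .c);
}
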
 
test/behavior/cast.zig
@@ -10,13 +10,13 @@ const native_endian = builtin.target.cpu.arch.endian();
 
 test "int to ptr cast" {
     const x = @as(usize, 13);
-    const y = @ptrFromInt(*u8, x);
+    const y = @as(*u8, @ptrFromInt(x));
     const z = @intFromPtr(y);
     try expect(z == 13);
 }
 
 test "integer literal to pointer cast" {
-    const vga_mem = @ptrFromInt(*u16, 0xB8000);
+    const vga_mem = @as(*u16, @ptrFromInt(0xB8000));
     try expect(@intFromPtr(vga_mem) == 0xB8000);
 }
 
@@ -52,7 +52,7 @@ fn testResolveUndefWithInt(b: bool, x: i32) !void {
 }
 
 test "@intCast to comptime_int" {
-    try expect(@intCast(comptime_int, 0) == 0);
+    try expect(@as(comptime_int, @intCast(0)) == 0);
 }
 
 test "implicit cast comptime numbers to any type when the value fits" {
@@ -68,29 +68,29 @@ test "implicit cast comptime_int to comptime_float" {
 
 test "comptime_int @floatFromInt" {
     {
-        const result = @floatFromInt(f16, 1234);
+        const result = @as(f16, @floatFromInt(1234));
         try expect(@TypeOf(result) == f16);
         try expect(result == 1234.0);
     }
     {
-        const result = @floatFromInt(f32, 1234);
+        const result = @as(f32, @floatFromInt(1234));
         try expect(@TypeOf(result) == f32);
         try expect(result == 1234.0);
     }
     {
-        const result = @floatFromInt(f64, 1234);
+        const result = @as(f64, @floatFromInt(1234));
         try expect(@TypeOf(result) == f64);
         try expect(result == 1234.0);
     }
 
     {
-        const result = @floatFromInt(f128, 1234);
+        const result = @as(f128, @floatFromInt(1234));
         try expect(@TypeOf(result) == f128);
         try expect(result == 1234.0);
     }
     // big comptime_int (> 64 bits) to f128 conversion
     {
-        const result = @floatFromInt(f128, 0x1_0000_0000_0000_0000);
+        const result = @as(f128, @floatFromInt(0x1_0000_0000_0000_0000));
         try expect(@TypeOf(result) == f128);
         try expect(result == 0x1_0000_0000_0000_0000.0);
     }
@@ -107,8 +107,8 @@ test "@floatFromInt" {
         }
 
         fn testIntToFloat(k: i32) !void {
-            const f = @floatFromInt(f32, k);
-            const i = @intFromFloat(i32, f);
+            const f = @as(f32, @floatFromInt(k));
+            const i = @as(i32, @intFromFloat(f));
             try expect(i == k);
         }
     };
@@ -131,8 +131,8 @@ test "@floatFromInt(f80)" {
 
         fn testIntToFloat(comptime Int: type, k: Int) !void {
             @setRuntimeSafety(false); // TODO
-            const f = @floatFromInt(f80, k);
-            const i = @intFromFloat(Int, f);
+            const f = @as(f80, @floatFromInt(k));
+            const i = @as(Int, @intFromFloat(f));
             try expect(i == k);
         }
     };
@@ -165,7 +165,7 @@ test "@intFromFloat" {
 fn testIntFromFloats() !void {
     const x = @as(i32, 1e4);
     try expect(x == 10000);
-    const y = @intFromFloat(i32, @as(f32, 1e4));
+    const y = @as(i32, @intFromFloat(@as(f32, 1e4)));
     try expect(y == 10000);
     try expectIntFromFloat(f32, 255.1, u8, 255);
     try expectIntFromFloat(f32, 127.2, i8, 127);
@@ -173,7 +173,7 @@ fn testIntFromFloats() !void {
 }
 
 fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void {
-    try expect(@intFromFloat(I, f) == i);
+    try expect(@as(I, @intFromFloat(f)) == i);
 }
 
 test "implicitly cast indirect pointer to maybe-indirect pointer" {
@@ -208,29 +208,29 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" {
 }
 
 test "@intCast comptime_int" {
-    const result = @intCast(i32, 1234);
+    const result = @as(i32, @intCast(1234));
     try expect(@TypeOf(result) == i32);
     try expect(result == 1234);
 }
 
 test "@floatCast comptime_int and comptime_float" {
     {
-        const result = @floatCast(f16, 1234);
+        const result = @as(f16, @floatCast(1234));
         try expect(@TypeOf(result) == f16);
         try expect(result == 1234.0);
     }
     {
-        const result = @floatCast(f16, 1234.0);
+        const result = @as(f16, @floatCast(1234.0));
         try expect(@TypeOf(result) == f16);
         try expect(result == 1234.0);
     }
     {
-        const result = @floatCast(f32, 1234);
+        const result = @as(f32, @floatCast(1234));
         try expect(@TypeOf(result) == f32);
         try expect(result == 1234.0);
     }
     {
-        const result = @floatCast(f32, 1234.0);
+        const result = @as(f32, @floatCast(1234.0));
         try expect(@TypeOf(result) == f32);
         try expect(result == 1234.0);
     }
@@ -276,21 +276,21 @@ test "*usize to *void" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var i = @as(usize, 0);
-    var v = @ptrCast(*void, &i);
+    var v = @as(*void, @ptrCast(&i));
     v.* = {};
 }
 
 test "@enumFromInt passed a comptime_int to an enum with one item" {
     const E = enum { A };
-    const x = @enumFromInt(E, 0);
+    const x = @as(E, @enumFromInt(0));
     try expect(x == E.A);
 }
 
 test "@intCast to u0 and use the result" {
     const S = struct {
         fn doTheTest(zero: u1, one: u1, bigzero: i32) !void {
-            try expect((one << @intCast(u0, bigzero)) == 1);
-            try expect((zero << @intCast(u0, bigzero)) == 0);
+            try expect((one << @as(u0, @intCast(bigzero))) == 1);
+            try expect((zero << @as(u0, @intCast(bigzero))) == 0);
         }
     };
     try S.doTheTest(0, 1, 0);
@@ -605,7 +605,7 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
 
     const window_name = [1][*]const u8{"window name"};
     const x: [*]const ?[*]const u8 = &window_name;
-    try expect(mem.eql(u8, std.mem.sliceTo(@ptrCast([*:0]const u8, x[0].?), 0), "window name"));
+    try expect(mem.eql(u8, std.mem.sliceTo(@as([*:0]const u8, @ptrCast(x[0].?)), 0), "window name"));
 }
 
 test "vector casts" {
@@ -625,9 +625,9 @@ test "vector casts" {
             var up3 = @as(@Vector(2, u64), up0);
             // Downcast (safety-checked)
             var down0 = up3;
-            var down1 = @intCast(@Vector(2, u32), down0);
-            var down2 = @intCast(@Vector(2, u16), down0);
-            var down3 = @intCast(@Vector(2, u8), down0);
+            var down1 = @as(@Vector(2, u32), @intCast(down0));
+            var down2 = @as(@Vector(2, u16), @intCast(down0));
+            var down3 = @as(@Vector(2, u8), @intCast(down0));
 
             try expect(mem.eql(u16, &@as([2]u16, up1), &[2]u16{ 0x55, 0xaa }));
             try expect(mem.eql(u32, &@as([2]u32, up2), &[2]u32{ 0x55, 0xaa }));
@@ -660,12 +660,12 @@ test "@floatCast cast down" {
 
     {
         var double: f64 = 0.001534;
-        var single = @floatCast(f32, double);
+        var single = @as(f32, @floatCast(double));
         try expect(single == 0.001534);
     }
     {
         const double: f64 = 0.001534;
-        const single = @floatCast(f32, double);
+        const single = @as(f32, @floatCast(double));
         try expect(single == 0.001534);
     }
 }
@@ -1041,7 +1041,7 @@ test "cast between C pointer with different but compatible types" {
         }
         fn doTheTest() !void {
             var x = [_]u16{ 4, 2, 1, 3 };
-            try expect(foo(@ptrCast([*]u16, &x)) == 4);
+            try expect(foo(@as([*]u16, @ptrCast(&x))) == 4);
         }
     };
     try S.doTheTest();
@@ -1093,10 +1093,10 @@ test "peer type resolve array pointer and unknown pointer" {
 test "comptime float casts" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
-    const a = @floatFromInt(comptime_float, 1);
+    const a = @as(comptime_float, @floatFromInt(1));
     try expect(a == 1);
     try expect(@TypeOf(a) == comptime_float);
-    const b = @intFromFloat(comptime_int, 2);
+    const b = @as(comptime_int, @intFromFloat(2));
     try expect(b == 2);
     try expect(@TypeOf(b) == comptime_int);
 
@@ -1111,7 +1111,7 @@ test "pointer reinterpret const float to int" {
     // The hex representation is 0x3fe3333333333303.
     const float: f64 = 5.99999999999994648725e-01;
     const float_ptr = &float;
-    const int_ptr = @ptrCast(*const i32, float_ptr);
+    const int_ptr = @as(*const i32, @ptrCast(float_ptr));
     const int_val = int_ptr.*;
     if (native_endian == .Little)
         try expect(int_val == 0x33333303)
@@ -1134,7 +1134,7 @@ test "implicit cast from [*]T to ?*anyopaque" {
 fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
     var n: usize = 0;
     while (n < len) : (n += 1) {
-        @ptrCast([*]u8, array.?)[n] += 1;
+        @as([*]u8, @ptrCast(array.?))[n] += 1;
     }
 }
 
@@ -1146,7 +1146,7 @@ test "compile time int to ptr of function" {
 
 // On some architectures function pointers must be aligned.
 const hardcoded_fn_addr = maxInt(usize) & ~@as(usize, 0xf);
-pub const FUNCTION_CONSTANT = @ptrFromInt(PFN_void, hardcoded_fn_addr);
+pub const FUNCTION_CONSTANT = @as(PFN_void, @ptrFromInt(hardcoded_fn_addr));
 pub const PFN_void = *const fn (*anyopaque) callconv(.C) void;
 
 fn foobar(func: PFN_void) !void {
@@ -1161,10 +1161,10 @@ test "implicit ptr to *anyopaque" {
 
     var a: u32 = 1;
     var ptr: *align(@alignOf(u32)) anyopaque = &a;
-    var b: *u32 = @ptrCast(*u32, ptr);
+    var b: *u32 = @as(*u32, @ptrCast(ptr));
     try expect(b.* == 1);
     var ptr2: ?*align(@alignOf(u32)) anyopaque = &a;
-    var c: *u32 = @ptrCast(*u32, ptr2.?);
+    var c: *u32 = @as(*u32, @ptrCast(ptr2.?));
     try expect(c.* == 1);
 }
 
@@ -1235,11 +1235,11 @@ fn testCast128() !void {
 }
 
 fn cast128Int(x: f128) u128 {
-    return @bitCast(u128, x);
+    return @as(u128, @bitCast(x));
 }
 
 fn cast128Float(x: u128) f128 {
-    return @bitCast(f128, x);
+    return @as(f128, @bitCast(x));
 }
 
 test "implicit cast from *[N]T to ?[*]T" {
@@ -1270,7 +1270,7 @@ test "implicit cast from *T to ?*anyopaque" {
 }
 
 fn incrementVoidPtrValue(value: ?*anyopaque) void {
-    @ptrCast(*u8, value.?).* += 1;
+    @as(*u8, @ptrCast(value.?)).* += 1;
 }
 
 test "implicit cast *[0]T to E![]const u8" {
@@ -1284,11 +1284,11 @@ test "implicit cast *[0]T to E![]const u8" {
 
 var global_array: [4]u8 = undefined;
 test "cast from array reference to fn: comptime fn ptr" {
-    const f = @ptrCast(*align(1) const fn () callconv(.C) void, &global_array);
+    const f = @as(*align(1) const fn () callconv(.C) void, @ptrCast(&global_array));
     try expect(@intFromPtr(f) == @intFromPtr(&global_array));
 }
 test "cast from array reference to fn: runtime fn ptr" {
-    var f = @ptrCast(*align(1) const fn () callconv(.C) void, &global_array);
+    var f = @as(*align(1) const fn () callconv(.C) void, @ptrCast(&global_array));
     try expect(@intFromPtr(f) == @intFromPtr(&global_array));
 }
 
@@ -1337,7 +1337,7 @@ test "assignment to optional pointer result loc" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var foo: struct { ptr: ?*anyopaque } = .{ .ptr = &global_struct };
-    try expect(foo.ptr.? == @ptrCast(*anyopaque, &global_struct));
+    try expect(foo.ptr.? == @as(*anyopaque, @ptrCast(&global_struct)));
 }
 
 test "cast between *[N]void and []void" {
@@ -1393,9 +1393,9 @@ test "cast f128 to narrower types" {
     const S = struct {
         fn doTheTest() !void {
             var x: f128 = 1234.0;
-            try expect(@as(f16, 1234.0) == @floatCast(f16, x));
-            try expect(@as(f32, 1234.0) == @floatCast(f32, x));
-            try expect(@as(f64, 1234.0) == @floatCast(f64, x));
+            try expect(@as(f16, 1234.0) == @as(f16, @floatCast(x)));
+            try expect(@as(f32, 1234.0) == @as(f32, @floatCast(x)));
+            try expect(@as(f64, 1234.0) == @as(f64, @floatCast(x)));
         }
     };
     try S.doTheTest();
@@ -1500,8 +1500,8 @@ test "coerce between pointers of compatible differently-named floats" {
 }
 
 test "peer type resolution of const and non-const pointer to array" {
-    const a = @ptrFromInt(*[1024]u8, 42);
-    const b = @ptrFromInt(*const [1024]u8, 42);
+    const a = @as(*[1024]u8, @ptrFromInt(42));
+    const b = @as(*const [1024]u8, @ptrFromInt(42));
     try std.testing.expect(@TypeOf(a, b) == *const [1024]u8);
     try std.testing.expect(a == b);
 }
@@ -1512,7 +1512,7 @@ test "intFromFloat to zero-bit int" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     const a: f32 = 0.0;
-    try comptime std.testing.expect(@intFromFloat(u0, a) == 0);
+    try comptime std.testing.expect(@as(u0, @intFromFloat(a)) == 0);
 }
 
 test "peer type resolution of function pointer and function body" {
@@ -1547,10 +1547,10 @@ test "bitcast packed struct with u0" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
 
     const S = packed struct(u2) { a: u0, b: u2 };
-    const s = @bitCast(S, @as(u2, 2));
+    const s = @as(S, @bitCast(@as(u2, 2)));
     try expect(s.a == 0);
     try expect(s.b == 2);
-    const i = @bitCast(u2, s);
+    const i = @as(u2, @bitCast(s));
     try expect(i == 2);
 }
 
@@ -1560,7 +1560,7 @@ test "optional pointer coerced to optional allowzero pointer" {
 
     var p: ?*u32 = undefined;
     var q: ?*allowzero u32 = undefined;
-    p = @ptrFromInt(*u32, 4);
+    p = @as(*u32, @ptrFromInt(4));
     q = p;
     try expect(@intFromPtr(q.?) == 4);
 }
@@ -1583,7 +1583,7 @@ test "peer type resolution forms error union" {
         0 => unreachable,
         42 => error.AccessDenied,
         else => unreachable,
-    } else @intCast(u32, foo);
+    } else @as(u32, @intCast(foo));
     try expect(try result == 123);
 }
 
@@ -1623,8 +1623,8 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
 
     const S = struct {
         fn doTheTest(comptime T: type, comptime s: T) !void {
-            var a: [:s]const T = @ptrFromInt(*const [2:s]T, 0x1000);
-            var b: []T = @ptrFromInt(*[3]T, 0x2000);
+            var a: [:s]const T = @as(*const [2:s]T, @ptrFromInt(0x1000));
+            var b: []T = @as(*[3]T, @ptrFromInt(0x2000));
             comptime assert(@TypeOf(a, b) == []const T);
             comptime assert(@TypeOf(b, a) == []const T);
 
@@ -1634,8 +1634,8 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
 
             const R = @TypeOf(r1);
 
-            try expectEqual(@as(R, @ptrFromInt(*const [2:s]T, 0x1000)), r1);
-            try expectEqual(@as(R, @ptrFromInt(*const [3]T, 0x2000)), r2);
+            try expectEqual(@as(R, @as(*const [2:s]T, @ptrFromInt(0x1000))), r1);
+            try expectEqual(@as(R, @as(*const [3]T, @ptrFromInt(0x2000))), r2);
         }
     };
 
@@ -1815,7 +1815,7 @@ test "peer type resolution: three-way resolution combines error set and optional
 
     const E = error{Foo};
     var a: E = error.Foo;
-    var b: *const [5:0]u8 = @ptrFromInt(*const [5:0]u8, 0x1000);
+    var b: *const [5:0]u8 = @as(*const [5:0]u8, @ptrFromInt(0x1000));
     var c: ?[*:0]u8 = null;
     comptime assert(@TypeOf(a, b, c) == E!?[*:0]const u8);
     comptime assert(@TypeOf(a, c, b) == E!?[*:0]const u8);
@@ -1844,7 +1844,7 @@ test "peer type resolution: three-way resolution combines error set and optional
     const T = @TypeOf(r1);
 
     try expectEqual(@as(T, error.Foo), r1);
-    try expectEqual(@as(T, @ptrFromInt([*:0]u8, 0x1000)), r2);
+    try expectEqual(@as(T, @as([*:0]u8, @ptrFromInt(0x1000))), r2);
     try expectEqual(@as(T, null), r3);
 }
 
@@ -2114,7 +2114,7 @@ test "peer type resolution: many compatible pointers" {
             4 => "foo-4",
             else => unreachable,
         };
-        try expectEqualSlices(u8, expected, std.mem.span(@ptrCast([*:0]const u8, r)));
+        try expectEqualSlices(u8, expected, std.mem.span(@as([*:0]const u8, @ptrCast(r))));
     }
 }
 
test/behavior/cast_int.zig
@@ -11,6 +11,6 @@ test "@intCast i32 to u7" {
 
     var x: u128 = maxInt(u128);
     var y: i32 = 120;
-    var z = x >> @intCast(u7, y);
+    var z = x >> @as(u7, @intCast(y));
     try expect(z == 0xff);
 }
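
Shift amounts can even drop the `@as`: the right-hand side of `>>` already has a known type (`u7` when the left operand is `u128`), so a bare `@intCast` can infer it. A sketch of that assumption:

const std = @import("std");

test "shift amount type inferred through a bare @intCast" {
    const y: i32 = 120;
    const z = @as(u128, std.math.maxInt(u128)) >> @intCast(y);
    try std.testing.expect(z == 0xff);
}
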
test/behavior/comptime_memory.zig
@@ -6,7 +6,7 @@ const ptr_size = @sizeOf(usize);
 test "type pun signed and unsigned as single pointer" {
     comptime {
         var x: u32 = 0;
-        const y = @ptrCast(*i32, &x);
+        const y = @as(*i32, @ptrCast(&x));
         y.* = -1;
         try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
     }
@@ -15,7 +15,7 @@ test "type pun signed and unsigned as single pointer" {
 test "type pun signed and unsigned as many pointer" {
     comptime {
         var x: u32 = 0;
-        const y = @ptrCast([*]i32, &x);
+        const y = @as([*]i32, @ptrCast(&x));
         y[0] = -1;
         try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
     }
@@ -24,7 +24,7 @@ test "type pun signed and unsigned as many pointer" {
 test "type pun signed and unsigned as array pointer" {
     comptime {
         var x: u32 = 0;
-        const y = @ptrCast(*[1]i32, &x);
+        const y = @as(*[1]i32, @ptrCast(&x));
         y[0] = -1;
         try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
     }
@@ -38,7 +38,7 @@ test "type pun signed and unsigned as offset many pointer" {
 
     comptime {
         var x: u32 = 0;
-        var y = @ptrCast([*]i32, &x);
+        var y = @as([*]i32, @ptrCast(&x));
         y -= 10;
         y[10] = -1;
         try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
@@ -53,7 +53,7 @@ test "type pun signed and unsigned as array pointer with pointer arithemtic" {
 
     comptime {
         var x: u32 = 0;
-        const y = @ptrCast([*]i32, &x) - 10;
+        const y = @as([*]i32, @ptrCast(&x)) - 10;
         const z: *[15]i32 = y[0..15];
         z[10] = -1;
         try testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
@@ -64,9 +64,9 @@ test "type pun value and struct" {
     comptime {
         const StructOfU32 = extern struct { x: u32 };
         var inst: StructOfU32 = .{ .x = 0 };
-        @ptrCast(*i32, &inst.x).* = -1;
+        @as(*i32, @ptrCast(&inst.x)).* = -1;
         try testing.expectEqual(@as(u32, 0xFFFFFFFF), inst.x);
-        @ptrCast(*i32, &inst).* = -2;
+        @as(*i32, @ptrCast(&inst)).* = -2;
         try testing.expectEqual(@as(u32, 0xFFFFFFFE), inst.x);
     }
 }
@@ -81,8 +81,8 @@ test "type pun endianness" {
     comptime {
         const StructOfBytes = extern struct { x: [4]u8 };
         var inst: StructOfBytes = .{ .x = [4]u8{ 0, 0, 0, 0 } };
-        const structPtr = @ptrCast(*align(1) u32, &inst);
-        const arrayPtr = @ptrCast(*align(1) u32, &inst.x);
+        const structPtr = @as(*align(1) u32, @ptrCast(&inst));
+        const arrayPtr = @as(*align(1) u32, @ptrCast(&inst.x));
         inst.x[0] = 0xFE;
         inst.x[2] = 0xBE;
         try testing.expectEqual(bigToNativeEndian(u32, 0xFE00BE00), structPtr.*);
@@ -124,8 +124,8 @@ fn shuffle(ptr: usize, comptime From: type, comptime To: type) usize {
         @compileError("Mismatched sizes! " ++ @typeName(From) ++ " and " ++ @typeName(To) ++ " must have the same size!");
     const array_len = @divExact(ptr_size, @sizeOf(From));
     var result: usize = 0;
-    const pSource = @ptrCast(*align(1) const [array_len]From, &ptr);
-    const pResult = @ptrCast(*align(1) [array_len]To, &result);
+    const pSource = @as(*align(1) const [array_len]From, @ptrCast(&ptr));
+    const pResult = @as(*align(1) [array_len]To, @ptrCast(&result));
     var i: usize = 0;
     while (i < array_len) : (i += 1) {
         inline for (@typeInfo(To).Struct.fields) |f| {
@@ -136,8 +136,8 @@ fn shuffle(ptr: usize, comptime From: type, comptime To: type) usize {
 }
 
 fn doTypePunBitsTest(as_bits: *Bits) !void {
-    const as_u32 = @ptrCast(*align(1) u32, as_bits);
-    const as_bytes = @ptrCast(*[4]u8, as_bits);
+    const as_u32 = @as(*align(1) u32, @ptrCast(as_bits));
+    const as_bytes = @as(*[4]u8, @ptrCast(as_bits));
     as_u32.* = bigToNativeEndian(u32, 0xB0A7DEED);
     try testing.expectEqual(@as(u1, 0x00), as_bits.p0);
     try testing.expectEqual(@as(u4, 0x08), as_bits.p1);
@@ -176,7 +176,7 @@ test "type pun bits" {
 
     comptime {
         var v: u32 = undefined;
-        try doTypePunBitsTest(@ptrCast(*Bits, &v));
+        try doTypePunBitsTest(@as(*Bits, @ptrCast(&v)));
     }
 }
 
@@ -194,7 +194,7 @@ test "basic pointer preservation" {
     comptime {
         const lazy_address = @intFromPtr(&imports.global_u32);
         try testing.expectEqual(@intFromPtr(&imports.global_u32), lazy_address);
-        try testing.expectEqual(&imports.global_u32, @ptrFromInt(*u32, lazy_address));
+        try testing.expectEqual(&imports.global_u32, @as(*u32, @ptrFromInt(lazy_address)));
     }
 }
 
@@ -207,8 +207,8 @@ test "byte copy preserves linker value" {
     const ct_value = comptime blk: {
         const lazy = &imports.global_u32;
         var result: *u32 = undefined;
-        const pSource = @ptrCast(*const [ptr_size]u8, &lazy);
-        const pResult = @ptrCast(*[ptr_size]u8, &result);
+        const pSource = @as(*const [ptr_size]u8, @ptrCast(&lazy));
+        const pResult = @as(*[ptr_size]u8, @ptrCast(&result));
         var i: usize = 0;
         while (i < ptr_size) : (i += 1) {
             pResult[i] = pSource[i];
@@ -230,8 +230,8 @@ test "unordered byte copy preserves linker value" {
     const ct_value = comptime blk: {
         const lazy = &imports.global_u32;
         var result: *u32 = undefined;
-        const pSource = @ptrCast(*const [ptr_size]u8, &lazy);
-        const pResult = @ptrCast(*[ptr_size]u8, &result);
+        const pSource = @as(*const [ptr_size]u8, @ptrCast(&lazy));
+        const pResult = @as(*[ptr_size]u8, @ptrCast(&result));
         if (ptr_size > 8) @compileError("This array needs to be expanded for platform with very big pointers");
         const shuffled_indices = [_]usize{ 4, 5, 2, 6, 1, 3, 0, 7 };
         for (shuffled_indices) |i| {
@@ -274,12 +274,12 @@ test "dance on linker values" {
         arr[0] = @intFromPtr(&imports.global_u32);
         arr[1] = @intFromPtr(&imports.global_u32);
 
-        const weird_ptr = @ptrCast([*]Bits, @ptrCast([*]u8, &arr) + @sizeOf(usize) - 3);
+        const weird_ptr = @as([*]Bits, @ptrCast(@as([*]u8, @ptrCast(&arr)) + @sizeOf(usize) - 3));
         try doTypePunBitsTest(&weird_ptr[0]);
         if (ptr_size > @sizeOf(Bits))
             try doTypePunBitsTest(&weird_ptr[1]);
 
-        var arr_bytes = @ptrCast(*[2][ptr_size]u8, &arr);
+        var arr_bytes = @as(*[2][ptr_size]u8, @ptrCast(&arr));
 
         var rebuilt_bytes: [ptr_size]u8 = undefined;
         var i: usize = 0;
@@ -290,7 +290,7 @@ test "dance on linker values" {
             rebuilt_bytes[i] = arr_bytes[1][i];
         }
 
-        try testing.expectEqual(&imports.global_u32, @ptrFromInt(*u32, @bitCast(usize, rebuilt_bytes)));
+        try testing.expectEqual(&imports.global_u32, @as(*u32, @ptrFromInt(@as(usize, @bitCast(rebuilt_bytes)))));
     }
 }
 
@@ -316,7 +316,7 @@ test "offset array ptr by element size" {
         try testing.expectEqual(@intFromPtr(&arr[2]), address + 2 * @sizeOf(VirtualStruct));
         try testing.expectEqual(@intFromPtr(&arr[3]), address + @sizeOf(VirtualStruct) * 3);
 
-        const secondElement = @ptrFromInt(*VirtualStruct, @intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct));
+        const secondElement = @as(*VirtualStruct, @ptrFromInt(@intFromPtr(&arr[0]) + 2 * @sizeOf(VirtualStruct)));
         try testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), secondElement.x);
     }
 }
@@ -334,15 +334,15 @@ test "offset instance by field size" {
         var ptr = @intFromPtr(&inst);
         ptr -= 4;
         ptr += @offsetOf(VirtualStruct, "x");
-        try testing.expectEqual(@as(u32, 0), @ptrFromInt([*]u32, ptr)[1]);
+        try testing.expectEqual(@as(u32, 0), @as([*]u32, @ptrFromInt(ptr))[1]);
         ptr -= @offsetOf(VirtualStruct, "x");
         ptr += @offsetOf(VirtualStruct, "y");
-        try testing.expectEqual(@as(u32, 1), @ptrFromInt([*]u32, ptr)[1]);
+        try testing.expectEqual(@as(u32, 1), @as([*]u32, @ptrFromInt(ptr))[1]);
         ptr = ptr - @offsetOf(VirtualStruct, "y") + @offsetOf(VirtualStruct, "z");
-        try testing.expectEqual(@as(u32, 2), @ptrFromInt([*]u32, ptr)[1]);
+        try testing.expectEqual(@as(u32, 2), @as([*]u32, @ptrFromInt(ptr))[1]);
         ptr = @intFromPtr(&inst.z) - 4 - @offsetOf(VirtualStruct, "z");
         ptr += @offsetOf(VirtualStruct, "w");
-        try testing.expectEqual(@as(u32, 3), @ptrFromInt(*u32, ptr + 4).*);
+        try testing.expectEqual(@as(u32, 3), @as(*u32, @ptrFromInt(ptr + 4)).*);
     }
 }
 
@@ -363,13 +363,13 @@ test "offset field ptr by enclosing array element size" {
 
         var i: usize = 0;
         while (i < 4) : (i += 1) {
-            var ptr: [*]u8 = @ptrCast([*]u8, &arr[0]);
+            var ptr: [*]u8 = @as([*]u8, @ptrCast(&arr[0]));
             ptr += i;
             ptr += @offsetOf(VirtualStruct, "x");
             var j: usize = 0;
             while (j < 4) : (j += 1) {
                 const base = ptr + j * @sizeOf(VirtualStruct);
-                try testing.expectEqual(@intCast(u8, i * 4 + j), base[0]);
+                try testing.expectEqual(@as(u8, @intCast(i * 4 + j)), base[0]);
             }
         }
     }
@@ -393,7 +393,7 @@ test "accessing reinterpreted memory of parent object" {
             .c = 2.6,
         };
         const ptr = &x.b[0];
-        const b = @ptrCast([*c]const u8, ptr)[5];
+        const b = @as([*c]const u8, @ptrCast(ptr))[5];
         try testing.expect(b == expected);
     }
 }
@@ -407,11 +407,11 @@ test "bitcast packed union to integer" {
     comptime {
         const a = U{ .x = 1 };
         const b = U{ .y = 2 };
-        const cast_a = @bitCast(u2, a);
-        const cast_b = @bitCast(u2, b);
+        const cast_a = @as(u2, @bitCast(a));
+        const cast_b = @as(u2, @bitCast(b));
 
         // truncated because the upper bit is garbage memory that we don't care about
-        try testing.expectEqual(@as(u1, 1), @truncate(u1, cast_a));
+        try testing.expectEqual(@as(u1, 1), @as(u1, @truncate(cast_a)));
         try testing.expectEqual(@as(u2, 2), cast_b);
     }
 }
@@ -435,6 +435,6 @@ test "dereference undefined pointer to zero-bit type" {
 test "type pun extern struct" {
     const S = extern struct { f: u8 };
     comptime var s = S{ .f = 123 };
-    @ptrCast(*u8, &s).* = 72;
+    @as(*u8, @ptrCast(&s)).* = 72;
     try testing.expectEqual(@as(u8, 72), s.f);
 }
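
These comptime type-punning tests all reduce to the same rewritten shape. A minimal sketch of the new `@ptrCast`, mirroring the first hunk in this file:

const std = @import("std");

test "comptime type pun through the new @ptrCast" {
    comptime {
        var x: u32 = 0;
        const y: *i32 = @ptrCast(&x); // was: @ptrCast(*i32, &x)
        y.* = -1;
        try std.testing.expectEqual(@as(u32, 0xFFFFFFFF), x);
    }
}
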
test/behavior/enum.zig
@@ -20,7 +20,7 @@ test "enum to int" {
 }
 
 fn testIntToEnumEval(x: i32) !void {
-    try expect(@enumFromInt(IntToEnumNumber, x) == IntToEnumNumber.Three);
+    try expect(@as(IntToEnumNumber, @enumFromInt(x)) == IntToEnumNumber.Three);
 }
 const IntToEnumNumber = enum { Zero, One, Two, Three, Four };
 
@@ -629,7 +629,7 @@ test "non-exhaustive enum" {
                 .b => true,
                 _ => false,
             });
-            e = @enumFromInt(E, 12);
+            e = @as(E, @enumFromInt(12));
             try expect(switch (e) {
                 .a => false,
                 .b => false,
@@ -648,9 +648,9 @@ test "non-exhaustive enum" {
             });
 
             try expect(@typeInfo(E).Enum.fields.len == 2);
-            e = @enumFromInt(E, 12);
+            e = @as(E, @enumFromInt(12));
             try expect(@intFromEnum(e) == 12);
-            e = @enumFromInt(E, y);
+            e = @as(E, @enumFromInt(y));
             try expect(@intFromEnum(e) == 52);
             try expect(@typeInfo(E).Enum.is_exhaustive == false);
         }
@@ -666,7 +666,7 @@ test "empty non-exhaustive enum" {
         const E = enum(u8) { _ };
 
         fn doTheTest(y: u8) !void {
-            var e = @enumFromInt(E, y);
+            var e = @as(E, @enumFromInt(y));
             try expect(switch (e) {
                 _ => true,
             });
@@ -693,7 +693,7 @@ test "single field non-exhaustive enum" {
                 .a => true,
                 _ => false,
             });
-            e = @enumFromInt(E, 12);
+            e = @as(E, @enumFromInt(12));
             try expect(switch (e) {
                 .a => false,
                 _ => true,
@@ -709,7 +709,7 @@ test "single field non-exhaustive enum" {
                 else => false,
             });
 
-            try expect(@intFromEnum(@enumFromInt(E, y)) == y);
+            try expect(@intFromEnum(@as(E, @enumFromInt(y))) == y);
             try expect(@typeInfo(E).Enum.fields.len == 1);
             try expect(@typeInfo(E).Enum.is_exhaustive == false);
         }
@@ -741,8 +741,8 @@ const MultipleChoice2 = enum(u32) {
 };
 
 test "cast integer literal to enum" {
-    try expect(@enumFromInt(MultipleChoice2, 0) == MultipleChoice2.Unspecified1);
-    try expect(@enumFromInt(MultipleChoice2, 40) == MultipleChoice2.B);
+    try expect(@as(MultipleChoice2, @enumFromInt(0)) == MultipleChoice2.Unspecified1);
+    try expect(@as(MultipleChoice2, @enumFromInt(40)) == MultipleChoice2.B);
 }
 
 test "enum with specified and unspecified tag values" {
@@ -1155,7 +1155,7 @@ test "size of enum with only one tag which has explicit integer tag type" {
     var s1: S1 = undefined;
     s1.e = .nope;
     try expect(s1.e == .nope);
-    const ptr = @ptrCast(*u8, &s1);
+    const ptr = @as(*u8, @ptrCast(&s1));
     try expect(ptr.* == 10);
 
     var s0: S0 = undefined;
@@ -1183,7 +1183,7 @@ test "Non-exhaustive enum with nonstandard int size behaves correctly" {
 test "runtime int to enum with one possible value" {
     const E = enum { one };
     var runtime: usize = 0;
-    if (@enumFromInt(E, runtime) != .one) {
+    if (@as(E, @enumFromInt(runtime)) != .one) {
         @compileError("test failed");
     }
 }
@@ -1194,7 +1194,7 @@ test "enum tag from a local variable" {
             return enum(Inner) { _ };
         }
     };
-    const i = @enumFromInt(S.Int(u32), 0);
+    const i = @as(S.Int(u32), @enumFromInt(0));
     try std.testing.expect(@intFromEnum(i) == 0);
 }
 
@@ -1203,12 +1203,12 @@ test "auto-numbered enum with signed tag type" {
 
     try std.testing.expectEqual(@as(i32, 0), @intFromEnum(E.a));
     try std.testing.expectEqual(@as(i32, 1), @intFromEnum(E.b));
-    try std.testing.expectEqual(E.a, @enumFromInt(E, 0));
-    try std.testing.expectEqual(E.b, @enumFromInt(E, 1));
-    try std.testing.expectEqual(E.a, @enumFromInt(E, @as(i32, 0)));
-    try std.testing.expectEqual(E.b, @enumFromInt(E, @as(i32, 1)));
-    try std.testing.expectEqual(E.a, @enumFromInt(E, @as(u32, 0)));
-    try std.testing.expectEqual(E.b, @enumFromInt(E, @as(u32, 1)));
+    try std.testing.expectEqual(E.a, @as(E, @enumFromInt(0)));
+    try std.testing.expectEqual(E.b, @as(E, @enumFromInt(1)));
+    try std.testing.expectEqual(E.a, @as(E, @enumFromInt(@as(i32, 0))));
+    try std.testing.expectEqual(E.b, @as(E, @enumFromInt(@as(i32, 1))));
+    try std.testing.expectEqual(E.a, @as(E, @enumFromInt(@as(u32, 0))));
+    try std.testing.expectEqual(E.b, @as(E, @enumFromInt(@as(u32, 1))));
     try std.testing.expectEqualStrings("a", @tagName(E.a));
     try std.testing.expectEqualStrings("b", @tagName(E.b));
 }
test/behavior/error.zig
@@ -234,9 +234,9 @@ const Set1 = error{ A, B };
 const Set2 = error{ A, C };
 
 fn testExplicitErrorSetCast(set1: Set1) !void {
-    var x = @errSetCast(Set2, set1);
+    var x = @as(Set2, @errSetCast(set1));
     try expect(@TypeOf(x) == Set2);
-    var y = @errSetCast(Set1, x);
+    var y = @as(Set1, @errSetCast(x));
     try expect(@TypeOf(y) == Set1);
     try expect(y == error.A);
 }
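
`@errSetCast` gets the same treatment. A sketch using hypothetical sets `SetA` and `SetB`; any two sets sharing the casted error would do:

const std = @import("std");

const SetA = error{ A, B };
const SetB = error{ A, C };

test "@errSetCast with the target set inferred" {
    const a: SetA = error.A;
    const b: SetB = @errSetCast(a); // was: @errSetCast(SetB, a)
    try std.testing.expect(b == error.A);
}
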
test/behavior/eval.zig
@@ -9,7 +9,7 @@ test "compile time recursion" {
 
     try expect(some_data.len == 21);
 }
-var some_data: [@intCast(usize, fibonacci(7))]u8 = undefined;
+var some_data: [@as(usize, @intCast(fibonacci(7)))]u8 = undefined;
 fn fibonacci(x: i32) i32 {
     if (x <= 1) return 1;
     return fibonacci(x - 1) + fibonacci(x - 2);
@@ -123,7 +123,7 @@ fn fnWithSetRuntimeSafety() i32 {
 test "compile-time downcast when the bits fit" {
     comptime {
         const spartan_count: u16 = 255;
-        const byte = @intCast(u8, spartan_count);
+        const byte = @as(u8, @intCast(spartan_count));
         try expect(byte == 255);
     }
 }
@@ -149,7 +149,7 @@ test "a type constructed in a global expression" {
     l.array[0] = 10;
     l.array[1] = 11;
     l.array[2] = 12;
-    const ptr = @ptrCast([*]u8, &l.array);
+    const ptr = @as([*]u8, @ptrCast(&l.array));
     try expect(ptr[0] == 10);
     try expect(ptr[1] == 11);
     try expect(ptr[2] == 12);
@@ -332,7 +332,7 @@ fn generateTable(comptime T: type) [1010]T {
     var res: [1010]T = undefined;
     var i: usize = 0;
     while (i < 1010) : (i += 1) {
-        res[i] = @intCast(T, i);
+        res[i] = @as(T, @intCast(i));
     }
     return res;
 }
@@ -460,7 +460,7 @@ test "binary math operator in partially inlined function" {
     var b: [16]u8 = undefined;
 
     for (&b, 0..) |*r, i|
-        r.* = @intCast(u8, i + 1);
+        r.* = @as(u8, @intCast(i + 1));
 
     copyWithPartialInline(s[0..], b[0..]);
     try expect(s[0] == 0x1020304);
@@ -942,7 +942,7 @@ test "comptime pointer load through elem_ptr" {
                 .x = i,
             };
         }
-        var ptr = @ptrCast([*]S, &array);
+        var ptr = @as([*]S, @ptrCast(&array));
         var x = ptr[0].x;
         assert(x == 0);
         ptr += 1;
@@ -1281,9 +1281,9 @@ test "comptime write through extern struct reinterpreted as array" {
             c: u8,
         };
         var s: S = undefined;
-        @ptrCast(*[3]u8, &s)[0] = 1;
-        @ptrCast(*[3]u8, &s)[1] = 2;
-        @ptrCast(*[3]u8, &s)[2] = 3;
+        @as(*[3]u8, @ptrCast(&s))[0] = 1;
+        @as(*[3]u8, @ptrCast(&s))[1] = 2;
+        @as(*[3]u8, @ptrCast(&s))[2] = 3;
         assert(s.a == 1);
         assert(s.b == 2);
         assert(s.c == 3);
@@ -1371,7 +1371,7 @@ test "lazy value is resolved as slice operand" {
     var a: [512]u64 = undefined;
 
     const ptr1 = a[0..@sizeOf(A)];
-    const ptr2 = @ptrCast([*]u8, &a)[0..@sizeOf(A)];
+    const ptr2 = @as([*]u8, @ptrCast(&a))[0..@sizeOf(A)];
     try expect(@intFromPtr(ptr1) == @intFromPtr(ptr2));
     try expect(ptr1.len == ptr2.len);
 }
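
The `some_data` hunk above shows a site that keeps an explicit `@as`: in array-length position the rewrite wraps `@intCast` in `@as(usize, ...)` rather than relying on inference there. A sketch with a hypothetical `fib`:

fn fib(n: i32) i32 {
    if (n <= 1) return 1;
    return fib(n - 1) + fib(n - 2);
}

// fib returns i32; the length is narrowed to usize explicitly.
var data: [@as(usize, @intCast(fib(7)))]u8 = undefined;

test "comptime-computed array length" {
    try @import("std").testing.expect(data.len == 21);
}
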
test/behavior/export.zig
@@ -7,7 +7,7 @@ const builtin = @import("builtin");
 
 // can't really run this test but we can make sure it has no compile error
 // and generates code
-const vram = @ptrFromInt([*]volatile u8, 0x20000000)[0..0x8000];
+const vram = @as([*]volatile u8, @ptrFromInt(0x20000000))[0..0x8000];
 export fn writeToVRam() void {
     vram[0] = 'X';
 }
test/behavior/floatop.zig
@@ -94,7 +94,7 @@ test "negative f128 intFromFloat at compile-time" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     const a: f128 = -2;
-    var b = @intFromFloat(i64, a);
+    var b = @as(i64, @intFromFloat(a));
     try expect(@as(i64, -2) == b);
 }
 
@@ -387,11 +387,11 @@ fn testLog() !void {
     }
     {
         var a: f32 = e;
-        try expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff)));
+        try expect(@log(a) == 1 or @log(a) == @as(f32, @bitCast(@as(u32, 0x3f7fffff))));
     }
     {
         var a: f64 = e;
-        try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000)));
+        try expect(@log(a) == 1 or @log(a) == @as(f64, @bitCast(@as(u64, 0x3ff0000000000000))));
     }
     inline for ([_]type{ f16, f32, f64 }) |ty| {
         const eps = epsForType(ty);
test/behavior/fn.zig
@@ -326,7 +326,7 @@ test "function pointers" {
         &fn4,
     };
     for (fns, 0..) |f, i| {
-        try expect(f() == @intCast(u32, i) + 5);
+        try expect(f() == @as(u32, @intCast(i)) + 5);
     }
 }
 fn fn1() u32 {
@@ -512,8 +512,8 @@ test "using @ptrCast on function pointers" {
 
         fn run() !void {
             const a = A{ .data = "abcd".* };
-            const casted_fn = @ptrCast(*const fn (*const anyopaque, usize) *const u8, &at);
-            const casted_impl = @ptrCast(*const anyopaque, &a);
+            const casted_fn = @as(*const fn (*const anyopaque, usize) *const u8, @ptrCast(&at));
+            const casted_impl = @as(*const anyopaque, @ptrCast(&a));
             const ptr = casted_fn(casted_impl, 2);
             try expect(ptr.* == 'c');
         }
@@ -575,7 +575,7 @@ test "lazy values passed to anytype parameter" {
     try B.foo(.{ .x = @sizeOf(B) });
 
     const C = struct {};
-    try expect(@truncate(u32, @sizeOf(C)) == 0);
+    try expect(@as(u32, @truncate(@sizeOf(C))) == 0);
 
     const D = struct {};
     try expect(@sizeOf(D) << 1 == 0);
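
`@truncate` keeps only as many low bits as the result type holds, which is why `@truncate(u32, @sizeOf(C))` becomes `@as(u32, @truncate(@sizeOf(C)))` above. A standalone sketch:

const std = @import("std");

test "@truncate keeps the low bits of the result type" {
    const wide: u64 = 0x1122_3344_5566_7788;
    const low: u32 = @truncate(wide); // was: @truncate(u32, wide)
    try std.testing.expect(low == 0x5566_7788);
}
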
test/behavior/fn_in_struct_in_comptime.zig
@@ -14,5 +14,5 @@ fn get_foo() fn (*u8) usize {
 
 test "define a function in an anonymous struct in comptime" {
     const foo = get_foo();
-    try expect(foo(@ptrFromInt(*u8, 12345)) == 12345);
+    try expect(foo(@as(*u8, @ptrFromInt(12345))) == 12345);
 }
test/behavior/for.zig
@@ -84,7 +84,7 @@ test "basic for loop" {
     }
     for (array, 0..) |item, index| {
         _ = item;
-        buffer[buf_index] = @intCast(u8, index);
+        buffer[buf_index] = @as(u8, @intCast(index));
         buf_index += 1;
     }
     const array_ptr = &array;
@@ -94,7 +94,7 @@ test "basic for loop" {
     }
     for (array_ptr, 0..) |item, index| {
         _ = item;
-        buffer[buf_index] = @intCast(u8, index);
+        buffer[buf_index] = @as(u8, @intCast(index));
         buf_index += 1;
     }
     const unknown_size: []const u8 = &array;
@@ -103,7 +103,7 @@ test "basic for loop" {
         buf_index += 1;
     }
     for (unknown_size, 0..) |_, index| {
-        buffer[buf_index] = @intCast(u8, index);
+        buffer[buf_index] = @as(u8, @intCast(index));
         buf_index += 1;
     }
 
@@ -208,7 +208,7 @@ test "for on slice with allowzero ptr" {
 
     const S = struct {
         fn doTheTest(slice: []const u8) !void {
-            var ptr = @ptrCast([*]allowzero const u8, slice.ptr)[0..slice.len];
+            var ptr = @as([*]allowzero const u8, @ptrCast(slice.ptr))[0..slice.len];
             for (ptr, 0..) |x, i| try expect(x == i + 1);
             for (ptr, 0..) |*x, i| try expect(x.* == i + 1);
         }
@@ -393,7 +393,7 @@ test "raw pointer and counter" {
     const ptr: [*]u8 = &buf;
 
     for (ptr, 0..4) |*a, b| {
-        a.* = @intCast(u8, 'A' + b);
+        a.* = @as(u8, @intCast('A' + b));
     }
 
     try expect(buf[0] == 'A');
test/behavior/generics.zig
@@ -97,7 +97,7 @@ test "type constructed by comptime function call" {
     l.array[0] = 10;
     l.array[1] = 11;
     l.array[2] = 12;
-    const ptr = @ptrCast([*]u8, &l.array);
+    const ptr = @as([*]u8, @ptrCast(&l.array));
     try expect(ptr[0] == 10);
     try expect(ptr[1] == 11);
     try expect(ptr[2] == 12);
@@ -171,7 +171,7 @@ fn getByte(ptr: ?*const u8) u8 {
     return ptr.?.*;
 }
 fn getFirstByte(comptime T: type, mem: []const T) u8 {
-    return getByte(@ptrCast(*const u8, &mem[0]));
+    return getByte(@as(*const u8, @ptrCast(&mem[0])));
 }
 
 test "generic fn keeps non-generic parameter types" {
@@ -428,7 +428,7 @@ test "null sentinel pointer passed as generic argument" {
             try std.testing.expect(@intFromPtr(a) == 8);
         }
     };
-    try S.doTheTest((@ptrFromInt([*:null]const [*c]const u8, 8)));
+    try S.doTheTest((@as([*:null]const [*c]const u8, @ptrFromInt(8))));
 }
 
 test "generic function passed as comptime argument" {
test/behavior/int128.zig
@@ -38,7 +38,7 @@ test "undefined 128 bit int" {
 
     var undef: u128 = undefined;
     var undef_signed: i128 = undefined;
-    try expect(undef == 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa and @bitCast(u128, undef_signed) == undef);
+    try expect(undef == 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa and @as(u128, @bitCast(undef_signed)) == undef);
 }
 
 test "int128" {
@@ -49,7 +49,7 @@ test "int128" {
 
     var buff: i128 = -1;
     try expect(buff < 0 and (buff + 1) == 0);
-    try expect(@intCast(i8, buff) == @as(i8, -1));
+    try expect(@as(i8, @intCast(buff)) == @as(i8, -1));
 
     buff = minInt(i128);
     try expect(buff < 0);
@@ -73,16 +73,16 @@ test "truncate int128" {
 
     {
         var buff: u128 = maxInt(u128);
-        try expect(@truncate(u64, buff) == maxInt(u64));
-        try expect(@truncate(u90, buff) == maxInt(u90));
-        try expect(@truncate(u128, buff) == maxInt(u128));
+        try expect(@as(u64, @truncate(buff)) == maxInt(u64));
+        try expect(@as(u90, @truncate(buff)) == maxInt(u90));
+        try expect(@as(u128, @truncate(buff)) == maxInt(u128));
     }
 
     {
         var buff: i128 = maxInt(i128);
-        try expect(@truncate(i64, buff) == -1);
-        try expect(@truncate(i90, buff) == -1);
-        try expect(@truncate(i128, buff) == maxInt(i128));
+        try expect(@as(i64, @truncate(buff)) == -1);
+        try expect(@as(i90, @truncate(buff)) == -1);
+        try expect(@as(i128, @truncate(buff)) == maxInt(i128));
     }
 }
 
test/behavior/math.zig
@@ -391,11 +391,11 @@ test "binary not 128-bit" {
         break :x ~@as(u128, 0x55555555_55555555_55555555_55555555) == 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa;
     });
     try expect(comptime x: {
-        break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa));
+        break :x ~@as(i128, 0x55555555_55555555_55555555_55555555) == @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)));
     });
 
     try testBinaryNot128(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa);
-    try testBinaryNot128(i128, @bitCast(i128, @as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa)));
+    try testBinaryNot128(i128, @as(i128, @bitCast(@as(u128, 0xaaaaaaaa_aaaaaaaa_aaaaaaaa_aaaaaaaa))));
 }
 
 fn testBinaryNot128(comptime Type: type, x: Type) !void {
@@ -1156,29 +1156,29 @@ test "quad hex float literal parsing accurate" {
 
     // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing.
     const expected: u128 = 0x3fff1111222233334444555566667777;
-    try expect(@bitCast(u128, a) == expected);
+    try expect(@as(u128, @bitCast(a)) == expected);
 
     // non-normalized
     const b: f128 = 0x11.111222233334444555566667777p-4;
-    try expect(@bitCast(u128, b) == expected);
+    try expect(@as(u128, @bitCast(b)) == expected);
 
     const S = struct {
         fn doTheTest() !void {
             {
                 var f: f128 = 0x1.2eab345678439abcdefea56782346p+5;
-                try expect(@bitCast(u128, f) == 0x40042eab345678439abcdefea5678234);
+                try expect(@as(u128, @bitCast(f)) == 0x40042eab345678439abcdefea5678234);
             }
             {
                 var f: f128 = 0x1.edcb34a235253948765432134674fp-1;
-                try expect(@bitCast(u128, f) == 0x3ffeedcb34a235253948765432134675); // round-to-even
+                try expect(@as(u128, @bitCast(f)) == 0x3ffeedcb34a235253948765432134675); // round-to-even
             }
             {
                 var f: f128 = 0x1.353e45674d89abacc3a2ebf3ff4ffp-50;
-                try expect(@bitCast(u128, f) == 0x3fcd353e45674d89abacc3a2ebf3ff50);
+                try expect(@as(u128, @bitCast(f)) == 0x3fcd353e45674d89abacc3a2ebf3ff50);
             }
             {
                 var f: f128 = 0x1.ed8764648369535adf4be3214567fp-9;
-                try expect(@bitCast(u128, f) == 0x3ff6ed8764648369535adf4be3214568);
+                try expect(@as(u128, @bitCast(f)) == 0x3ff6ed8764648369535adf4be3214568);
             }
             const exp2ft = [_]f64{
                 0x1.6a09e667f3bcdp-1,
@@ -1233,7 +1233,7 @@ test "quad hex float literal parsing accurate" {
             };
 
             for (exp2ft, 0..) |x, i| {
-                try expect(@bitCast(u64, x) == answers[i]);
+                try expect(@as(u64, @bitCast(x)) == answers[i]);
             }
         }
     };
@@ -1586,7 +1586,7 @@ test "signed zeros are represented properly" {
         fn testOne(comptime T: type) !void {
             const ST = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
             var as_fp_val = -@as(T, 0.0);
-            var as_uint_val = @bitCast(ST, as_fp_val);
+            var as_uint_val = @as(ST, @bitCast(as_fp_val));
             // Ensure the sign bit is set.
             try expect(as_uint_val >> (@typeInfo(T).Float.bits - 1) == 1);
         }
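
The hex-float assertions above all compare IEEE-754 bit patterns obtained with the new `@bitCast`. A minimal sketch of the technique:

const std = @import("std");

test "checking a float's bit pattern with @bitCast" {
    const one: f64 = 1.0;
    // IEEE-754 binary64 encodes 1.0 as 0x3ff0000000000000.
    try std.testing.expect(@as(u64, @bitCast(one)) == 0x3ff0000000000000);
}
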
test/behavior/memcpy.zig
@@ -59,7 +59,7 @@ fn testMemcpyDestManyPtr() !void {
     var str = "hello".*;
     var buf: [5]u8 = undefined;
     var len: usize = 5;
-    @memcpy(@ptrCast([*]u8, &buf), @ptrCast([*]const u8, &str)[0..len]);
+    @memcpy(@as([*]u8, @ptrCast(&buf)), @as([*]const u8, @ptrCast(&str))[0..len]);
     try expect(buf[0] == 'h');
     try expect(buf[1] == 'e');
     try expect(buf[2] == 'l');
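
When `@memcpy`'s destination is a many-pointer, the element count comes from the source slice, as in the hunk above. A sketch of the rewritten call:

const std = @import("std");

test "@memcpy with a many-pointer destination" {
    const src = "hello".*;
    var dst: [5]u8 = undefined;
    @memcpy(@as([*]u8, @ptrCast(&dst)), src[0..]);
    try std.testing.expect(std.mem.eql(u8, &dst, "hello"));
}
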
test/behavior/packed-struct.zig
@@ -166,7 +166,7 @@ test "correct sizeOf and offsets in packed structs" {
     try expectEqual(4, @sizeOf(PStruct));
 
     if (native_endian == .Little) {
-        const s1 = @bitCast(PStruct, @as(u32, 0x12345678));
+        const s1 = @as(PStruct, @bitCast(@as(u32, 0x12345678)));
         try expectEqual(false, s1.bool_a);
         try expectEqual(false, s1.bool_b);
         try expectEqual(false, s1.bool_c);
@@ -180,7 +180,7 @@ test "correct sizeOf and offsets in packed structs" {
         try expectEqual(@as(u10, 0b1101000101), s1.u10_a);
         try expectEqual(@as(u10, 0b0001001000), s1.u10_b);
 
-        const s2 = @bitCast(packed struct { x: u1, y: u7, z: u24 }, @as(u32, 0xd5c71ff4));
+        const s2 = @as(packed struct { x: u1, y: u7, z: u24 }, @bitCast(@as(u32, 0xd5c71ff4)));
         try expectEqual(@as(u1, 0), s2.x);
         try expectEqual(@as(u7, 0b1111010), s2.y);
         try expectEqual(@as(u24, 0xd5c71f), s2.z);
@@ -207,7 +207,7 @@ test "nested packed structs" {
     try expectEqual(24, @bitOffsetOf(S3, "y"));
 
     if (native_endian == .Little) {
-        const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3;
+        const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3;
         try expectEqual(@as(u8, 0xf4), s3.x.a);
         try expectEqual(@as(u8, 0x1f), s3.x.b);
         try expectEqual(@as(u8, 0xc7), s3.x.c);
@@ -600,7 +600,7 @@ test "packed struct initialized in bitcast" {
 
     const T = packed struct { val: u8 };
     var val: u8 = 123;
-    const t = @bitCast(u8, T{ .val = val });
+    const t = @as(u8, @bitCast(T{ .val = val }));
     try expect(t == val);
 }
 
@@ -627,7 +627,7 @@ test "pointer to container level packed struct field" {
         },
         var arr = [_]u32{0} ** 2;
     };
-    @ptrCast(*S, &S.arr[0]).other_bits.enable_3 = true;
+    @as(*S, @ptrCast(&S.arr[0])).other_bits.enable_3 = true;
     try expect(S.arr[0] == 0x10000000);
 }
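
Packed structs have a defined bit layout (first field in the least significant bits), which is what makes the `@bitCast` round trips here well defined. A sketch with a hypothetical two-nibble struct:

const std = @import("std");

test "packed struct round-trips through its backing integer" {
    const P = packed struct { lo: u4, hi: u4 };
    const p: P = @bitCast(@as(u8, 0xAB)); // was: @bitCast(P, @as(u8, 0xAB))
    try std.testing.expect(p.lo == 0xB and p.hi == 0xA);
    try std.testing.expect(@as(u8, @bitCast(p)) == 0xAB);
}
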
 
test/behavior/packed_struct_explicit_backing_int.zig
@@ -25,7 +25,7 @@ test "packed struct explicit backing integer" {
     try expectEqual(24, @bitOffsetOf(S3, "y"));
 
     if (native_endian == .Little) {
-        const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3;
+        const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3;
         try expectEqual(@as(u8, 0xf4), s3.x.a);
         try expectEqual(@as(u8, 0x1f), s3.x.b);
         try expectEqual(@as(u8, 0xc7), s3.x.c);
test/behavior/pointers.zig
@@ -184,8 +184,8 @@ test "implicit cast error unions with non-optional to optional pointer" {
 }
 
 test "compare equality of optional and non-optional pointer" {
-    const a = @ptrFromInt(*const usize, 0x12345678);
-    const b = @ptrFromInt(?*usize, 0x12345678);
+    const a = @as(*const usize, @ptrFromInt(0x12345678));
+    const b = @as(?*usize, @ptrFromInt(0x12345678));
     try expect(a == b);
     try expect(b == a);
 }
@@ -197,7 +197,7 @@ test "allowzero pointer and slice" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
-    var ptr = @ptrFromInt([*]allowzero i32, 0);
+    var ptr = @as([*]allowzero i32, @ptrFromInt(0));
     var opt_ptr: ?[*]allowzero i32 = ptr;
     try expect(opt_ptr != null);
     try expect(@intFromPtr(ptr) == 0);
@@ -286,9 +286,9 @@ test "null terminated pointer" {
     const S = struct {
         fn doTheTest() !void {
             var array_with_zero = [_:0]u8{ 'h', 'e', 'l', 'l', 'o' };
-            var zero_ptr: [*:0]const u8 = @ptrCast([*:0]const u8, &array_with_zero);
+            var zero_ptr: [*:0]const u8 = @as([*:0]const u8, @ptrCast(&array_with_zero));
             var no_zero_ptr: [*]const u8 = zero_ptr;
-            var zero_ptr_again = @ptrCast([*:0]const u8, no_zero_ptr);
+            var zero_ptr_again = @as([*:0]const u8, @ptrCast(no_zero_ptr));
             try expect(std.mem.eql(u8, std.mem.sliceTo(zero_ptr_again, 0), "hello"));
         }
     };
@@ -367,7 +367,7 @@ test "pointer sentinel with +inf" {
 }
 
 test "pointer to array at fixed address" {
-    const array = @ptrFromInt(*volatile [2]u32, 0x10);
+    const array = @as(*volatile [2]u32, @ptrFromInt(0x10));
     // Silly check just to reference `array`
     try expect(@intFromPtr(&array[0]) == 0x10);
     try expect(@intFromPtr(&array[1]) == 0x14);
@@ -406,13 +406,13 @@ test "pointer arithmetic affects the alignment" {
 
 test "@intFromPtr on null optional at comptime" {
     {
-        const pointer = @ptrFromInt(?*u8, 0x000);
+        const pointer = @as(?*u8, @ptrFromInt(0x000));
         const x = @intFromPtr(pointer);
         _ = x;
         try comptime expect(0 == @intFromPtr(pointer));
     }
     {
-        const pointer = @ptrFromInt(?*u8, 0xf00);
+        const pointer = @as(?*u8, @ptrFromInt(0xf00));
         try comptime expect(0xf00 == @intFromPtr(pointer));
     }
 }
@@ -463,8 +463,8 @@ test "element pointer arithmetic to slice" {
             };
 
             const elem_ptr = &cases[0]; // *[2]i32
-            const many = @ptrCast([*][2]i32, elem_ptr);
-            const many_elem = @ptrCast(*[2]i32, &many[1]);
+            const many = @as([*][2]i32, @ptrCast(elem_ptr));
+            const many_elem = @as(*[2]i32, @ptrCast(&many[1]));
             const items: []i32 = many_elem;
             try testing.expect(items.len == 2);
             try testing.expect(items[1] == 3);
@@ -512,7 +512,7 @@ test "ptrCast comptime known slice to C pointer" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const s: [:0]const u8 = "foo";
-    var p = @ptrCast([*c]const u8, s);
+    var p = @as([*c]const u8, @ptrCast(s));
     try std.testing.expectEqualStrings(s, std.mem.sliceTo(p, 0));
 }
 
@@ -550,7 +550,7 @@ test "pointer to array has explicit alignment" {
         const Base = extern struct { a: u8 };
         const Base2 = extern struct { a: u8 };
         fn func(ptr: *[4]Base) *align(1) [4]Base2 {
-            return @alignCast(1, @ptrCast(*[4]Base2, ptr));
+            return @alignCast(@as(*[4]Base2, @ptrCast(ptr)));
         }
     };
     var bases = [_]S.Base{.{ .a = 2 }} ** 4;
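
The pointers.zig hunks above show the core rewrite pattern: cast builtins such as @ptrFromInt and @ptrCast now take only the operand, and the destination type flows in from the result location. An illustrative sketch of the two equivalent spellings (not part of this commit):

    // Old two-argument form, for comparison:
    //   const a = @ptrFromInt(*const usize, 0x12345678);
    // New form, destination type supplied by @as:
    const a = @as(*const usize, @ptrFromInt(0x12345678));
    // New form, destination type supplied by the declaration:
    const b: *const usize = @ptrFromInt(0x12345678);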
test/behavior/popcount.zig
@@ -63,7 +63,7 @@ fn testPopCountIntegers() !void {
         try expect(@popCount(x) == 2);
     }
     comptime {
-        try expect(@popCount(@bitCast(u8, @as(i8, -120))) == 2);
+        try expect(@popCount(@as(u8, @bitCast(@as(i8, -120)))) == 2);
     }
 }
 
test/behavior/ptrcast.zig
@@ -16,7 +16,7 @@ fn testReinterpretBytesAsInteger() !void {
         .Little => 0xab785634,
         .Big => 0x345678ab,
     };
-    try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected);
+    try expect(@as(*align(1) const u32, @ptrCast(bytes[1..5])).* == expected);
 }
 
 test "reinterpret an array over multiple elements, with no well-defined layout" {
@@ -32,7 +32,7 @@ test "reinterpret an array over multiple elements, with no well-defined layout"
 fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void {
     const bytes: ?[5]?u8 = [5]?u8{ 0x12, 0x34, 0x56, 0x78, 0x9a };
     const ptr = &bytes.?[1];
-    const copy: [4]?u8 = @ptrCast(*const [4]?u8, ptr).*;
+    const copy: [4]?u8 = @as(*const [4]?u8, @ptrCast(ptr)).*;
     _ = copy;
     //try expect(@ptrCast(*align(1)?u8, bytes[1..5]).* == );
 }
@@ -51,7 +51,7 @@ fn testReinterpretStructWrappedBytesAsInteger() !void {
         .Little => 0xab785634,
         .Big => 0x345678ab,
     };
-    try expect(@ptrCast(*align(1) const u32, obj.bytes[1..5]).* == expected);
+    try expect(@as(*align(1) const u32, @ptrCast(obj.bytes[1..5])).* == expected);
 }
 
 test "reinterpret bytes of an array into an extern struct" {
@@ -71,7 +71,7 @@ fn testReinterpretBytesAsExternStruct() !void {
         c: u8,
     };
 
-    var ptr = @ptrCast(*const S, &bytes);
+    var ptr = @as(*const S, @ptrCast(&bytes));
     var val = ptr.c;
     try expect(val == 5);
 }
@@ -95,7 +95,7 @@ fn testReinterpretExternStructAsExternStruct() !void {
         a: u32 align(2),
         c: u8,
     };
-    var ptr = @ptrCast(*const S2, &bytes);
+    var ptr = @as(*const S2, @ptrCast(&bytes));
     var val = ptr.c;
     try expect(val == 5);
 }
@@ -121,7 +121,7 @@ fn testReinterpretOverAlignedExternStructAsExternStruct() !void {
         a2: u16,
         c: u8,
     };
-    var ptr = @ptrCast(*const S2, &bytes);
+    var ptr = @as(*const S2, @ptrCast(&bytes));
     var val = ptr.c;
     try expect(val == 5);
 }
@@ -138,13 +138,13 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
         a: u32 align(2),
         c: u8,
     };
-    comptime var ptr = @ptrCast(*const S, &bytes);
+    comptime var ptr = @as(*const S, @ptrCast(&bytes));
     var val = &ptr.c;
     try expect(val.* == 5);
 
     // Test lowering an elem ptr
     comptime var src_value = S{ .a = 15, .c = 5 };
-    comptime var ptr2 = @ptrCast(*[@sizeOf(S)]u8, &src_value);
+    comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value));
     var val2 = &ptr2[4];
     try expect(val2.* == 5);
 }
@@ -161,13 +161,13 @@ test "lower reinterpreted comptime field ptr" {
         a: u32,
         c: u8,
     };
-    comptime var ptr = @ptrCast(*const S, &bytes);
+    comptime var ptr = @as(*const S, @ptrCast(&bytes));
     var val = &ptr.c;
     try expect(val.* == 5);
 
     // Test lowering an elem ptr
     comptime var src_value = S{ .a = 15, .c = 5 };
-    comptime var ptr2 = @ptrCast(*[@sizeOf(S)]u8, &src_value);
+    comptime var ptr2 = @as(*[@sizeOf(S)]u8, @ptrCast(&src_value));
     var val2 = &ptr2[4];
     try expect(val2.* == 5);
 }
@@ -190,27 +190,17 @@ const Bytes = struct {
 
     pub fn init(v: u32) Bytes {
         var res: Bytes = undefined;
-        @ptrCast(*align(1) u32, &res.bytes).* = v;
+        @as(*align(1) u32, @ptrCast(&res.bytes)).* = v;
 
         return res;
     }
 };
 
-test "comptime ptrcast keeps larger alignment" {
-    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-
-    comptime {
-        const a: u32 = 1234;
-        const p = @ptrCast([*]const u8, &a);
-        try expect(@TypeOf(p) == [*]align(@alignOf(u32)) const u8);
-    }
-}
-
 test "ptrcast of const integer has the correct object size" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
-    const is_value = ~@intCast(isize, std.math.minInt(isize));
-    const is_bytes = @ptrCast([*]const u8, &is_value)[0..@sizeOf(isize)];
+    const is_value = ~@as(isize, @intCast(std.math.minInt(isize)));
+    const is_bytes = @as([*]const u8, @ptrCast(&is_value))[0..@sizeOf(isize)];
     if (@sizeOf(isize) == 8) {
         switch (native_endian) {
             .Little => {
@@ -248,7 +238,7 @@ test "implicit optional pointer to optional anyopaque pointer" {
     var buf: [4]u8 = "aoeu".*;
     var x: ?[*]u8 = &buf;
     var y: ?*anyopaque = x;
-    var z = @ptrCast(*[4]u8, y);
+    var z = @as(*[4]u8, @ptrCast(y));
     try expect(std.mem.eql(u8, z, "aoeu"));
 }
 
@@ -260,7 +250,7 @@ test "@ptrCast slice to slice" {
 
     const S = struct {
         fn foo(slice: []u32) []i32 {
-            return @ptrCast([]i32, slice);
+            return @as([]i32, @ptrCast(slice));
         }
     };
     var buf: [4]u32 = .{ 0, 0, 0, 0 };
@@ -277,7 +267,7 @@ test "comptime @ptrCast a subset of an array, then write through it" {
 
     comptime {
         var buff: [16]u8 align(4) = undefined;
-        const len_bytes = @ptrCast(*u32, &buff);
+        const len_bytes = @as(*u32, @ptrCast(&buff));
         len_bytes.* = 16;
         std.mem.copy(u8, buff[4..], "abcdef");
     }
@@ -286,7 +276,7 @@ test "comptime @ptrCast a subset of an array, then write through it" {
 test "@ptrCast undefined value at comptime" {
     const S = struct {
         fn transmute(comptime T: type, comptime U: type, value: T) U {
-            return @ptrCast(*const U, &value).*;
+            return @as(*const U, @ptrCast(&value)).*;
         }
     };
     comptime {
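
A hedged usage sketch of the comptime reinterpret pattern rewritten above, assuming `const std = @import("std");` is in scope (illustrative only, not part of the commit):

    comptime {
        const n: u32 = 0x3f800000;
        // One @as drives the whole cast: *const u32 -> *const f32, then deref.
        const f = @as(*const f32, @ptrCast(&n)).*;
        std.debug.assert(f == 1.0); // 0x3f800000 is the IEEE-754 encoding of 1.0
    }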
test/behavior/ptrfromint.zig
@@ -9,7 +9,7 @@ test "casting integer address to function pointer" {
 
 fn addressToFunction() void {
     var addr: usize = 0xdeadbee0;
-    _ = @ptrFromInt(*const fn () void, addr);
+    _ = @as(*const fn () void, @ptrFromInt(addr));
 }
 
 test "mutate through ptr initialized with constant ptrFromInt value" {
@@ -21,7 +21,7 @@ test "mutate through ptr initialized with constant ptrFromInt value" {
 }
 
 fn forceCompilerAnalyzeBranchHardCodedPtrDereference(x: bool) void {
-    const hardCodedP = @ptrFromInt(*volatile u8, 0xdeadbeef);
+    const hardCodedP = @as(*volatile u8, @ptrFromInt(0xdeadbeef));
     if (x) {
         hardCodedP.* = hardCodedP.* | 10;
     } else {
@@ -34,7 +34,7 @@ test "@ptrFromInt creates null pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
-    const ptr = @ptrFromInt(?*u32, 0);
+    const ptr = @as(?*u32, @ptrFromInt(0));
     try expectEqual(@as(?*u32, null), ptr);
 }
 
@@ -43,6 +43,6 @@ test "@ptrFromInt creates allowzero zero pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
-    const ptr = @ptrFromInt(*allowzero u32, 0);
+    const ptr = @as(*allowzero u32, @ptrFromInt(0));
     try expectEqual(@as(usize, 0), @intFromPtr(ptr));
 }
test/behavior/sizeof_and_typeof.zig
@@ -231,7 +231,7 @@ test "@sizeOf comparison against zero" {
 
 test "hardcoded address in typeof expression" {
     const S = struct {
-        fn func() @TypeOf(@ptrFromInt(*[]u8, 0x10).*[0]) {
+        fn func() @TypeOf(@as(*[]u8, @ptrFromInt(0x10)).*[0]) {
             return 0;
         }
     };
@@ -252,7 +252,7 @@ test "array access of generic param in typeof expression" {
 test "lazy size cast to float" {
     {
         const S = struct { a: u8 };
-        try expect(@floatFromInt(f32, @sizeOf(S)) == 1.0);
+        try expect(@as(f32, @floatFromInt(@sizeOf(S))) == 1.0);
     }
     {
         const S = struct { a: u8 };
test/behavior/slice.zig
@@ -129,7 +129,7 @@ test "generic malloc free" {
 }
 var some_mem: [100]u8 = undefined;
 fn memAlloc(comptime T: type, n: usize) anyerror![]T {
-    return @ptrCast([*]T, &some_mem[0])[0..n];
+    return @as([*]T, @ptrCast(&some_mem[0]))[0..n];
 }
 fn memFree(comptime T: type, memory: []T) void {
     _ = memory;
@@ -138,7 +138,7 @@ fn memFree(comptime T: type, memory: []T) void {
 test "slice of hardcoded address to pointer" {
     const S = struct {
         fn doTheTest() !void {
-            const pointer = @ptrFromInt([*]u8, 0x04)[0..2];
+            const pointer = @as([*]u8, @ptrFromInt(0x04))[0..2];
             try comptime expect(@TypeOf(pointer) == *[2]u8);
             const slice: []const u8 = pointer;
             try expect(@intFromPtr(slice.ptr) == 4);
@@ -152,7 +152,7 @@ test "slice of hardcoded address to pointer" {
 test "comptime slice of pointer preserves comptime var" {
     comptime {
         var buff: [10]u8 = undefined;
-        var a = @ptrCast([*]u8, &buff);
+        var a = @as([*]u8, @ptrCast(&buff));
         a[0..1][0] = 1;
         try expect(buff[0..][0..][0] == 1);
     }
@@ -161,7 +161,7 @@ test "comptime slice of pointer preserves comptime var" {
 test "comptime pointer cast array and then slice" {
     const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
 
-    const ptrA: [*]const u8 = @ptrCast([*]const u8, &array);
+    const ptrA: [*]const u8 = @as([*]const u8, @ptrCast(&array));
     const sliceA: []const u8 = ptrA[0..2];
 
     const ptrB: [*]const u8 = &array;
@@ -188,7 +188,7 @@ test "slicing pointer by length" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
-    const ptr: [*]const u8 = @ptrCast([*]const u8, &array);
+    const ptr: [*]const u8 = @as([*]const u8, @ptrCast(&array));
     const slice = ptr[1..][0..5];
     try expect(slice.len == 5);
     var i: usize = 0;
@@ -197,7 +197,7 @@ test "slicing pointer by length" {
     }
 }
 
-const x = @ptrFromInt([*]i32, 0x1000)[0..0x500];
+const x = @as([*]i32, @ptrFromInt(0x1000))[0..0x500];
 const y = x[0x100..];
 test "compile time slice of pointer to hard coded address" {
     try expect(@intFromPtr(x) == 0x1000);
@@ -262,7 +262,7 @@ test "C pointer slice access" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var buf: [10]u32 = [1]u32{42} ** 10;
-    const c_ptr = @ptrCast([*c]const u32, &buf);
+    const c_ptr = @as([*c]const u32, @ptrCast(&buf));
 
     var runtime_zero: usize = 0;
     try comptime expectEqual([]const u32, @TypeOf(c_ptr[runtime_zero..1]));
@@ -352,7 +352,7 @@ test "@ptrCast slice to pointer" {
         fn doTheTest() !void {
             var array align(@alignOf(u16)) = [5]u8{ 0xff, 0xff, 0xff, 0xff, 0xff };
             var slice: []align(@alignOf(u16)) u8 = &array;
-            var ptr = @ptrCast(*u16, slice);
+            var ptr = @as(*u16, @ptrCast(slice));
             try expect(ptr.* == 65535);
         }
     };
@@ -837,13 +837,13 @@ test "empty slice ptr is non null" {
     {
         const empty_slice: []u8 = &[_]u8{};
         const p: [*]u8 = empty_slice.ptr + 0;
-        const t = @ptrCast([*]i8, p);
+        const t = @as([*]i8, @ptrCast(p));
         try expect(@intFromPtr(t) == @intFromPtr(empty_slice.ptr));
     }
     {
         const empty_slice: []u8 = &.{};
         const p: [*]u8 = empty_slice.ptr + 0;
-        const t = @ptrCast([*]i8, p);
+        const t = @as([*]i8, @ptrCast(p));
         try expect(@intFromPtr(t) == @intFromPtr(empty_slice.ptr));
     }
 }
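
Where the cast result is consumed immediately — sliced, indexed, or dereferenced — there is no declaration or parameter to carry the type, so the @as wrapper is the natural rewrite, as in the slice.zig hunks above. Illustrative sketch:

    // No result location exists for the intermediate pointer, so @as names it:
    const x = @as([*]i32, @ptrFromInt(0x1000))[0..0x500];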
test/behavior/slice_sentinel_comptime.zig
@@ -25,7 +25,7 @@ test "comptime slice-sentinel in bounds (unterminated)" {
     // vector_ConstPtrSpecialRef
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @as([*]u8, @ptrCast(&buf));
         const slice = target[0..3 :'d'];
         _ = slice;
     }
@@ -41,7 +41,7 @@ test "comptime slice-sentinel in bounds (unterminated)" {
     // cvector_ConstPtrSpecialRef
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf));
         const slice = target[0..3 :'d'];
         _ = slice;
     }
@@ -82,7 +82,7 @@ test "comptime slice-sentinel in bounds (end,unterminated)" {
     // vector_ConstPtrSpecialRef
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{0xff} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @as([*]u8, @ptrCast(&buf));
         const slice = target[0..13 :0xff];
         _ = slice;
     }
@@ -98,7 +98,7 @@ test "comptime slice-sentinel in bounds (end,unterminated)" {
     // cvector_ConstPtrSpecialRef
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{0xff} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf));
         const slice = target[0..13 :0xff];
         _ = slice;
     }
@@ -139,7 +139,7 @@ test "comptime slice-sentinel in bounds (terminated)" {
     // vector_ConstPtrSpecialRef
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @as([*]u8, @ptrCast(&buf));
         const slice = target[0..3 :'d'];
         _ = slice;
     }
@@ -155,7 +155,7 @@ test "comptime slice-sentinel in bounds (terminated)" {
     // cvector_ConstPtrSpecialRef
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf));
         const slice = target[0..3 :'d'];
         _ = slice;
     }
@@ -196,7 +196,7 @@ test "comptime slice-sentinel in bounds (on target sentinel)" {
     // vector_ConstPtrSpecialRef
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @as([*]u8, @ptrCast(&buf));
         const slice = target[0..14 :0];
         _ = slice;
     }
@@ -212,7 +212,7 @@ test "comptime slice-sentinel in bounds (on target sentinel)" {
     // cvector_ConstPtrSpecialRef
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @as([*c]u8, @ptrCast(&buf));
         const slice = target[0..14 :0];
         _ = slice;
     }
test/behavior/struct.zig
@@ -92,7 +92,7 @@ test "structs" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var foo: StructFoo = undefined;
-    @memset(@ptrCast([*]u8, &foo)[0..@sizeOf(StructFoo)], 0);
+    @memset(@as([*]u8, @ptrCast(&foo))[0..@sizeOf(StructFoo)], 0);
     foo.a += 1;
     foo.b = foo.a == 1;
     try testFoo(foo);
@@ -479,14 +479,14 @@ test "runtime struct initialization of bitfield" {
         .y = x1,
     };
     const s2 = Nibbles{
-        .x = @intCast(u4, x2),
-        .y = @intCast(u4, x2),
+        .x = @as(u4, @intCast(x2)),
+        .y = @as(u4, @intCast(x2)),
     };
 
     try expect(s1.x == x1);
     try expect(s1.y == x1);
-    try expect(s2.x == @intCast(u4, x2));
-    try expect(s2.y == @intCast(u4, x2));
+    try expect(s2.x == @as(u4, @intCast(x2)));
+    try expect(s2.y == @as(u4, @intCast(x2)));
 }
 
 var x1 = @as(u4, 1);
@@ -515,8 +515,8 @@ test "packed struct fields are ordered from LSB to MSB" {
 
     var all: u64 = 0x7765443322221111;
     var bytes: [8]u8 align(@alignOf(Bitfields)) = undefined;
-    @memcpy(bytes[0..8], @ptrCast([*]u8, &all));
-    var bitfields = @ptrCast(*Bitfields, &bytes).*;
+    @memcpy(bytes[0..8], @as([*]u8, @ptrCast(&all)));
+    var bitfields = @as(*Bitfields, @ptrCast(&bytes)).*;
 
     try expect(bitfields.f1 == 0x1111);
     try expect(bitfields.f2 == 0x2222);
@@ -1281,7 +1281,7 @@ test "packed struct aggregate init" {
 
     const S = struct {
         fn foo(a: i2, b: i6) u8 {
-            return @bitCast(u8, P{ .a = a, .b = b });
+            return @as(u8, @bitCast(P{ .a = a, .b = b }));
         }
 
         const P = packed struct {
@@ -1289,7 +1289,7 @@ test "packed struct aggregate init" {
             b: i6,
         };
     };
-    const result = @bitCast(u8, S.foo(1, 2));
+    const result = @as(u8, @bitCast(S.foo(1, 2)));
     try expect(result == 9);
 }
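
As a quick check of the expected result above: packed struct fields are laid out LSB first, so in P the i2 field a occupies bits 0-1 and the i6 field b bits 2-7; foo(1, 2) therefore bit-casts to 1 | (2 << 2) == 9. Illustrative sketch:

    const P = packed struct { a: i2, b: i6 };
    const byte: u8 = @bitCast(P{ .a = 1, .b = 2 });
    // bits (MSB..LSB): b = 000010, a = 01  ->  0b00001001 == 9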
 
@@ -1365,7 +1365,7 @@ test "under-aligned struct field" {
     };
     var runtime: usize = 1234;
     const ptr = &S{ .events = 0, .data = .{ .u64 = runtime } };
-    const array = @ptrCast(*const [12]u8, ptr);
+    const array = @as(*const [12]u8, @ptrCast(ptr));
     const result = std.mem.readIntNative(u64, array[4..12]);
     try expect(result == 1234);
 }
test/behavior/switch.zig
@@ -590,9 +590,9 @@ test "switch on pointer type" {
             field: u32,
         };
 
-        const P1 = @ptrFromInt(*X, 0x400);
-        const P2 = @ptrFromInt(*X, 0x800);
-        const P3 = @ptrFromInt(*X, 0xC00);
+        const P1 = @as(*X, @ptrFromInt(0x400));
+        const P2 = @as(*X, @ptrFromInt(0x800));
+        const P3 = @as(*X, @ptrFromInt(0xC00));
 
         fn doTheTest(arg: *X) i32 {
             switch (arg) {
@@ -682,9 +682,9 @@ test "enum value without tag name used as switch item" {
         b = 2,
         _,
     };
-    var e: E = @enumFromInt(E, 0);
+    var e: E = @as(E, @enumFromInt(0));
     switch (e) {
-        @enumFromInt(E, 0) => {},
+        @as(E, @enumFromInt(0)) => {},
         .a => return error.TestFailed,
         .b => return error.TestFailed,
         _ => return error.TestFailed,
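
Switch items are one place where the result type cannot be inferred, so the mechanical rewrite wraps @enumFromInt in @as there, while a declaration can carry the type itself. A minimal sketch of both spellings (illustrative only):

    const E = enum(u8) { a = 1, b = 2, _ };
    var e: E = @enumFromInt(0); // result type from the annotation
    switch (e) {
        @as(E, @enumFromInt(0)) => {}, // switch items need the type spelled out
        .a, .b => {},
        _ => {},
    }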
test/behavior/translate_c_macros.zig
@@ -60,7 +60,7 @@ test "cast negative integer to pointer" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
-    try expectEqual(@ptrFromInt(?*anyopaque, @bitCast(usize, @as(isize, -1))), h.MAP_FAILED);
+    try expectEqual(@as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))), h.MAP_FAILED);
 }
 
 test "casting to union with a macro" {
@@ -89,7 +89,7 @@ test "casting or calling a value with a paren-surrounded macro" {
 
     const l: c_long = 42;
     const casted = h.CAST_OR_CALL_WITH_PARENS(c_int, l);
-    try expect(casted == @intCast(c_int, l));
+    try expect(casted == @as(c_int, @intCast(l)));
 
     const Helper = struct {
         fn foo(n: c_int) !void {
test/behavior/truncate.zig
@@ -4,58 +4,58 @@ const expect = std.testing.expect;
 
 test "truncate u0 to larger integer allowed and has comptime-known result" {
     var x: u0 = 0;
-    const y = @truncate(u8, x);
+    const y = @as(u8, @truncate(x));
     try comptime expect(y == 0);
 }
 
 test "truncate.u0.literal" {
-    var z = @truncate(u0, 0);
+    var z = @as(u0, @truncate(0));
     try expect(z == 0);
 }
 
 test "truncate.u0.const" {
     const c0: usize = 0;
-    var z = @truncate(u0, c0);
+    var z = @as(u0, @truncate(c0));
     try expect(z == 0);
 }
 
 test "truncate.u0.var" {
     var d: u8 = 2;
-    var z = @truncate(u0, d);
+    var z = @as(u0, @truncate(d));
     try expect(z == 0);
 }
 
 test "truncate i0 to larger integer allowed and has comptime-known result" {
     var x: i0 = 0;
-    const y = @truncate(i8, x);
+    const y = @as(i8, @truncate(x));
     try comptime expect(y == 0);
 }
 
 test "truncate.i0.literal" {
-    var z = @truncate(i0, 0);
+    var z = @as(i0, @truncate(0));
     try expect(z == 0);
 }
 
 test "truncate.i0.const" {
     const c0: isize = 0;
-    var z = @truncate(i0, c0);
+    var z = @as(i0, @truncate(c0));
     try expect(z == 0);
 }
 
 test "truncate.i0.var" {
     var d: i8 = 2;
-    var z = @truncate(i0, d);
+    var z = @as(i0, @truncate(d));
     try expect(z == 0);
 }
 
 test "truncate on comptime integer" {
-    var x = @truncate(u16, 9999);
+    var x = @as(u16, @truncate(9999));
     try expect(x == 9999);
-    var y = @truncate(u16, -21555);
+    var y = @as(u16, @truncate(-21555));
     try expect(y == 0xabcd);
-    var z = @truncate(i16, -65537);
+    var z = @as(i16, @truncate(-65537));
     try expect(z == -1);
-    var w = @truncate(u1, 1 << 100);
+    var w = @as(u1, @truncate(1 << 100));
     try expect(w == 0);
 }
 
@@ -69,7 +69,7 @@ test "truncate on vectors" {
     const S = struct {
         fn doTheTest() !void {
             var v1: @Vector(4, u16) = .{ 0xaabb, 0xccdd, 0xeeff, 0x1122 };
-            var v2 = @truncate(u8, v1);
+            var v2: @Vector(4, u8) = @truncate(v1);
             try expect(std.mem.eql(u8, &@as([4]u8, v2), &[4]u8{ 0xbb, 0xdd, 0xff, 0x22 }));
         }
     };
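
Note how the vector case above differs from the scalar tests: @truncate on a vector cannot simply be wrapped in @as with the scalar type, because the result type must itself be a vector of the narrower elements; the rewrite therefore annotates the declaration instead. Illustrative sketch:

    var v1: @Vector(4, u16) = .{ 0xaabb, 0xccdd, 0xeeff, 0x1122 };
    // The result type is a vector of the narrower element type:
    var v2: @Vector(4, u8) = @truncate(v1); // .{ 0xbb, 0xdd, 0xff, 0x22 }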
test/behavior/tuple.zig
@@ -403,7 +403,7 @@ test "nested runtime conditionals in tuple initializer" {
 
     var data: u8 = 0;
     const x = .{
-        if (data != 0) "" else switch (@truncate(u1, data)) {
+        if (data != 0) "" else switch (@as(u1, @truncate(data))) {
             0 => "up",
             1 => "down",
         },
test/behavior/tuple_declarations.zig
@@ -21,7 +21,7 @@ test "tuple declaration type info" {
 
         try expectEqualStrings(info.fields[0].name, "0");
         try expect(info.fields[0].type == u32);
-        try expect(@ptrCast(*const u32, @alignCast(@alignOf(u32), info.fields[0].default_value)).* == 1);
+        try expect(@as(*const u32, @ptrCast(@alignCast(info.fields[0].default_value))).* == 1);
         try expect(info.fields[0].is_comptime);
         try expect(info.fields[0].alignment == 2);
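
Chained casts now share a single result location: in the rewritten line above, one @as supplies the type that both the inner @alignCast and the outer @ptrCast resolve against. A sketch of the pattern for reading a type-erased field default, assuming a hypothetical `field: std.builtin.Type.StructField` in scope (whose default_value is a ?*const anyopaque):

    // One @as drives @ptrCast and @alignCast alike:
    const value = @as(*const u32, @ptrCast(@alignCast(field.default_value))).*;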
 
test/behavior/type.zig
@@ -289,7 +289,7 @@ test "Type.Struct" {
     try testing.expectEqual(@as(?*const anyopaque, null), infoB.fields[0].default_value);
     try testing.expectEqualSlices(u8, "y", infoB.fields[1].name);
     try testing.expectEqual(u32, infoB.fields[1].type);
-    try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoB.fields[1].default_value.?).*);
+    try testing.expectEqual(@as(u32, 5), @as(*align(1) const u32, @ptrCast(infoB.fields[1].default_value.?)).*);
     try testing.expectEqual(@as(usize, 0), infoB.decls.len);
     try testing.expectEqual(@as(bool, false), infoB.is_tuple);
 
@@ -298,10 +298,10 @@ test "Type.Struct" {
     try testing.expectEqual(Type.ContainerLayout.Packed, infoC.layout);
     try testing.expectEqualSlices(u8, "x", infoC.fields[0].name);
     try testing.expectEqual(u8, infoC.fields[0].type);
-    try testing.expectEqual(@as(u8, 3), @ptrCast(*const u8, infoC.fields[0].default_value.?).*);
+    try testing.expectEqual(@as(u8, 3), @as(*const u8, @ptrCast(infoC.fields[0].default_value.?)).*);
     try testing.expectEqualSlices(u8, "y", infoC.fields[1].name);
     try testing.expectEqual(u32, infoC.fields[1].type);
-    try testing.expectEqual(@as(u32, 5), @ptrCast(*align(1) const u32, infoC.fields[1].default_value.?).*);
+    try testing.expectEqual(@as(u32, 5), @as(*align(1) const u32, @ptrCast(infoC.fields[1].default_value.?)).*);
     try testing.expectEqual(@as(usize, 0), infoC.decls.len);
     try testing.expectEqual(@as(bool, false), infoC.is_tuple);
 
@@ -311,10 +311,10 @@ test "Type.Struct" {
     try testing.expectEqual(Type.ContainerLayout.Auto, infoD.layout);
     try testing.expectEqualSlices(u8, "x", infoD.fields[0].name);
     try testing.expectEqual(comptime_int, infoD.fields[0].type);
-    try testing.expectEqual(@as(comptime_int, 3), @ptrCast(*const comptime_int, infoD.fields[0].default_value.?).*);
+    try testing.expectEqual(@as(comptime_int, 3), @as(*const comptime_int, @ptrCast(infoD.fields[0].default_value.?)).*);
     try testing.expectEqualSlices(u8, "y", infoD.fields[1].name);
     try testing.expectEqual(comptime_int, infoD.fields[1].type);
-    try testing.expectEqual(@as(comptime_int, 5), @ptrCast(*const comptime_int, infoD.fields[1].default_value.?).*);
+    try testing.expectEqual(@as(comptime_int, 5), @as(*const comptime_int, @ptrCast(infoD.fields[1].default_value.?)).*);
     try testing.expectEqual(@as(usize, 0), infoD.decls.len);
     try testing.expectEqual(@as(bool, false), infoD.is_tuple);
 
@@ -324,10 +324,10 @@ test "Type.Struct" {
     try testing.expectEqual(Type.ContainerLayout.Auto, infoE.layout);
     try testing.expectEqualSlices(u8, "0", infoE.fields[0].name);
     try testing.expectEqual(comptime_int, infoE.fields[0].type);
-    try testing.expectEqual(@as(comptime_int, 1), @ptrCast(*const comptime_int, infoE.fields[0].default_value.?).*);
+    try testing.expectEqual(@as(comptime_int, 1), @as(*const comptime_int, @ptrCast(infoE.fields[0].default_value.?)).*);
     try testing.expectEqualSlices(u8, "1", infoE.fields[1].name);
     try testing.expectEqual(comptime_int, infoE.fields[1].type);
-    try testing.expectEqual(@as(comptime_int, 2), @ptrCast(*const comptime_int, infoE.fields[1].default_value.?).*);
+    try testing.expectEqual(@as(comptime_int, 2), @as(*const comptime_int, @ptrCast(infoE.fields[1].default_value.?)).*);
     try testing.expectEqual(@as(usize, 0), infoE.decls.len);
     try testing.expectEqual(@as(bool, true), infoE.is_tuple);
 
@@ -379,7 +379,7 @@ test "Type.Enum" {
     try testing.expectEqual(false, @typeInfo(Bar).Enum.is_exhaustive);
     try testing.expectEqual(@as(u32, 1), @intFromEnum(Bar.a));
     try testing.expectEqual(@as(u32, 5), @intFromEnum(Bar.b));
-    try testing.expectEqual(@as(u32, 6), @intFromEnum(@enumFromInt(Bar, 6)));
+    try testing.expectEqual(@as(u32, 6), @intFromEnum(@as(Bar, @enumFromInt(6))));
 }
 
 test "Type.Union" {
test/behavior/type_info.zig
@@ -113,7 +113,7 @@ fn testNullTerminatedPtr() !void {
     try expect(ptr_info.Pointer.size == .Many);
     try expect(ptr_info.Pointer.is_const == false);
     try expect(ptr_info.Pointer.is_volatile == false);
-    try expect(@ptrCast(*const u8, ptr_info.Pointer.sentinel.?).* == 0);
+    try expect(@as(*const u8, @ptrCast(ptr_info.Pointer.sentinel.?)).* == 0);
 
     try expect(@typeInfo([:0]u8).Pointer.sentinel != null);
 }
@@ -151,7 +151,7 @@ fn testArray() !void {
         const info = @typeInfo([10:0]u8);
         try expect(info.Array.len == 10);
         try expect(info.Array.child == u8);
-        try expect(@ptrCast(*const u8, info.Array.sentinel.?).* == @as(u8, 0));
+        try expect(@as(*const u8, @ptrCast(info.Array.sentinel.?)).* == @as(u8, 0));
         try expect(@sizeOf([10:0]u8) == info.Array.len + 1);
     }
 }
@@ -295,8 +295,8 @@ fn testStruct() !void {
     try expect(unpacked_struct_info.Struct.is_tuple == false);
     try expect(unpacked_struct_info.Struct.backing_integer == null);
     try expect(unpacked_struct_info.Struct.fields[0].alignment == @alignOf(u32));
-    try expect(@ptrCast(*align(1) const u32, unpacked_struct_info.Struct.fields[0].default_value.?).* == 4);
-    try expect(mem.eql(u8, "foobar", @ptrCast(*align(1) const *const [6:0]u8, unpacked_struct_info.Struct.fields[1].default_value.?).*));
+    try expect(@as(*align(1) const u32, @ptrCast(unpacked_struct_info.Struct.fields[0].default_value.?)).* == 4);
+    try expect(mem.eql(u8, "foobar", @as(*align(1) const *const [6:0]u8, @ptrCast(unpacked_struct_info.Struct.fields[1].default_value.?)).*));
 }
 
 const TestStruct = struct {
@@ -319,7 +319,7 @@ fn testPackedStruct() !void {
     try expect(struct_info.Struct.fields[0].alignment == 0);
     try expect(struct_info.Struct.fields[2].type == f32);
     try expect(struct_info.Struct.fields[2].default_value == null);
-    try expect(@ptrCast(*align(1) const u32, struct_info.Struct.fields[3].default_value.?).* == 4);
+    try expect(@as(*align(1) const u32, @ptrCast(struct_info.Struct.fields[3].default_value.?)).* == 4);
     try expect(struct_info.Struct.fields[3].alignment == 0);
     try expect(struct_info.Struct.decls.len == 2);
     try expect(struct_info.Struct.decls[0].is_pub);
@@ -504,7 +504,7 @@ test "type info for async frames" {
 
     switch (@typeInfo(@Frame(add))) {
         .Frame => |frame| {
-            try expect(@ptrCast(@TypeOf(add), frame.function) == add);
+            try expect(@as(@TypeOf(add), @ptrCast(frame.function)) == add);
         },
         else => unreachable,
     }
@@ -564,7 +564,7 @@ test "typeInfo resolves usingnamespace declarations" {
 test "value from struct @typeInfo default_value can be loaded at comptime" {
     comptime {
         const a = @typeInfo(@TypeOf(.{ .foo = @as(u8, 1) })).Struct.fields[0].default_value;
-        try expect(@ptrCast(*const u8, a).* == 1);
+        try expect(@as(*const u8, @ptrCast(a)).* == 1);
     }
 }
 
@@ -607,6 +607,6 @@ test "@typeInfo decls ignore dependency loops" {
 
 test "type info of tuple of string literal default value" {
     const struct_field = @typeInfo(@TypeOf(.{"hi"})).Struct.fields[0];
-    const value = @ptrCast(*align(1) const *const [2:0]u8, struct_field.default_value.?).*;
+    const value = @as(*align(1) const *const [2:0]u8, @ptrCast(struct_field.default_value.?)).*;
     comptime std.debug.assert(value[0] == 'h');
 }
test/behavior/vector.zig
@@ -1244,7 +1244,7 @@ test "@intCast to u0" {
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     var zeros = @Vector(2, u32){ 0, 0 };
-    const casted = @intCast(@Vector(2, u0), zeros);
+    const casted = @as(@Vector(2, u0), @intCast(zeros));
 
     _ = casted[0];
 }
test/c_abi/main.zig
@@ -143,7 +143,7 @@ export fn zig_longdouble(x: c_longdouble) void {
 extern fn c_ptr(*anyopaque) void;
 
 test "C ABI pointer" {
-    c_ptr(@ptrFromInt(*anyopaque, 0xdeadbeef));
+    c_ptr(@as(*anyopaque, @ptrFromInt(0xdeadbeef)));
 }
 
 export fn zig_ptr(x: *anyopaque) void {
@@ -1058,14 +1058,14 @@ test "C function that takes byval struct called via function pointer" {
 
     var fn_ptr = &c_func_ptr_byval;
     fn_ptr(
-        @ptrFromInt(*anyopaque, 1),
-        @ptrFromInt(*anyopaque, 2),
+        @as(*anyopaque, @ptrFromInt(1)),
+        @as(*anyopaque, @ptrFromInt(2)),
         ByVal{
             .origin = .{ .x = 9, .y = 10, .z = 11 },
             .size = .{ .width = 12, .height = 13, .depth = 14 },
         },
         @as(c_ulong, 3),
-        @ptrFromInt(*anyopaque, 4),
+        @as(*anyopaque, @ptrFromInt(4)),
         @as(c_ulong, 5),
     );
 }
@@ -1098,7 +1098,7 @@ test "f80 bare" {
     if (!has_f80) return error.SkipZigTest;
 
     const a = c_f80(12.34);
-    try expect(@floatCast(f64, a) == 56.78);
+    try expect(@as(f64, @floatCast(a)) == 56.78);
 }
 
 const f80_struct = extern struct {
@@ -1111,7 +1111,7 @@ test "f80 struct" {
     if (builtin.mode != .Debug) return error.SkipZigTest;
 
     const a = c_f80_struct(.{ .a = 12.34 });
-    try expect(@floatCast(f64, a.a) == 56.78);
+    try expect(@as(f64, @floatCast(a.a)) == 56.78);
 }
 
 const f80_extra_struct = extern struct {
@@ -1124,7 +1124,7 @@ test "f80 extra struct" {
     if (builtin.target.cpu.arch == .x86) return error.SkipZigTest;
 
     const a = c_f80_extra_struct(.{ .a = 12.34, .b = 42 });
-    try expect(@floatCast(f64, a.a) == 56.78);
+    try expect(@as(f64, @floatCast(a.a)) == 56.78);
     try expect(a.b == 24);
 }
 
@@ -1133,7 +1133,7 @@ test "f128 bare" {
     if (!has_f128) return error.SkipZigTest;
 
     const a = c_f128(12.34);
-    try expect(@floatCast(f64, a) == 56.78);
+    try expect(@as(f64, @floatCast(a)) == 56.78);
 }
 
 const f128_struct = extern struct {
@@ -1144,7 +1144,7 @@ test "f128 struct" {
     if (!has_f128) return error.SkipZigTest;
 
     const a = c_f128_struct(.{ .a = 12.34 });
-    try expect(@floatCast(f64, a.a) == 56.78);
+    try expect(@as(f64, @floatCast(a.a)) == 56.78);
 }
 
 // The stdcall attribute on C functions is ignored when compiled on non-x86
test/cases/compile_errors/alignCast_expects_pointer_or_slice.zig
@@ -1,9 +1,10 @@
 export fn entry() void {
-    @alignCast(4, @as(u32, 3));
+    const x: *align(8) u32 = @alignCast(@as(u32, 3));
+    _ = x;
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :2:19: error: expected pointer type, found 'u32'
+// :2:41: error: expected pointer type, found 'u32'
test/cases/compile_errors/bad_alignCast_at_comptime.zig
@@ -1,6 +1,6 @@
 comptime {
-    const ptr = @ptrFromInt(*align(1) i32, 0x1);
-    const aligned = @alignCast(4, ptr);
+    const ptr: *align(1) i32 = @ptrFromInt(0x1);
+    const aligned: *align(4) i32 = @alignCast(ptr);
     _ = aligned;
 }
 
@@ -8,4 +8,4 @@ comptime {
 // backend=stage2
 // target=native
 //
-// :3:35: error: pointer address 0x1 is not aligned to 4 bytes
+// :3:47: error: pointer address 0x1 is not aligned to 4 bytes
test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig
@@ -1,5 +1,5 @@
 export fn entry(byte: u8) void {
-    var oops = @bitCast(u7, byte);
+    var oops: u7 = @bitCast(byte);
     _ = oops;
 }
 
@@ -7,4 +7,4 @@ export fn entry(byte: u8) void {
 // backend=stage2
 // target=native
 //
-// :2:16: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits
+// :2:20: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits
test/cases/compile_errors/bitCast_to_enum_type.zig
@@ -1,6 +1,6 @@
 export fn entry() void {
     const E = enum(u32) { a, b };
-    const y = @bitCast(E, @as(u32, 3));
+    const y: E = @bitCast(@as(u32, 3));
     _ = y;
 }
 
@@ -8,5 +8,5 @@ export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :3:24: error: cannot @bitCast to 'tmp.entry.E'
-// :3:24: note: use @enumFromInt to cast from 'u32'
+// :3:18: error: cannot @bitCast to 'tmp.entry.E'
+// :3:18: note: use @enumFromInt to cast from 'u32'
test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig
@@ -1,5 +1,5 @@
 export fn entry() void {
-    var foo = (@bitCast(u8, @as(f32, 1.0)) == 0xf);
+    var foo = (@as(u8, @bitCast(@as(f32, 1.0))) == 0xf);
     _ = foo;
 }
 
@@ -7,4 +7,4 @@ export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :2:16: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits
+// :2:24: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits
test/cases/compile_errors/cast_negative_value_to_unsigned_integer.zig
@@ -1,6 +1,6 @@
 comptime {
     const value: i32 = -1;
-    const unsigned = @intCast(u32, value);
+    const unsigned: u32 = @intCast(value);
     _ = unsigned;
 }
 export fn entry1() void {
test/cases/compile_errors/compile_log_a_pointer_to_an_opaque_value.zig
@@ -1,5 +1,5 @@
 export fn entry() void {
-    @compileLog(@as(*align(1) const anyopaque, @ptrCast(*const anyopaque, &entry)));
+    @compileLog(@as(*const anyopaque, @ptrCast(&entry)));
 }
 
 // error
test/cases/compile_errors/compile_time_null_ptr_cast.zig
@@ -1,6 +1,6 @@
 comptime {
     var opt_ptr: ?*i32 = null;
-    const ptr = @ptrCast(*i32, opt_ptr);
+    const ptr: *i32 = @ptrCast(opt_ptr);
     _ = ptr;
 }
 
test/cases/compile_errors/compile_time_undef_ptr_cast.zig
@@ -1,6 +1,6 @@
 comptime {
     var undef_ptr: *i32 = undefined;
-    const ptr = @ptrCast(*i32, undef_ptr);
+    const ptr: *i32 = @ptrCast(undef_ptr);
     _ = ptr;
 }
 
test/cases/compile_errors/comptime_call_of_function_pointer.zig
@@ -1,5 +1,5 @@
 export fn entry() void {
-    const fn_ptr = @ptrFromInt(*align(1) fn () void, 0xffd2);
+    const fn_ptr: *align(1) fn () void = @ptrFromInt(0xffd2);
     comptime fn_ptr();
 }
 
test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_terminated.zig
@@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void {
 export fn foo_vector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @ptrCast(&buf);
         const slice = target[0..3 :0];
         _ = slice;
     }
@@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void {
 export fn foo_cvector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @ptrCast(&buf);
         const slice = target[0..3 :0];
         _ = slice;
     }
test/cases/compile_errors/comptime_slice-sentinel_does_not_match_memory_at_target_index_unterminated.zig
@@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void {
 export fn foo_vector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @ptrCast(&buf);
         const slice = target[0..3 :0];
         _ = slice;
     }
@@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void {
 export fn foo_cvector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @ptrCast(&buf);
         const slice = target[0..3 :0];
         _ = slice;
     }
test/cases/compile_errors/comptime_slice-sentinel_does_not_match_target-sentinel.zig
@@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void {
 export fn foo_vector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @ptrCast(&buf);
         const slice = target[0..14 :255];
         _ = slice;
     }
@@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void {
 export fn foo_cvector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @ptrCast(&buf);
         const slice = target[0..14 :255];
         _ = slice;
     }
test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_terminated.zig
@@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void {
 export fn foo_vector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @ptrCast(&buf);
         const slice = target[0..15 :0];
         _ = slice;
     }
@@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void {
 export fn foo_cvector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_:0]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @ptrCast(&buf);
         const slice = target[0..15 :0];
         _ = slice;
     }
test/cases/compile_errors/comptime_slice-sentinel_is_out_of_bounds_unterminated.zig
@@ -24,7 +24,7 @@ export fn foo_vector_ConstPtrSpecialBaseArray() void {
 export fn foo_vector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*]u8 = @ptrCast([*]u8, &buf);
+        var target: [*]u8 = @ptrCast(&buf);
         const slice = target[0..14 :0];
         _ = slice;
     }
@@ -40,7 +40,7 @@ export fn foo_cvector_ConstPtrSpecialBaseArray() void {
 export fn foo_cvector_ConstPtrSpecialRef() void {
     comptime {
         var buf = [_]u8{ 'a', 'b', 'c', 'd' } ++ [_]u8{undefined} ** 10;
-        var target: [*c]u8 = @ptrCast([*c]u8, &buf);
+        var target: [*c]u8 = @ptrCast(&buf);
         const slice = target[0..14 :0];
         _ = slice;
     }
test/cases/compile_errors/enum_in_field_count_range_but_not_matching_tag.zig
@@ -3,7 +3,7 @@ const Foo = enum(u32) {
     B = 11,
 };
 export fn entry() void {
-    var x = @enumFromInt(Foo, 0);
+    var x: Foo = @enumFromInt(0);
     _ = x;
 }
 
@@ -11,5 +11,5 @@ export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :6:13: error: enum 'tmp.Foo' has no tag with value '0'
+// :6:18: error: enum 'tmp.Foo' has no tag with value '0'
 // :1:13: note: enum declared here
test/cases/compile_errors/enumFromInt_on_non-exhaustive_enums_checks_int_in_range.zig
@@ -1,11 +1,11 @@
 pub export fn entry() void {
     const E = enum(u3) { a, b, c, _ };
-    @compileLog(@enumFromInt(E, 100));
+    @compileLog(@as(E, @enumFromInt(100)));
 }
 
 // error
 // target=native
 // backend=stage2
 //
-// :3:17: error: int value '100' out of range of non-exhaustive enum 'tmp.entry.E'
+// :3:24: error: int value '100' out of range of non-exhaustive enum 'tmp.entry.E'
 // :2:15: note: enum declared here
test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig
@@ -2,7 +2,7 @@ const Set1 = error{ A, B };
 const Set2 = error{ A, C };
 comptime {
     var x = Set1.B;
-    var y = @errSetCast(Set2, x);
+    var y: Set2 = @errSetCast(x);
     _ = y;
 }
 
@@ -10,4 +10,4 @@ comptime {
 // backend=stage2
 // target=native
 //
-// :5:13: error: 'error.B' not a member of error set 'error{C,A}'
+// :5:19: error: 'error.B' not a member of error set 'error{C,A}'
test/cases/compile_errors/explicitly_casting_non_tag_type_to_enum.zig
@@ -7,7 +7,7 @@ const Small = enum(u2) {
 
 export fn entry() void {
     var y = @as(f32, 3);
-    var x = @enumFromInt(Small, y);
+    var x: Small = @enumFromInt(y);
     _ = x;
 }
 
test/cases/compile_errors/field_access_of_opaque_type.zig
@@ -2,7 +2,7 @@ const MyType = opaque {};
 
 export fn entry() bool {
     var x: i32 = 1;
-    return bar(@ptrCast(*MyType, &x));
+    return bar(@ptrCast(&x));
 }
 
 fn bar(x: *MyType) bool {
test/cases/compile_errors/fieldParentPtr-comptime_field_ptr_not_based_on_struct.zig
@@ -8,7 +8,7 @@ const foo = Foo{
 };
 
 comptime {
-    const field_ptr = @ptrFromInt(*i32, 0x1234);
+    const field_ptr: *i32 = @ptrFromInt(0x1234);
     const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr);
     _ = another_foo_ptr;
 }
test/cases/compile_errors/incorrect_type_to_memset_memcpy.zig
@@ -2,7 +2,7 @@ pub export fn entry() void {
     var buf: [5]u8 = .{ 1, 2, 3, 4, 5 };
     var slice: []u8 = &buf;
     const a: u32 = 1234;
-    @memcpy(slice.ptr, @ptrCast([*]const u8, &a));
+    @memcpy(slice.ptr, @as([*]const u8, @ptrCast(&a)));
 }
 pub export fn entry1() void {
     var buf: [5]u8 = .{ 1, 2, 3, 4, 5 };
@@ -39,7 +39,7 @@ pub export fn memset_array() void {
 //
 // :5:5: error: unknown @memcpy length
 // :5:18: note: destination type '[*]u8' provides no length
-// :5:24: note: source type '[*]align(4) const u8' provides no length
+// :5:24: note: source type '[*]const u8' provides no length
 // :10:13: error: type '*u8' is not an indexable pointer
 // :10:13: note: operand must be a slice, a many pointer or a pointer to an array
 // :15:13: error: type '*u8' is not an indexable pointer
test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig
@@ -1,6 +1,6 @@
 export fn entry() u32 {
     var bytes: [4]u8 = [_]u8{ 0x01, 0x02, 0x03, 0x04 };
-    const ptr = @ptrCast(*u32, &bytes[0]);
+    const ptr: *u32 = @ptrCast(&bytes[0]);
     return ptr.*;
 }
 
@@ -8,7 +8,7 @@ export fn entry() u32 {
 // backend=stage2
 // target=native
 //
-// :3:17: error: cast increases pointer alignment
+// :3:23: error: cast increases pointer alignment
 // :3:32: note: '*u8' has alignment '1'
-// :3:26: note: '*u32' has alignment '4'
-// :3:17: note: consider using '@alignCast'
+// :3:23: note: '*u32' has alignment '4'
+// :3:23: note: use @alignCast to assert pointer alignment
test/cases/compile_errors/int-float_conversion_to_comptime_int-float.zig
@@ -1,17 +1,17 @@
 export fn foo() void {
     var a: f32 = 2;
-    _ = @intFromFloat(comptime_int, a);
+    _ = @as(comptime_int, @intFromFloat(a));
 }
 export fn bar() void {
     var a: u32 = 2;
-    _ = @floatFromInt(comptime_float, a);
+    _ = @as(comptime_float, @floatFromInt(a));
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :3:37: error: unable to resolve comptime value
-// :3:37: note: value being casted to 'comptime_int' must be comptime-known
-// :7:39: error: unable to resolve comptime value
-// :7:39: note: value being casted to 'comptime_float' must be comptime-known
+// :3:41: error: unable to resolve comptime value
+// :3:41: note: value being casted to 'comptime_int' must be comptime-known
+// :7:43: error: unable to resolve comptime value
+// :7:43: note: value being casted to 'comptime_float' must be comptime-known
test/cases/compile_errors/int_to_err_non_global_invalid_number.zig
@@ -8,7 +8,7 @@ const Set2 = error{
 };
 comptime {
     var x = @intFromError(Set1.B);
-    var y = @errSetCast(Set2, @errorFromInt(x));
+    var y: Set2 = @errSetCast(@errorFromInt(x));
     _ = y;
 }
 
@@ -16,4 +16,4 @@ comptime {
 // backend=llvm
 // target=native
 //
-// :11:13: error: 'error.B' not a member of error set 'error{C,A}'
+// :11:19: error: 'error.B' not a member of error set 'error{C,A}'
test/cases/compile_errors/integer_cast_truncates_bits.zig
@@ -1,6 +1,6 @@
 export fn entry1() void {
     const spartan_count: u16 = 300;
-    const byte = @intCast(u8, spartan_count);
+    const byte: u8 = @intCast(spartan_count);
     _ = byte;
 }
 export fn entry2() void {
test/cases/compile_errors/integer_underflow_error.zig
@@ -1,9 +1,9 @@
 export fn entry() void {
-    _ = @ptrFromInt(*anyopaque, ~@as(usize, @import("std").math.maxInt(usize)) - 1);
+    _ = @as(*anyopaque, @ptrFromInt(~@as(usize, @import("std").math.maxInt(usize)) - 1));
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :2:80: error: overflow of integer type 'usize' with value '-1'
+// :2:84: error: overflow of integer type 'usize' with value '-1'
test/cases/compile_errors/intFromFloat_comptime_safety.zig
@@ -1,17 +1,17 @@
 comptime {
-    _ = @intFromFloat(i8, @as(f32, -129.1));
+    _ = @as(i8, @intFromFloat(@as(f32, -129.1)));
 }
 comptime {
-    _ = @intFromFloat(u8, @as(f32, -1.1));
+    _ = @as(u8, @intFromFloat(@as(f32, -1.1)));
 }
 comptime {
-    _ = @intFromFloat(u8, @as(f32, 256.1));
+    _ = @as(u8, @intFromFloat(@as(f32, 256.1)));
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :2:27: error: float value '-129.10000610351562' cannot be stored in integer type 'i8'
-// :5:27: error: float value '-1.100000023841858' cannot be stored in integer type 'u8'
-// :8:27: error: float value '256.1000061035156' cannot be stored in integer type 'u8'
+// :2:31: error: float value '-129.10000610351562' cannot be stored in integer type 'i8'
+// :5:31: error: float value '-1.100000023841858' cannot be stored in integer type 'u8'
+// :8:31: error: float value '256.1000061035156' cannot be stored in integer type 'u8'
test/cases/compile_errors/intFromPtr_0_to_non_optional_pointer.zig
@@ -1,5 +1,5 @@
 export fn entry() void {
-    var b = @ptrFromInt(*i32, 0);
+    var b: *i32 = @ptrFromInt(0);
     _ = b;
 }
 
test/cases/compile_errors/invalid_float_casts.zig
@@ -1,25 +1,25 @@
 export fn foo() void {
     var a: f32 = 2;
-    _ = @floatCast(comptime_float, a);
+    _ = @as(comptime_float, @floatCast(a));
 }
 export fn bar() void {
     var a: f32 = 2;
-    _ = @intFromFloat(f32, a);
+    _ = @as(f32, @intFromFloat(a));
 }
 export fn baz() void {
     var a: f32 = 2;
-    _ = @floatFromInt(f32, a);
+    _ = @as(f32, @floatFromInt(a));
 }
 export fn qux() void {
     var a: u32 = 2;
-    _ = @floatCast(f32, a);
+    _ = @as(f32, @floatCast(a));
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :3:36: error: unable to cast runtime value to 'comptime_float'
-// :7:23: error: expected integer type, found 'f32'
-// :11:28: error: expected integer type, found 'f32'
-// :15:25: error: expected float type, found 'u32'
+// :3:40: error: unable to cast runtime value to 'comptime_float'
+// :7:18: error: expected integer type, found 'f32'
+// :11:32: error: expected integer type, found 'f32'
+// :15:29: error: expected float type, found 'u32'
test/cases/compile_errors/invalid_int_casts.zig
@@ -1,25 +1,25 @@
 export fn foo() void {
     var a: u32 = 2;
-    _ = @intCast(comptime_int, a);
+    _ = @as(comptime_int, @intCast(a));
 }
 export fn bar() void {
     var a: u32 = 2;
-    _ = @floatFromInt(u32, a);
+    _ = @as(u32, @floatFromInt(a));
 }
 export fn baz() void {
     var a: u32 = 2;
-    _ = @intFromFloat(u32, a);
+    _ = @as(u32, @intFromFloat(a));
 }
 export fn qux() void {
     var a: f32 = 2;
-    _ = @intCast(u32, a);
+    _ = @as(u32, @intCast(a));
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :3:32: error: unable to cast runtime value to 'comptime_int'
-// :7:23: error: expected float type, found 'u32'
-// :11:28: error: expected float type, found 'u32'
-// :15:23: error: expected integer or vector, found 'f32'
+// :3:36: error: unable to cast runtime value to 'comptime_int'
+// :7:18: error: expected float type, found 'u32'
+// :11:32: error: expected float type, found 'u32'
+// :15:27: error: expected integer or vector, found 'f32'
test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig
@@ -8,12 +8,12 @@ const U = union(E) {
     b,
 };
 export fn foo() void {
-    var e = @enumFromInt(E, 15);
+    var e: E = @enumFromInt(15);
     var u: U = e;
     _ = u;
 }
 export fn bar() void {
-    const e = @enumFromInt(E, 15);
+    const e: E = @enumFromInt(15);
     var u: U = e;
     _ = u;
 }
@@ -24,5 +24,5 @@ export fn bar() void {
 //
 // :12:16: error: runtime coercion to union 'tmp.U' from non-exhaustive enum
 // :1:11: note: enum declared here
-// :17:16: error: union 'tmp.U' has no tag with value '@enumFromInt(tmp.E, 15)'
+// :17:16: error: union 'tmp.U' has no tag with value '@enumFromInt(15)'
 // :6:11: note: union declared here
test/cases/compile_errors/issue_3818_bitcast_from_parray-slice_to_u16.zig
@@ -1,11 +1,11 @@
 export fn foo1() void {
     var bytes = [_]u8{ 1, 2 };
-    const word: u16 = @bitCast(u16, bytes[0..]);
+    const word: u16 = @bitCast(bytes[0..]);
     _ = word;
 }
 export fn foo2() void {
     var bytes: []const u8 = &[_]u8{ 1, 2 };
-    const word: u16 = @bitCast(u16, bytes);
+    const word: u16 = @bitCast(bytes);
     _ = word;
 }
 
@@ -13,7 +13,7 @@ export fn foo2() void {
 // backend=stage2
 // target=native
 //
-// :3:42: error: cannot @bitCast from '*[2]u8'
-// :3:42: note: use @intFromPtr to cast to 'u16'
-// :8:37: error: cannot @bitCast from '[]const u8'
-// :8:37: note: use @intFromPtr to cast to 'u16'
+// :3:37: error: cannot @bitCast from '*[2]u8'
+// :3:37: note: use @intFromPtr to cast to 'u16'
+// :8:32: error: cannot @bitCast from '[]const u8'
+// :8:32: note: use @intFromPtr to cast to 'u16'
test/cases/compile_errors/load_too_many_bytes_from_comptime_reinterpreted_pointer.zig
@@ -1,7 +1,7 @@
 export fn entry() void {
     const float: f32 align(@alignOf(i64)) = 5.99999999999994648725e-01;
     const float_ptr = &float;
-    const int_ptr = @ptrCast(*const i64, float_ptr);
+    const int_ptr: *const i64 = @ptrCast(float_ptr);
     const int_val = int_ptr.*;
     _ = int_val;
 }
test/cases/compile_errors/missing_builtin_arg_in_initializer.zig
@@ -1,8 +1,11 @@
 comptime {
-    const v = @as();
+    const a = @as();
 }
 comptime {
-    const u = @bitCast(u32);
+    const b = @bitCast();
+}
+comptime {
+    const c = @as(u32);
 }
 
 // error
@@ -10,4 +13,5 @@ comptime {
 // target=native
 //
 // :2:15: error: expected 2 arguments, found 0
-// :5:15: error: expected 2 arguments, found 1
+// :5:15: error: expected 1 argument, found 0
+// :8:15: error: expected 2 arguments, found 1
test/cases/compile_errors/non_float_passed_to_intFromFloat.zig
@@ -1,5 +1,5 @@
 export fn entry() void {
-    const x = @intFromFloat(i32, @as(i32, 54));
+    const x: i32 = @intFromFloat(@as(i32, 54));
     _ = x;
 }
 
test/cases/compile_errors/non_int_passed_to_floatFromInt.zig
@@ -1,5 +1,5 @@
 export fn entry() void {
-    const x = @floatFromInt(f32, 1.1);
+    const x: f32 = @floatFromInt(1.1);
     _ = x;
 }
 
test/cases/compile_errors/out_of_int_range_comptime_float_passed_to_intFromFloat.zig
@@ -1,5 +1,5 @@
 export fn entry() void {
-    const x = @intFromFloat(i8, 200);
+    const x: i8 = @intFromFloat(200);
     _ = x;
 }
 
test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
@@ -1,6 +1,6 @@
 export fn entry() void {
     const x: i32 = 1234;
-    const y = @ptrCast(*i32, &x);
+    const y: *i32 = @ptrCast(&x);
     _ = y;
 }
 
@@ -8,5 +8,5 @@ export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :3:15: error: cast discards const qualifier
-// :3:15: note: consider using '@constCast'
+// :3:21: error: cast discards const qualifier
+// :3:21: note: use @constCast to discard const qualifier
test/cases/compile_errors/ptrcast_to_non-pointer.zig
@@ -1,9 +1,9 @@
 export fn entry(a: *i32) usize {
-    return @ptrCast(usize, a);
+    return @ptrCast(a);
 }
 
 // error
 // backend=llvm
 // target=native
 //
-// :2:21: error: expected pointer type, found 'usize'
+// :2:12: error: expected pointer type, found 'usize'
test/cases/compile_errors/ptrFromInt_non_ptr_type.zig
@@ -1,15 +1,15 @@
 pub export fn entry() void {
-    _ = @ptrFromInt(i32, 10);
+    _ = @as(i32, @ptrFromInt(10));
 }
 
 pub export fn entry2() void {
-    _ = @ptrFromInt([]u8, 20);
+    _ = @as([]u8, @ptrFromInt(20));
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :2:21: error: expected pointer type, found 'i32'
-// :6:21: error: integer cannot be converted to slice type '[]u8'
-// :6:21: note: slice length cannot be inferred from address
+// :2:18: error: expected pointer type, found 'i32'
+// :6:19: error: integer cannot be converted to slice type '[]u8'
+// :6:19: note: slice length cannot be inferred from address
test/cases/compile_errors/ptrFromInt_with_misaligned_address.zig
@@ -1,5 +1,5 @@
 pub export fn entry() void {
-    var y = @ptrFromInt([*]align(4) u8, 5);
+    var y: [*]align(4) u8 = @ptrFromInt(5);
     _ = y;
 }
 
test/cases/compile_errors/reading_past_end_of_pointer_casted_array.zig
@@ -1,7 +1,7 @@
 comptime {
     const array: [4]u8 = "aoeu".*;
     const sub_array = array[1..];
-    const int_ptr = @ptrCast(*const u24, @alignCast(@alignOf(u24), sub_array));
+    const int_ptr: *const u24 = @ptrCast(@alignCast(sub_array));
     const deref = int_ptr.*;
     _ = deref;
 }
test/cases/compile_errors/reify_type_for_exhaustive_enum_with_non-integer_tag_type.zig
@@ -7,7 +7,7 @@ const Tag = @Type(.{
     },
 });
 export fn entry() void {
-    _ = @enumFromInt(Tag, 0);
+    _ = @as(Tag, @enumFromInt(0));
 }
 
 // error
test/cases/compile_errors/reify_type_for_exhaustive_enum_with_undefined_tag_type.zig
@@ -7,7 +7,7 @@ const Tag = @Type(.{
     },
 });
 export fn entry() void {
-    _ = @enumFromInt(Tag, 0);
+    _ = @as(Tag, @enumFromInt(0));
 }
 
 // error
test/cases/compile_errors/slice_cannot_have_its_bytes_reinterpreted.zig
@@ -1,6 +1,6 @@
 export fn foo() void {
     const bytes align(@alignOf([]const u8)) = [1]u8{0xfa} ** 16;
-    var value = @ptrCast(*const []const u8, &bytes).*;
+    var value = @as(*const []const u8, @ptrCast(&bytes)).*;
     _ = value;
 }
 
@@ -8,4 +8,4 @@ export fn foo() void {
 // backend=stage2
 // target=native
 //
-// :3:52: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not.
+// :3:57: error: comptime dereference requires '[]const u8' to have a well-defined layout, but it does not.
test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig
@@ -1,6 +1,6 @@
 test "enum" {
     const E = enum(u8) { A, B, _ };
-    _ = @tagName(@enumFromInt(E, 5));
+    _ = @tagName(@as(E, @enumFromInt(5)));
 }
 
 // error
@@ -8,5 +8,5 @@ test "enum" {
 // target=native
 // is_test=1
 //
-// :3:9: error: no field with value '@enumFromInt(tmp.test.enum.E, 5)' in enum 'test.enum.E'
+// :3:9: error: no field with value '@enumFromInt(5)' in enum 'test.enum.E'
 // :2:15: note: declared here
test/cases/compile_errors/truncate_sign_mismatch.zig
@@ -1,25 +1,25 @@
 export fn entry1() i8 {
     var x: u32 = 10;
-    return @truncate(i8, x);
+    return @truncate(x);
 }
 export fn entry2() u8 {
     var x: i32 = -10;
-    return @truncate(u8, x);
+    return @truncate(x);
 }
 export fn entry3() i8 {
     comptime var x: u32 = 10;
-    return @truncate(i8, x);
+    return @truncate(x);
 }
 export fn entry4() u8 {
     comptime var x: i32 = -10;
-    return @truncate(u8, x);
+    return @truncate(x);
 }
 
 // error
 // backend=stage2
 // target=native
 //
-// :3:26: error: expected signed integer type, found 'u32'
-// :7:26: error: expected unsigned integer type, found 'i32'
-// :11:26: error: expected signed integer type, found 'u32'
-// :15:26: error: expected unsigned integer type, found 'i32'
+// :3:22: error: expected signed integer type, found 'u32'
+// :7:22: error: expected unsigned integer type, found 'i32'
+// :11:22: error: expected signed integer type, found 'u32'
+// :15:22: error: expected unsigned integer type, found 'i32'
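
For reference, `@truncate` now likewise takes its destination from the result location; the cases above still fail because the signedness must match. A compiling counterpart (the `low8` helper is illustrative, not from the commit):

    // The destination u8 comes from the return type; @truncate keeps only
    // the low 8 bits of the unsigned operand.
    fn low8(x: u32) u8 {
        return @truncate(x);
    }
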
test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig
@@ -2,7 +2,7 @@ const Derp = opaque {};
 extern fn bar(d: *Derp) void;
 export fn foo() void {
     var x = @as(u8, 1);
-    bar(@ptrCast(*anyopaque, &x));
+    bar(@as(*anyopaque, @ptrCast(&x)));
 }
 
 // error
test/cases/llvm/f_segment_address_space_reading_and_writing.zig
@@ -34,7 +34,7 @@ pub fn main() void {
     setFs(@intFromPtr(&test_value));
     assert(getFs() == @intFromPtr(&test_value));
 
-    var test_ptr = @ptrFromInt(*allowzero addrspace(.fs) u64, 0);
+    var test_ptr: *allowzero addrspace(.fs) u64 = @ptrFromInt(0);
     assert(test_ptr.* == 12345);
     test_ptr.* = 98765;
     assert(test_value == 98765);
test/cases/llvm/large_slices.zig
@@ -1,5 +1,5 @@
 pub fn main() void {
-    const large_slice = @ptrFromInt([*]const u8, 1)[0..(0xffffffffffffffff >> 3)];
+    const large_slice = @as([*]const u8, @ptrFromInt(1))[0..(0xffffffffffffffff >> 3)];
     _ = large_slice;
 }
 
test/cases/safety/@alignCast misaligned.zig
@@ -16,7 +16,8 @@ pub fn main() !void {
 }
 fn foo(bytes: []u8) u32 {
     const slice4 = bytes[1..5];
-    const int_slice = std.mem.bytesAsSlice(u32, @alignCast(4, slice4));
+    const aligned: *align(4) [4]u8 = @alignCast(slice4);
+    const int_slice = std.mem.bytesAsSlice(u32, aligned);
     return int_slice[0];
 }
 // run
test/cases/safety/@enumFromInt - no matching tag value.zig
@@ -17,7 +17,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn bar(a: u2) Foo {
-    return @enumFromInt(Foo, a);
+    return @enumFromInt(a);
 }
 fn baz(_: Foo) void {}
 
test/cases/safety/@errSetCast error not present in destination.zig
@@ -14,7 +14,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn foo(set1: Set1) Set2 {
-    return @errSetCast(Set2, set1);
+    return @errSetCast(set1);
 }
 // run
 // backend=llvm
test/cases/safety/@intCast to u0.zig
@@ -14,7 +14,7 @@ pub fn main() !void {
 }
 
 fn bar(one: u1, not_zero: i32) void {
-    var x = one << @intCast(u0, not_zero);
+    var x = one << @as(u0, @intCast(not_zero));
     _ = x;
 }
 // run
test/cases/safety/@intFromFloat cannot fit - negative out of range.zig
@@ -12,7 +12,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn bar(a: f32) i8 {
-    return @intFromFloat(i8, a);
+    return @intFromFloat(a);
 }
 fn baz(_: i8) void {}
 // run
test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig
@@ -12,7 +12,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn bar(a: f32) u8 {
-    return @intFromFloat(u8, a);
+    return @intFromFloat(a);
 }
 fn baz(_: u8) void {}
 // run
test/cases/safety/@intFromFloat cannot fit - positive out of range.zig
@@ -12,7 +12,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn bar(a: f32) u8 {
-    return @intFromFloat(u8, a);
+    return @intFromFloat(a);
 }
 fn baz(_: u8) void {}
 // run
test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig
@@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 }
 pub fn main() !void {
     var zero: usize = 0;
-    var b = @ptrFromInt(*u8, zero);
+    var b: *u8 = @ptrFromInt(zero);
     _ = b;
     return error.TestFailed;
 }
test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig
@@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 }
 pub fn main() !void {
     var zero: usize = 0;
-    var b = @ptrFromInt(*i32, zero);
+    var b: *i32 = @ptrFromInt(zero);
     _ = b;
     return error.TestFailed;
 }
test/cases/safety/@ptrFromInt with misaligned address.zig
@@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 }
 pub fn main() !void {
     var x: usize = 5;
-    var y = @ptrFromInt([*]align(4) u8, x);
+    var y: [*]align(4) u8 = @ptrFromInt(x);
     _ = y;
     return error.TestFailed;
 }
test/cases/safety/@tagName on corrupted enum value.zig
@@ -15,7 +15,7 @@ const E = enum(u32) {
 
 pub fn main() !void {
     var e: E = undefined;
-    @memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55);
+    @memset(@as([*]u8, @ptrCast(&e))[0..@sizeOf(E)], 0x55);
     var n = @tagName(e);
     _ = n;
     return error.TestFailed;
test/cases/safety/@tagName on corrupted union value.zig
@@ -15,7 +15,7 @@ const U = union(enum(u32)) {
 
 pub fn main() !void {
     var u: U = undefined;
-    @memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55);
+    @memset(@as([*]u8, @ptrCast(&u))[0..@sizeOf(U)], 0x55);
     var t: @typeInfo(U).Union.tag_type.? = u;
     var n = @tagName(t);
     _ = n;
test/cases/safety/pointer casting to null function pointer.zig
@@ -13,7 +13,7 @@ fn getNullPtr() ?*const anyopaque {
 }
 pub fn main() !void {
     const null_ptr: ?*const anyopaque = getNullPtr();
-    const required_ptr: *align(1) const fn () void = @ptrCast(*align(1) const fn () void, null_ptr);
+    const required_ptr: *align(1) const fn () void = @ptrCast(null_ptr);
     _ = required_ptr;
     return error.TestFailed;
 }
test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig
@@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 }
 pub fn main() !void {
     var value: c_short = -1;
-    var casted = @intCast(u32, value);
+    var casted: u32 = @intCast(value);
     _ = casted;
     return error.TestFailed;
 }
test/cases/safety/signed integer not fitting in cast to unsigned integer.zig
@@ -13,7 +13,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn unsigned_cast(x: i32) u32 {
-    return @intCast(u32, x);
+    return @intCast(x);
 }
 // run
 // backend=llvm
test/cases/safety/signed-unsigned vector cast.zig
@@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 
 pub fn main() !void {
     var x = @splat(4, @as(i32, -2147483647));
-    var y = @intCast(@Vector(4, u32), x);
+    var y: @Vector(4, u32) = @intCast(x);
     _ = y;
     return error.TestFailed;
 }
test/cases/safety/slice sentinel mismatch - optional pointers.zig
@@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 }
 
 pub fn main() !void {
-    var buf: [4]?*i32 = .{ @ptrFromInt(*i32, 4), @ptrFromInt(*i32, 8), @ptrFromInt(*i32, 12), @ptrFromInt(*i32, 16) };
+    var buf: [4]?*i32 = .{ @ptrFromInt(4), @ptrFromInt(8), @ptrFromInt(12), @ptrFromInt(16) };
     const slice = buf[0..3 :null];
     _ = slice;
     return error.TestFailed;
test/cases/safety/switch else on corrupt enum value - one prong.zig
@@ -13,7 +13,7 @@ const E = enum(u32) {
 };
 pub fn main() !void {
     var a: E = undefined;
-    @ptrCast(*u32, &a).* = 255;
+    @as(*u32, @ptrCast(&a)).* = 255;
     switch (a) {
         .one => @panic("one"),
         else => @panic("else"),
test/cases/safety/switch else on corrupt enum value - union.zig
@@ -18,7 +18,7 @@ const U = union(E) {
 };
 pub fn main() !void {
     var a: U = undefined;
-    @ptrCast(*align(@alignOf(U)) u32, &a).* = 0xFFFF_FFFF;
+    @as(*align(@alignOf(U)) u32, @ptrCast(&a)).* = 0xFFFF_FFFF;
     switch (a) {
         .one => @panic("one"),
         else => @panic("else"),
test/cases/safety/switch else on corrupt enum value.zig
@@ -13,7 +13,7 @@ const E = enum(u32) {
 };
 pub fn main() !void {
     var a: E = undefined;
-    @ptrCast(*u32, &a).* = 255;
+    @as(*u32, @ptrCast(&a)).* = 255;
     switch (a) {
         else => @panic("else"),
     }
test/cases/safety/switch on corrupted enum value.zig
@@ -15,7 +15,7 @@ const E = enum(u32) {
 
 pub fn main() !void {
     var e: E = undefined;
-    @memset(@ptrCast([*]u8, &e)[0..@sizeOf(E)], 0x55);
+    @memset(@as([*]u8, @ptrCast(&e))[0..@sizeOf(E)], 0x55);
     switch (e) {
         .X, .Y => @breakpoint(),
     }
test/cases/safety/switch on corrupted union value.zig
@@ -15,7 +15,7 @@ const U = union(enum(u32)) {
 
 pub fn main() !void {
     var u: U = undefined;
-    @memset(@ptrCast([*]u8, &u)[0..@sizeOf(U)], 0x55);
+    @memset(@as([*]u8, @ptrCast(&u))[0..@sizeOf(U)], 0x55);
     switch (u) {
         .X, .Y => @breakpoint(),
     }
test/cases/safety/truncating vector cast.zig
@@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 
 pub fn main() !void {
     var x = @splat(4, @as(u32, 0xdeadbeef));
-    var y = @intCast(@Vector(4, u16), x);
+    var y: @Vector(4, u16) = @intCast(x);
     _ = y;
     return error.TestFailed;
 }
test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig
@@ -9,7 +9,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 }
 pub fn main() !void {
     var value: u8 = 245;
-    var casted = @intCast(i8, value);
+    var casted: i8 = @intCast(value);
     _ = casted;
     return error.TestFailed;
 }
test/cases/safety/unsigned-signed vector cast.zig
@@ -10,7 +10,7 @@ pub fn panic(message: []const u8, stack_trace: ?*std.builtin.StackTrace, _: ?usi
 
 pub fn main() !void {
     var x = @splat(4, @as(u32, 0x80000000));
-    var y = @intCast(@Vector(4, i32), x);
+    var y: @Vector(4, i32) = @intCast(x);
     _ = y;
     return error.TestFailed;
 }
test/cases/safety/value does not fit in shortening cast - u0.zig
@@ -14,7 +14,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn shorten_cast(x: u8) u0 {
-    return @intCast(u0, x);
+    return @intCast(x);
 }
 // run
 // backend=llvm
test/cases/safety/value does not fit in shortening cast.zig
@@ -14,7 +14,7 @@ pub fn main() !void {
     return error.TestFailed;
 }
 fn shorten_cast(x: i32) i8 {
-    return @intCast(i8, x);
+    return @intCast(x);
 }
 // run
 // backend=llvm
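
The safety cases above show both spellings the migration produces: a bare `@intCast` where a typed declaration or return type supplies the destination, and a conservative `@as(T, @intCast(...))` wrapper where the rewrite was mechanical. A small sketch of the equivalence (the `demo` helper is hypothetical):

    fn demo(x: u32) u8 {
        const wrapped = @as(u8, @intCast(x)); // mechanical migration output
        _ = wrapped;
        const annotated: u8 = @intCast(x); // idiomatic result-location form
        return annotated;
    }
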
test/cases/enum_values.0.zig
@@ -7,7 +7,7 @@ pub fn main() void {
         number1;
         number2;
     }
-    const number3 = @enumFromInt(Number, 2);
+    const number3: Number = @enumFromInt(2);
     if (@intFromEnum(number3) != 2) {
         unreachable;
     }
test/cases/enum_values.1.zig
@@ -3,7 +3,7 @@ const Number = enum { One, Two, Three };
 pub fn main() void {
     var number1 = Number.One;
     var number2: Number = .Two;
-    const number3 = @enumFromInt(Number, 2);
+    const number3: Number = @enumFromInt(2);
     assert(number1 != number2);
     assert(number2 != number3);
     assert(@intFromEnum(number1) == 0);
test/cases/error_in_nested_declaration.zig
@@ -3,7 +3,7 @@ const S = struct {
     c: i32,
     a: struct {
         pub fn str(_: @This(), extra: []u32) []i32 {
-            return @bitCast([]i32, extra);
+            return @bitCast(extra);
         }
     },
 };
@@ -27,5 +27,5 @@ pub export fn entry2() void {
 // target=native
 //
 // :17:12: error: C pointers cannot point to opaque types
-// :6:29: error: cannot @bitCast to '[]i32'
-// :6:29: note: use @ptrCast to cast from '[]u32'
+// :6:20: error: cannot @bitCast to '[]i32'
+// :6:20: note: use @ptrCast to cast from '[]u32'
test/cases/int_to_ptr.0.zig
@@ -1,8 +1,8 @@
 pub fn main() void {
-    _ = @ptrFromInt(*u8, 0);
+    _ = @as(*u8, @ptrFromInt(0));
 }
 
 // error
 // output_mode=Exe
 //
-// :2:24: error: pointer type '*u8' does not allow address zero
+// :2:18: error: pointer type '*u8' does not allow address zero
test/cases/int_to_ptr.1.zig
@@ -1,7 +1,7 @@
 pub fn main() void {
-    _ = @ptrFromInt(*u32, 2);
+    _ = @as(*u32, @ptrFromInt(2));
 }
 
 // error
 //
-// :2:25: error: pointer type '*u32' requires aligned address
+// :2:19: error: pointer type '*u32' requires aligned address
test/link/macho/dead_strip_dylibs/build.zig
@@ -37,7 +37,7 @@ fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.Optimize
         exe.dead_strip_dylibs = true;
 
         const run_cmd = b.addRunArtifact(exe);
-        run_cmd.expectExitCode(@bitCast(u8, @as(i8, -2))); // should fail
+        run_cmd.expectExitCode(@as(u8, @bitCast(@as(i8, -2)))); // should fail
         test_step.dependOn(&run_cmd.step);
     }
 }
test/standalone/hello_world/hello_libc.zig
@@ -10,6 +10,6 @@ const msg = "Hello, world!\n";
 pub export fn main(argc: c_int, argv: **u8) c_int {
     _ = argv;
     _ = argc;
-    if (c.printf(msg) != @intCast(c_int, c.strlen(msg))) return -1;
+    if (c.printf(msg) != @as(c_int, @intCast(c.strlen(msg)))) return -1;
     return 0;
 }
test/standalone/issue_11595/main.zig
@@ -1,5 +1,5 @@
 extern fn check() c_int;
 
 pub fn main() u8 {
-    return @intCast(u8, check());
+    return @as(u8, @intCast(check()));
 }
test/standalone/main_return_error/error_u8_non_zero.zig
@@ -1,7 +1,7 @@
 const Err = error{Foo};
 
 fn foo() u8 {
-    var x = @intCast(u8, 9);
+    var x = @as(u8, @intCast(9));
     return x;
 }
 
test/standalone/mix_c_files/main.zig
@@ -25,6 +25,6 @@ pub fn main() anyerror!void {
     x = add_C(x);
     x = add_C_zig(x);
 
-    const u = @intCast(u32, x);
+    const u = @as(u32, @intCast(x));
     try std.testing.expect(u / 100 == u % 100);
 }
test/standalone/pie/main.zig
@@ -5,7 +5,7 @@ threadlocal var foo: u8 = 42;
 
 test "Check ELF header" {
     // PIE executables are marked as ET_DYN, regular exes as ET_EXEC.
-    const header = @ptrFromInt(*elf.Ehdr, std.process.getBaseAddress());
+    const header = @as(*elf.Ehdr, @ptrFromInt(std.process.getBaseAddress()));
     try std.testing.expectEqual(elf.ET.DYN, header.e_type);
 }
 
test/cbe.zig
@@ -642,7 +642,7 @@ pub fn addCases(ctx: *Cases) !void {
             \\pub export fn main() c_int {
             \\    var number1 = Number.One;
             \\    var number2: Number = .Two;
-            \\    const number3 = @enumFromInt(Number, 2);
+            \\    const number3: Number = @enumFromInt(2);
             \\    if (number1 == number2) return 1;
             \\    if (number2 == number3) return 1;
             \\    if (@intFromEnum(number1) != 0) return 1;
@@ -737,19 +737,19 @@ pub fn addCases(ctx: *Cases) !void {
         case.addError(
             \\pub export fn main() c_int {
             \\    const a = 1;
-            \\    _ = @enumFromInt(bool, a);
+            \\    _ = @as(bool, @enumFromInt(a));
             \\}
         , &.{
-            ":3:20: error: expected enum, found 'bool'",
+            ":3:19: error: expected enum, found 'bool'",
         });
 
         case.addError(
             \\const E = enum { a, b, c };
             \\pub export fn main() c_int {
-            \\    _ = @enumFromInt(E, 3);
+            \\    _ = @as(E, @enumFromInt(3));
             \\}
         , &.{
-            ":3:9: error: enum 'tmp.E' has no tag with value '3'",
+            ":3:16: error: enum 'tmp.E' has no tag with value '3'",
             ":1:11: note: enum declared here",
         });
 
test/compare_output.zig
@@ -180,8 +180,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
         \\const c = @cImport(@cInclude("stdlib.h"));
         \\
         \\export fn compare_fn(a: ?*const anyopaque, b: ?*const anyopaque) c_int {
-        \\    const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a));
-        \\    const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b));
+        \\    const a_int: *const i32 = @ptrCast(@alignCast(a));
+        \\    const b_int: *const i32 = @ptrCast(@alignCast(b));
         \\    if (a_int.* < b_int.*) {
         \\        return -1;
         \\    } else if (a_int.* > b_int.*) {
@@ -194,7 +194,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
         \\pub export fn main() c_int {
         \\    var array = [_]u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 };
         \\
-        \\    c.qsort(@ptrCast(?*anyopaque, &array), @intCast(c_ulong, array.len), @sizeOf(i32), compare_fn);
+        \\    c.qsort(@ptrCast(&array), @intCast(array.len), @sizeOf(i32), compare_fn);
         \\
         \\    for (array, 0..) |item, i| {
         \\        if (item != i) {
@@ -229,8 +229,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
         \\    }
         \\    const small: f32 = 3.25;
         \\    const x: f64 = small;
-        \\    const y = @intFromFloat(i32, x);
-        \\    const z = @floatFromInt(f64, y);
+        \\    const y: i32 = @intFromFloat(x);
+        \\    const z: f64 = @floatFromInt(y);
         \\    _ = c.printf("%.2f\n%d\n%.2f\n%.2f\n", x, y, z, @as(f64, -0.4));
         \\    return 0;
         \\}
test/nvptx.zig
@@ -60,7 +60,7 @@ pub fn addCases(ctx: *Cases) !void {
             \\
             \\ var _sdata: [1024]f32 addrspace(.shared) = undefined;
             \\ pub export fn reduceSum(d_x: []const f32, out: *f32) callconv(.Kernel) void {
-            \\     var sdata = @addrSpaceCast(.generic, &_sdata);
+            \\     var sdata: *addrspace(.generic) [1024]f32 = @addrSpaceCast(&_sdata);
             \\     const tid: u32 = threadIdX();
             \\     var sum = d_x[tid];
             \\     sdata[tid] = sum;
test/translate_c.zig
@@ -351,7 +351,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\}
     , &[_][]const u8{
         \\pub export fn main() void {
-        \\    var a: c_int = @bitCast(c_int, @truncate(c_uint, @alignOf(c_int)));
+        \\    var a: c_int = @as(c_int, @bitCast(@as(c_uint, @truncate(@alignOf(c_int)))));
         \\    _ = @TypeOf(a);
         \\}
     });
@@ -465,7 +465,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\    pub fn y(self: anytype) @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int) {
         \\        const Intermediate = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), u8);
         \\        const ReturnType = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int);
-        \\        return @ptrCast(ReturnType, @alignCast(@alignOf(c_int), @ptrCast(Intermediate, self) + 4));
+        \\        return @as(ReturnType, @ptrCast(@alignCast(@as(Intermediate, @ptrCast(self)) + 4)));
         \\    }
         \\};
         \\pub const struct_bar = extern struct {
@@ -473,7 +473,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\    pub fn y(self: anytype) @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int) {
         \\        const Intermediate = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), u8);
         \\        const ReturnType = @import("std").zig.c_translation.FlexibleArrayType(@TypeOf(self), c_int);
-        \\        return @ptrCast(ReturnType, @alignCast(@alignOf(c_int), @ptrCast(Intermediate, self) + 4));
+        \\        return @as(ReturnType, @ptrCast(@alignCast(@as(Intermediate, @ptrCast(self)) + 4)));
         \\    }
         \\};
     });
@@ -635,7 +635,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\};
         \\pub export fn foo(arg_x: [*c]outer) void {
         \\    var x = arg_x;
-        \\    x.*.unnamed_0.unnamed_0.y = @bitCast(c_int, @as(c_uint, x.*.unnamed_0.x));
+        \\    x.*.unnamed_0.unnamed_0.y = @as(c_int, @bitCast(@as(c_uint, x.*.unnamed_0.x)));
         \\}
     });
 
@@ -721,7 +721,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub const struct_opaque_2 = opaque {};
         \\pub export fn function(arg_opaque_1: ?*struct_opaque) void {
         \\    var opaque_1 = arg_opaque_1;
-        \\    var cast: ?*struct_opaque_2 = @ptrCast(?*struct_opaque_2, opaque_1);
+        \\    var cast: ?*struct_opaque_2 = @as(?*struct_opaque_2, @ptrCast(opaque_1));
         \\    _ = @TypeOf(cast);
         \\}
     });
@@ -799,7 +799,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\    _ = @TypeOf(b);
         \\    const c: c_int = undefined;
         \\    _ = @TypeOf(c);
-        \\    const d: c_uint = @bitCast(c_uint, @as(c_int, 440));
+        \\    const d: c_uint = @as(c_uint, @bitCast(@as(c_int, 440)));
         \\    _ = @TypeOf(d);
         \\    var e: c_int = 10;
         \\    _ = @TypeOf(e);
@@ -904,8 +904,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
     , &[_][]const u8{
         \\pub extern fn foo() void;
         \\pub export fn bar() void {
-        \\    var func_ptr: ?*anyopaque = @ptrCast(?*anyopaque, &foo);
-        \\    var typed_func_ptr: ?*const fn () callconv(.C) void = @ptrFromInt(?*const fn () callconv(.C) void, @intCast(c_ulong, @intFromPtr(func_ptr)));
+        \\    var func_ptr: ?*anyopaque = @as(?*anyopaque, @ptrCast(&foo));
+        \\    var typed_func_ptr: ?*const fn () callconv(.C) void = @as(?*const fn () callconv(.C) void, @ptrFromInt(@as(c_ulong, @intCast(@intFromPtr(func_ptr)))));
         \\    _ = @TypeOf(typed_func_ptr);
         \\}
     });
@@ -1353,7 +1353,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
     , &[_][]const u8{
         \\pub export fn foo() ?*anyopaque {
         \\    var x: [*c]c_ushort = undefined;
-        \\    return @ptrCast(?*anyopaque, x);
+        \\    return @as(?*anyopaque, @ptrCast(x));
         \\}
     });
 
@@ -1543,7 +1543,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
     , &[_][]const u8{
         \\pub export fn ptrcast() [*c]f32 {
         \\    var a: [*c]c_int = undefined;
-        \\    return @ptrCast([*c]f32, @alignCast(@import("std").meta.alignment([*c]f32), a));
+        \\    return @as([*c]f32, @ptrCast(@alignCast(a)));
         \\}
     });
 
@@ -1555,7 +1555,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
     , &[_][]const u8{
         \\pub export fn ptrptrcast() [*c][*c]f32 {
         \\    var a: [*c][*c]c_int = undefined;
-        \\    return @ptrCast([*c][*c]f32, @alignCast(@import("std").meta.alignment([*c][*c]f32), a));
+        \\    return @as([*c][*c]f32, @ptrCast(@alignCast(a)));
         \\}
     });
 
@@ -1579,23 +1579,23 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub export fn test_ptr_cast() void {
         \\    var p: ?*anyopaque = undefined;
         \\    {
-        \\        var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p));
+        \\        var to_char: [*c]u8 = @as([*c]u8, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_char);
-        \\        var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p));
+        \\        var to_short: [*c]c_short = @as([*c]c_short, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_short);
-        \\        var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p));
+        \\        var to_int: [*c]c_int = @as([*c]c_int, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_int);
-        \\        var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p));
+        \\        var to_longlong: [*c]c_longlong = @as([*c]c_longlong, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_longlong);
         \\    }
         \\    {
-        \\        var to_char: [*c]u8 = @ptrCast([*c]u8, @alignCast(@import("std").meta.alignment([*c]u8), p));
+        \\        var to_char: [*c]u8 = @as([*c]u8, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_char);
-        \\        var to_short: [*c]c_short = @ptrCast([*c]c_short, @alignCast(@import("std").meta.alignment([*c]c_short), p));
+        \\        var to_short: [*c]c_short = @as([*c]c_short, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_short);
-        \\        var to_int: [*c]c_int = @ptrCast([*c]c_int, @alignCast(@import("std").meta.alignment([*c]c_int), p));
+        \\        var to_int: [*c]c_int = @as([*c]c_int, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_int);
-        \\        var to_longlong: [*c]c_longlong = @ptrCast([*c]c_longlong, @alignCast(@import("std").meta.alignment([*c]c_longlong), p));
+        \\        var to_longlong: [*c]c_longlong = @as([*c]c_longlong, @ptrCast(@alignCast(p)));
         \\        _ = @TypeOf(to_longlong);
         \\    }
         \\}
@@ -1651,7 +1651,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\}
     , &[_][]const u8{
         \\pub export fn foo() c_int {
-        \\    return (@as(c_int, 1) << @intCast(@import("std").math.Log2Int(c_int), 2)) >> @intCast(@import("std").math.Log2Int(c_int), 1);
+        \\    return (@as(c_int, 1) << @intCast(2)) >> @intCast(1);
         \\}
     });
 
@@ -1885,7 +1885,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
             \\const enum_unnamed_1 =
         ++ " " ++ default_enum_type ++
             \\;
-            \\pub export var h: enum_unnamed_1 = @bitCast(c_uint, e);
+            \\pub export var h: enum_unnamed_1 = @as(c_uint, @bitCast(e));
             \\pub const i: c_int = 0;
             \\pub const j: c_int = 1;
             \\pub const k: c_int = 2;
@@ -2091,12 +2091,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\    _ = @TypeOf(c_1);
         \\    var a_2: c_int = undefined;
         \\    var b_3: u8 = 123;
-        \\    b_3 = @bitCast(u8, @truncate(i8, a_2));
+        \\    b_3 = @as(u8, @bitCast(@as(i8, @truncate(a_2))));
         \\    {
         \\        var d: c_int = 5;
         \\        _ = @TypeOf(d);
         \\    }
-        \\    var d: c_uint = @bitCast(c_uint, @as(c_int, 440));
+        \\    var d: c_uint = @as(c_uint, @bitCast(@as(c_int, 440)));
         \\    _ = @TypeOf(d);
         \\}
     });
@@ -2236,9 +2236,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\int c = 3.1415;
         \\double d = 3;
     , &[_][]const u8{
-        \\pub export var a: f32 = @floatCast(f32, 3.1415);
+        \\pub export var a: f32 = @as(f32, @floatCast(3.1415));
         \\pub export var b: f64 = 3.1415;
-        \\pub export var c: c_int = @intFromFloat(c_int, 3.1415);
+        \\pub export var c: c_int = @as(c_int, @intFromFloat(3.1415));
         \\pub export var d: f64 = 3;
     });
 
@@ -2423,7 +2423,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
     , &[_][]const u8{
         \\pub export fn int_from_float(arg_a: f32) c_int {
         \\    var a = arg_a;
-        \\    return @intFromFloat(c_int, a);
+        \\    return @as(c_int, @intFromFloat(a));
         \\}
     });
 
@@ -2533,15 +2533,15 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
             \\    var a = arg_a;
             \\    var b = arg_b;
             \\    var c = arg_c;
-            \\    var d: enum_Foo = @bitCast(c_uint, FooA);
+            \\    var d: enum_Foo = @as(c_uint, @bitCast(FooA));
             \\    var e: c_int = @intFromBool((a != 0) and (b != 0));
             \\    var f: c_int = @intFromBool((b != 0) and (c != null));
             \\    var g: c_int = @intFromBool((a != 0) and (c != null));
             \\    var h: c_int = @intFromBool((a != 0) or (b != 0));
             \\    var i: c_int = @intFromBool((b != 0) or (c != null));
             \\    var j: c_int = @intFromBool((a != 0) or (c != null));
-            \\    var k: c_int = @intFromBool((a != 0) or (@bitCast(c_int, d) != 0));
-            \\    var l: c_int = @intFromBool((@bitCast(c_int, d) != 0) and (b != 0));
+            \\    var k: c_int = @intFromBool((a != 0) or (@as(c_int, @bitCast(d)) != 0));
+            \\    var l: c_int = @intFromBool((@as(c_int, @bitCast(d)) != 0) and (b != 0));
             \\    var m: c_int = @intFromBool((c != null) or (d != 0));
             \\    var td: SomeTypedef = 44;
             \\    var o: c_int = @intFromBool((td != 0) or (b != 0));
@@ -2707,10 +2707,10 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub export var array: [100]c_int = [1]c_int{0} ** 100;
         \\pub export fn foo(arg_index: c_int) c_int {
         \\    var index = arg_index;
-        \\    return array[@intCast(c_uint, index)];
+        \\    return array[@as(c_uint, @intCast(index))];
         \\}
         ,
-        \\pub const ACCESS = array[@intCast(usize, @as(c_int, 2))];
+        \\pub const ACCESS = array[@as(usize, @intCast(@as(c_int, 2)))];
     });
 
     cases.add("cast signed array index to unsigned",
@@ -2722,7 +2722,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub export fn foo() void {
         \\    var a: [10]c_int = undefined;
         \\    var i: c_int = 0;
-        \\    a[@intCast(c_uint, i)] = 0;
+        \\    a[@as(c_uint, @intCast(i))] = 0;
         \\}
     });
 
@@ -2735,7 +2735,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub export fn foo() void {
         \\    var a: [10]c_longlong = undefined;
         \\    var i: c_longlong = 0;
-        \\    a[@intCast(usize, i)] = 0;
+        \\    a[@as(usize, @intCast(i))] = 0;
         \\}
     });
 
@@ -3006,8 +3006,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub export fn log2(arg_a: c_uint) c_int {
         \\    var a = arg_a;
         \\    var i: c_int = 0;
-        \\    while (a > @bitCast(c_uint, @as(c_int, 0))) {
-        \\        a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
+        \\    while (a > @as(c_uint, @bitCast(@as(c_int, 0)))) {
+        \\        a >>= @intCast(@as(c_int, 1));
         \\    }
         \\    return i;
         \\}
@@ -3026,8 +3026,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub export fn log2(arg_a: u32) c_int {
         \\    var a = arg_a;
         \\    var i: c_int = 0;
-        \\    while (a > @bitCast(u32, @as(c_int, 0))) {
-        \\        a >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
+        \\    while (a > @as(u32, @bitCast(@as(c_int, 0)))) {
+        \\        a >>= @intCast(@as(c_int, 1));
         \\    }
         \\    return i;
         \\}
@@ -3084,14 +3084,14 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\        ref.* ^= @as(c_int, 1);
         \\        break :blk ref.*;
         \\    };
-        \\    a >>= @intCast(@import("std").math.Log2Int(c_int), blk: {
+        \\    a >>= @intCast(blk: {
         \\        const ref = &a;
-        \\        ref.* >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
+        \\        ref.* >>= @intCast(@as(c_int, 1));
         \\        break :blk ref.*;
         \\    });
-        \\    a <<= @intCast(@import("std").math.Log2Int(c_int), blk: {
+        \\    a <<= @intCast(blk: {
         \\        const ref = &a;
-        \\        ref.* <<= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
+        \\        ref.* <<= @intCast(@as(c_int, 1));
         \\        break :blk ref.*;
         \\    });
         \\    a = @divTrunc(a, blk: {
@@ -3106,12 +3106,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\    });
         \\    b /= blk: {
         \\        const ref = &b;
-        \\        ref.* /= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* /= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
         \\    b %= blk: {
         \\        const ref = &b;
-        \\        ref.* %= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* %= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
         \\}
@@ -3134,42 +3134,42 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\    var a: c_uint = 0;
         \\    a +%= blk: {
         \\        const ref = &a;
-        \\        ref.* +%= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* +%= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
         \\    a -%= blk: {
         \\        const ref = &a;
-        \\        ref.* -%= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* -%= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
         \\    a *%= blk: {
         \\        const ref = &a;
-        \\        ref.* *%= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* *%= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
         \\    a &= blk: {
         \\        const ref = &a;
-        \\        ref.* &= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* &= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
         \\    a |= blk: {
         \\        const ref = &a;
-        \\        ref.* |= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* |= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
         \\    a ^= blk: {
         \\        const ref = &a;
-        \\        ref.* ^= @bitCast(c_uint, @as(c_int, 1));
+        \\        ref.* ^= @as(c_uint, @bitCast(@as(c_int, 1)));
         \\        break :blk ref.*;
         \\    };
-        \\    a >>= @intCast(@import("std").math.Log2Int(c_uint), blk: {
+        \\    a >>= @intCast(blk: {
         \\        const ref = &a;
-        \\        ref.* >>= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
+        \\        ref.* >>= @intCast(@as(c_int, 1));
         \\        break :blk ref.*;
         \\    });
-        \\    a <<= @intCast(@import("std").math.Log2Int(c_uint), blk: {
+        \\    a <<= @intCast(blk: {
         \\        const ref = &a;
-        \\        ref.* <<= @intCast(@import("std").math.Log2Int(c_int), @as(c_int, 1));
+        \\        ref.* <<= @intCast(@as(c_int, 1));
         \\        break :blk ref.*;
         \\    });
         \\}
@@ -3258,21 +3258,21 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\pub extern fn fn_bool(x: bool) void;
         \\pub extern fn fn_ptr(x: ?*anyopaque) void;
         \\pub export fn call() void {
-        \\    fn_int(@intFromFloat(c_int, 3.0));
-        \\    fn_int(@intFromFloat(c_int, 3.0));
+        \\    fn_int(@as(c_int, @intFromFloat(3.0)));
+        \\    fn_int(@as(c_int, @intFromFloat(3.0)));
         \\    fn_int(@as(c_int, 1094861636));
-        \\    fn_f32(@floatFromInt(f32, @as(c_int, 3)));
-        \\    fn_f64(@floatFromInt(f64, @as(c_int, 3)));
-        \\    fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '3'))));
-        \\    fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '\x01'))));
-        \\    fn_char(@bitCast(u8, @truncate(i8, @as(c_int, 0))));
+        \\    fn_f32(@as(f32, @floatFromInt(@as(c_int, 3))));
+        \\    fn_f64(@as(f64, @floatFromInt(@as(c_int, 3))));
+        \\    fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, '3'))))));
+        \\    fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, '\x01'))))));
+        \\    fn_char(@as(u8, @bitCast(@as(i8, @truncate(@as(c_int, 0))))));
         \\    fn_f32(3.0);
         \\    fn_f64(3.0);
         \\    fn_bool(@as(c_int, 123) != 0);
         \\    fn_bool(@as(c_int, 0) != 0);
         \\    fn_bool(@intFromPtr(&fn_int) != 0);
-        \\    fn_int(@intCast(c_int, @intFromPtr(&fn_int)));
-        \\    fn_ptr(@ptrFromInt(?*anyopaque, @as(c_int, 42)));
+        \\    fn_int(@as(c_int, @intCast(@intFromPtr(&fn_int))));
+        \\    fn_ptr(@as(?*anyopaque, @ptrFromInt(@as(c_int, 42))));
         \\}
     });
 
@@ -3411,11 +3411,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\}
     , &[_][]const u8{
         \\pub export fn foo() c_ulong {
-        \\    return @bitCast(c_ulong, @as(c_long, -@as(c_int, 1)));
+        \\    return @as(c_ulong, @bitCast(@as(c_long, -@as(c_int, 1))));
         \\}
         \\pub export fn bar(arg_x: c_long) c_ushort {
         \\    var x = arg_x;
-        \\    return @bitCast(c_ushort, @truncate(c_short, x));
+        \\    return @as(c_ushort, @bitCast(@as(c_short, @truncate(x))));
         \\}
     });
 
@@ -3473,11 +3473,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\}
         \\pub export fn bar(arg_a: [*c]const c_int) void {
         \\    var a = arg_a;
-        \\    foo(@ptrFromInt([*c]c_int, @intFromPtr(a)));
+        \\    foo(@as([*c]c_int, @ptrFromInt(@intFromPtr(a))));
         \\}
         \\pub export fn baz(arg_a: [*c]volatile c_int) void {
         \\    var a = arg_a;
-        \\    foo(@ptrFromInt([*c]c_int, @intFromPtr(a)));
+        \\    foo(@as([*c]c_int, @ptrFromInt(@intFromPtr(a))));
         \\}
     });
 
@@ -3860,9 +3860,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
         \\    p[1];
         \\}
     , &[_][]const u8{
-        \\_ = p[@intCast(c_uint, @as(c_int, 0))];
+        \\_ = p[@as(c_uint, @intCast(@as(c_int, 0)))];
         ,
-        \\_ = p[@intCast(c_uint, @as(c_int, 1))];
+        \\_ = p[@as(c_uint, @intCast(@as(c_int, 1)))];
     });
 
     cases.add("Undefined macro identifier",
@@ -3928,7 +3928,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
             \\pub export fn foo() void {
             \\    var a: S = undefined;
             \\    var b: S = undefined;
-            \\    var c: c_longlong = @divExact(@bitCast(c_longlong, @intFromPtr(a) -% @intFromPtr(b)), @sizeOf(u8));
+            \\    var c: c_longlong = @divExact(@as(c_longlong, @bitCast(@intFromPtr(a) -% @intFromPtr(b))), @sizeOf(u8));
             \\    _ = @TypeOf(c);
             \\}
         });
@@ -3943,7 +3943,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
             \\pub export fn foo() void {
             \\    var a: S = undefined;
             \\    var b: S = undefined;
-            \\    var c: c_long = @divExact(@bitCast(c_long, @intFromPtr(a) -% @intFromPtr(b)), @sizeOf(u8));
+            \\    var c: c_long = @divExact(@as(c_long, @bitCast(@intFromPtr(a) -% @intFromPtr(b))), @sizeOf(u8));
             \\    _ = @TypeOf(c);
             \\}
         });
tools/extract-grammar.zig
@@ -90,7 +90,7 @@ fn read(path: []const u8, allocator: mem.Allocator) ![:0]const u8 {
     const st = try f.stat();
     if (st.size > max_src_size) return error.FileTooBig;
 
-    const src = try allocator.allocSentinel(u8, @intCast(usize, st.size), 0);
+    const src = try allocator.allocSentinel(u8, @as(usize, @intCast(st.size)), 0);
     const n = try f.readAll(src);
     if (n != st.size) return error.UnexpectedEndOfFile;
 
tools/gen_spirv_spec.zig
@@ -40,7 +40,7 @@ fn extendedStructs(
     kinds: []const g.OperandKind,
 ) !ExtendedStructSet {
     var map = ExtendedStructSet.init(arena);
-    try map.ensureTotalCapacity(@intCast(u32, kinds.len));
+    try map.ensureTotalCapacity(@as(u32, @intCast(kinds.len)));
 
     for (kinds) |kind| {
         const enumerants = kind.enumerants orelse continue;
tools/gen_stubs.zig
@@ -441,10 +441,10 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian)
         const sh_name = try arena.dupe(u8, mem.sliceTo(shstrtab[s(shdr.sh_name)..], 0));
         log.debug("found section: {s}", .{sh_name});
         if (mem.eql(u8, sh_name, ".dynsym")) {
-            dynsym_index = @intCast(u16, i);
+            dynsym_index = @as(u16, @intCast(i));
         }
         const gop = try parse.sections.getOrPut(sh_name);
-        section_index_map[i] = @intCast(u16, gop.index);
+        section_index_map[i] = @as(u16, @intCast(gop.index));
     }
     if (dynsym_index == 0) @panic("did not find the .dynsym section");
 
@@ -470,9 +470,9 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian)
     for (copied_dyn_syms) |sym| {
         const this_section = s(sym.st_shndx);
         const name = try arena.dupe(u8, mem.sliceTo(dynstr[s(sym.st_name)..], 0));
-        const ty = @truncate(u4, sym.st_info);
-        const binding = @truncate(u4, sym.st_info >> 4);
-        const visib = @enumFromInt(elf.STV, @truncate(u2, sym.st_other));
+        const ty = @as(u4, @truncate(sym.st_info));
+        const binding = @as(u4, @truncate(sym.st_info >> 4));
+        const visib = @as(elf.STV, @enumFromInt(@as(u2, @truncate(sym.st_other))));
         const size = s(sym.st_size);
 
         if (parse.blacklist.contains(name)) continue;
tools/update-linux-headers.zig
@@ -112,7 +112,7 @@ const DestTarget = struct {
             _ = self;
             var hasher = std.hash.Wyhash.init(0);
             std.hash.autoHash(&hasher, a.arch);
-            return @truncate(u32, hasher.final());
+            return @as(u32, @truncate(hasher.final()));
         }
 
         pub fn eql(self: @This(), a: DestTarget, b: DestTarget, b_index: usize) bool {
tools/update_clang_options.zig
@@ -591,7 +591,7 @@ pub fn main() anyerror!void {
 
         for (all_features, 0..) |feat, i| {
             const llvm_name = feat.llvm_name orelse continue;
-            const zig_feat = @enumFromInt(Feature, i);
+            const zig_feat = @as(Feature, @enumFromInt(i));
             const zig_name = @tagName(zig_feat);
             try llvm_to_zig_cpu_features.put(llvm_name, zig_name);
         }
@@ -790,7 +790,7 @@ const Syntax = union(enum) {
 };
 
 fn objSyntax(obj: *json.ObjectMap) ?Syntax {
-    const num_args = @intCast(u8, obj.get("NumArgs").?.integer);
+    const num_args = @as(u8, @intCast(obj.get("NumArgs").?.integer));
     for (obj.get("!superclasses").?.array.items) |superclass_json| {
         const superclass = superclass_json.string;
         if (std.mem.eql(u8, superclass, "Joined")) {