Commit 79460d4a3e
Author: Linus Groh <mail@linusgroh.de>
Date: 2025-03-03 19:01:47

Remove uses of deprecated callconv aliases

Parent: 05937b3
Changed files (251)
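
Editorial note: every hunk in this commit applies the same mechanical rename of deprecated std.builtin.CallingConvention aliases. The sketch below summarizes the mapping (a minimal illustration written against a compiler from around this commit; the two target-specific forms are left as comments, since they are assumed to compile only for ARM and x86 targets respectively):

    // Old alias             New form (as applied throughout this commit)
    // callconv(.C)       -> callconv(.c)
    // callconv(.Naked)   -> callconv(.naked)
    // callconv(.AAPCS)   -> callconv(.{ .arm_aapcs = .{} })
    // callconv(.Stdcall) -> callconv(.{ .x86_stdcall = .{} })

    // Portable examples of the new spellings:
    fn cExample(a: c_int) callconv(.c) c_int {
        return a + 1;
    }
    const NakedFn = fn () callconv(.naked) noreturn;

    comptime {
        _ = &cExample;
        _ = NakedFn;
    }
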
doc/langref/export_builtin.zig
@@ -2,6 +2,6 @@ comptime {
     @export(&internalName, .{ .name = "foo", .linkage = .strong });
 }
 
-fn internalName() callconv(.C) void {}
+fn internalName() callconv(.c) void {}
 
 // obj
doc/langref/test_defining_variadic_function.zig
@@ -2,7 +2,7 @@ const std = @import("std");
 const testing = std.testing;
 const builtin = @import("builtin");
 
-fn add(count: c_int, ...) callconv(.C) c_int {
+fn add(count: c_int, ...) callconv(.c) c_int {
     var ap = @cVaStart();
     defer @cVaEnd(&ap);
     var i: usize = 0;
doc/langref/test_functions.zig
@@ -35,7 +35,7 @@ fn abort() noreturn {
 
 // The naked calling convention makes a function have no prologue or epilogue.
 // This can be useful when integrating with assembly.
-fn _start() callconv(.Naked) noreturn {
+fn _start() callconv(.naked) noreturn {
     abort();
 }
 
doc/langref/test_opaque.zig
@@ -2,7 +2,7 @@ const Derp = opaque {};
 const Wat = opaque {};
 
 extern fn bar(d: *Derp) void;
-fn foo(w: *Wat) callconv(.C) void {
+fn foo(w: *Wat) callconv(.c) void {
     bar(w);
 }
 
lib/compiler/test_runner.zig
@@ -350,7 +350,7 @@ var is_fuzz_test: bool = undefined;
 extern fn fuzzer_set_name(name_ptr: [*]const u8, name_len: usize) void;
 extern fn fuzzer_init(cache_dir: FuzzerSlice) void;
 extern fn fuzzer_init_corpus_elem(input_ptr: [*]const u8, input_len: usize) void;
-extern fn fuzzer_start(testOne: *const fn ([*]const u8, usize) callconv(.C) void) void;
+extern fn fuzzer_start(testOne: *const fn ([*]const u8, usize) callconv(.c) void) void;
 extern fn fuzzer_coverage_id() u64;
 
 pub fn fuzz(
@@ -382,7 +382,7 @@ pub fn fuzz(
     const global = struct {
         var ctx: @TypeOf(context) = undefined;
 
-        fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.C) void {
+        fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.c) void {
             @disableInstrumentation();
             testing.allocator_instance = .{};
             defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);
lib/compiler_rt/aarch64_outline_atomics.zig
@@ -10,7 +10,7 @@ const always_has_lse = std.Target.aarch64.featureSetHas(builtin.cpu.features, .l
 /// which ARM is concerned would have too much overhead.
 var __aarch64_have_lse_atomics: u8 = @intFromBool(always_has_lse);
 
-fn __aarch64_cas1_relax() align(16) callconv(.Naked) void {
+fn __aarch64_cas1_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -32,7 +32,7 @@ fn __aarch64_cas1_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp1_relax() align(16) callconv(.Naked) void {
+fn __aarch64_swp1_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -52,7 +52,7 @@ fn __aarch64_swp1_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd1_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd1_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -73,7 +73,7 @@ fn __aarch64_ldadd1_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr1_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr1_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -94,7 +94,7 @@ fn __aarch64_ldclr1_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor1_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor1_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -115,7 +115,7 @@ fn __aarch64_ldeor1_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset1_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldset1_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -136,7 +136,7 @@ fn __aarch64_ldset1_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas1_acq() align(16) callconv(.Naked) void {
+fn __aarch64_cas1_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -158,7 +158,7 @@ fn __aarch64_cas1_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp1_acq() align(16) callconv(.Naked) void {
+fn __aarch64_swp1_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -178,7 +178,7 @@ fn __aarch64_swp1_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd1_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd1_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -199,7 +199,7 @@ fn __aarch64_ldadd1_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr1_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr1_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -220,7 +220,7 @@ fn __aarch64_ldclr1_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor1_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor1_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -241,7 +241,7 @@ fn __aarch64_ldeor1_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset1_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldset1_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -262,7 +262,7 @@ fn __aarch64_ldset1_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas1_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas1_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -284,7 +284,7 @@ fn __aarch64_cas1_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp1_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp1_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -304,7 +304,7 @@ fn __aarch64_swp1_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd1_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd1_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -325,7 +325,7 @@ fn __aarch64_ldadd1_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr1_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr1_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -346,7 +346,7 @@ fn __aarch64_ldclr1_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor1_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor1_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -367,7 +367,7 @@ fn __aarch64_ldeor1_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset1_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset1_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -388,7 +388,7 @@ fn __aarch64_ldset1_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas1_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas1_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -410,7 +410,7 @@ fn __aarch64_cas1_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp1_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp1_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -430,7 +430,7 @@ fn __aarch64_swp1_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd1_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd1_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -451,7 +451,7 @@ fn __aarch64_ldadd1_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr1_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr1_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -472,7 +472,7 @@ fn __aarch64_ldclr1_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor1_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor1_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -493,7 +493,7 @@ fn __aarch64_ldeor1_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset1_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset1_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -514,7 +514,7 @@ fn __aarch64_ldset1_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas2_relax() align(16) callconv(.Naked) void {
+fn __aarch64_cas2_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -536,7 +536,7 @@ fn __aarch64_cas2_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp2_relax() align(16) callconv(.Naked) void {
+fn __aarch64_swp2_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -556,7 +556,7 @@ fn __aarch64_swp2_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd2_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd2_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -577,7 +577,7 @@ fn __aarch64_ldadd2_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr2_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr2_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -598,7 +598,7 @@ fn __aarch64_ldclr2_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor2_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor2_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -619,7 +619,7 @@ fn __aarch64_ldeor2_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset2_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldset2_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -640,7 +640,7 @@ fn __aarch64_ldset2_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas2_acq() align(16) callconv(.Naked) void {
+fn __aarch64_cas2_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -662,7 +662,7 @@ fn __aarch64_cas2_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp2_acq() align(16) callconv(.Naked) void {
+fn __aarch64_swp2_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -682,7 +682,7 @@ fn __aarch64_swp2_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd2_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd2_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -703,7 +703,7 @@ fn __aarch64_ldadd2_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr2_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr2_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -724,7 +724,7 @@ fn __aarch64_ldclr2_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor2_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor2_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -745,7 +745,7 @@ fn __aarch64_ldeor2_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset2_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldset2_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -766,7 +766,7 @@ fn __aarch64_ldset2_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas2_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas2_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -788,7 +788,7 @@ fn __aarch64_cas2_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp2_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp2_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -808,7 +808,7 @@ fn __aarch64_swp2_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd2_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd2_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -829,7 +829,7 @@ fn __aarch64_ldadd2_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr2_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr2_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -850,7 +850,7 @@ fn __aarch64_ldclr2_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor2_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor2_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -871,7 +871,7 @@ fn __aarch64_ldeor2_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset2_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset2_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -892,7 +892,7 @@ fn __aarch64_ldset2_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas2_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas2_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -914,7 +914,7 @@ fn __aarch64_cas2_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp2_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp2_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -934,7 +934,7 @@ fn __aarch64_swp2_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd2_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd2_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -955,7 +955,7 @@ fn __aarch64_ldadd2_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr2_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr2_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -976,7 +976,7 @@ fn __aarch64_ldclr2_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor2_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor2_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -997,7 +997,7 @@ fn __aarch64_ldeor2_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset2_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset2_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1018,7 +1018,7 @@ fn __aarch64_ldset2_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas4_relax() align(16) callconv(.Naked) void {
+fn __aarch64_cas4_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1040,7 +1040,7 @@ fn __aarch64_cas4_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp4_relax() align(16) callconv(.Naked) void {
+fn __aarch64_swp4_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1060,7 +1060,7 @@ fn __aarch64_swp4_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd4_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd4_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1081,7 +1081,7 @@ fn __aarch64_ldadd4_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr4_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr4_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1102,7 +1102,7 @@ fn __aarch64_ldclr4_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor4_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor4_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1123,7 +1123,7 @@ fn __aarch64_ldeor4_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset4_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldset4_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1144,7 +1144,7 @@ fn __aarch64_ldset4_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas4_acq() align(16) callconv(.Naked) void {
+fn __aarch64_cas4_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1166,7 +1166,7 @@ fn __aarch64_cas4_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp4_acq() align(16) callconv(.Naked) void {
+fn __aarch64_swp4_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1186,7 +1186,7 @@ fn __aarch64_swp4_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd4_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd4_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1207,7 +1207,7 @@ fn __aarch64_ldadd4_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr4_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr4_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1228,7 +1228,7 @@ fn __aarch64_ldclr4_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor4_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor4_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1249,7 +1249,7 @@ fn __aarch64_ldeor4_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset4_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldset4_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1270,7 +1270,7 @@ fn __aarch64_ldset4_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas4_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas4_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1292,7 +1292,7 @@ fn __aarch64_cas4_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp4_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp4_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1312,7 +1312,7 @@ fn __aarch64_swp4_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd4_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd4_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1333,7 +1333,7 @@ fn __aarch64_ldadd4_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr4_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr4_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1354,7 +1354,7 @@ fn __aarch64_ldclr4_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor4_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor4_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1375,7 +1375,7 @@ fn __aarch64_ldeor4_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset4_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset4_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1396,7 +1396,7 @@ fn __aarch64_ldset4_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas4_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas4_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1418,7 +1418,7 @@ fn __aarch64_cas4_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp4_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp4_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1438,7 +1438,7 @@ fn __aarch64_swp4_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd4_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd4_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1459,7 +1459,7 @@ fn __aarch64_ldadd4_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr4_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr4_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1480,7 +1480,7 @@ fn __aarch64_ldclr4_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor4_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor4_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1501,7 +1501,7 @@ fn __aarch64_ldeor4_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset4_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset4_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1522,7 +1522,7 @@ fn __aarch64_ldset4_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas8_relax() align(16) callconv(.Naked) void {
+fn __aarch64_cas8_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1544,7 +1544,7 @@ fn __aarch64_cas8_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp8_relax() align(16) callconv(.Naked) void {
+fn __aarch64_swp8_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1564,7 +1564,7 @@ fn __aarch64_swp8_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd8_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd8_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1585,7 +1585,7 @@ fn __aarch64_ldadd8_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr8_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr8_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1606,7 +1606,7 @@ fn __aarch64_ldclr8_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor8_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor8_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1627,7 +1627,7 @@ fn __aarch64_ldeor8_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset8_relax() align(16) callconv(.Naked) void {
+fn __aarch64_ldset8_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1648,7 +1648,7 @@ fn __aarch64_ldset8_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas8_acq() align(16) callconv(.Naked) void {
+fn __aarch64_cas8_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1670,7 +1670,7 @@ fn __aarch64_cas8_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp8_acq() align(16) callconv(.Naked) void {
+fn __aarch64_swp8_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1690,7 +1690,7 @@ fn __aarch64_swp8_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd8_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd8_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1711,7 +1711,7 @@ fn __aarch64_ldadd8_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr8_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr8_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1732,7 +1732,7 @@ fn __aarch64_ldclr8_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor8_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor8_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1753,7 +1753,7 @@ fn __aarch64_ldeor8_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset8_acq() align(16) callconv(.Naked) void {
+fn __aarch64_ldset8_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1774,7 +1774,7 @@ fn __aarch64_ldset8_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas8_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas8_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1796,7 +1796,7 @@ fn __aarch64_cas8_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp8_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp8_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1816,7 +1816,7 @@ fn __aarch64_swp8_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd8_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd8_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1837,7 +1837,7 @@ fn __aarch64_ldadd8_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr8_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr8_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1858,7 +1858,7 @@ fn __aarch64_ldclr8_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor8_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor8_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1879,7 +1879,7 @@ fn __aarch64_ldeor8_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset8_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset8_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1900,7 +1900,7 @@ fn __aarch64_ldset8_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas8_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas8_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1922,7 +1922,7 @@ fn __aarch64_cas8_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_swp8_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_swp8_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1942,7 +1942,7 @@ fn __aarch64_swp8_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldadd8_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldadd8_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1963,7 +1963,7 @@ fn __aarch64_ldadd8_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldclr8_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldclr8_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -1984,7 +1984,7 @@ fn __aarch64_ldclr8_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldeor8_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldeor8_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -2005,7 +2005,7 @@ fn __aarch64_ldeor8_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_ldset8_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_ldset8_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -2026,7 +2026,7 @@ fn __aarch64_ldset8_acq_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas16_relax() align(16) callconv(.Naked) void {
+fn __aarch64_cas16_relax() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -2050,7 +2050,7 @@ fn __aarch64_cas16_relax() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas16_acq() align(16) callconv(.Naked) void {
+fn __aarch64_cas16_acq() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -2074,7 +2074,7 @@ fn __aarch64_cas16_acq() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas16_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas16_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
@@ -2098,7 +2098,7 @@ fn __aarch64_cas16_rel() align(16) callconv(.Naked) void {
     );
     unreachable;
 }
-fn __aarch64_cas16_acq_rel() align(16) callconv(.Naked) void {
+fn __aarch64_cas16_acq_rel() align(16) callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\        cbz     w16, 8f
lib/compiler_rt/absvdi2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __absvdi2(a: i64) callconv(.C) i64 {
+pub fn __absvdi2(a: i64) callconv(.c) i64 {
     return absv(i64, a);
 }
lib/compiler_rt/absvsi2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __absvsi2(a: i32) callconv(.C) i32 {
+pub fn __absvsi2(a: i32) callconv(.c) i32 {
     return absv(i32, a);
 }
lib/compiler_rt/absvti2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__absvti2, .{ .name = "__absvti2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __absvti2(a: i128) callconv(.C) i128 {
+pub fn __absvti2(a: i128) callconv(.c) i128 {
     return absv(i128, a);
 }
lib/compiler_rt/adddf3.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
+fn __adddf3(a: f64, b: f64) callconv(.c) f64 {
     return addf3(f64, a, b);
 }
 
-fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
+fn __aeabi_dadd(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
     return addf3(f64, a, b);
 }
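
Note on the AAPCS spelling above: unlike .C -> .c and .Naked -> .naked, there is no bare lowercase tag for AAPCS, because in the tagged-union CallingConvention the arm_aapcs variant carries a payload of options (assumed here to include an optional incoming stack alignment), so the replacement is a union literal whose payload takes its defaults. A hypothetical compile-time check of that coercion:

    const std = @import("std");

    comptime {
        // The bare `.{}` payload relies on the option struct's defaults.
        const cc: std.builtin.CallingConvention = .{ .arm_aapcs = .{} };
        _ = cc;
    }
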
lib/compiler_rt/addhf3.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__addhf3, .{ .name = "__addhf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __addhf3(a: f16, b: f16) callconv(.C) f16 {
+fn __addhf3(a: f16, b: f16) callconv(.c) f16 {
     return addf3(f16, a, b);
 }
lib/compiler_rt/addo.zig
@@ -31,13 +31,13 @@ inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST
     return sum;
 }
 
-pub fn __addosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+pub fn __addosi4(a: i32, b: i32, overflow: *c_int) callconv(.c) i32 {
     return addoXi4_generic(i32, a, b, overflow);
 }
-pub fn __addodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+pub fn __addodi4(a: i64, b: i64, overflow: *c_int) callconv(.c) i64 {
     return addoXi4_generic(i64, a, b, overflow);
 }
-pub fn __addoti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+pub fn __addoti4(a: i128, b: i128, overflow: *c_int) callconv(.c) i128 {
     return addoXi4_generic(i128, a, b, overflow);
 }
 
lib/compiler_rt/addsf3.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
+fn __addsf3(a: f32, b: f32) callconv(.c) f32 {
     return addf3(f32, a, b);
 }
 
-fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
+fn __aeabi_fadd(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
     return addf3(f32, a, b);
 }
lib/compiler_rt/addtf3.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__addtf3, .{ .name = "__addtf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
+pub fn __addtf3(a: f128, b: f128) callconv(.c) f128 {
     return addf3(f128, a, b);
 }
 
-fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.C) void {
+fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.c) void {
     c.* = addf3(f128, a.*, b.*);
 }
lib/compiler_rt/addxf3.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__addxf3, .{ .name = "__addxf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
+pub fn __addxf3(a: f80, b: f80) callconv(.c) f80 {
     return addf3(f80, a, b);
 }
lib/compiler_rt/arm.zig
@@ -57,67 +57,67 @@ extern fn memset(dest: ?[*]u8, c: i32, n: usize) ?[*]u8;
 extern fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]u8;
 extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8;
 
-pub fn __aeabi_memcpy(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memcpy(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memcpy(dest, src, n);
 }
-pub fn __aeabi_memcpy4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memcpy4(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memcpy(dest, src, n);
 }
-pub fn __aeabi_memcpy8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memcpy8(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memcpy(dest, src, n);
 }
 
-pub fn __aeabi_memmove(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memmove(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memmove(dest, src, n);
 }
-pub fn __aeabi_memmove4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memmove4(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memmove(dest, src, n);
 }
-pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memmove(dest, src, n);
 }
 
-pub fn __aeabi_memset(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void {
+pub fn __aeabi_memset(dest: [*]u8, n: usize, c: i32) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     // This is identical to the standard `memset` definition but with the last
     // two arguments swapped
     _ = memset(dest, c, n);
 }
-pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void {
+pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: i32) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memset(dest, c, n);
 }
-pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: i32) callconv(.AAPCS) void {
+pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: i32) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memset(dest, c, n);
 }
 
-pub fn __aeabi_memclr(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memclr(dest: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memset(dest, 0, n);
 }
-pub fn __aeabi_memclr4(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memclr4(dest: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memset(dest, 0, n);
 }
-pub fn __aeabi_memclr8(dest: [*]u8, n: usize) callconv(.AAPCS) void {
+pub fn __aeabi_memclr8(dest: [*]u8, n: usize) callconv(.{ .arm_aapcs = .{} }) void {
     @setRuntimeSafety(false);
     _ = memset(dest, 0, n);
 }
 
 // Dummy functions to avoid errors during the linking phase
-pub fn __aeabi_unwind_cpp_pr0() callconv(.AAPCS) void {}
-pub fn __aeabi_unwind_cpp_pr1() callconv(.AAPCS) void {}
-pub fn __aeabi_unwind_cpp_pr2() callconv(.AAPCS) void {}
+pub fn __aeabi_unwind_cpp_pr0() callconv(.{ .arm_aapcs = .{} }) void {}
+pub fn __aeabi_unwind_cpp_pr1() callconv(.{ .arm_aapcs = .{} }) void {}
+pub fn __aeabi_unwind_cpp_pr2() callconv(.{ .arm_aapcs = .{} }) void {}
 
 // This function can only clobber r0 according to the ABI
-pub fn __aeabi_read_tp() callconv(.Naked) void {
+pub fn __aeabi_read_tp() callconv(.naked) void {
     @setRuntimeSafety(false);
     asm volatile (
         \\ mrc p15, 0, r0, c13, c0, 3
@@ -129,7 +129,7 @@ pub fn __aeabi_read_tp() callconv(.Naked) void {
 // The following functions are wrapped in an asm block to ensure the required
 // calling convention is always respected
 
-pub fn __aeabi_uidivmod() callconv(.Naked) void {
+pub fn __aeabi_uidivmod() callconv(.naked) void {
     @setRuntimeSafety(false);
     // Divide r0 by r1; the quotient goes in r0, the remainder in r1
     asm volatile (
@@ -147,7 +147,7 @@ pub fn __aeabi_uidivmod() callconv(.Naked) void {
     unreachable;
 }
 
-pub fn __aeabi_uldivmod() callconv(.Naked) void {
+pub fn __aeabi_uldivmod() callconv(.naked) void {
     @setRuntimeSafety(false);
     // Divide r1:r0 by r3:r2; the quotient goes in r1:r0, the remainder in r3:r2
     asm volatile (
@@ -167,7 +167,7 @@ pub fn __aeabi_uldivmod() callconv(.Naked) void {
     unreachable;
 }
 
-pub fn __aeabi_idivmod() callconv(.Naked) void {
+pub fn __aeabi_idivmod() callconv(.naked) void {
     @setRuntimeSafety(false);
     // Divide r0 by r1; the quotient goes in r0, the remainder in r1
     asm volatile (
@@ -185,7 +185,7 @@ pub fn __aeabi_idivmod() callconv(.Naked) void {
     unreachable;
 }
 
-pub fn __aeabi_ldivmod() callconv(.Naked) void {
+pub fn __aeabi_ldivmod() callconv(.naked) void {
     @setRuntimeSafety(false);
     // Divide r1:r0 by r3:r2; the quotient goes in r1:r0, the remainder in r3:r2
     asm volatile (
@@ -207,12 +207,12 @@ pub fn __aeabi_ldivmod() callconv(.Naked) void {
 
 // Float Arithmetic
 
-fn __aeabi_frsub(a: f32, b: f32) callconv(.AAPCS) f32 {
+fn __aeabi_frsub(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
     const neg_a: f32 = @bitCast(@as(u32, @bitCast(a)) ^ (@as(u32, 1) << 31));
     return b + neg_a;
 }
 
-fn __aeabi_drsub(a: f64, b: f64) callconv(.AAPCS) f64 {
+fn __aeabi_drsub(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
     const neg_a: f64 = @bitCast(@as(u64, @bitCast(a)) ^ (@as(u64, 1) << 63));
     return b + neg_a;
 }
lib/compiler_rt/atomics.zig
@@ -117,21 +117,21 @@ var spinlocks: SpinlockTable = SpinlockTable{};
 // Generic version of GCC atomic builtin functions.
 // They work on any object regardless of its pointer alignment or size.
 
-fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) void {
+fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.c) void {
     _ = model;
     var sl = spinlocks.get(@intFromPtr(src));
     defer sl.release();
     @memcpy(dest[0..size], src);
 }
 
-fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
+fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.c) void {
     _ = model;
     var sl = spinlocks.get(@intFromPtr(dest));
     defer sl.release();
     @memcpy(dest[0..size], src);
 }
 
-fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
+fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.c) void {
     _ = model;
     var sl = spinlocks.get(@intFromPtr(ptr));
     defer sl.release();
@@ -146,7 +146,7 @@ fn __atomic_compare_exchange(
     desired: [*]u8,
     success: i32,
     failure: i32,
-) callconv(.C) i32 {
+) callconv(.c) i32 {
     _ = success;
     _ = failure;
     var sl = spinlocks.get(@intFromPtr(ptr));
@@ -176,23 +176,23 @@ inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
     }
 }
 
-fn __atomic_load_1(src: *u8, model: i32) callconv(.C) u8 {
+fn __atomic_load_1(src: *u8, model: i32) callconv(.c) u8 {
     return atomic_load_N(u8, src, model);
 }
 
-fn __atomic_load_2(src: *u16, model: i32) callconv(.C) u16 {
+fn __atomic_load_2(src: *u16, model: i32) callconv(.c) u16 {
     return atomic_load_N(u16, src, model);
 }
 
-fn __atomic_load_4(src: *u32, model: i32) callconv(.C) u32 {
+fn __atomic_load_4(src: *u32, model: i32) callconv(.c) u32 {
     return atomic_load_N(u32, src, model);
 }
 
-fn __atomic_load_8(src: *u64, model: i32) callconv(.C) u64 {
+fn __atomic_load_8(src: *u64, model: i32) callconv(.c) u64 {
     return atomic_load_N(u64, src, model);
 }
 
-fn __atomic_load_16(src: *u128, model: i32) callconv(.C) u128 {
+fn __atomic_load_16(src: *u128, model: i32) callconv(.c) u128 {
     return atomic_load_N(u128, src, model);
 }
 
@@ -207,23 +207,23 @@ inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
     }
 }
 
-fn __atomic_store_1(dst: *u8, value: u8, model: i32) callconv(.C) void {
+fn __atomic_store_1(dst: *u8, value: u8, model: i32) callconv(.c) void {
     return atomic_store_N(u8, dst, value, model);
 }
 
-fn __atomic_store_2(dst: *u16, value: u16, model: i32) callconv(.C) void {
+fn __atomic_store_2(dst: *u16, value: u16, model: i32) callconv(.c) void {
     return atomic_store_N(u16, dst, value, model);
 }
 
-fn __atomic_store_4(dst: *u32, value: u32, model: i32) callconv(.C) void {
+fn __atomic_store_4(dst: *u32, value: u32, model: i32) callconv(.c) void {
     return atomic_store_N(u32, dst, value, model);
 }
 
-fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.C) void {
+fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.c) void {
     return atomic_store_N(u64, dst, value, model);
 }
 
-fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.C) void {
+fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.c) void {
     return atomic_store_N(u128, dst, value, model);
 }
 
@@ -274,23 +274,23 @@ inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
     }
 }
 
-fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return atomic_exchange_N(u8, ptr, val, model);
 }
 
-fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return atomic_exchange_N(u16, ptr, val, model);
 }
 
-fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return atomic_exchange_N(u32, ptr, val, model);
 }
 
-fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return atomic_exchange_N(u64, ptr, val, model);
 }
 
-fn __atomic_exchange_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_exchange_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return atomic_exchange_N(u128, ptr, val, model);
 }
 
@@ -323,23 +323,23 @@ inline fn atomic_compare_exchange_N(
     }
 }
 
-fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.C) i32 {
+fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.c) i32 {
     return atomic_compare_exchange_N(u8, ptr, expected, desired, success, failure);
 }
 
-fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.C) i32 {
+fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.c) i32 {
     return atomic_compare_exchange_N(u16, ptr, expected, desired, success, failure);
 }
 
-fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.C) i32 {
+fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.c) i32 {
     return atomic_compare_exchange_N(u32, ptr, expected, desired, success, failure);
 }
 
-fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success: i32, failure: i32) callconv(.C) i32 {
+fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success: i32, failure: i32) callconv(.c) i32 {
     return atomic_compare_exchange_N(u64, ptr, expected, desired, success, failure);
 }
 
-fn __atomic_compare_exchange_16(ptr: *u128, expected: *u128, desired: u128, success: i32, failure: i32) callconv(.C) i32 {
+fn __atomic_compare_exchange_16(ptr: *u128, expected: *u128, desired: u128, success: i32, failure: i32) callconv(.c) i32 {
     return atomic_compare_exchange_N(u128, ptr, expected, desired, success, failure);
 }
 
@@ -376,163 +376,163 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
     return @atomicRmw(T, ptr, op, val, .seq_cst);
 }
 
-fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .Add, ptr, val, model);
 }
 
-fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .Add, ptr, val, model);
 }
 
-fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .Add, ptr, val, model);
 }
 
-fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .Add, ptr, val, model);
 }
 
-fn __atomic_fetch_add_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_add_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .Add, ptr, val, model);
 }
 
-fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .Sub, ptr, val, model);
 }
 
-fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .Sub, ptr, val, model);
 }
 
-fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .Sub, ptr, val, model);
 }
 
-fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .Sub, ptr, val, model);
 }
 
-fn __atomic_fetch_sub_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_sub_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .Sub, ptr, val, model);
 }
 
-fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .And, ptr, val, model);
 }
 
-fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .And, ptr, val, model);
 }
 
-fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .And, ptr, val, model);
 }
 
-fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .And, ptr, val, model);
 }
 
-fn __atomic_fetch_and_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_and_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .And, ptr, val, model);
 }
 
-fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .Or, ptr, val, model);
 }
 
-fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .Or, ptr, val, model);
 }
 
-fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .Or, ptr, val, model);
 }
 
-fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .Or, ptr, val, model);
 }
 
-fn __atomic_fetch_or_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_or_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .Or, ptr, val, model);
 }
 
-fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .Xor, ptr, val, model);
 }
 
-fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .Xor, ptr, val, model);
 }
 
-fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .Xor, ptr, val, model);
 }
 
-fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .Xor, ptr, val, model);
 }
 
-fn __atomic_fetch_xor_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_xor_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .Xor, ptr, val, model);
 }
 
-fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .Nand, ptr, val, model);
 }
 
-fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .Nand, ptr, val, model);
 }
 
-fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .Nand, ptr, val, model);
 }
 
-fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .Nand, ptr, val, model);
 }
 
-fn __atomic_fetch_nand_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_nand_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .Nand, ptr, val, model);
 }
 
-fn __atomic_fetch_umax_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_umax_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .Max, ptr, val, model);
 }
 
-fn __atomic_fetch_umax_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_umax_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .Max, ptr, val, model);
 }
 
-fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .Max, ptr, val, model);
 }
 
-fn __atomic_fetch_umax_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_umax_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .Max, ptr, val, model);
 }
 
-fn __atomic_fetch_umax_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_umax_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .Max, ptr, val, model);
 }
 
-fn __atomic_fetch_umin_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+fn __atomic_fetch_umin_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
     return fetch_op_N(u8, .Min, ptr, val, model);
 }
 
-fn __atomic_fetch_umin_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+fn __atomic_fetch_umin_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
     return fetch_op_N(u16, .Min, ptr, val, model);
 }
 
-fn __atomic_fetch_umin_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+fn __atomic_fetch_umin_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
     return fetch_op_N(u32, .Min, ptr, val, model);
 }
 
-fn __atomic_fetch_umin_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+fn __atomic_fetch_umin_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
     return fetch_op_N(u64, .Min, ptr, val, model);
 }
 
-fn __atomic_fetch_umin_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+fn __atomic_fetch_umin_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
     return fetch_op_N(u128, .Min, ptr, val, model);
 }
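Nearly every hunk in this changeset follows the same mechanical pattern as the atomics file above: the deprecated uppercase enum-literal aliases become their lowercase tags, `.C` → `.c` and `.Naked` → `.naked`. A minimal sketch of the rename (the function names here are hypothetical, not part of this commit):

    // Before: fn addOne(a: u32) callconv(.C) u32 { ... }
    fn addOne(a: u32) callconv(.c) u32 {
        return a + 1;
    }

    // Before: fn spin() callconv(.Naked) noreturn { ... }
    // Naked functions still get no prologue/epilogue; only the literal changed.
    fn spin() callconv(.naked) noreturn {
        @setRuntimeSafety(false);
        asm volatile ("");
        unreachable;
    }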
 
lib/compiler_rt/aulldiv.zig
@@ -15,7 +15,7 @@ comptime {
     }
 }
 
-pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
+pub fn _alldiv(a: i64, b: i64) callconv(.{ .x86_stdcall = .{} }) i64 {
     const s_a = a >> (64 - 1);
     const s_b = b >> (64 - 1);
 
@@ -27,7 +27,7 @@ pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
     return (@as(i64, @bitCast(r)) ^ s) -% s;
 }
 
-pub fn _aulldiv() callconv(.Naked) void {
+pub fn _aulldiv() callconv(.naked) void {
     @setRuntimeSafety(false);
 
     // The stack layout is:
lib/compiler_rt/aullrem.zig
@@ -15,7 +15,7 @@ comptime {
     }
 }
 
-pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
+pub fn _allrem(a: i64, b: i64) callconv(.{ .x86_stdcall = .{} }) i64 {
     const s_a = a >> (64 - 1);
     const s_b = b >> (64 - 1);
 
@@ -27,7 +27,7 @@ pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
     return (@as(i64, @bitCast(r)) ^ s) -% s;
 }
 
-pub fn _aullrem() callconv(.Naked) void {
+pub fn _aullrem() callconv(.naked) void {
     @setRuntimeSafety(false);
 
     // The stack layout is:
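The `.Stdcall` alias has no one-token replacement: the new `std.builtin.CallingConvention` is a tagged union, so the x86 stdcall convention is spelled as a union literal with an options payload. A hedged sketch of the shape, assuming the payload's fields (such as an optional incoming stack-alignment override) all default so that `.{}` suffices; `wideMul` is a made-up name, and the convention is only meaningful on 32-bit x86 targets, mirroring the export guards in the comptime blocks above:

    pub fn wideMul(a: i64, b: i64) callconv(.{ .x86_stdcall = .{} }) i64 {
        return a *% b;
    }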
lib/compiler_rt/bcmp.zig
@@ -5,7 +5,7 @@ comptime {
     @export(&bcmp, .{ .name = "bcmp", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
+pub fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.c) c_int {
     @setRuntimeSafety(false);
 
     var index: usize = 0;
lib/compiler_rt/bitreverse.zig
@@ -46,15 +46,15 @@ inline fn bitreverseXi2(comptime T: type, a: T) T {
     }
 }
 
-pub fn __bitreversesi2(a: u32) callconv(.C) u32 {
+pub fn __bitreversesi2(a: u32) callconv(.c) u32 {
     return bitreverseXi2(u32, a);
 }
 
-pub fn __bitreversedi2(a: u64) callconv(.C) u64 {
+pub fn __bitreversedi2(a: u64) callconv(.c) u64 {
     return bitreverseXi2(u64, a);
 }
 
-pub fn __bitreverseti2(a: u128) callconv(.C) u128 {
+pub fn __bitreverseti2(a: u128) callconv(.c) u128 {
     return bitreverseXi2(u128, a);
 }
 
lib/compiler_rt/bswap.zig
@@ -66,15 +66,15 @@ inline fn bswapXi2(comptime T: type, a: T) T {
     }
 }
 
-pub fn __bswapsi2(a: u32) callconv(.C) u32 {
+pub fn __bswapsi2(a: u32) callconv(.c) u32 {
     return bswapXi2(u32, a);
 }
 
-pub fn __bswapdi2(a: u64) callconv(.C) u64 {
+pub fn __bswapdi2(a: u64) callconv(.c) u64 {
     return bswapXi2(u64, a);
 }
 
-pub fn __bswapti2(a: u128) callconv(.C) u128 {
+pub fn __bswapti2(a: u128) callconv(.c) u128 {
     return bswapXi2(u128, a);
 }
 
lib/compiler_rt/ceil.zig
@@ -26,12 +26,12 @@ comptime {
     @export(&ceill, .{ .name = "ceill", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __ceilh(x: f16) callconv(.C) f16 {
+pub fn __ceilh(x: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(ceilf(x));
 }
 
-pub fn ceilf(x: f32) callconv(.C) f32 {
+pub fn ceilf(x: f32) callconv(.c) f32 {
     var u: u32 = @bitCast(x);
     const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
     var m: u32 = undefined;
@@ -64,7 +64,7 @@ pub fn ceilf(x: f32) callconv(.C) f32 {
     }
 }
 
-pub fn ceil(x: f64) callconv(.C) f64 {
+pub fn ceil(x: f64) callconv(.c) f64 {
     const f64_toint = 1.0 / math.floatEps(f64);
 
     const u: u64 = @bitCast(x);
@@ -95,12 +95,12 @@ pub fn ceil(x: f64) callconv(.C) f64 {
     }
 }
 
-pub fn __ceilx(x: f80) callconv(.C) f80 {
+pub fn __ceilx(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(ceilq(x));
 }
 
-pub fn ceilq(x: f128) callconv(.C) f128 {
+pub fn ceilq(x: f128) callconv(.c) f128 {
     const f128_toint = 1.0 / math.floatEps(f128);
 
     const u: u128 = @bitCast(x);
@@ -129,7 +129,7 @@ pub fn ceilq(x: f128) callconv(.C) f128 {
     }
 }
 
-pub fn ceill(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn ceill(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __ceilh(x),
         32 => return ceilf(x),
lib/compiler_rt/clear_cache.zig
@@ -15,7 +15,7 @@ comptime {
     _ = &clear_cache;
 }
 
-fn clear_cache(start: usize, end: usize) callconv(.C) void {
+fn clear_cache(start: usize, end: usize) callconv(.c) void {
     const x86 = switch (arch) {
         .x86, .x86_64 => true,
         else => false,
lib/compiler_rt/clzsi2_test.zig
@@ -4,7 +4,7 @@ const testing = @import("std").testing;
 
 fn test__clzsi2(a: u32, expected: i32) !void {
     const nakedClzsi2 = clz.__clzsi2;
-    const actualClzsi2 = @as(*const fn (a: i32) callconv(.C) i32, @ptrCast(&nakedClzsi2));
+    const actualClzsi2 = @as(*const fn (a: i32) callconv(.c) i32, @ptrCast(&nakedClzsi2));
     const x: i32 = @bitCast(a);
     const result = actualClzsi2(x);
     try testing.expectEqual(expected, result);
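The cast in this test works because the calling convention is part of a function pointer's type, which is why the literal had to change here too. A self-contained sketch (the `identity` function is hypothetical):

    const std = @import("std");

    test "callconv is part of the function pointer type" {
        const S = struct {
            fn identity(x: i32) callconv(.c) i32 {
                return x;
            }
        };
        const p: *const fn (i32) callconv(.c) i32 = &S.identity;
        try std.testing.expectEqual(@as(i32, 42), p(42));
    }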
lib/compiler_rt/cmp.zig
@@ -34,27 +34,27 @@ inline fn XcmpXi2(comptime T: type, a: T, b: T) i32 {
     return cmp1 - cmp2 + 1;
 }
 
-pub fn __cmpsi2(a: i32, b: i32) callconv(.C) i32 {
+pub fn __cmpsi2(a: i32, b: i32) callconv(.c) i32 {
     return XcmpXi2(i32, a, b);
 }
 
-pub fn __cmpdi2(a: i64, b: i64) callconv(.C) i32 {
+pub fn __cmpdi2(a: i64, b: i64) callconv(.c) i32 {
     return XcmpXi2(i64, a, b);
 }
 
-pub fn __cmpti2(a: i128, b: i128) callconv(.C) i32 {
+pub fn __cmpti2(a: i128, b: i128) callconv(.c) i32 {
     return XcmpXi2(i128, a, b);
 }
 
-pub fn __ucmpsi2(a: u32, b: u32) callconv(.C) i32 {
+pub fn __ucmpsi2(a: u32, b: u32) callconv(.c) i32 {
     return XcmpXi2(u32, a, b);
 }
 
-pub fn __ucmpdi2(a: u64, b: u64) callconv(.C) i32 {
+pub fn __ucmpdi2(a: u64, b: u64) callconv(.c) i32 {
     return XcmpXi2(u64, a, b);
 }
 
-pub fn __ucmpti2(a: u128, b: u128) callconv(.C) i32 {
+pub fn __ucmpti2(a: u128, b: u128) callconv(.c) i32 {
     return XcmpXi2(u128, a, b);
 }
 
lib/compiler_rt/cmpdf2.zig
@@ -25,44 +25,44 @@ comptime {
 ///
 /// Note that this matches the definition of `__ledf2`, `__eqdf2`, `__nedf2`, `__cmpdf2`,
 /// and `__ltdf2`.
-fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
+fn __cmpdf2(a: f64, b: f64) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f64, comparef.LE, a, b));
 }
 
 /// "These functions return a value less than or equal to zero if neither argument is NaN,
 /// and a is less than or equal to b."
-pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
+pub fn __ledf2(a: f64, b: f64) callconv(.c) i32 {
     return __cmpdf2(a, b);
 }
 
 /// "These functions return zero if neither argument is NaN, and a and b are equal."
 /// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
 /// to have the same return value.
-pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
+pub fn __eqdf2(a: f64, b: f64) callconv(.c) i32 {
     return __cmpdf2(a, b);
 }
 
 /// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
 /// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
 /// to have the same return value.
-pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
+pub fn __nedf2(a: f64, b: f64) callconv(.c) i32 {
     return __cmpdf2(a, b);
 }
 
 /// "These functions return a value less than zero if neither argument is NaN, and a
 /// is strictly less than b."
-pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
+pub fn __ltdf2(a: f64, b: f64) callconv(.c) i32 {
     return __cmpdf2(a, b);
 }
 
-fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
+fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
 }
 
-fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
+fn __aeabi_dcmplt(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
 }
 
-fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
+fn __aeabi_dcmple(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
 }
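The ARM EABI shims get the analogous treatment: `.AAPCS` becomes the tagged form `.{ .arm_aapcs = .{} }`, again with an empty (all-defaults) options payload. A minimal sketch under the same assumptions; `dAdd` is hypothetical and only meaningful on 32-bit ARM targets:

    fn dAdd(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
        return a + b;
    }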
lib/compiler_rt/cmphf2.zig
@@ -19,32 +19,32 @@ comptime {
 ///
 /// Note that this matches the definition of `__lehf2`, `__eqhf2`, `__nehf2`, `__cmphf2`,
 /// and `__lthf2`.
-fn __cmphf2(a: f16, b: f16) callconv(.C) i32 {
+fn __cmphf2(a: f16, b: f16) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f16, comparef.LE, a, b));
 }
 
 /// "These functions return a value less than or equal to zero if neither argument is NaN,
 /// and a is less than or equal to b."
-pub fn __lehf2(a: f16, b: f16) callconv(.C) i32 {
+pub fn __lehf2(a: f16, b: f16) callconv(.c) i32 {
     return __cmphf2(a, b);
 }
 
 /// "These functions return zero if neither argument is NaN, and a and b are equal."
 /// Note that due to some kind of historical accident, __eqhf2 and __nehf2 are defined
 /// to have the same return value.
-pub fn __eqhf2(a: f16, b: f16) callconv(.C) i32 {
+pub fn __eqhf2(a: f16, b: f16) callconv(.c) i32 {
     return __cmphf2(a, b);
 }
 
 /// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
 /// Note that due to some kind of historical accident, __eqhf2 and __nehf2 are defined
 /// to have the same return value.
-pub fn __nehf2(a: f16, b: f16) callconv(.C) i32 {
+pub fn __nehf2(a: f16, b: f16) callconv(.c) i32 {
     return __cmphf2(a, b);
 }
 
 /// "These functions return a value less than zero if neither argument is NaN, and a
 /// is strictly less than b."
-pub fn __lthf2(a: f16, b: f16) callconv(.C) i32 {
+pub fn __lthf2(a: f16, b: f16) callconv(.c) i32 {
     return __cmphf2(a, b);
 }
lib/compiler_rt/cmpsf2.zig
@@ -25,44 +25,44 @@ comptime {
 ///
 /// Note that this matches the definition of `__lesf2`, `__eqsf2`, `__nesf2`, `__cmpsf2`,
 /// and `__ltsf2`.
-fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
+fn __cmpsf2(a: f32, b: f32) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f32, comparef.LE, a, b));
 }
 
 /// "These functions return a value less than or equal to zero if neither argument is NaN,
 /// and a is less than or equal to b."
-pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
+pub fn __lesf2(a: f32, b: f32) callconv(.c) i32 {
     return __cmpsf2(a, b);
 }
 
 /// "These functions return zero if neither argument is NaN, and a and b are equal."
 /// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
 /// to have the same return value.
-pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
+pub fn __eqsf2(a: f32, b: f32) callconv(.c) i32 {
     return __cmpsf2(a, b);
 }
 
 /// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
 /// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
 /// to have the same return value.
-pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
+pub fn __nesf2(a: f32, b: f32) callconv(.c) i32 {
     return __cmpsf2(a, b);
 }
 
 /// "These functions return a value less than zero if neither argument is NaN, and a
 /// is strictly less than b."
-pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
+pub fn __ltsf2(a: f32, b: f32) callconv(.c) i32 {
     return __cmpsf2(a, b);
 }
 
-fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
+fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
 }
 
-fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
+fn __aeabi_fcmplt(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
 }
 
-fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
+fn __aeabi_fcmple(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
 }
lib/compiler_rt/cmptf2.zig
@@ -33,33 +33,33 @@ comptime {
 ///
 /// Note that this matches the definition of `__letf2`, `__eqtf2`, `__netf2`, `__cmptf2`,
 /// and `__lttf2`.
-fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
+fn __cmptf2(a: f128, b: f128) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f128, comparef.LE, a, b));
 }
 
 /// "These functions return a value less than or equal to zero if neither argument is NaN,
 /// and a is less than or equal to b."
-fn __letf2(a: f128, b: f128) callconv(.C) i32 {
+fn __letf2(a: f128, b: f128) callconv(.c) i32 {
     return __cmptf2(a, b);
 }
 
 /// "These functions return zero if neither argument is NaN, and a and b are equal."
 /// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
 /// to have the same return value.
-fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
+fn __eqtf2(a: f128, b: f128) callconv(.c) i32 {
     return __cmptf2(a, b);
 }
 
 /// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
 /// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
 /// to have the same return value.
-fn __netf2(a: f128, b: f128) callconv(.C) i32 {
+fn __netf2(a: f128, b: f128) callconv(.c) i32 {
     return __cmptf2(a, b);
 }
 
 /// "These functions return a value less than zero if neither argument is NaN, and a
 /// is strictly less than b."
-fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
+fn __lttf2(a: f128, b: f128) callconv(.c) i32 {
     return __cmptf2(a, b);
 }
 
@@ -70,34 +70,34 @@ const SparcFCMP = enum(i32) {
     Unordered = 3,
 };
 
-fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 {
+fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f128, SparcFCMP, a.*, b.*));
 }
 
-fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool {
+fn _Qp_feq(a: *const f128, b: *const f128) callconv(.c) bool {
     return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Equal;
 }
 
-fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool {
+fn _Qp_fne(a: *const f128, b: *const f128) callconv(.c) bool {
     return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) != .Equal;
 }
 
-fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool {
+fn _Qp_flt(a: *const f128, b: *const f128) callconv(.c) bool {
     return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Less;
 }
 
-fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool {
+fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.c) bool {
     return @as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b))) == .Greater;
 }
 
-fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool {
+fn _Qp_fge(a: *const f128, b: *const f128) callconv(.c) bool {
     return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) {
         .Equal, .Greater => true,
         .Less, .Unordered => false,
     };
 }
 
-fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool {
+fn _Qp_fle(a: *const f128, b: *const f128) callconv(.c) bool {
     return switch (@as(SparcFCMP, @enumFromInt(_Qp_cmp(a, b)))) {
         .Equal, .Less => true,
         .Greater, .Unordered => false,
lib/compiler_rt/cmpxf2.zig
@@ -19,32 +19,32 @@ comptime {
 ///
 /// Note that this matches the definition of `__lexf2`, `__eqxf2`, `__nexf2`, `__cmpxf2`,
 /// and `__ltxf2`.
-fn __cmpxf2(a: f80, b: f80) callconv(.C) i32 {
+fn __cmpxf2(a: f80, b: f80) callconv(.c) i32 {
     return @intFromEnum(comparef.cmp_f80(comparef.LE, a, b));
 }
 
 /// "These functions return a value less than or equal to zero if neither argument is NaN,
 /// and a is less than or equal to b."
-fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
+fn __lexf2(a: f80, b: f80) callconv(.c) i32 {
     return __cmpxf2(a, b);
 }
 
 /// "These functions return zero if neither argument is NaN, and a and b are equal."
 /// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
 /// to have the same return value.
-fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
+fn __eqxf2(a: f80, b: f80) callconv(.c) i32 {
     return __cmpxf2(a, b);
 }
 
 /// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
 /// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
 /// to have the same return value.
-fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
+fn __nexf2(a: f80, b: f80) callconv(.c) i32 {
     return __cmpxf2(a, b);
 }
 
 /// "These functions return a value less than zero if neither argument is NaN, and a
 /// is strictly less than b."
-fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
+fn __ltxf2(a: f80, b: f80) callconv(.c) i32 {
     return __cmpxf2(a, b);
 }
lib/compiler_rt/cos.zig
@@ -22,12 +22,12 @@ comptime {
     @export(&cosl, .{ .name = "cosl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __cosh(a: f16) callconv(.C) f16 {
+pub fn __cosh(a: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(cosf(a));
 }
 
-pub fn cosf(x: f32) callconv(.C) f32 {
+pub fn cosf(x: f32) callconv(.c) f32 {
     // Small multiples of pi/2 rounded to double precision.
     const c1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
     const c2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
@@ -84,7 +84,7 @@ pub fn cosf(x: f32) callconv(.C) f32 {
     };
 }
 
-pub fn cos(x: f64) callconv(.C) f64 {
+pub fn cos(x: f64) callconv(.c) f64 {
     var ix = @as(u64, @bitCast(x)) >> 32;
     ix &= 0x7fffffff;
 
@@ -113,17 +113,17 @@ pub fn cos(x: f64) callconv(.C) f64 {
     };
 }
 
-pub fn __cosx(a: f80) callconv(.C) f80 {
+pub fn __cosx(a: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(cosq(a));
 }
 
-pub fn cosq(a: f128) callconv(.C) f128 {
+pub fn cosq(a: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return cos(@floatCast(a));
 }
 
-pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn cosl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __cosh(x),
         32 => return cosf(x),
lib/compiler_rt/count0bits.zig
@@ -52,7 +52,7 @@ inline fn clzXi2(comptime T: type, a: T) i32 {
     return @intCast(n - @as(T, @bitCast(x)));
 }
 
-fn __clzsi2_thumb1() callconv(.Naked) void {
+fn __clzsi2_thumb1() callconv(.naked) void {
     @setRuntimeSafety(false);
 
     // Similar to the generic version with the last two rounds replaced by a LUT
@@ -86,7 +86,7 @@ fn __clzsi2_thumb1() callconv(.Naked) void {
     unreachable;
 }
 
-fn __clzsi2_arm32() callconv(.Naked) void {
+fn __clzsi2_arm32() callconv(.naked) void {
     @setRuntimeSafety(false);
 
     asm volatile (
@@ -135,7 +135,7 @@ fn __clzsi2_arm32() callconv(.Naked) void {
     unreachable;
 }
 
-fn clzsi2_generic(a: i32) callconv(.C) i32 {
+fn clzsi2_generic(a: i32) callconv(.c) i32 {
     return clzXi2(i32, a);
 }
 
@@ -159,11 +159,11 @@ pub const __clzsi2 = switch (builtin.cpu.arch) {
     else => clzsi2_generic,
 };
 
-pub fn __clzdi2(a: i64) callconv(.C) i32 {
+pub fn __clzdi2(a: i64) callconv(.c) i32 {
     return clzXi2(i64, a);
 }
 
-pub fn __clzti2(a: i128) callconv(.C) i32 {
+pub fn __clzti2(a: i128) callconv(.c) i32 {
     return clzXi2(i128, a);
 }
 
@@ -190,15 +190,15 @@ inline fn ctzXi2(comptime T: type, a: T) i32 {
     return @intCast(n - @as(T, @bitCast((x & 1))));
 }
 
-pub fn __ctzsi2(a: i32) callconv(.C) i32 {
+pub fn __ctzsi2(a: i32) callconv(.c) i32 {
     return ctzXi2(i32, a);
 }
 
-pub fn __ctzdi2(a: i64) callconv(.C) i32 {
+pub fn __ctzdi2(a: i64) callconv(.c) i32 {
     return ctzXi2(i64, a);
 }
 
-pub fn __ctzti2(a: i128) callconv(.C) i32 {
+pub fn __ctzti2(a: i128) callconv(.c) i32 {
     return ctzXi2(i128, a);
 }
 
@@ -222,15 +222,15 @@ inline fn ffsXi2(comptime T: type, a: T) i32 {
     return @as(i32, @intCast(n - @as(T, @bitCast((x & 1))))) + 1;
 }
 
-pub fn __ffssi2(a: i32) callconv(.C) i32 {
+pub fn __ffssi2(a: i32) callconv(.c) i32 {
     return ffsXi2(i32, a);
 }
 
-pub fn __ffsdi2(a: i64) callconv(.C) i32 {
+pub fn __ffsdi2(a: i64) callconv(.c) i32 {
     return ffsXi2(i64, a);
 }
 
-pub fn __ffsti2(a: i128) callconv(.C) i32 {
+pub fn __ffsti2(a: i128) callconv(.c) i32 {
     return ffsXi2(i128, a);
 }
 
lib/compiler_rt/divc3_test.zig
@@ -17,7 +17,7 @@ test "divc3" {
     try testDiv(f128, __divtc3);
 }
 
-fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)) !void {
+fn testDiv(comptime T: type, comptime f: fn (T, T, T, T) callconv(.c) Complex(T)) !void {
     {
         const a: T = 1.0;
         const b: T = 0.0;
lib/compiler_rt/divdc3.zig
@@ -8,6 +8,6 @@ comptime {
     }
 }
 
-pub fn __divdc3(a: f64, b: f64, c: f64, d: f64) callconv(.C) Complex(f64) {
+pub fn __divdc3(a: f64, b: f64, c: f64, d: f64) callconv(.c) Complex(f64) {
     return divc3.divc3(f64, a, b, c, d);
 }
lib/compiler_rt/divdf3.zig
@@ -21,11 +21,11 @@ comptime {
     }
 }
 
-pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
+pub fn __divdf3(a: f64, b: f64) callconv(.c) f64 {
     return div(a, b);
 }
 
-fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
+fn __aeabi_ddiv(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
     return div(a, b);
 }
 
lib/compiler_rt/divhc3.zig
@@ -8,6 +8,6 @@ comptime {
     }
 }
 
-pub fn __divhc3(a: f16, b: f16, c: f16, d: f16) callconv(.C) Complex(f16) {
+pub fn __divhc3(a: f16, b: f16, c: f16, d: f16) callconv(.c) Complex(f16) {
     return divc3.divc3(f16, a, b, c, d);
 }
lib/compiler_rt/divhf3.zig
@@ -5,7 +5,7 @@ comptime {
     @export(&__divhf3, .{ .name = "__divhf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __divhf3(a: f16, b: f16) callconv(.C) f16 {
+pub fn __divhf3(a: f16, b: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(divsf3.__divsf3(a, b));
 }
lib/compiler_rt/divmodei4.zig
@@ -33,7 +33,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
     if (r) |x| if (u_sign < 0) neg(x);
 }
 
-pub fn __divei4(r_q: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.C) void {
+pub fn __divei4(r_q: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.c) void {
     @setRuntimeSafety(builtin.is_test);
     const u = u_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
     const v = v_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
@@ -41,7 +41,7 @@ pub fn __divei4(r_q: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.C)
     @call(.always_inline, divmod, .{ q, null, u, v }) catch unreachable;
 }
 
-pub fn __modei4(r_p: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.C) void {
+pub fn __modei4(r_p: [*]u32, u_p: [*]u32, v_p: [*]u32, bits: usize) callconv(.c) void {
     @setRuntimeSafety(builtin.is_test);
     const u = u_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
     const v = v_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
lib/compiler_rt/divsc3.zig
@@ -8,6 +8,6 @@ comptime {
     }
 }
 
-pub fn __divsc3(a: f32, b: f32, c: f32, d: f32) callconv(.C) Complex(f32) {
+pub fn __divsc3(a: f32, b: f32, c: f32, d: f32) callconv(.c) Complex(f32) {
     return divc3.divc3(f32, a, b, c, d);
 }
lib/compiler_rt/divsf3.zig
@@ -19,11 +19,11 @@ comptime {
     }
 }
 
-pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
+pub fn __divsf3(a: f32, b: f32) callconv(.c) f32 {
     return div(a, b);
 }
 
-fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
+fn __aeabi_fdiv(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
     return div(a, b);
 }
 
lib/compiler_rt/divtc3.zig
@@ -10,6 +10,6 @@ comptime {
     }
 }
 
-pub fn __divtc3(a: f128, b: f128, c: f128, d: f128) callconv(.C) Complex(f128) {
+pub fn __divtc3(a: f128, b: f128, c: f128, d: f128) callconv(.c) Complex(f128) {
     return divc3.divc3(f128, a, b, c, d);
 }
lib/compiler_rt/divtf3.zig
@@ -16,11 +16,11 @@ comptime {
     @export(&__divtf3, .{ .name = "__divtf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
+pub fn __divtf3(a: f128, b: f128) callconv(.c) f128 {
     return div(a, b);
 }
 
-fn _Qp_div(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+fn _Qp_div(c: *f128, a: *const f128, b: *const f128) callconv(.c) void {
     c.* = div(a.*, b.*);
 }
 
lib/compiler_rt/divti3.zig
@@ -14,13 +14,13 @@ comptime {
     }
 }
 
-pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
+pub fn __divti3(a: i128, b: i128) callconv(.c) i128 {
     return div(a, b);
 }
 
 const v128 = @Vector(2, u64);
 
-fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
+fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.c) v128 {
     return @bitCast(div(@bitCast(a), @bitCast(b)));
 }
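An aside on the `*_windows_x86_64` variants that recur below: as I understand it, they exist because LLVM lowers 128-bit integers as a two-lane `u64` vector on Windows x86-64, so the shims `@bitCast` between `i128`/`u128` and `v128`. A small self-contained check of that bit-level round trip:

    const std = @import("std");

    const v128 = @Vector(2, u64);

    test "i128 round-trips through the two-lane vector" {
        const x: i128 = 0x0123456789abcdef_fedcba9876543210;
        const v: v128 = @bitCast(x);
        const y: i128 = @bitCast(v);
        try std.testing.expectEqual(x, y);
    }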
 
lib/compiler_rt/divxc3.zig
@@ -8,6 +8,6 @@ comptime {
     }
 }
 
-pub fn __divxc3(a: f80, b: f80, c: f80, d: f80) callconv(.C) Complex(f80) {
+pub fn __divxc3(a: f80, b: f80, c: f80, d: f80) callconv(.c) Complex(f80) {
     return divc3.divc3(f80, a, b, c, d);
 }
lib/compiler_rt/divxf3.zig
@@ -12,7 +12,7 @@ comptime {
     @export(&__divxf3, .{ .name = "__divxf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
+pub fn __divxf3(a: f80, b: f80) callconv(.c) f80 {
     const T = f80;
     const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
 
lib/compiler_rt/emutls.zig
@@ -24,7 +24,7 @@ comptime {
 }
 
 /// public entrypoint for generated code using EmulatedTLS
-pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *anyopaque {
+pub fn __emutls_get_address(control: *emutls_control) callconv(.c) *anyopaque {
     return control.getPointer();
 }
 
@@ -191,7 +191,7 @@ const current_thread_storage = struct {
     }
 
     /// Invoked by pthread specific destructor. the passed argument is the ObjectArray pointer.
-    fn deinit(arrayPtr: *anyopaque) callconv(.C) void {
+    fn deinit(arrayPtr: *anyopaque) callconv(.c) void {
         var array: *ObjectArray = @ptrCast(@alignCast(arrayPtr));
         array.deinit();
     }
lib/compiler_rt/exp.zig
@@ -26,12 +26,12 @@ comptime {
     @export(&expl, .{ .name = "expl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __exph(a: f16) callconv(.C) f16 {
+pub fn __exph(a: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(expf(a));
 }
 
-pub fn expf(x_: f32) callconv(.C) f32 {
+pub fn expf(x_: f32) callconv(.c) f32 {
     const half = [_]f32{ 0.5, -0.5 };
     const ln2hi = 6.9314575195e-1;
     const ln2lo = 1.4286067653e-6;
@@ -106,7 +106,7 @@ pub fn expf(x_: f32) callconv(.C) f32 {
     }
 }
 
-pub fn exp(x_: f64) callconv(.C) f64 {
+pub fn exp(x_: f64) callconv(.c) f64 {
     const half = [_]f64{ 0.5, -0.5 };
     const ln2hi: f64 = 6.93147180369123816490e-01;
     const ln2lo: f64 = 1.90821492927058770002e-10;
@@ -190,17 +190,17 @@ pub fn exp(x_: f64) callconv(.C) f64 {
     }
 }
 
-pub fn __expx(a: f80) callconv(.C) f80 {
+pub fn __expx(a: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(expq(a));
 }
 
-pub fn expq(a: f128) callconv(.C) f128 {
+pub fn expq(a: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return exp(@floatCast(a));
 }
 
-pub fn expl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn expl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __exph(x),
         32 => return expf(x),
lib/compiler_rt/exp2.zig
@@ -26,12 +26,12 @@ comptime {
     @export(&exp2l, .{ .name = "exp2l", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __exp2h(x: f16) callconv(.C) f16 {
+pub fn __exp2h(x: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(exp2f(x));
 }
 
-pub fn exp2f(x: f32) callconv(.C) f32 {
+pub fn exp2f(x: f32) callconv(.c) f32 {
     const tblsiz: u32 = @intCast(exp2ft.len);
     const redux: f32 = 0x1.8p23 / @as(f32, @floatFromInt(tblsiz));
     const P1: f32 = 0x1.62e430p-1;
@@ -88,7 +88,7 @@ pub fn exp2f(x: f32) callconv(.C) f32 {
     return @floatCast(r * uk);
 }
 
-pub fn exp2(x: f64) callconv(.C) f64 {
+pub fn exp2(x: f64) callconv(.c) f64 {
     const tblsiz: u32 = @intCast(exp2dt.len / 2);
     const redux: f64 = 0x1.8p52 / @as(f64, @floatFromInt(tblsiz));
     const P1: f64 = 0x1.62e42fefa39efp-1;
@@ -157,17 +157,17 @@ pub fn exp2(x: f64) callconv(.C) f64 {
     return math.scalbn(r, ik);
 }
 
-pub fn __exp2x(x: f80) callconv(.C) f80 {
+pub fn __exp2x(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(exp2q(x));
 }
 
-pub fn exp2q(x: f128) callconv(.C) f128 {
+pub fn exp2q(x: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return exp2(@floatCast(x));
 }
 
-pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn exp2l(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __exp2h(x),
         32 => return exp2f(x),
lib/compiler_rt/extenddftf2.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __extenddftf2(a: f64) callconv(.C) f128 {
+pub fn __extenddftf2(a: f64) callconv(.c) f128 {
     return extendf(f128, f64, @as(u64, @bitCast(a)));
 }
 
-fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
+fn _Qp_dtoq(c: *f128, a: f64) callconv(.c) void {
     c.* = extendf(f128, f64, @as(u64, @bitCast(a)));
 }
lib/compiler_rt/extenddfxf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __extenddfxf2(a: f64) callconv(.C) f80 {
+pub fn __extenddfxf2(a: f64) callconv(.c) f80 {
     return extend_f80(f64, @as(u64, @bitCast(a)));
 }
lib/compiler_rt/extendhfdf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__extendhfdf2, .{ .name = "__extendhfdf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __extendhfdf2(a: common.F16T(f64)) callconv(.C) f64 {
+pub fn __extendhfdf2(a: common.F16T(f64)) callconv(.c) f64 {
     return extendf(f64, f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendhfsf2.zig
@@ -12,14 +12,14 @@ comptime {
     @export(&__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __extendhfsf2(a: common.F16T(f32)) callconv(.C) f32 {
+pub fn __extendhfsf2(a: common.F16T(f32)) callconv(.c) f32 {
     return extendf(f32, f16, @as(u16, @bitCast(a)));
 }
 
-fn __gnu_h2f_ieee(a: common.F16T(f32)) callconv(.C) f32 {
+fn __gnu_h2f_ieee(a: common.F16T(f32)) callconv(.c) f32 {
     return extendf(f32, f16, @as(u16, @bitCast(a)));
 }
 
-fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
+fn __aeabi_h2f(a: u16) callconv(.{ .arm_aapcs = .{} }) f32 {
     return extendf(f32, f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendhftf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __extendhftf2(a: common.F16T(f128)) callconv(.C) f128 {
+pub fn __extendhftf2(a: common.F16T(f128)) callconv(.c) f128 {
     return extendf(f128, f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendhfxf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __extendhfxf2(a: common.F16T(f80)) callconv(.C) f80 {
+fn __extendhfxf2(a: common.F16T(f80)) callconv(.c) f80 {
     return extend_f80(f16, @as(u16, @bitCast(a)));
 }
lib/compiler_rt/extendsfdf2.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-fn __extendsfdf2(a: f32) callconv(.C) f64 {
+fn __extendsfdf2(a: f32) callconv(.c) f64 {
     return extendf(f64, f32, @as(u32, @bitCast(a)));
 }
 
-fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 {
+fn __aeabi_f2d(a: f32) callconv(.{ .arm_aapcs = .{} }) f64 {
     return extendf(f64, f32, @as(u32, @bitCast(a)));
 }
lib/compiler_rt/extendsftf2.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __extendsftf2(a: f32) callconv(.C) f128 {
+pub fn __extendsftf2(a: f32) callconv(.c) f128 {
     return extendf(f128, f32, @as(u32, @bitCast(a)));
 }
 
-fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
+fn _Qp_stoq(c: *f128, a: f32) callconv(.c) void {
     c.* = extendf(f128, f32, @as(u32, @bitCast(a)));
 }
lib/compiler_rt/extendsfxf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __extendsfxf2(a: f32) callconv(.C) f80 {
+fn __extendsfxf2(a: f32) callconv(.c) f80 {
     return extend_f80(f32, @as(u32, @bitCast(a)));
 }
lib/compiler_rt/extendxftf2.zig
@@ -7,7 +7,7 @@ comptime {
     @export(&__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __extendxftf2(a: f80) callconv(.C) f128 {
+fn __extendxftf2(a: f80) callconv(.c) f128 {
     const src_int_bit: u64 = 0x8000000000000000;
     const src_sig_mask = ~src_int_bit;
     const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
lib/compiler_rt/fabs.zig
@@ -17,27 +17,27 @@ comptime {
     @export(&fabsl, .{ .name = "fabsl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fabsh(a: f16) callconv(.C) f16 {
+pub fn __fabsh(a: f16) callconv(.c) f16 {
     return generic_fabs(a);
 }
 
-pub fn fabsf(a: f32) callconv(.C) f32 {
+pub fn fabsf(a: f32) callconv(.c) f32 {
     return generic_fabs(a);
 }
 
-pub fn fabs(a: f64) callconv(.C) f64 {
+pub fn fabs(a: f64) callconv(.c) f64 {
     return generic_fabs(a);
 }
 
-pub fn __fabsx(a: f80) callconv(.C) f80 {
+pub fn __fabsx(a: f80) callconv(.c) f80 {
     return generic_fabs(a);
 }
 
-pub fn fabsq(a: f128) callconv(.C) f128 {
+pub fn fabsq(a: f128) callconv(.c) f128 {
     return generic_fabs(a);
 }
 
-pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn fabsl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __fabsh(x),
         32 => return fabsf(x),
lib/compiler_rt/fixdfdi.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __fixdfdi(a: f64) callconv(.C) i64 {
+pub fn __fixdfdi(a: f64) callconv(.c) i64 {
     return intFromFloat(i64, a);
 }
 
-fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
+fn __aeabi_d2lz(a: f64) callconv(.{ .arm_aapcs = .{} }) i64 {
     return intFromFloat(i64, a);
 }
lib/compiler_rt/fixdfsi.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __fixdfsi(a: f64) callconv(.C) i32 {
+pub fn __fixdfsi(a: f64) callconv(.c) i32 {
     return intFromFloat(i32, a);
 }
 
-fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
+fn __aeabi_d2iz(a: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
     return intFromFloat(i32, a);
 }
lib/compiler_rt/fixdfti.zig
@@ -15,12 +15,12 @@ comptime {
     }
 }
 
-pub fn __fixdfti(a: f64) callconv(.C) i128 {
+pub fn __fixdfti(a: f64) callconv(.c) i128 {
     return intFromFloat(i128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
+fn __fixdfti_windows_x86_64(a: f64) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(i128, a));
 }
lib/compiler_rt/fixhfdi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixhfdi(a: f16) callconv(.C) i64 {
+fn __fixhfdi(a: f16) callconv(.c) i64 {
     return intFromFloat(i64, a);
 }
lib/compiler_rt/fixhfsi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixhfsi(a: f16) callconv(.C) i32 {
+fn __fixhfsi(a: f16) callconv(.c) i32 {
     return intFromFloat(i32, a);
 }
lib/compiler_rt/fixhfti.zig
@@ -12,12 +12,12 @@ comptime {
     }
 }
 
-pub fn __fixhfti(a: f16) callconv(.C) i128 {
+pub fn __fixhfti(a: f16) callconv(.c) i128 {
     return intFromFloat(i128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixhfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
+fn __fixhfti_windows_x86_64(a: f16) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(i128, a));
 }
lib/compiler_rt/fixsfdi.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __fixsfdi(a: f32) callconv(.C) i64 {
+pub fn __fixsfdi(a: f32) callconv(.c) i64 {
     return intFromFloat(i64, a);
 }
 
-fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
+fn __aeabi_f2lz(a: f32) callconv(.{ .arm_aapcs = .{} }) i64 {
     return intFromFloat(i64, a);
 }
lib/compiler_rt/fixsfsi.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __fixsfsi(a: f32) callconv(.C) i32 {
+pub fn __fixsfsi(a: f32) callconv(.c) i32 {
     return intFromFloat(i32, a);
 }
 
-fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
+fn __aeabi_f2iz(a: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return intFromFloat(i32, a);
 }
lib/compiler_rt/fixsfti.zig
@@ -15,12 +15,12 @@ comptime {
     }
 }
 
-pub fn __fixsfti(a: f32) callconv(.C) i128 {
+pub fn __fixsfti(a: f32) callconv(.c) i128 {
     return intFromFloat(i128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
+fn __fixsfti_windows_x86_64(a: f32) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(i128, a));
 }
lib/compiler_rt/fixtfdi.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fixtfdi(a: f128) callconv(.C) i64 {
+pub fn __fixtfdi(a: f128) callconv(.c) i64 {
     return intFromFloat(i64, a);
 }
 
-fn _Qp_qtox(a: *const f128) callconv(.C) i64 {
+fn _Qp_qtox(a: *const f128) callconv(.c) i64 {
     return intFromFloat(i64, a.*);
 }
lib/compiler_rt/fixtfsi.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fixtfsi(a: f128) callconv(.C) i32 {
+pub fn __fixtfsi(a: f128) callconv(.c) i32 {
     return intFromFloat(i32, a);
 }
 
-fn _Qp_qtoi(a: *const f128) callconv(.C) i32 {
+fn _Qp_qtoi(a: *const f128) callconv(.c) i32 {
     return intFromFloat(i32, a.*);
 }
lib/compiler_rt/fixtfti.zig
@@ -14,12 +14,12 @@ comptime {
     }
 }
 
-pub fn __fixtfti(a: f128) callconv(.C) i128 {
+pub fn __fixtfti(a: f128) callconv(.c) i128 {
     return intFromFloat(i128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixtfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
+fn __fixtfti_windows_x86_64(a: f128) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(i128, a));
 }
lib/compiler_rt/fixunsdfdi.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
+pub fn __fixunsdfdi(a: f64) callconv(.c) u64 {
     return intFromFloat(u64, a);
 }
 
-fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
+fn __aeabi_d2ulz(a: f64) callconv(.{ .arm_aapcs = .{} }) u64 {
     return intFromFloat(u64, a);
 }
lib/compiler_rt/fixunsdfsi.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
+pub fn __fixunsdfsi(a: f64) callconv(.c) u32 {
     return intFromFloat(u32, a);
 }
 
-fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
+fn __aeabi_d2uiz(a: f64) callconv(.{ .arm_aapcs = .{} }) u32 {
     return intFromFloat(u32, a);
 }
lib/compiler_rt/fixunsdfti.zig
@@ -15,12 +15,12 @@ comptime {
     }
 }
 
-pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
+pub fn __fixunsdfti(a: f64) callconv(.c) u128 {
     return intFromFloat(u128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
+fn __fixunsdfti_windows_x86_64(a: f64) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(u128, a));
 }
lib/compiler_rt/fixunshfdi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixunshfdi(a: f16) callconv(.C) u64 {
+fn __fixunshfdi(a: f16) callconv(.c) u64 {
     return intFromFloat(u64, a);
 }
lib/compiler_rt/fixunshfsi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixunshfsi(a: f16) callconv(.C) u32 {
+fn __fixunshfsi(a: f16) callconv(.c) u32 {
     return intFromFloat(u32, a);
 }
lib/compiler_rt/fixunshfti.zig
@@ -12,12 +12,12 @@ comptime {
     }
 }
 
-pub fn __fixunshfti(a: f16) callconv(.C) u128 {
+pub fn __fixunshfti(a: f16) callconv(.c) u128 {
     return intFromFloat(u128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
+fn __fixunshfti_windows_x86_64(a: f16) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(u128, a));
 }
lib/compiler_rt/fixunssfdi.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
+pub fn __fixunssfdi(a: f32) callconv(.c) u64 {
     return intFromFloat(u64, a);
 }
 
-fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
+fn __aeabi_f2ulz(a: f32) callconv(.{ .arm_aapcs = .{} }) u64 {
     return intFromFloat(u64, a);
 }
lib/compiler_rt/fixunssfsi.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
+pub fn __fixunssfsi(a: f32) callconv(.c) u32 {
     return intFromFloat(u32, a);
 }
 
-fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
+fn __aeabi_f2uiz(a: f32) callconv(.{ .arm_aapcs = .{} }) u32 {
     return intFromFloat(u32, a);
 }
lib/compiler_rt/fixunssfti.zig
@@ -15,12 +15,12 @@ comptime {
     }
 }
 
-pub fn __fixunssfti(a: f32) callconv(.C) u128 {
+pub fn __fixunssfti(a: f32) callconv(.c) u128 {
     return intFromFloat(u128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
+fn __fixunssfti_windows_x86_64(a: f32) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(u128, a));
 }
lib/compiler_rt/fixunstfdi.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
+pub fn __fixunstfdi(a: f128) callconv(.c) u64 {
     return intFromFloat(u64, a);
 }
 
-fn _Qp_qtoux(a: *const f128) callconv(.C) u64 {
+fn _Qp_qtoux(a: *const f128) callconv(.c) u64 {
     return intFromFloat(u64, a.*);
 }
lib/compiler_rt/fixunstfsi.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
+pub fn __fixunstfsi(a: f128) callconv(.c) u32 {
     return intFromFloat(u32, a);
 }
 
-fn _Qp_qtoui(a: *const f128) callconv(.C) u32 {
+fn _Qp_qtoui(a: *const f128) callconv(.c) u32 {
     return intFromFloat(u32, a.*);
 }
lib/compiler_rt/fixunstfti.zig
@@ -14,12 +14,12 @@ comptime {
     }
 }
 
-pub fn __fixunstfti(a: f128) callconv(.C) u128 {
+pub fn __fixunstfti(a: f128) callconv(.c) u128 {
     return intFromFloat(u128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixunstfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
+fn __fixunstfti_windows_x86_64(a: f128) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(u128, a));
 }
lib/compiler_rt/fixunsxfdi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixunsxfdi(a: f80) callconv(.C) u64 {
+fn __fixunsxfdi(a: f80) callconv(.c) u64 {
     return intFromFloat(u64, a);
 }
lib/compiler_rt/fixunsxfsi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixunsxfsi(a: f80) callconv(.C) u32 {
+fn __fixunsxfsi(a: f80) callconv(.c) u32 {
     return intFromFloat(u32, a);
 }
lib/compiler_rt/fixunsxfti.zig
@@ -12,12 +12,12 @@ comptime {
     }
 }
 
-pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
+pub fn __fixunsxfti(a: f80) callconv(.c) u128 {
     return intFromFloat(u128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixunsxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
+fn __fixunsxfti_windows_x86_64(a: f80) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(u128, a));
 }
lib/compiler_rt/fixxfdi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixxfdi(a: f80) callconv(.C) i64 {
+fn __fixxfdi(a: f80) callconv(.c) i64 {
     return intFromFloat(i64, a);
 }
lib/compiler_rt/fixxfsi.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __fixxfsi(a: f80) callconv(.C) i32 {
+fn __fixxfsi(a: f80) callconv(.c) i32 {
     return intFromFloat(i32, a);
 }
lib/compiler_rt/fixxfti.zig
@@ -12,12 +12,12 @@ comptime {
     }
 }
 
-pub fn __fixxfti(a: f80) callconv(.C) i128 {
+pub fn __fixxfti(a: f80) callconv(.c) i128 {
     return intFromFloat(i128, a);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __fixxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
+fn __fixxfti_windows_x86_64(a: f80) callconv(.c) v2u64 {
     return @bitCast(intFromFloat(i128, a));
 }
lib/compiler_rt/floatdidf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatdidf(a: i64) callconv(.C) f64 {
+pub fn __floatdidf(a: i64) callconv(.c) f64 {
     return floatFromInt(f64, a);
 }
 
-fn __aeabi_l2d(a: i64) callconv(.AAPCS) f64 {
+fn __aeabi_l2d(a: i64) callconv(.{ .arm_aapcs = .{} }) f64 {
     return floatFromInt(f64, a);
 }
lib/compiler_rt/floatdihf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __floatdihf(a: i64) callconv(.C) f16 {
+fn __floatdihf(a: i64) callconv(.c) f16 {
     return floatFromInt(f16, a);
 }
lib/compiler_rt/floatdisf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatdisf(a: i64) callconv(.C) f32 {
+pub fn __floatdisf(a: i64) callconv(.c) f32 {
     return floatFromInt(f32, a);
 }
 
-fn __aeabi_l2f(a: i64) callconv(.AAPCS) f32 {
+fn __aeabi_l2f(a: i64) callconv(.{ .arm_aapcs = .{} }) f32 {
     return floatFromInt(f32, a);
 }
lib/compiler_rt/floatditf.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__floatditf, .{ .name = "__floatditf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __floatditf(a: i64) callconv(.C) f128 {
+pub fn __floatditf(a: i64) callconv(.c) f128 {
     return floatFromInt(f128, a);
 }
 
-fn _Qp_xtoq(c: *f128, a: i64) callconv(.C) void {
+fn _Qp_xtoq(c: *f128, a: i64) callconv(.c) void {
     c.* = floatFromInt(f128, a);
 }
lib/compiler_rt/floatdixf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __floatdixf(a: i64) callconv(.C) f80 {
+fn __floatdixf(a: i64) callconv(.c) f80 {
     return floatFromInt(f80, a);
 }
lib/compiler_rt/floatsidf.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __floatsidf(a: i32) callconv(.C) f64 {
+pub fn __floatsidf(a: i32) callconv(.c) f64 {
     return floatFromInt(f64, a);
 }
 
-fn __aeabi_i2d(a: i32) callconv(.AAPCS) f64 {
+fn __aeabi_i2d(a: i32) callconv(.{ .arm_aapcs = .{} }) f64 {
     return floatFromInt(f64, a);
 }
lib/compiler_rt/floatsihf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __floatsihf(a: i32) callconv(.C) f16 {
+fn __floatsihf(a: i32) callconv(.c) f16 {
     return floatFromInt(f16, a);
 }
lib/compiler_rt/floatsisf.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __floatsisf(a: i32) callconv(.C) f32 {
+pub fn __floatsisf(a: i32) callconv(.c) f32 {
     return floatFromInt(f32, a);
 }
 
-fn __aeabi_i2f(a: i32) callconv(.AAPCS) f32 {
+fn __aeabi_i2f(a: i32) callconv(.{ .arm_aapcs = .{} }) f32 {
     return floatFromInt(f32, a);
 }
lib/compiler_rt/floatsitf.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __floatsitf(a: i32) callconv(.C) f128 {
+pub fn __floatsitf(a: i32) callconv(.c) f128 {
     return floatFromInt(f128, a);
 }
 
-fn _Qp_itoq(c: *f128, a: i32) callconv(.C) void {
+fn _Qp_itoq(c: *f128, a: i32) callconv(.c) void {
     c.* = floatFromInt(f128, a);
 }
lib/compiler_rt/floatsixf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatsixf, .{ .name = "__floatsixf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __floatsixf(a: i32) callconv(.C) f80 {
+fn __floatsixf(a: i32) callconv(.c) f80 {
     return floatFromInt(f80, a);
 }
lib/compiler_rt/floattidf.zig
@@ -15,10 +15,10 @@ comptime {
     }
 }
 
-pub fn __floattidf(a: i128) callconv(.C) f64 {
+pub fn __floattidf(a: i128) callconv(.c) f64 {
     return floatFromInt(f64, a);
 }
 
-fn __floattidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 {
+fn __floattidf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f64 {
     return floatFromInt(f64, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattihf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floattihf(a: i128) callconv(.C) f16 {
+pub fn __floattihf(a: i128) callconv(.c) f16 {
     return floatFromInt(f16, a);
 }
 
-fn __floattihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 {
+fn __floattihf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f16 {
     return floatFromInt(f16, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattisf.zig
@@ -15,10 +15,10 @@ comptime {
     }
 }
 
-pub fn __floattisf(a: i128) callconv(.C) f32 {
+pub fn __floattisf(a: i128) callconv(.c) f32 {
     return floatFromInt(f32, a);
 }
 
-fn __floattisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 {
+fn __floattisf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f32 {
     return floatFromInt(f32, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattitf.zig
@@ -14,10 +14,10 @@ comptime {
     }
 }
 
-pub fn __floattitf(a: i128) callconv(.C) f128 {
+pub fn __floattitf(a: i128) callconv(.c) f128 {
     return floatFromInt(f128, a);
 }
 
-fn __floattitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 {
+fn __floattitf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f128 {
     return floatFromInt(f128, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floattixf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floattixf(a: i128) callconv(.C) f80 {
+pub fn __floattixf(a: i128) callconv(.c) f80 {
     return floatFromInt(f80, a);
 }
 
-fn __floattixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 {
+fn __floattixf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f80 {
     return floatFromInt(f80, @as(i128, @bitCast(a)));
 }
lib/compiler_rt/floatundidf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatundidf(a: u64) callconv(.C) f64 {
+pub fn __floatundidf(a: u64) callconv(.c) f64 {
     return floatFromInt(f64, a);
 }
 
-fn __aeabi_ul2d(a: u64) callconv(.AAPCS) f64 {
+fn __aeabi_ul2d(a: u64) callconv(.{ .arm_aapcs = .{} }) f64 {
     return floatFromInt(f64, a);
 }
lib/compiler_rt/floatundihf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatundihf, .{ .name = "__floatundihf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __floatundihf(a: u64) callconv(.C) f16 {
+fn __floatundihf(a: u64) callconv(.c) f16 {
     return floatFromInt(f16, a);
 }
lib/compiler_rt/floatundisf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatundisf(a: u64) callconv(.C) f32 {
+pub fn __floatundisf(a: u64) callconv(.c) f32 {
     return floatFromInt(f32, a);
 }
 
-fn __aeabi_ul2f(a: u64) callconv(.AAPCS) f32 {
+fn __aeabi_ul2f(a: u64) callconv(.{ .arm_aapcs = .{} }) f32 {
     return floatFromInt(f32, a);
 }
lib/compiler_rt/floatunditf.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__floatunditf, .{ .name = "__floatunditf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __floatunditf(a: u64) callconv(.C) f128 {
+pub fn __floatunditf(a: u64) callconv(.c) f128 {
     return floatFromInt(f128, a);
 }
 
-fn _Qp_uxtoq(c: *f128, a: u64) callconv(.C) void {
+fn _Qp_uxtoq(c: *f128, a: u64) callconv(.c) void {
     c.* = floatFromInt(f128, a);
 }
lib/compiler_rt/floatundixf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatundixf, .{ .name = "__floatundixf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __floatundixf(a: u64) callconv(.C) f80 {
+fn __floatundixf(a: u64) callconv(.c) f80 {
     return floatFromInt(f80, a);
 }
lib/compiler_rt/floatunsidf.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __floatunsidf(a: u32) callconv(.C) f64 {
+pub fn __floatunsidf(a: u32) callconv(.c) f64 {
     return floatFromInt(f64, a);
 }
 
-fn __aeabi_ui2d(a: u32) callconv(.AAPCS) f64 {
+fn __aeabi_ui2d(a: u32) callconv(.{ .arm_aapcs = .{} }) f64 {
     return floatFromInt(f64, a);
 }
lib/compiler_rt/floatunsihf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatunsihf, .{ .name = "__floatunsihf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __floatunsihf(a: u32) callconv(.C) f16 {
+pub fn __floatunsihf(a: u32) callconv(.c) f16 {
     return floatFromInt(f16, a);
 }
lib/compiler_rt/floatunsisf.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __floatunsisf(a: u32) callconv(.C) f32 {
+pub fn __floatunsisf(a: u32) callconv(.c) f32 {
     return floatFromInt(f32, a);
 }
 
-fn __aeabi_ui2f(a: u32) callconv(.AAPCS) f32 {
+fn __aeabi_ui2f(a: u32) callconv(.{ .arm_aapcs = .{} }) f32 {
     return floatFromInt(f32, a);
 }
lib/compiler_rt/floatunsitf.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__floatunsitf, .{ .name = "__floatunsitf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __floatunsitf(a: u32) callconv(.C) f128 {
+pub fn __floatunsitf(a: u32) callconv(.c) f128 {
     return floatFromInt(f128, a);
 }
 
-fn _Qp_uitoq(c: *f128, a: u32) callconv(.C) void {
+fn _Qp_uitoq(c: *f128, a: u32) callconv(.c) void {
     c.* = floatFromInt(f128, a);
 }
lib/compiler_rt/floatunsixf.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__floatunsixf, .{ .name = "__floatunsixf", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __floatunsixf(a: u32) callconv(.C) f80 {
+fn __floatunsixf(a: u32) callconv(.c) f80 {
     return floatFromInt(f80, a);
 }
lib/compiler_rt/floatuntidf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatuntidf(a: u128) callconv(.C) f64 {
+pub fn __floatuntidf(a: u128) callconv(.c) f64 {
     return floatFromInt(f64, a);
 }
 
-fn __floatuntidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 {
+fn __floatuntidf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f64 {
     return floatFromInt(f64, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntihf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatuntihf(a: u128) callconv(.C) f16 {
+pub fn __floatuntihf(a: u128) callconv(.c) f16 {
     return floatFromInt(f16, a);
 }
 
-fn __floatuntihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 {
+fn __floatuntihf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f16 {
     return floatFromInt(f16, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntisf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatuntisf(a: u128) callconv(.C) f32 {
+pub fn __floatuntisf(a: u128) callconv(.c) f32 {
     return floatFromInt(f32, a);
 }
 
-fn __floatuntisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 {
+fn __floatuntisf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f32 {
     return floatFromInt(f32, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntitf.zig
@@ -14,10 +14,10 @@ comptime {
     }
 }
 
-pub fn __floatuntitf(a: u128) callconv(.C) f128 {
+pub fn __floatuntitf(a: u128) callconv(.c) f128 {
     return floatFromInt(f128, a);
 }
 
-fn __floatuntitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 {
+fn __floatuntitf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f128 {
     return floatFromInt(f128, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floatuntixf.zig
@@ -12,10 +12,10 @@ comptime {
     }
 }
 
-pub fn __floatuntixf(a: u128) callconv(.C) f80 {
+pub fn __floatuntixf(a: u128) callconv(.c) f80 {
     return floatFromInt(f80, a);
 }
 
-fn __floatuntixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 {
+fn __floatuntixf_windows_x86_64(a: @Vector(2, u64)) callconv(.c) f80 {
     return floatFromInt(f80, @as(u128, @bitCast(a)));
 }
lib/compiler_rt/floor.zig
@@ -26,7 +26,7 @@ comptime {
     @export(&floorl, .{ .name = "floorl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __floorh(x: f16) callconv(.C) f16 {
+pub fn __floorh(x: f16) callconv(.c) f16 {
     var u: u16 = @bitCast(x);
     const e = @as(i16, @intCast((u >> 10) & 31)) - 15;
     var m: u16 = undefined;
@@ -60,7 +60,7 @@ pub fn __floorh(x: f16) callconv(.C) f16 {
     }
 }
 
-pub fn floorf(x: f32) callconv(.C) f32 {
+pub fn floorf(x: f32) callconv(.c) f32 {
     var u: u32 = @bitCast(x);
     const e = @as(i32, @intCast((u >> 23) & 0xFF)) - 0x7F;
     var m: u32 = undefined;
@@ -94,7 +94,7 @@ pub fn floorf(x: f32) callconv(.C) f32 {
     }
 }
 
-pub fn floor(x: f64) callconv(.C) f64 {
+pub fn floor(x: f64) callconv(.c) f64 {
     const f64_toint = 1.0 / math.floatEps(f64);
 
     const u: u64 = @bitCast(x);
@@ -125,12 +125,12 @@ pub fn floor(x: f64) callconv(.C) f64 {
     }
 }
 
-pub fn __floorx(x: f80) callconv(.C) f80 {
+pub fn __floorx(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(floorq(x));
 }
 
-pub fn floorq(x: f128) callconv(.C) f128 {
+pub fn floorq(x: f128) callconv(.c) f128 {
     const f128_toint = 1.0 / math.floatEps(f128);
 
     const u: u128 = @bitCast(x);
@@ -159,7 +159,7 @@ pub fn floorq(x: f128) callconv(.C) f128 {
     }
 }
 
-pub fn floorl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn floorl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __floorh(x),
         32 => return floorf(x),
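`floorl` shows the dispatch pattern every `*l` export in this commit uses: `c_longdouble` has a target-dependent width, so the `switch` on `@typeInfo(c_longdouble).float.bits` is resolved at comptime and only one branch is ever emitted. A small sketch of that invariant, assuming only the widths these dispatchers handle:

    test "c_longdouble width is comptime-known" {
        const bits = @typeInfo(c_longdouble).float.bits;
        try @import("std").testing.expect(
            bits == 16 or bits == 32 or bits == 64 or bits == 80 or bits == 128,
        );
    }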
lib/compiler_rt/fma.zig
@@ -24,12 +24,12 @@ comptime {
     @export(&fmal, .{ .name = "fmal", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fmah(x: f16, y: f16, z: f16) callconv(.C) f16 {
+pub fn __fmah(x: f16, y: f16, z: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(fmaf(x, y, z));
 }
 
-pub fn fmaf(x: f32, y: f32, z: f32) callconv(.C) f32 {
+pub fn fmaf(x: f32, y: f32, z: f32) callconv(.c) f32 {
     const xy = @as(f64, x) * y;
     const xy_z = xy + z;
     const u = @as(u64, @bitCast(xy_z));
@@ -44,7 +44,7 @@ pub fn fmaf(x: f32, y: f32, z: f32) callconv(.C) f32 {
 }
 
 /// NOTE: Upstream fma.c has been rewritten completely to raise fp exceptions more accurately.
-pub fn fma(x: f64, y: f64, z: f64) callconv(.C) f64 {
+pub fn fma(x: f64, y: f64, z: f64) callconv(.c) f64 {
     if (!math.isFinite(x) or !math.isFinite(y)) {
         return x * y + z;
     }
@@ -91,7 +91,7 @@ pub fn fma(x: f64, y: f64, z: f64) callconv(.C) f64 {
     }
 }
 
-pub fn __fmax(a: f80, b: f80, c: f80) callconv(.C) f80 {
+pub fn __fmax(a: f80, b: f80, c: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(fmaq(a, b, c));
 }
@@ -103,7 +103,7 @@ pub fn __fmax(a: f80, b: f80, c: f80) callconv(.C) f80 {
 ///
 ///      Dekker, T.  A Floating-Point Technique for Extending the
 ///      Available Precision.  Numer. Math. 18, 224-242 (1971).
-pub fn fmaq(x: f128, y: f128, z: f128) callconv(.C) f128 {
+pub fn fmaq(x: f128, y: f128, z: f128) callconv(.c) f128 {
     if (!math.isFinite(x) or !math.isFinite(y)) {
         return x * y + z;
     }
@@ -150,7 +150,7 @@ pub fn fmaq(x: f128, y: f128, z: f128) callconv(.C) f128 {
     }
 }
 
-pub fn fmal(x: c_longdouble, y: c_longdouble, z: c_longdouble) callconv(.C) c_longdouble {
+pub fn fmal(x: c_longdouble, y: c_longdouble, z: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __fmah(x, y, z),
         32 => return fmaf(x, y, z),
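Per the Dekker reference above, the point of these routines is that `x * y + z` incurs a single rounding. A hedged sketch via Zig's `@mulAdd` builtin, which lowers to this family when the target lacks hardware FMA; the case chosen here is exact either way:

    test "fused multiply-add, exact case" {
        // 2 * 3 + 1 = 7 is exactly representable, so fused and unfused agree.
        try @import("std").testing.expectEqual(@as(f64, 7.0), @mulAdd(f64, 2.0, 3.0, 1.0));
    }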
lib/compiler_rt/fmax.zig
@@ -18,27 +18,27 @@ comptime {
     @export(&fmaxl, .{ .name = "fmaxl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fmaxh(x: f16, y: f16) callconv(.C) f16 {
+pub fn __fmaxh(x: f16, y: f16) callconv(.c) f16 {
     return generic_fmax(f16, x, y);
 }
 
-pub fn fmaxf(x: f32, y: f32) callconv(.C) f32 {
+pub fn fmaxf(x: f32, y: f32) callconv(.c) f32 {
     return generic_fmax(f32, x, y);
 }
 
-pub fn fmax(x: f64, y: f64) callconv(.C) f64 {
+pub fn fmax(x: f64, y: f64) callconv(.c) f64 {
     return generic_fmax(f64, x, y);
 }
 
-pub fn __fmaxx(x: f80, y: f80) callconv(.C) f80 {
+pub fn __fmaxx(x: f80, y: f80) callconv(.c) f80 {
     return generic_fmax(f80, x, y);
 }
 
-pub fn fmaxq(x: f128, y: f128) callconv(.C) f128 {
+pub fn fmaxq(x: f128, y: f128) callconv(.c) f128 {
     return generic_fmax(f128, x, y);
 }
 
-pub fn fmaxl(x: c_longdouble, y: c_longdouble) callconv(.C) c_longdouble {
+pub fn fmaxl(x: c_longdouble, y: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __fmaxh(x, y),
         32 => return fmaxf(x, y),
lib/compiler_rt/fmin.zig
@@ -18,27 +18,27 @@ comptime {
     @export(&fminl, .{ .name = "fminl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fminh(x: f16, y: f16) callconv(.C) f16 {
+pub fn __fminh(x: f16, y: f16) callconv(.c) f16 {
     return generic_fmin(f16, x, y);
 }
 
-pub fn fminf(x: f32, y: f32) callconv(.C) f32 {
+pub fn fminf(x: f32, y: f32) callconv(.c) f32 {
     return generic_fmin(f32, x, y);
 }
 
-pub fn fmin(x: f64, y: f64) callconv(.C) f64 {
+pub fn fmin(x: f64, y: f64) callconv(.c) f64 {
     return generic_fmin(f64, x, y);
 }
 
-pub fn __fminx(x: f80, y: f80) callconv(.C) f80 {
+pub fn __fminx(x: f80, y: f80) callconv(.c) f80 {
     return generic_fmin(f80, x, y);
 }
 
-pub fn fminq(x: f128, y: f128) callconv(.C) f128 {
+pub fn fminq(x: f128, y: f128) callconv(.c) f128 {
     return generic_fmin(f128, x, y);
 }
 
-pub fn fminl(x: c_longdouble, y: c_longdouble) callconv(.C) c_longdouble {
+pub fn fminl(x: c_longdouble, y: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __fminh(x, y),
         32 => return fminf(x, y),
lib/compiler_rt/fmod.zig
@@ -20,22 +20,22 @@ comptime {
     @export(&fmodl, .{ .name = "fmodl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __fmodh(x: f16, y: f16) callconv(.C) f16 {
+pub fn __fmodh(x: f16, y: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(fmodf(x, y));
 }
 
-pub fn fmodf(x: f32, y: f32) callconv(.C) f32 {
+pub fn fmodf(x: f32, y: f32) callconv(.c) f32 {
     return generic_fmod(f32, x, y);
 }
 
-pub fn fmod(x: f64, y: f64) callconv(.C) f64 {
+pub fn fmod(x: f64, y: f64) callconv(.c) f64 {
     return generic_fmod(f64, x, y);
 }
 
 /// fmodx - floating modulo large, returns the remainder of division for f80 types
 /// Logic and flow heavily inspired by MUSL fmodl for 113 mantissa digits
-pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
+pub fn __fmodx(a: f80, b: f80) callconv(.c) f80 {
     const T = f80;
     const Z = std.meta.Int(.unsigned, @bitSizeOf(T));
 
@@ -133,7 +133,7 @@ pub fn __fmodx(a: f80, b: f80) callconv(.C) f80 {
 
 /// fmodq - floating modulo large, returns the remainder of division for f128 types
 /// Logic and flow heavily inspired by MUSL fmodl for 113 mantissa digits
-pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
+pub fn fmodq(a: f128, b: f128) callconv(.c) f128 {
     var amod = a;
     var bmod = b;
     const aPtr_u64: [*]u64 = @ptrCast(&amod);
@@ -250,7 +250,7 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
     return amod;
 }
 
-pub fn fmodl(a: c_longdouble, b: c_longdouble) callconv(.C) c_longdouble {
+pub fn fmodl(a: c_longdouble, b: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __fmodh(a, b),
         32 => return fmodf(a, b),
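These routines implement C's truncating remainder: the result carries the dividend's sign and its magnitude is less than `|b|`. A minimal check against the `fmod` export above:

    test "fmod follows the dividend's sign" {
        const t = @import("std").testing;
        try t.expectEqual(@as(f64, 1.5), fmod(5.5, 2.0));
        try t.expectEqual(@as(f64, -1.5), fmod(-5.5, 2.0));
    }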
lib/compiler_rt/gedf2.zig
@@ -17,20 +17,20 @@ comptime {
 
 /// "These functions return a value greater than or equal to zero if neither
 /// argument is NaN, and a is greater than or equal to b."
-pub fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
+pub fn __gedf2(a: f64, b: f64) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f64, comparef.GE, a, b));
 }
 
 /// "These functions return a value greater than zero if neither argument is NaN,
 /// and a is strictly greater than b."
-pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
+pub fn __gtdf2(a: f64, b: f64) callconv(.c) i32 {
     return __gedf2(a, b);
 }
 
-fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
+fn __aeabi_dcmpge(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f64, comparef.GE, a, b) != .Less);
 }
 
-fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
+fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f64, comparef.GE, a, b) == .Greater);
 }
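The quoted libgcc contract lets callers branch on the sign of the result, with NaN forced negative so that `>= 0` really means "a >= b". A sketch of the expected values, assuming the `GE` convention maps unordered inputs to -1 as the doc comments state:

    test "__gedf2 soft-float comparison contract" {
        const t = @import("std").testing;
        try t.expect(__gedf2(2.0, 1.0) > 0);
        try t.expect(__gedf2(1.0, 1.0) == 0);
        try t.expect(__gedf2(@import("std").math.nan(f64), 1.0) < 0);
    }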
lib/compiler_rt/gehf2.zig
@@ -12,12 +12,12 @@ comptime {
 
 /// "These functions return a value greater than or equal to zero if neither
 /// argument is NaN, and a is greater than or equal to b."
-pub fn __gehf2(a: f16, b: f16) callconv(.C) i32 {
+pub fn __gehf2(a: f16, b: f16) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f16, comparef.GE, a, b));
 }
 
 /// "These functions return a value greater than zero if neither argument is NaN,
 /// and a is strictly greater than b."
-pub fn __gthf2(a: f16, b: f16) callconv(.C) i32 {
+pub fn __gthf2(a: f16, b: f16) callconv(.c) i32 {
     return __gehf2(a, b);
 }
lib/compiler_rt/gesf2.zig
@@ -17,20 +17,20 @@ comptime {
 
 /// "These functions return a value greater than or equal to zero if neither
 /// argument is NaN, and a is greater than or equal to b."
-pub fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
+pub fn __gesf2(a: f32, b: f32) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f32, comparef.GE, a, b));
 }
 
 /// "These functions return a value greater than zero if neither argument is NaN,
 /// and a is strictly greater than b."
-pub fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
+pub fn __gtsf2(a: f32, b: f32) callconv(.c) i32 {
     return __gesf2(a, b);
 }
 
-fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
+fn __aeabi_fcmpge(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f32, comparef.GE, a, b) != .Less);
 }
 
-fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
+fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return @intFromBool(comparef.cmpf2(f32, comparef.GE, a, b) == .Greater);
 }
lib/compiler_rt/getf2.zig
@@ -19,12 +19,12 @@ comptime {
 
 /// "These functions return a value greater than or equal to zero if neither
 /// argument is NaN, and a is greater than or equal to b."
-fn __getf2(a: f128, b: f128) callconv(.C) i32 {
+fn __getf2(a: f128, b: f128) callconv(.c) i32 {
     return @intFromEnum(comparef.cmpf2(f128, comparef.GE, a, b));
 }
 
 /// "These functions return a value greater than zero if neither argument is NaN,
 /// and a is strictly greater than b."
-fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
+fn __gttf2(a: f128, b: f128) callconv(.c) i32 {
     return __getf2(a, b);
 }
lib/compiler_rt/gexf2.zig
@@ -8,10 +8,10 @@ comptime {
     @export(&__gtxf2, .{ .name = "__gtxf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
+fn __gexf2(a: f80, b: f80) callconv(.c) i32 {
     return @intFromEnum(comparef.cmp_f80(comparef.GE, a, b));
 }
 
-fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
+fn __gtxf2(a: f80, b: f80) callconv(.c) i32 {
     return __gexf2(a, b);
 }
lib/compiler_rt/int.zig
@@ -34,7 +34,7 @@ comptime {
     @export(&__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __divmodti4(a: i128, b: i128, rem: *i128) callconv(.C) i128 {
+pub fn __divmodti4(a: i128, b: i128, rem: *i128) callconv(.c) i128 {
     const d = __divti3(a, b);
     rem.* = a -% (d * b);
     return d;
@@ -67,7 +67,7 @@ fn test_one_divmodti4(a: i128, b: i128, expected_q: i128, expected_r: i128) !voi
     try testing.expect(q == expected_q and r == expected_r);
 }
 
-pub fn __divmoddi4(a: i64, b: i64, rem: *i64) callconv(.C) i64 {
+pub fn __divmoddi4(a: i64, b: i64, rem: *i64) callconv(.c) i64 {
     const d = __divdi3(a, b);
     rem.* = a -% (d * b);
     return d;
@@ -101,7 +101,7 @@ test "test_divmoddi4" {
     }
 }
 
-pub fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) callconv(.C) u64 {
+pub fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) callconv(.c) u64 {
     return udivmod(u64, a, b, maybe_rem);
 }
 
@@ -109,7 +109,7 @@ test "test_udivmoddi4" {
     _ = @import("udivmoddi4_test.zig");
 }
 
-pub fn __divdi3(a: i64, b: i64) callconv(.C) i64 {
+pub fn __divdi3(a: i64, b: i64) callconv(.c) i64 {
     // Set aside the sign of the quotient.
     const sign: u64 = @bitCast((a ^ b) >> 63);
     // Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63).
@@ -146,7 +146,7 @@ fn test_one_divdi3(a: i64, b: i64, expected_q: i64) !void {
     try testing.expect(q == expected_q);
 }
 
-pub fn __moddi3(a: i64, b: i64) callconv(.C) i64 {
+pub fn __moddi3(a: i64, b: i64) callconv(.c) i64 {
     // Take absolute value of a and b via abs(x) = (x^(x >> 63)) - (x >> 63).
     const abs_a = (a ^ (a >> 63)) -% (a >> 63);
     const abs_b = (b ^ (b >> 63)) -% (b >> 63);
@@ -184,11 +184,11 @@ fn test_one_moddi3(a: i64, b: i64, expected_r: i64) !void {
     try testing.expect(r == expected_r);
 }
 
-pub fn __udivdi3(a: u64, b: u64) callconv(.C) u64 {
+pub fn __udivdi3(a: u64, b: u64) callconv(.c) u64 {
     return __udivmoddi4(a, b, null);
 }
 
-pub fn __umoddi3(a: u64, b: u64) callconv(.C) u64 {
+pub fn __umoddi3(a: u64, b: u64) callconv(.c) u64 {
     var r: u64 = undefined;
     _ = __udivmoddi4(a, b, &r);
     return r;
@@ -207,7 +207,7 @@ fn test_one_umoddi3(a: u64, b: u64, expected_r: u64) !void {
     try testing.expect(r == expected_r);
 }
 
-pub fn __divmodsi4(a: i32, b: i32, rem: *i32) callconv(.C) i32 {
+pub fn __divmodsi4(a: i32, b: i32, rem: *i32) callconv(.c) i32 {
     const d = __divsi3(a, b);
     rem.* = a -% (d * b);
     return d;
@@ -241,17 +241,17 @@ test "test_divmodsi4" {
     }
 }
 
-pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.C) u32 {
+pub fn __udivmodsi4(a: u32, b: u32, rem: *u32) callconv(.c) u32 {
     const d = __udivsi3(a, b);
     rem.* = @bitCast(@as(i32, @bitCast(a)) -% (@as(i32, @bitCast(d)) * @as(i32, @bitCast(b))));
     return d;
 }
 
-pub fn __divsi3(n: i32, d: i32) callconv(.C) i32 {
+pub fn __divsi3(n: i32, d: i32) callconv(.c) i32 {
     return div_i32(n, d);
 }
 
-fn __aeabi_idiv(n: i32, d: i32) callconv(.AAPCS) i32 {
+fn __aeabi_idiv(n: i32, d: i32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return div_i32(n, d);
 }
 
@@ -292,11 +292,11 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) !void {
     try testing.expect(q == expected_q);
 }
 
-pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
+pub fn __udivsi3(n: u32, d: u32) callconv(.c) u32 {
     return div_u32(n, d);
 }
 
-fn __aeabi_uidiv(n: u32, d: u32) callconv(.AAPCS) u32 {
+fn __aeabi_uidiv(n: u32, d: u32) callconv(.{ .arm_aapcs = .{} }) u32 {
     return div_u32(n, d);
 }
 
@@ -485,7 +485,7 @@ fn test_one_udivsi3(a: u32, b: u32, expected_q: u32) !void {
     try testing.expect(q == expected_q);
 }
 
-pub fn __modsi3(n: i32, d: i32) callconv(.C) i32 {
+pub fn __modsi3(n: i32, d: i32) callconv(.c) i32 {
     return n -% __divsi3(n, d) * d;
 }
 
@@ -514,7 +514,7 @@ fn test_one_modsi3(a: i32, b: i32, expected_r: i32) !void {
     try testing.expect(r == expected_r);
 }
 
-pub fn __umodsi3(n: u32, d: u32) callconv(.C) u32 {
+pub fn __umodsi3(n: u32, d: u32) callconv(.c) u32 {
     return n -% __udivsi3(n, d) * d;
 }
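All the `__divmod*` helpers above rely on the identity `rem = a - (a / b) * b` under truncating (round-toward-zero) division, so the remainder takes the dividend's sign. A small sketch using `__divmoddi4`:

    test "truncating division identity" {
        var r: i64 = undefined;
        const q = __divmoddi4(-7, 3, &r);
        // -7 = 3 * (-2) + (-1): the quotient truncates toward zero.
        try @import("std").testing.expect(q == -2 and r == -1);
    }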
 
lib/compiler_rt/log.zig
@@ -25,12 +25,12 @@ comptime {
     @export(&logl, .{ .name = "logl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __logh(a: f16) callconv(.C) f16 {
+pub fn __logh(a: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(logf(a));
 }
 
-pub fn logf(x_: f32) callconv(.C) f32 {
+pub fn logf(x_: f32) callconv(.c) f32 {
     const ln2_hi: f32 = 6.9313812256e-01;
     const ln2_lo: f32 = 9.0580006145e-06;
     const Lg1: f32 = 0xaaaaaa.0p-24;
@@ -82,7 +82,7 @@ pub fn logf(x_: f32) callconv(.C) f32 {
     return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
 }
 
-pub fn log(x_: f64) callconv(.C) f64 {
+pub fn log(x_: f64) callconv(.c) f64 {
     const ln2_hi: f64 = 6.93147180369123816490e-01;
     const ln2_lo: f64 = 1.90821492927058770002e-10;
     const Lg1: f64 = 6.666666666666735130e-01;
@@ -138,17 +138,17 @@ pub fn log(x_: f64) callconv(.C) f64 {
     return s * (hfsq + R) + dk * ln2_lo - hfsq + f + dk * ln2_hi;
 }
 
-pub fn __logx(a: f80) callconv(.C) f80 {
+pub fn __logx(a: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(logq(a));
 }
 
-pub fn logq(a: f128) callconv(.C) f128 {
+pub fn logq(a: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return log(@floatCast(a));
 }
 
-pub fn logl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn logl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __logh(x),
         32 => return logf(x),
lib/compiler_rt/log10.zig
@@ -26,12 +26,12 @@ comptime {
     @export(&log10l, .{ .name = "log10l", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __log10h(a: f16) callconv(.C) f16 {
+pub fn __log10h(a: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(log10f(a));
 }
 
-pub fn log10f(x_: f32) callconv(.C) f32 {
+pub fn log10f(x_: f32) callconv(.c) f32 {
     const ivln10hi: f32 = 4.3432617188e-01;
     const ivln10lo: f32 = -3.1689971365e-05;
     const log10_2hi: f32 = 3.0102920532e-01;
@@ -91,7 +91,7 @@ pub fn log10f(x_: f32) callconv(.C) f32 {
     return dk * log10_2lo + (lo + hi) * ivln10lo + lo * ivln10hi + hi * ivln10hi + dk * log10_2hi;
 }
 
-pub fn log10(x_: f64) callconv(.C) f64 {
+pub fn log10(x_: f64) callconv(.c) f64 {
     const ivln10hi: f64 = 4.34294481878168880939e-01;
     const ivln10lo: f64 = 2.50829467116452752298e-11;
     const log10_2hi: f64 = 3.01029995663611771306e-01;
@@ -166,17 +166,17 @@ pub fn log10(x_: f64) callconv(.C) f64 {
     return val_lo + val_hi;
 }
 
-pub fn __log10x(a: f80) callconv(.C) f80 {
+pub fn __log10x(a: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(log10q(a));
 }
 
-pub fn log10q(a: f128) callconv(.C) f128 {
+pub fn log10q(a: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return log10(@floatCast(a));
 }
 
-pub fn log10l(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn log10l(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __log10h(x),
         32 => return log10f(x),
lib/compiler_rt/log2.zig
@@ -26,12 +26,12 @@ comptime {
     @export(&log2l, .{ .name = "log2l", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __log2h(a: f16) callconv(.C) f16 {
+pub fn __log2h(a: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(log2f(a));
 }
 
-pub fn log2f(x_: f32) callconv(.C) f32 {
+pub fn log2f(x_: f32) callconv(.c) f32 {
     const ivln2hi: f32 = 1.4428710938e+00;
     const ivln2lo: f32 = -1.7605285393e-04;
     const Lg1: f32 = 0xaaaaaa.0p-24;
@@ -87,7 +87,7 @@ pub fn log2f(x_: f32) callconv(.C) f32 {
     return (lo + hi) * ivln2lo + lo * ivln2hi + hi * ivln2hi + @as(f32, @floatFromInt(k));
 }
 
-pub fn log2(x_: f64) callconv(.C) f64 {
+pub fn log2(x_: f64) callconv(.c) f64 {
     const ivln2hi: f64 = 1.44269504072144627571e+00;
     const ivln2lo: f64 = 1.67517131648865118353e-10;
     const Lg1: f64 = 6.666666666666735130e-01;
@@ -158,17 +158,17 @@ pub fn log2(x_: f64) callconv(.C) f64 {
     return val_lo + val_hi;
 }
 
-pub fn __log2x(a: f80) callconv(.C) f80 {
+pub fn __log2x(a: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(log2q(a));
 }
 
-pub fn log2q(a: f128) callconv(.C) f128 {
+pub fn log2q(a: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return log2(@floatCast(a));
 }
 
-pub fn log2l(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn log2l(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __log2h(x),
         32 => return log2f(x),
lib/compiler_rt/memcmp.zig
@@ -5,7 +5,7 @@ comptime {
     @export(&memcmp, .{ .name = "memcmp", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn memcmp(vl: [*]const u8, vr: [*]const u8, n: usize) callconv(.C) c_int {
+pub fn memcmp(vl: [*]const u8, vr: [*]const u8, n: usize) callconv(.c) c_int {
     var i: usize = 0;
     while (i < n) : (i += 1) {
         const compared = @as(c_int, vl[i]) -% @as(c_int, vr[i]);
lib/compiler_rt/memcpy.zig
@@ -24,7 +24,7 @@ comptime {
     assert(std.math.isPowerOfTwo(@sizeOf(Element)));
 }
 
-fn memcpySmall(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.C) ?[*]u8 {
+fn memcpySmall(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.c) ?[*]u8 {
     @setRuntimeSafety(false);
 
     for (0..len) |i| {
@@ -34,7 +34,7 @@ fn memcpySmall(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) call
     return dest;
 }
 
-fn memcpyFast(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.C) ?[*]u8 {
+fn memcpyFast(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.c) ?[*]u8 {
     @setRuntimeSafety(false);
 
     const small_limit = 2 * @sizeOf(Element);
lib/compiler_rt/memmove.zig
@@ -21,7 +21,7 @@ comptime {
     }
 }
 
-fn memmoveSmall(opt_dest: ?[*]u8, opt_src: ?[*]const u8, len: usize) callconv(.C) ?[*]u8 {
+fn memmoveSmall(opt_dest: ?[*]u8, opt_src: ?[*]const u8, len: usize) callconv(.c) ?[*]u8 {
     const dest = opt_dest.?;
     const src = opt_src.?;
 
@@ -38,7 +38,7 @@ fn memmoveSmall(opt_dest: ?[*]u8, opt_src: ?[*]const u8, len: usize) callconv(.C
     return dest;
 }
 
-fn memmoveFast(dest: ?[*]u8, src: ?[*]u8, len: usize) callconv(.C) ?[*]u8 {
+fn memmoveFast(dest: ?[*]u8, src: ?[*]u8, len: usize) callconv(.c) ?[*]u8 {
     @setRuntimeSafety(builtin.is_test);
     const small_limit = @max(2 * @sizeOf(Element), @sizeOf(Element));
 
lib/compiler_rt/memset.zig
@@ -9,7 +9,7 @@ comptime {
     }
 }
 
-pub fn memset(dest: ?[*]u8, c: u8, len: usize) callconv(.C) ?[*]u8 {
+pub fn memset(dest: ?[*]u8, c: u8, len: usize) callconv(.c) ?[*]u8 {
     @setRuntimeSafety(false);
 
     if (len != 0) {
@@ -26,7 +26,7 @@ pub fn memset(dest: ?[*]u8, c: u8, len: usize) callconv(.C) ?[*]u8 {
     return dest;
 }
 
-pub fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
+pub fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.c) ?[*]u8 {
     if (dest_n < n)
         @panic("buffer overflow");
     return memset(dest, c, n);
lib/compiler_rt/modti3.zig
@@ -17,13 +17,13 @@ comptime {
     }
 }
 
-pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
+pub fn __modti3(a: i128, b: i128) callconv(.c) i128 {
     return mod(a, b);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
+fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.c) v2u64 {
     return @bitCast(mod(@as(i128, @bitCast(a)), @as(i128, @bitCast(b))));
 }
 
lib/compiler_rt/mulc3_test.zig
@@ -17,7 +17,7 @@ test "mulc3" {
     try testMul(f128, __multc3);
 }
 
-fn testMul(comptime T: type, comptime f: fn (T, T, T, T) callconv(.C) Complex(T)) !void {
+fn testMul(comptime T: type, comptime f: fn (T, T, T, T) callconv(.c) Complex(T)) !void {
     {
         const a: T = 1.0;
         const b: T = 0.0;
lib/compiler_rt/muldc3.zig
@@ -9,6 +9,6 @@ comptime {
     }
 }
 
-pub fn __muldc3(a: f64, b: f64, c: f64, d: f64) callconv(.C) mulc3.Complex(f64) {
+pub fn __muldc3(a: f64, b: f64, c: f64, d: f64) callconv(.c) mulc3.Complex(f64) {
     return mulc3.mulc3(f64, a, b, c, d);
 }
lib/compiler_rt/muldf3.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
+pub fn __muldf3(a: f64, b: f64) callconv(.c) f64 {
     return mulf3(f64, a, b);
 }
 
-fn __aeabi_dmul(a: f64, b: f64) callconv(.AAPCS) f64 {
+fn __aeabi_dmul(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
     return mulf3(f64, a, b);
 }
lib/compiler_rt/mulhc3.zig
@@ -9,6 +9,6 @@ comptime {
     }
 }
 
-pub fn __mulhc3(a: f16, b: f16, c: f16, d: f16) callconv(.C) mulc3.Complex(f16) {
+pub fn __mulhc3(a: f16, b: f16, c: f16, d: f16) callconv(.c) mulc3.Complex(f16) {
     return mulc3.mulc3(f16, a, b, c, d);
 }
lib/compiler_rt/mulhf3.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__mulhf3, .{ .name = "__mulhf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __mulhf3(a: f16, b: f16) callconv(.C) f16 {
+pub fn __mulhf3(a: f16, b: f16) callconv(.c) f16 {
     return mulf3(f16, a, b);
 }
lib/compiler_rt/mulo.zig
@@ -48,7 +48,7 @@ inline fn muloXi4_genericFast(comptime ST: type, a: ST, b: ST, overflow: *c_int)
     return @as(ST, @truncate(res));
 }
 
-pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.c) i32 {
     if (2 * @bitSizeOf(i32) <= @bitSizeOf(usize)) {
         return muloXi4_genericFast(i32, a, b, overflow);
     } else {
@@ -56,7 +56,7 @@ pub fn __mulosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
     }
 }
 
-pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.c) i64 {
     if (2 * @bitSizeOf(i64) <= @bitSizeOf(usize)) {
         return muloXi4_genericFast(i64, a, b, overflow);
     } else {
@@ -64,7 +64,7 @@ pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
     }
 }
 
-pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.c) i128 {
     if (2 * @bitSizeOf(i128) <= @bitSizeOf(usize)) {
         return muloXi4_genericFast(i128, a, b, overflow);
     } else {
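The `__mulo*` family returns the truncated product and reports overflow out of band through the `overflow` pointer, the shape that overflow-checked multiplication lowers to. A hedged usage sketch with `__mulosi4`:

    test "__mulosi4 reports overflow" {
        const std = @import("std");
        var ov: c_int = 0;
        _ = __mulosi4(std.math.maxInt(i32), 2, &ov);
        try std.testing.expect(ov != 0);
    }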
lib/compiler_rt/mulsc3.zig
@@ -9,6 +9,6 @@ comptime {
     }
 }
 
-pub fn __mulsc3(a: f32, b: f32, c: f32, d: f32) callconv(.C) mulc3.Complex(f32) {
+pub fn __mulsc3(a: f32, b: f32, c: f32, d: f32) callconv(.c) mulc3.Complex(f32) {
     return mulc3.mulc3(f32, a, b, c, d);
 }
lib/compiler_rt/mulsf3.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __mulsf3(a: f32, b: f32) callconv(.C) f32 {
+pub fn __mulsf3(a: f32, b: f32) callconv(.c) f32 {
     return mulf3(f32, a, b);
 }
 
-fn __aeabi_fmul(a: f32, b: f32) callconv(.AAPCS) f32 {
+fn __aeabi_fmul(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
     return mulf3(f32, a, b);
 }
lib/compiler_rt/multc3.zig
@@ -11,6 +11,6 @@ comptime {
     }
 }
 
-pub fn __multc3(a: f128, b: f128, c: f128, d: f128) callconv(.C) mulc3.Complex(f128) {
+pub fn __multc3(a: f128, b: f128, c: f128, d: f128) callconv(.c) mulc3.Complex(f128) {
     return mulc3.mulc3(f128, a, b, c, d);
 }
lib/compiler_rt/multf3.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__multf3, .{ .name = "__multf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
+pub fn __multf3(a: f128, b: f128) callconv(.c) f128 {
     return mulf3(f128, a, b);
 }
 
-fn _Qp_mul(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+fn _Qp_mul(c: *f128, a: *const f128, b: *const f128) callconv(.c) void {
     c.* = mulf3(f128, a.*, b.*);
 }
lib/compiler_rt/mulxc3.zig
@@ -9,6 +9,6 @@ comptime {
     }
 }
 
-pub fn __mulxc3(a: f80, b: f80, c: f80, d: f80) callconv(.C) mulc3.Complex(f80) {
+pub fn __mulxc3(a: f80, b: f80, c: f80, d: f80) callconv(.c) mulc3.Complex(f80) {
     return mulc3.mulc3(f80, a, b, c, d);
 }
lib/compiler_rt/mulxf3.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__mulxf3, .{ .name = "__mulxf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
+pub fn __mulxf3(a: f80, b: f80) callconv(.c) f80 {
     return mulf3(f80, a, b);
 }
lib/compiler_rt/mulXi3.zig
@@ -20,7 +20,7 @@ comptime {
     }
 }
 
-pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
+pub fn __mulsi3(a: i32, b: i32) callconv(.c) i32 {
     var ua: u32 = @bitCast(a);
     var ub: u32 = @bitCast(b);
     var r: u32 = 0;
@@ -34,11 +34,11 @@ pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
     return @bitCast(r);
 }
 
-pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
+pub fn __muldi3(a: i64, b: i64) callconv(.c) i64 {
     return mulX(i64, a, b);
 }
 
-fn __aeabi_lmul(a: i64, b: i64) callconv(.AAPCS) i64 {
+fn __aeabi_lmul(a: i64, b: i64) callconv(.{ .arm_aapcs = .{} }) i64 {
     return mulX(i64, a, b);
 }
 
@@ -86,13 +86,13 @@ fn muldXi(comptime T: type, a: T, b: T) DoubleInt(T) {
     return r.all;
 }
 
-pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
+pub fn __multi3(a: i128, b: i128) callconv(.c) i128 {
     return mulX(i128, a, b);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
+fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.c) v2u64 {
     return @bitCast(mulX(i128, @as(i128, @bitCast(a)), @as(i128, @bitCast(b))));
 }
 
lib/compiler_rt/negdf2.zig
@@ -10,10 +10,10 @@ comptime {
     }
 }
 
-fn __negdf2(a: f64) callconv(.C) f64 {
+fn __negdf2(a: f64) callconv(.c) f64 {
     return common.fneg(a);
 }
 
-fn __aeabi_dneg(a: f64) callconv(.AAPCS) f64 {
+fn __aeabi_dneg(a: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
     return common.fneg(a);
 }
lib/compiler_rt/neghf2.zig
@@ -6,6 +6,6 @@ comptime {
     @export(&__neghf2, .{ .name = "__neghf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __neghf2(a: f16) callconv(.C) f16 {
+fn __neghf2(a: f16) callconv(.c) f16 {
     return common.fneg(a);
 }
lib/compiler_rt/negsf2.zig
@@ -10,10 +10,10 @@ comptime {
     }
 }
 
-fn __negsf2(a: f32) callconv(.C) f32 {
+fn __negsf2(a: f32) callconv(.c) f32 {
     return common.fneg(a);
 }
 
-fn __aeabi_fneg(a: f32) callconv(.AAPCS) f32 {
+fn __aeabi_fneg(a: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
     return common.fneg(a);
 }
lib/compiler_rt/negtf2.zig
@@ -8,6 +8,6 @@ comptime {
     @export(&__negtf2, .{ .name = "__negtf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __negtf2(a: f128) callconv(.C) f128 {
+fn __negtf2(a: f128) callconv(.c) f128 {
     return common.fneg(a);
 }
lib/compiler_rt/negv.zig
@@ -13,15 +13,15 @@ comptime {
     @export(&__negvti2, .{ .name = "__negvti2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __negvsi2(a: i32) callconv(.C) i32 {
+pub fn __negvsi2(a: i32) callconv(.c) i32 {
     return negvXi(i32, a);
 }
 
-pub fn __negvdi2(a: i64) callconv(.C) i64 {
+pub fn __negvdi2(a: i64) callconv(.c) i64 {
     return negvXi(i64, a);
 }
 
-pub fn __negvti2(a: i128) callconv(.C) i128 {
+pub fn __negvti2(a: i128) callconv(.c) i128 {
     return negvXi(i128, a);
 }
 
lib/compiler_rt/negxf2.zig
@@ -6,6 +6,6 @@ comptime {
     @export(&__negxf2, .{ .name = "__negxf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __negxf2(a: f80) callconv(.C) f80 {
+fn __negxf2(a: f80) callconv(.c) f80 {
     return common.fneg(a);
 }
lib/compiler_rt/negXi2.zig
@@ -18,15 +18,15 @@ comptime {
     @export(&__negti2, .{ .name = "__negti2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __negsi2(a: i32) callconv(.C) i32 {
+pub fn __negsi2(a: i32) callconv(.c) i32 {
     return negXi2(i32, a);
 }
 
-pub fn __negdi2(a: i64) callconv(.C) i64 {
+pub fn __negdi2(a: i64) callconv(.c) i64 {
     return negXi2(i64, a);
 }
 
-pub fn __negti2(a: i128) callconv(.C) i128 {
+pub fn __negti2(a: i128) callconv(.c) i128 {
     return negXi2(i128, a);
 }
 
lib/compiler_rt/os_version_check.zig
@@ -34,7 +34,7 @@ const __isPlatformVersionAtLeast = if (have_availability_version_check) struct {
     }
 
     // Darwin-only
-    fn __isPlatformVersionAtLeast(platform: u32, major: u32, minor: u32, subminor: u32) callconv(.C) i32 {
+    fn __isPlatformVersionAtLeast(platform: u32, major: u32, minor: u32, subminor: u32) callconv(.c) i32 {
         const build_version = dyld_build_version_t{
             .platform = platform,
             .version = constructVersion(major, minor, subminor),
lib/compiler_rt/parity.zig
@@ -13,15 +13,15 @@ comptime {
     @export(&__parityti2, .{ .name = "__parityti2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __paritysi2(a: i32) callconv(.C) i32 {
+pub fn __paritysi2(a: i32) callconv(.c) i32 {
     return parityXi2(i32, a);
 }
 
-pub fn __paritydi2(a: i64) callconv(.C) i32 {
+pub fn __paritydi2(a: i64) callconv(.c) i32 {
     return parityXi2(i64, a);
 }
 
-pub fn __parityti2(a: i128) callconv(.C) i32 {
+pub fn __parityti2(a: i128) callconv(.c) i32 {
     return parityXi2(i128, a);
 }
 
lib/compiler_rt/popcount.zig
@@ -18,15 +18,15 @@ comptime {
     @export(&__popcountti2, .{ .name = "__popcountti2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __popcountsi2(a: i32) callconv(.C) i32 {
+pub fn __popcountsi2(a: i32) callconv(.c) i32 {
     return popcountXi2(i32, a);
 }
 
-pub fn __popcountdi2(a: i64) callconv(.C) i32 {
+pub fn __popcountdi2(a: i64) callconv(.c) i32 {
     return popcountXi2(i64, a);
 }
 
-pub fn __popcountti2(a: i128) callconv(.C) i32 {
+pub fn __popcountti2(a: i128) callconv(.c) i32 {
     return popcountXi2(i128, a);
 }
 
lib/compiler_rt/powiXf2.zig
@@ -35,23 +35,23 @@ inline fn powiXf2(comptime FT: type, a: FT, b: i32) FT {
     return if (is_recip) 1 / r else r;
 }
 
-pub fn __powihf2(a: f16, b: i32) callconv(.C) f16 {
+pub fn __powihf2(a: f16, b: i32) callconv(.c) f16 {
     return powiXf2(f16, a, b);
 }
 
-pub fn __powisf2(a: f32, b: i32) callconv(.C) f32 {
+pub fn __powisf2(a: f32, b: i32) callconv(.c) f32 {
     return powiXf2(f32, a, b);
 }
 
-pub fn __powidf2(a: f64, b: i32) callconv(.C) f64 {
+pub fn __powidf2(a: f64, b: i32) callconv(.c) f64 {
     return powiXf2(f64, a, b);
 }
 
-pub fn __powitf2(a: f128, b: i32) callconv(.C) f128 {
+pub fn __powitf2(a: f128, b: i32) callconv(.c) f128 {
     return powiXf2(f128, a, b);
 }
 
-pub fn __powixf2(a: f80, b: i32) callconv(.C) f80 {
+pub fn __powixf2(a: f80, b: i32) callconv(.c) f80 {
     return powiXf2(f80, a, b);
 }
 
lib/compiler_rt/round.zig
@@ -26,12 +26,12 @@ comptime {
     @export(&roundl, .{ .name = "roundl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __roundh(x: f16) callconv(.C) f16 {
+pub fn __roundh(x: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(roundf(x));
 }
 
-pub fn roundf(x_: f32) callconv(.C) f32 {
+pub fn roundf(x_: f32) callconv(.c) f32 {
     const f32_toint = 1.0 / math.floatEps(f32);
 
     var x = x_;
@@ -66,7 +66,7 @@ pub fn roundf(x_: f32) callconv(.C) f32 {
     }
 }
 
-pub fn round(x_: f64) callconv(.C) f64 {
+pub fn round(x_: f64) callconv(.c) f64 {
     const f64_toint = 1.0 / math.floatEps(f64);
 
     var x = x_;
@@ -101,12 +101,12 @@ pub fn round(x_: f64) callconv(.C) f64 {
     }
 }
 
-pub fn __roundx(x: f80) callconv(.C) f80 {
+pub fn __roundx(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(roundq(x));
 }
 
-pub fn roundq(x_: f128) callconv(.C) f128 {
+pub fn roundq(x_: f128) callconv(.c) f128 {
     const f128_toint = 1.0 / math.floatEps(f128);
 
     var x = x_;
@@ -141,7 +141,7 @@ pub fn roundq(x_: f128) callconv(.C) f128 {
     }
 }
 
-pub fn roundl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn roundl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __roundh(x),
         32 => return roundf(x),
lib/compiler_rt/shift.zig
@@ -93,48 +93,48 @@ inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
     return output.all;
 }
 
-pub fn __ashlsi3(a: i32, b: i32) callconv(.C) i32 {
+pub fn __ashlsi3(a: i32, b: i32) callconv(.c) i32 {
     return ashlXi3(i32, a, b);
 }
 
-pub fn __ashrsi3(a: i32, b: i32) callconv(.C) i32 {
+pub fn __ashrsi3(a: i32, b: i32) callconv(.c) i32 {
     return ashrXi3(i32, a, b);
 }
 
-pub fn __lshrsi3(a: i32, b: i32) callconv(.C) i32 {
+pub fn __lshrsi3(a: i32, b: i32) callconv(.c) i32 {
     return lshrXi3(i32, a, b);
 }
 
-pub fn __ashldi3(a: i64, b: i32) callconv(.C) i64 {
+pub fn __ashldi3(a: i64, b: i32) callconv(.c) i64 {
     return ashlXi3(i64, a, b);
 }
-fn __aeabi_llsl(a: i64, b: i32) callconv(.AAPCS) i64 {
+fn __aeabi_llsl(a: i64, b: i32) callconv(.{ .arm_aapcs = .{} }) i64 {
     return ashlXi3(i64, a, b);
 }
 
-pub fn __ashlti3(a: i128, b: i32) callconv(.C) i128 {
+pub fn __ashlti3(a: i128, b: i32) callconv(.c) i128 {
     return ashlXi3(i128, a, b);
 }
 
-pub fn __ashrdi3(a: i64, b: i32) callconv(.C) i64 {
+pub fn __ashrdi3(a: i64, b: i32) callconv(.c) i64 {
     return ashrXi3(i64, a, b);
 }
-fn __aeabi_lasr(a: i64, b: i32) callconv(.AAPCS) i64 {
+fn __aeabi_lasr(a: i64, b: i32) callconv(.{ .arm_aapcs = .{} }) i64 {
     return ashrXi3(i64, a, b);
 }
 
-pub fn __ashrti3(a: i128, b: i32) callconv(.C) i128 {
+pub fn __ashrti3(a: i128, b: i32) callconv(.c) i128 {
     return ashrXi3(i128, a, b);
 }
 
-pub fn __lshrdi3(a: i64, b: i32) callconv(.C) i64 {
+pub fn __lshrdi3(a: i64, b: i32) callconv(.c) i64 {
     return lshrXi3(i64, a, b);
 }
-fn __aeabi_llsr(a: i64, b: i32) callconv(.AAPCS) i64 {
+fn __aeabi_llsr(a: i64, b: i32) callconv(.{ .arm_aapcs = .{} }) i64 {
     return lshrXi3(i64, a, b);
 }
 
-pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
+pub fn __lshrti3(a: i128, b: i32) callconv(.c) i128 {
     return lshrXi3(i128, a, b);
 }
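The `ashr`/`lshr` pairs differ only in what fills the vacated high bits: arithmetic shifts replicate the sign bit, logical shifts insert zeros. A minimal sketch with the 32-bit variants:

    test "arithmetic vs logical right shift" {
        const t = @import("std").testing;
        try t.expectEqual(@as(i32, -1), __ashrsi3(-1, 1)); // sign-fill
        try t.expectEqual(@as(i32, 0x7fffffff), __lshrsi3(-1, 1)); // zero-fill
    }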
 
lib/compiler_rt/sin.zig
@@ -30,12 +30,12 @@ comptime {
     @export(&sinl, .{ .name = "sinl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __sinh(x: f16) callconv(.C) f16 {
+pub fn __sinh(x: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(sinf(x));
 }
 
-pub fn sinf(x: f32) callconv(.C) f32 {
+pub fn sinf(x: f32) callconv(.c) f32 {
     // Small multiples of pi/2 rounded to double precision.
     const s1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
     const s2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
@@ -90,7 +90,7 @@ pub fn sinf(x: f32) callconv(.C) f32 {
     };
 }
 
-pub fn sin(x: f64) callconv(.C) f64 {
+pub fn sin(x: f64) callconv(.c) f64 {
     var ix = @as(u64, @bitCast(x)) >> 32;
     ix &= 0x7fffffff;
 
@@ -119,17 +119,17 @@ pub fn sin(x: f64) callconv(.C) f64 {
     };
 }
 
-pub fn __sinx(x: f80) callconv(.C) f80 {
+pub fn __sinx(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(sinq(x));
 }
 
-pub fn sinq(x: f128) callconv(.C) f128 {
+pub fn sinq(x: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return sin(@floatCast(x));
 }
 
-pub fn sinl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn sinl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __sinh(x),
         32 => return sinf(x),
lib/compiler_rt/sincos.zig
@@ -22,7 +22,7 @@ comptime {
     @export(&sincosl, .{ .name = "sincosl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void {
+pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.c) void {
     // TODO: more efficient implementation
     var big_sin: f32 = undefined;
     var big_cos: f32 = undefined;
@@ -31,7 +31,7 @@ pub fn __sincosh(x: f16, r_sin: *f16, r_cos: *f16) callconv(.C) void {
     r_cos.* = @as(f16, @floatCast(big_cos));
 }
 
-pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void {
+pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.c) void {
     const sc1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
     const sc2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
     const sc3pio2: f64 = 3.0 * math.pi / 2.0; // 0x4012D97C, 0x7F3321D2
@@ -126,7 +126,7 @@ pub fn sincosf(x: f32, r_sin: *f32, r_cos: *f32) callconv(.C) void {
     }
 }
 
-pub fn sincos(x: f64, r_sin: *f64, r_cos: *f64) callconv(.C) void {
+pub fn sincos(x: f64, r_sin: *f64, r_cos: *f64) callconv(.c) void {
     const ix = @as(u32, @truncate(@as(u64, @bitCast(x)) >> 32)) & 0x7fffffff;
 
     // |x| ~< pi/4
@@ -177,7 +177,7 @@ pub fn sincos(x: f64, r_sin: *f64, r_cos: *f64) callconv(.C) void {
     }
 }
 
-pub fn __sincosx(x: f80, r_sin: *f80, r_cos: *f80) callconv(.C) void {
+pub fn __sincosx(x: f80, r_sin: *f80, r_cos: *f80) callconv(.c) void {
     // TODO: more efficient implementation
     //return sincos_generic(f80, x, r_sin, r_cos);
     var big_sin: f128 = undefined;
@@ -187,7 +187,7 @@ pub fn __sincosx(x: f80, r_sin: *f80, r_cos: *f80) callconv(.C) void {
     r_cos.* = @as(f80, @floatCast(big_cos));
 }
 
-pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void {
+pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.c) void {
     // TODO: more correct implementation
     //return sincos_generic(f128, x, r_sin, r_cos);
     var small_sin: f64 = undefined;
@@ -197,7 +197,7 @@ pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void {
     r_cos.* = small_cos;
 }
 
-pub fn sincosl(x: c_longdouble, r_sin: *c_longdouble, r_cos: *c_longdouble) callconv(.C) void {
+pub fn sincosl(x: c_longdouble, r_sin: *c_longdouble, r_cos: *c_longdouble) callconv(.c) void {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __sincosh(x, r_sin, r_cos),
         32 => return sincosf(x, r_sin, r_cos),
lib/compiler_rt/sqrt.zig
@@ -18,12 +18,12 @@ comptime {
     @export(&sqrtl, .{ .name = "sqrtl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __sqrth(x: f16) callconv(.C) f16 {
+pub fn __sqrth(x: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(sqrtf(x));
 }
 
-pub fn sqrtf(x: f32) callconv(.C) f32 {
+pub fn sqrtf(x: f32) callconv(.c) f32 {
     const tiny: f32 = 1.0e-30;
     const sign: i32 = @bitCast(@as(u32, 0x80000000));
     var ix: i32 = @bitCast(x);
@@ -102,7 +102,7 @@ pub fn sqrtf(x: f32) callconv(.C) f32 {
 /// NOTE: The original code is full of implicit signed -> unsigned assumptions and u32 wraparound
 /// behaviour. Most intermediate i32 values are changed to u32 where appropriate but there are
 /// potentially some edge cases remaining that are not handled in the same way.
-pub fn sqrt(x: f64) callconv(.C) f64 {
+pub fn sqrt(x: f64) callconv(.c) f64 {
     const tiny: f64 = 1.0e-300;
     const sign: u32 = 0x80000000;
     const u: u64 = @bitCast(x);
@@ -232,17 +232,17 @@ pub fn sqrt(x: f64) callconv(.C) f64 {
     return @bitCast(uz);
 }
 
-pub fn __sqrtx(x: f80) callconv(.C) f80 {
+pub fn __sqrtx(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(sqrtq(x));
 }
 
-pub fn sqrtq(x: f128) callconv(.C) f128 {
+pub fn sqrtq(x: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return sqrt(@floatCast(x));
 }
 
-pub fn sqrtl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn sqrtl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __sqrth(x),
         32 => return sqrtf(x),
lib/compiler_rt/ssp.zig
@@ -16,9 +16,9 @@ const std = @import("std");
 const common = @import("./common.zig");
 const builtin = @import("builtin");
 
-extern fn memset(dest: ?[*]u8, c: u8, n: usize) callconv(.C) ?[*]u8;
-extern fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8;
-extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8;
+extern fn memset(dest: ?[*]u8, c: u8, n: usize) callconv(.c) ?[*]u8;
+extern fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) callconv(.c) ?[*]u8;
+extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.c) ?[*]u8;
 
 comptime {
     @export(&__stack_chk_fail, .{ .name = "__stack_chk_fail", .linkage = common.linkage, .visibility = common.visibility });
@@ -33,11 +33,11 @@ comptime {
     @export(&__memset_chk, .{ .name = "__memset_chk", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __stack_chk_fail() callconv(.C) noreturn {
+fn __stack_chk_fail() callconv(.c) noreturn {
     @panic("stack smashing detected");
 }
 
-fn __chk_fail() callconv(.C) noreturn {
+fn __chk_fail() callconv(.c) noreturn {
     @panic("buffer overflow detected");
 }
 
@@ -49,7 +49,7 @@ var __stack_chk_guard: usize = blk: {
     break :blk @as(usize, @bitCast(buf));
 };
 
-fn __strcpy_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [*:0]u8 {
+fn __strcpy_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.c) [*:0]u8 {
     @setRuntimeSafety(false);
 
     var i: usize = 0;
@@ -64,7 +64,7 @@ fn __strcpy_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [
     return dest;
 }
 
-fn __strncpy_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) callconv(.C) [*:0]u8 {
+fn __strncpy_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) callconv(.c) [*:0]u8 {
     @setRuntimeSafety(false);
     if (dest_n < n) __chk_fail();
     var i: usize = 0;
@@ -77,7 +77,7 @@ fn __strncpy_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) cal
     return dest;
 }
 
-fn __strcat_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [*:0]u8 {
+fn __strcat_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.c) [*:0]u8 {
     @setRuntimeSafety(false);
 
     var avail = dest_n;
@@ -102,7 +102,7 @@ fn __strcat_chk(dest: [*:0]u8, src: [*:0]const u8, dest_n: usize) callconv(.C) [
     return dest;
 }
 
-fn __strncat_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) callconv(.C) [*:0]u8 {
+fn __strncat_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) callconv(.c) [*:0]u8 {
     @setRuntimeSafety(false);
 
     var avail = dest_n;
@@ -127,17 +127,17 @@ fn __strncat_chk(dest: [*:0]u8, src: [*:0]const u8, n: usize, dest_n: usize) cal
     return dest;
 }
 
-fn __memcpy_chk(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
+fn __memcpy_chk(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize, dest_n: usize) callconv(.c) ?[*]u8 {
     if (dest_n < n) __chk_fail();
     return memcpy(dest, src, n);
 }
 
-fn __memmove_chk(dest: ?[*]u8, src: ?[*]const u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
+fn __memmove_chk(dest: ?[*]u8, src: ?[*]const u8, n: usize, dest_n: usize) callconv(.c) ?[*]u8 {
     if (dest_n < n) __chk_fail();
     return memmove(dest, src, n);
 }
 
-fn __memset_chk(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
+fn __memset_chk(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.c) ?[*]u8 {
     if (dest_n < n) __chk_fail();
     return memset(dest, c, n);
 }
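These `_chk` wrappers implement the `_FORTIFY_SOURCE` contract: the compiler passes the destination's known size as `dest_n`, and an oversized write aborts through `__chk_fail` instead of corrupting memory. A hedged sketch of the in-bounds case with `__memcpy_chk` (private here, so illustrative only):

    test "__memcpy_chk within bounds" {
        var buf: [8]u8 = undefined;
        _ = __memcpy_chk(&buf, "hi", 2, buf.len);
        try @import("std").testing.expectEqualSlices(u8, "hi", buf[0..2]);
    }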
lib/compiler_rt/stack_probe.zig
@@ -37,7 +37,7 @@ comptime {
 }
 
 // Zig's own stack-probe routine (available only on x86 and x86_64)
-pub fn zig_probe_stack() callconv(.Naked) void {
+pub fn zig_probe_stack() callconv(.naked) void {
     @setRuntimeSafety(false);
 
     // Versions of the Linux kernel before 5.1 treat any access below SP as
@@ -245,11 +245,11 @@ fn win_probe_stack_adjust_sp() void {
 // ___chkstk (__alloca) | yes    | yes    |
 // ___chkstk_ms         | no     | no     |
 
-pub fn _chkstk() callconv(.Naked) void {
+pub fn _chkstk() callconv(.naked) void {
     @setRuntimeSafety(false);
     @call(.always_inline, win_probe_stack_adjust_sp, .{});
 }
-pub fn __chkstk() callconv(.Naked) void {
+pub fn __chkstk() callconv(.naked) void {
     @setRuntimeSafety(false);
     if (arch == .thumb or arch == .aarch64) {
         @call(.always_inline, win_probe_stack_only, .{});
@@ -259,15 +259,15 @@ pub fn __chkstk() callconv(.Naked) void {
         else => unreachable,
     }
 }
-pub fn ___chkstk() callconv(.Naked) void {
+pub fn ___chkstk() callconv(.naked) void {
     @setRuntimeSafety(false);
     @call(.always_inline, win_probe_stack_adjust_sp, .{});
 }
-pub fn __chkstk_ms() callconv(.Naked) void {
+pub fn __chkstk_ms() callconv(.naked) void {
     @setRuntimeSafety(false);
     @call(.always_inline, win_probe_stack_only, .{});
 }
-pub fn ___chkstk_ms() callconv(.Naked) void {
+pub fn ___chkstk_ms() callconv(.naked) void {
     @setRuntimeSafety(false);
     @call(.always_inline, win_probe_stack_only, .{});
 }
lib/compiler_rt/subdf3.zig
@@ -11,11 +11,11 @@ comptime {
     }
 }
 
-fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
+fn __subdf3(a: f64, b: f64) callconv(.c) f64 {
     return sub(a, b);
 }
 
-fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
+fn __aeabi_dsub(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) f64 {
     return sub(a, b);
 }
 
lib/compiler_rt/subhf3.zig
@@ -7,7 +7,7 @@ comptime {
     @export(&__subhf3, .{ .name = "__subhf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __subhf3(a: f16, b: f16) callconv(.C) f16 {
+fn __subhf3(a: f16, b: f16) callconv(.c) f16 {
     const neg_b = @as(f16, @bitCast(@as(u16, @bitCast(b)) ^ (@as(u16, 1) << 15)));
     return addf3(f16, a, neg_b);
 }
lib/compiler_rt/subo.zig
@@ -15,13 +15,13 @@ comptime {
     @export(&__suboti4, .{ .name = "__suboti4", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.C) i32 {
+pub fn __subosi4(a: i32, b: i32, overflow: *c_int) callconv(.c) i32 {
     return suboXi4_generic(i32, a, b, overflow);
 }
-pub fn __subodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
+pub fn __subodi4(a: i64, b: i64, overflow: *c_int) callconv(.c) i64 {
     return suboXi4_generic(i64, a, b, overflow);
 }
-pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
+pub fn __suboti4(a: i128, b: i128, overflow: *c_int) callconv(.c) i128 {
     return suboXi4_generic(i128, a, b, overflow);
 }
 
lib/compiler_rt/subsf3.zig
@@ -11,11 +11,11 @@ comptime {
     }
 }
 
-fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
+fn __subsf3(a: f32, b: f32) callconv(.c) f32 {
     return sub(a, b);
 }
 
-fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
+fn __aeabi_fsub(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) f32 {
     return sub(a, b);
 }
 
lib/compiler_rt/subtf3.zig
@@ -12,11 +12,11 @@ comptime {
     @export(&__subtf3, .{ .name = "__subtf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
+pub fn __subtf3(a: f128, b: f128) callconv(.c) f128 {
     return sub(a, b);
 }
 
-fn _Qp_sub(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
+fn _Qp_sub(c: *f128, a: *const f128, b: *const f128) callconv(.c) void {
     c.* = sub(a.*, b.*);
 }
 
lib/compiler_rt/subxf3.zig
@@ -7,7 +7,7 @@ comptime {
     @export(&__subxf3, .{ .name = "__subxf3", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
+fn __subxf3(a: f80, b: f80) callconv(.c) f80 {
     var b_rep = std.math.F80.fromFloat(b);
     b_rep.exp ^= 0x8000;
     const neg_b = b_rep.toFloat();
lib/compiler_rt/tan.zig
@@ -32,12 +32,12 @@ comptime {
     @export(&tanl, .{ .name = "tanl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __tanh(x: f16) callconv(.C) f16 {
+pub fn __tanh(x: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(tanf(x));
 }
 
-pub fn tanf(x: f32) callconv(.C) f32 {
+pub fn tanf(x: f32) callconv(.c) f32 {
     // Small multiples of pi/2 rounded to double precision.
     const t1pio2: f64 = 1.0 * math.pi / 2.0; // 0x3FF921FB, 0x54442D18
     const t2pio2: f64 = 2.0 * math.pi / 2.0; // 0x400921FB, 0x54442D18
@@ -81,7 +81,7 @@ pub fn tanf(x: f32) callconv(.C) f32 {
     return kernel.__tandf(y, n & 1 != 0);
 }
 
-pub fn tan(x: f64) callconv(.C) f64 {
+pub fn tan(x: f64) callconv(.c) f64 {
     var ix = @as(u64, @bitCast(x)) >> 32;
     ix &= 0x7fffffff;
 
@@ -105,17 +105,17 @@ pub fn tan(x: f64) callconv(.C) f64 {
     return kernel.__tan(y[0], y[1], n & 1 != 0);
 }
 
-pub fn __tanx(x: f80) callconv(.C) f80 {
+pub fn __tanx(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(tanq(x));
 }
 
-pub fn tanq(x: f128) callconv(.C) f128 {
+pub fn tanq(x: f128) callconv(.c) f128 {
     // TODO: more correct implementation
     return tan(@floatCast(x));
 }
 
-pub fn tanl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn tanl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __tanh(x),
         32 => return tanf(x),
lib/compiler_rt/trunc.zig
@@ -26,12 +26,12 @@ comptime {
     @export(&truncl, .{ .name = "truncl", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __trunch(x: f16) callconv(.C) f16 {
+pub fn __trunch(x: f16) callconv(.c) f16 {
     // TODO: more efficient implementation
     return @floatCast(truncf(x));
 }
 
-pub fn truncf(x: f32) callconv(.C) f32 {
+pub fn truncf(x: f32) callconv(.c) f32 {
     const u: u32 = @bitCast(x);
     var e = @as(i32, @intCast(((u >> 23) & 0xFF))) - 0x7F + 9;
     var m: u32 = undefined;
@@ -52,7 +52,7 @@ pub fn truncf(x: f32) callconv(.C) f32 {
     }
 }
 
-pub fn trunc(x: f64) callconv(.C) f64 {
+pub fn trunc(x: f64) callconv(.c) f64 {
     const u: u64 = @bitCast(x);
     var e = @as(i32, @intCast(((u >> 52) & 0x7FF))) - 0x3FF + 12;
     var m: u64 = undefined;
@@ -73,12 +73,12 @@ pub fn trunc(x: f64) callconv(.C) f64 {
     }
 }
 
-pub fn __truncx(x: f80) callconv(.C) f80 {
+pub fn __truncx(x: f80) callconv(.c) f80 {
     // TODO: more efficient implementation
     return @floatCast(truncq(x));
 }
 
-pub fn truncq(x: f128) callconv(.C) f128 {
+pub fn truncq(x: f128) callconv(.c) f128 {
     const u: u128 = @bitCast(x);
     var e = @as(i32, @intCast(((u >> 112) & 0x7FFF))) - 0x3FFF + 16;
     var m: u128 = undefined;
@@ -99,7 +99,7 @@ pub fn truncq(x: f128) callconv(.C) f128 {
     }
 }
 
-pub fn truncl(x: c_longdouble) callconv(.C) c_longdouble {
+pub fn truncl(x: c_longdouble) callconv(.c) c_longdouble {
     switch (@typeInfo(c_longdouble).float.bits) {
         16 => return __trunch(x),
         32 => return truncf(x),
lib/compiler_rt/truncdfhf2.zig
@@ -10,10 +10,10 @@ comptime {
     @export(&__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __truncdfhf2(a: f64) callconv(.C) common.F16T(f64) {
+pub fn __truncdfhf2(a: f64) callconv(.c) common.F16T(f64) {
     return @bitCast(truncf(f16, f64, a));
 }
 
-fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 {
+fn __aeabi_d2h(a: f64) callconv(.{ .arm_aapcs = .{} }) u16 {
     return @bitCast(truncf(f16, f64, a));
 }
lib/compiler_rt/truncdfsf2.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __truncdfsf2(a: f64) callconv(.C) f32 {
+pub fn __truncdfsf2(a: f64) callconv(.c) f32 {
     return truncf(f32, f64, a);
 }
 
-fn __aeabi_d2f(a: f64) callconv(.AAPCS) f32 {
+fn __aeabi_d2f(a: f64) callconv(.{ .arm_aapcs = .{} }) f32 {
     return truncf(f32, f64, a);
 }
lib/compiler_rt/truncsfhf2.zig
@@ -12,14 +12,14 @@ comptime {
     @export(&__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __truncsfhf2(a: f32) callconv(.C) common.F16T(f32) {
+pub fn __truncsfhf2(a: f32) callconv(.c) common.F16T(f32) {
     return @bitCast(truncf(f16, f32, a));
 }
 
-fn __gnu_f2h_ieee(a: f32) callconv(.C) common.F16T(f32) {
+fn __gnu_f2h_ieee(a: f32) callconv(.c) common.F16T(f32) {
     return @bitCast(truncf(f16, f32, a));
 }
 
-fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
+fn __aeabi_f2h(a: f32) callconv(.{ .arm_aapcs = .{} }) u16 {
     return @bitCast(truncf(f16, f32, a));
 }
lib/compiler_rt/trunctfdf2.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __trunctfdf2(a: f128) callconv(.C) f64 {
+pub fn __trunctfdf2(a: f128) callconv(.c) f64 {
     return truncf(f64, f128, a);
 }
 
-fn _Qp_qtod(a: *const f128) callconv(.C) f64 {
+fn _Qp_qtod(a: *const f128) callconv(.c) f64 {
     return truncf(f64, f128, a.*);
 }
lib/compiler_rt/trunctfhf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __trunctfhf2(a: f128) callconv(.C) common.F16T(f128) {
+pub fn __trunctfhf2(a: f128) callconv(.c) common.F16T(f128) {
     return @bitCast(truncf(f16, f128, a));
 }
lib/compiler_rt/trunctfsf2.zig
@@ -12,10 +12,10 @@ comptime {
     @export(&__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __trunctfsf2(a: f128) callconv(.C) f32 {
+pub fn __trunctfsf2(a: f128) callconv(.c) f32 {
     return truncf(f32, f128, a);
 }
 
-fn _Qp_qtos(a: *const f128) callconv(.C) f32 {
+fn _Qp_qtos(a: *const f128) callconv(.c) f32 {
     return truncf(f32, f128, a.*);
 }
lib/compiler_rt/trunctfxf2.zig
@@ -8,7 +8,7 @@ comptime {
     @export(&__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
+pub fn __trunctfxf2(a: f128) callconv(.c) f80 {
     const src_sig_bits = math.floatMantissaBits(f128);
     const dst_sig_bits = math.floatMantissaBits(f80) - 1; // -1 for the integer bit
 
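The `- 1` accounts for f80's explicit integer bit: of its 64 stored mantissa bits, only 63 are fractional. A comptime check via std.math's float introspection helpers:

    const std = @import("std");

    comptime {
        std.debug.assert(std.math.floatMantissaBits(f80) == 64);
        std.debug.assert(std.math.floatFractionalBits(f80) == 63);
    }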
lib/compiler_rt/truncxfdf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __truncxfdf2(a: f80) callconv(.C) f64 {
+fn __truncxfdf2(a: f80) callconv(.c) f64 {
     return trunc_f80(f64, a);
 }
lib/compiler_rt/truncxfhf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __truncxfhf2(a: f80) callconv(.C) common.F16T(f80) {
+fn __truncxfhf2(a: f80) callconv(.c) common.F16T(f80) {
     return @bitCast(trunc_f80(f16, a));
 }
lib/compiler_rt/truncxfsf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__truncxfsf2, .{ .name = "__truncxfsf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __truncxfsf2(a: f80) callconv(.C) f32 {
+fn __truncxfsf2(a: f80) callconv(.c) f32 {
     return trunc_f80(f32, a);
 }
lib/compiler_rt/udivmodei4.zig
@@ -112,7 +112,7 @@ pub fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
     }
 }
 
-pub fn __udivei4(r_q: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize) callconv(.C) void {
+pub fn __udivei4(r_q: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize) callconv(.c) void {
     @setRuntimeSafety(builtin.is_test);
     const u = u_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
     const v = v_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
@@ -120,7 +120,7 @@ pub fn __udivei4(r_q: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize)
     @call(.always_inline, divmod, .{ q, null, u, v }) catch unreachable;
 }
 
-pub fn __umodei4(r_p: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize) callconv(.C) void {
+pub fn __umodei4(r_p: [*]u32, u_p: [*]const u32, v_p: [*]const u32, bits: usize) callconv(.c) void {
     @setRuntimeSafety(builtin.is_test);
     const u = u_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
     const v = v_p[0 .. std.math.divCeil(usize, bits, 32) catch unreachable];
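`__udivei4` and `__umodei4` treat their operands as arrays of u32 limbs, so the limb count for a given bit width is `divCeil(bits, 32)`. A minimal sketch of that arithmetic (illustrative test):

    const std = @import("std");

    test "limb count of a 128-bit operand" {
        const limbs = std.math.divCeil(usize, 128, 32) catch unreachable;
        try std.testing.expectEqual(@as(usize, 4), limbs);
    }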
lib/compiler_rt/udivmodti4.zig
@@ -13,13 +13,13 @@ comptime {
     }
 }
 
-pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.C) u128 {
+pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.c) u128 {
     return udivmod(u128, a, b, maybe_rem);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __udivmodti4_windows_x86_64(a: v2u64, b: v2u64, maybe_rem: ?*u128) callconv(.C) v2u64 {
+fn __udivmodti4_windows_x86_64(a: v2u64, b: v2u64, maybe_rem: ?*u128) callconv(.c) v2u64 {
     return @bitCast(udivmod(u128, @bitCast(a), @bitCast(b), maybe_rem));
 }
 
lib/compiler_rt/udivti3.zig
@@ -13,12 +13,12 @@ comptime {
     }
 }
 
-pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 {
+pub fn __udivti3(a: u128, b: u128) callconv(.c) u128 {
     return udivmod(u128, a, b, null);
 }
 
 const v2u64 = @Vector(2, u64);
 
-fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
+fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.c) v2u64 {
     return @bitCast(udivmod(u128, @bitCast(a), @bitCast(b), null));
 }
lib/compiler_rt/umodti3.zig
@@ -13,7 +13,7 @@ comptime {
     }
 }
 
-pub fn __umodti3(a: u128, b: u128) callconv(.C) u128 {
+pub fn __umodti3(a: u128, b: u128) callconv(.c) u128 {
     var r: u128 = undefined;
     _ = udivmod(u128, a, b, &r);
     return r;
@@ -21,7 +21,7 @@ pub fn __umodti3(a: u128, b: u128) callconv(.C) u128 {
 
 const v2u64 = @Vector(2, u64);
 
-fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
+fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.c) v2u64 {
     var r: u128 = undefined;
     _ = udivmod(u128, @bitCast(a), @bitCast(b), &r);
     return @bitCast(r);
lib/compiler_rt/unorddf2.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __unorddf2(a: f64, b: f64) callconv(.C) i32 {
+pub fn __unorddf2(a: f64, b: f64) callconv(.c) i32 {
     return comparef.unordcmp(f64, a, b);
 }
 
-fn __aeabi_dcmpun(a: f64, b: f64) callconv(.AAPCS) i32 {
+fn __aeabi_dcmpun(a: f64, b: f64) callconv(.{ .arm_aapcs = .{} }) i32 {
     return comparef.unordcmp(f64, a, b);
 }
lib/compiler_rt/unordhf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__unordhf2, .{ .name = "__unordhf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __unordhf2(a: f16, b: f16) callconv(.C) i32 {
+pub fn __unordhf2(a: f16, b: f16) callconv(.c) i32 {
     return comparef.unordcmp(f16, a, b);
 }
lib/compiler_rt/unordsf2.zig
@@ -11,10 +11,10 @@ comptime {
     }
 }
 
-pub fn __unordsf2(a: f32, b: f32) callconv(.C) i32 {
+pub fn __unordsf2(a: f32, b: f32) callconv(.c) i32 {
     return comparef.unordcmp(f32, a, b);
 }
 
-fn __aeabi_fcmpun(a: f32, b: f32) callconv(.AAPCS) i32 {
+fn __aeabi_fcmpun(a: f32, b: f32) callconv(.{ .arm_aapcs = .{} }) i32 {
     return comparef.unordcmp(f32, a, b);
 }
lib/compiler_rt/unordtf2.zig
@@ -13,6 +13,6 @@ comptime {
     @export(&__unordtf2, .{ .name = "__unordtf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
+fn __unordtf2(a: f128, b: f128) callconv(.c) i32 {
     return comparef.unordcmp(f128, a, b);
 }
lib/compiler_rt/unordxf2.zig
@@ -7,6 +7,6 @@ comptime {
     @export(&__unordxf2, .{ .name = "__unordxf2", .linkage = common.linkage, .visibility = common.visibility });
 }
 
-pub fn __unordxf2(a: f80, b: f80) callconv(.C) i32 {
+pub fn __unordxf2(a: f80, b: f80) callconv(.c) i32 {
     return comparef.unordcmp(f80, a, b);
 }
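The `__unord*` comparisons return nonzero exactly when either operand is NaN, mirroring the rule that every ordered comparison involving NaN is false. A behavioral sketch:

    const std = @import("std");

    test "NaN is unordered against everything" {
        const nan = std.math.nan(f32);
        try std.testing.expect(std.math.isNan(nan));
        try std.testing.expect(!(nan < 1.0));
        try std.testing.expect(!(nan >= 1.0));
    }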
lib/std/os/linux/aarch64.zig
@@ -99,7 +99,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         x0,   x1,    w2,    x3,  x4,   x5,  x6
     //
@@ -141,7 +141,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     switch (@import("builtin").zig_backend) {
         .stage2_c => asm volatile (
             \\ mov x8, %[number]
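The per-architecture syscall stubs in these files all switch `.Naked` to `.naked`. A naked function gets no prologue or epilogue, so its body is effectively restricted to inline assembly; a minimal sketch for x86_64 Linux (function name illustrative, 60 is SYS_exit):

    // Exit the process with status 0. With callconv(.naked) the compiler
    // emits nothing around this assembly, matching the stubs in this diff.
    fn exitZero() callconv(.naked) noreturn {
        asm volatile (
            \\ movl $60, %%eax
            \\ xorl %%edi, %%edi
            \\ syscall
        );
    }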
lib/std/os/linux/arm.zig
@@ -98,7 +98,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         r0,   r1,    r2,    r3,  +0,   +4,  +8
     //
@@ -134,7 +134,7 @@ pub fn clone() callconv(.Naked) usize {
     );
 }
 
-pub fn restore() callconv(.Naked) noreturn {
+pub fn restore() callconv(.naked) noreturn {
     switch (@import("builtin").zig_backend) {
         .stage2_c => asm volatile (
             \\ mov r7, %[number]
@@ -152,7 +152,7 @@ pub fn restore() callconv(.Naked) noreturn {
     }
 }
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     switch (@import("builtin").zig_backend) {
         .stage2_c => asm volatile (
             \\ mov r7, %[number]
lib/std/os/linux/hexagon.zig
@@ -96,7 +96,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         r0,   r1,    r2,    r3,  r4,   r5,  +0
     //
@@ -137,7 +137,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ trap0(#0)
         :
lib/std/os/linux/loongarch64.zig
@@ -100,7 +100,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //           a0,    a1,    a2,  a3,   a4,  a5,   a6
     // sys_clone(flags, stack, ptid, ctid, tls)
@@ -140,7 +140,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ or $a7, $zero, %[number]
         \\ syscall 0
lib/std/os/linux/mips.zig
@@ -199,7 +199,7 @@ pub fn syscall7(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         3,    4,     5,     6,   7,    8,   9
     //
@@ -250,7 +250,7 @@ pub fn clone() callconv(.Naked) usize {
     );
 }
 
-pub fn restore() callconv(.Naked) noreturn {
+pub fn restore() callconv(.naked) noreturn {
     asm volatile (
         \\ syscall
         :
@@ -259,7 +259,7 @@ pub fn restore() callconv(.Naked) noreturn {
     );
 }
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ syscall
         :
lib/std/os/linux/mips64.zig
@@ -182,7 +182,7 @@ pub fn syscall7(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         3,    4,     5,     6,   7,    8,   9
     //
@@ -229,7 +229,7 @@ pub fn clone() callconv(.Naked) usize {
     );
 }
 
-pub fn restore() callconv(.Naked) noreturn {
+pub fn restore() callconv(.naked) noreturn {
     asm volatile (
         \\ syscall
         :
@@ -238,7 +238,7 @@ pub fn restore() callconv(.Naked) noreturn {
     );
 }
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ syscall
         :
lib/std/os/linux/powerpc.zig
@@ -127,7 +127,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         3,    4,     5,     6,   7,    8,   9
     //
@@ -199,7 +199,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ sc
         :
lib/std/os/linux/powerpc64.zig
@@ -127,7 +127,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         3,    4,     5,     6,   7,    8,   9
     //
@@ -184,7 +184,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ sc
         :
lib/std/os/linux/riscv32.zig
@@ -96,7 +96,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         a0,   a1,    a2,    a3,  a4,   a5,  a6
     //
@@ -142,7 +142,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ ecall
         :
lib/std/os/linux/riscv64.zig
@@ -96,7 +96,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         a0,   a1,    a2,    a3,  a4,   a5,  a6
     //
@@ -142,7 +142,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ ecall
         :
lib/std/os/linux/s390x.zig
@@ -90,7 +90,7 @@ pub fn syscall6(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize,
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     asm volatile (
         \\# int clone(
         \\#    fn,      a = r2
@@ -156,7 +156,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\svc 0
         :
lib/std/os/linux/sparc64.zig
@@ -179,7 +179,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         i0,   i1,    i2,    i3,  i4,   i5,  sp
     //
lib/std/os/linux/thumb.zig
@@ -143,7 +143,7 @@ pub fn syscall6(
 
 pub const clone = @import("arm.zig").clone;
 
-pub fn restore() callconv(.Naked) noreturn {
+pub fn restore() callconv(.naked) noreturn {
     asm volatile (
         \\ mov r7, %[number]
         \\ svc #0
@@ -152,7 +152,7 @@ pub fn restore() callconv(.Naked) noreturn {
     );
 }
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     asm volatile (
         \\ mov r7, %[number]
         \\ svc #0
lib/std/os/linux/x86.zig
@@ -122,7 +122,7 @@ pub fn socketcall(call: usize, args: [*]const usize) usize {
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     // __clone(func, stack, flags, arg, ptid, tls, ctid)
     //         +8,   +12,   +16,   +20, +24,  +28, +32
     //
@@ -172,7 +172,7 @@ pub fn clone() callconv(.Naked) usize {
     );
 }
 
-pub fn restore() callconv(.Naked) noreturn {
+pub fn restore() callconv(.naked) noreturn {
     switch (@import("builtin").zig_backend) {
         .stage2_c => asm volatile (
             \\ movl %[number], %%eax
@@ -190,7 +190,7 @@ pub fn restore() callconv(.Naked) noreturn {
     }
 }
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     switch (@import("builtin").zig_backend) {
         .stage2_c => asm volatile (
             \\ movl %[number], %%eax
@@ -405,7 +405,7 @@ noinline fn getContextReturnAddress() usize {
     return @returnAddress();
 }
 
-pub fn getContextInternal() callconv(.Naked) usize {
+pub fn getContextInternal() callconv(.naked) usize {
     asm volatile (
         \\ movl $0, %[flags_offset:c](%%edx)
         \\ movl $0, %[link_offset:c](%%edx)
lib/std/os/linux/x86_64.zig
@@ -101,7 +101,7 @@ pub fn syscall6(
     );
 }
 
-pub fn clone() callconv(.Naked) usize {
+pub fn clone() callconv(.naked) usize {
     asm volatile (
         \\      movl $56,%%eax // SYS_clone
         \\      movq %%rdi,%%r11
@@ -137,7 +137,7 @@ pub fn clone() callconv(.Naked) usize {
 
 pub const restore = restore_rt;
 
-pub fn restore_rt() callconv(.Naked) noreturn {
+pub fn restore_rt() callconv(.naked) noreturn {
     switch (@import("builtin").zig_backend) {
         .stage2_c => asm volatile (
             \\ movl %[number], %%eax
@@ -382,7 +382,7 @@ fn gpRegisterOffset(comptime reg_index: comptime_int) usize {
     return @offsetOf(ucontext_t, "mcontext") + @offsetOf(mcontext_t, "gregs") + @sizeOf(usize) * reg_index;
 }
 
-fn getContextInternal() callconv(.Naked) usize {
+fn getContextInternal() callconv(.naked) usize {
     // TODO: Read GS/FS registers?
     asm volatile (
         \\ movq $0, %[flags_offset:c](%%rdi)
lib/std/zig/parser_test.zig
@@ -1096,10 +1096,10 @@ test "zig fmt: block in slice expression" {
 test "zig fmt: async function" {
     try testCanonical(
         \\pub const Server = struct {
-        \\    handleRequestFn: fn (*Server, *const std.net.Address, File) callconv(.Async) void,
+        \\    handleRequestFn: fn (*Server, *const std.net.Address, File) callconv(.@"async") void,
         \\};
         \\test "hi" {
-        \\    var ptr: fn (i32) callconv(.Async) void = @ptrCast(other);
+        \\    var ptr: fn (i32) callconv(.@"async") void = @ptrCast(other);
         \\}
         \\
     );
@@ -1259,7 +1259,7 @@ test "zig fmt: threadlocal" {
 test "zig fmt: linksection" {
     try testCanonical(
         \\export var aoeu: u64 linksection(".text.derp") = 1234;
-        \\export fn _start() linksection(".text.boot") callconv(.Naked) noreturn {}
+        \\export fn _start() linksection(".text.boot") callconv(.naked) noreturn {}
         \\
     );
 }
@@ -3926,7 +3926,7 @@ test "zig fmt: fn type" {
         \\}
         \\
         \\const a: fn (u8) u8 = undefined;
-        \\const b: fn (u8) callconv(.Naked) u8 = undefined;
+        \\const b: fn (u8) callconv(.naked) u8 = undefined;
         \\const ap: fn (u8) u8 = a;
         \\
     );
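Because `async` (like `inline`) is a keyword, its lowercase tag has to be written as a quoted identifier, hence the canonical `.@"async"` rendered in the fmt tests above. A sketch of the spelling in a function type (name illustrative):

    // The same quoting applies to callconv(.@"inline").
    const AsyncFn = fn (i32) callconv(.@"async") void;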
lib/c.zig
@@ -54,11 +54,11 @@ pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, _: ?
 }
 
 extern fn main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
-fn wasm_start() callconv(.C) void {
+fn wasm_start() callconv(.c) void {
     _ = main(0, undefined);
 }
 
-fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
+fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.c) [*:0]u8 {
     var i: usize = 0;
     while (src[i] != 0) : (i += 1) {
         dest[i] = src[i];
@@ -76,7 +76,7 @@ test "strcpy" {
     try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
 }
 
-fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.C) [*:0]u8 {
+fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.c) [*:0]u8 {
     var i: usize = 0;
     while (i < n and src[i] != 0) : (i += 1) {
         dest[i] = src[i];
@@ -96,7 +96,7 @@ test "strncpy" {
     try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
 }
 
-fn strcat(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
+fn strcat(dest: [*:0]u8, src: [*:0]const u8) callconv(.c) [*:0]u8 {
     var dest_end: usize = 0;
     while (dest[dest_end] != 0) : (dest_end += 1) {}
 
@@ -119,7 +119,7 @@ test "strcat" {
     try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
 }
 
-fn strncat(dest: [*:0]u8, src: [*:0]const u8, avail: usize) callconv(.C) [*:0]u8 {
+fn strncat(dest: [*:0]u8, src: [*:0]const u8, avail: usize) callconv(.c) [*:0]u8 {
     var dest_end: usize = 0;
     while (dest[dest_end] != 0) : (dest_end += 1) {}
 
@@ -142,7 +142,7 @@ test "strncat" {
     try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
 }
 
-fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.C) c_int {
+fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.c) c_int {
     return switch (std.mem.orderZ(u8, s1, s2)) {
         .lt => -1,
         .eq => 0,
@@ -150,11 +150,11 @@ fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.C) c_int {
     };
 }
 
-fn strlen(s: [*:0]const u8) callconv(.C) usize {
+fn strlen(s: [*:0]const u8) callconv(.c) usize {
     return std.mem.len(s);
 }
 
-fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
+fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.c) c_int {
     if (_n == 0) return 0;
     var l = _l;
     var r = _r;
@@ -167,7 +167,7 @@ fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
     return @as(c_int, l[0]) - @as(c_int, r[0]);
 }
 
-fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
+fn strerror(errnum: c_int) callconv(.c) [*:0]const u8 {
     _ = errnum;
     return "TODO strerror implementation";
 }
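`strcmp` above maps `std.mem.orderZ` results onto C's negative/zero/positive contract. A minimal sketch of the underlying helper (illustrative test):

    const std = @import("std");

    test "orderZ drives strcmp-style results" {
        try std.testing.expect(std.mem.orderZ(u8, "abc", "abd") == .lt);
        try std.testing.expect(std.mem.orderZ(u8, "abc", "abc") == .eq);
    }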
lib/fuzzer.zig
@@ -453,7 +453,7 @@ export fn fuzzer_coverage_id() u64 {
     return fuzzer.coverage_id;
 }
 
-var fuzzer_one: *const fn (input_ptr: [*]const u8, input_len: usize) callconv(.C) void = undefined;
+var fuzzer_one: *const fn (input_ptr: [*]const u8, input_len: usize) callconv(.c) void = undefined;
 
 export fn fuzzer_start(testOne: @TypeOf(fuzzer_one)) void {
     fuzzer_one = testOne;
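The calling convention is part of the function-pointer type, which is why `fuzzer_one` and the `fuzzer_start` parameter change in lockstep. A sketch of assigning a matching function (names illustrative):

    fn noopHook(input_ptr: [*]const u8, input_len: usize) callconv(.c) void {
        _ = input_ptr;
        _ = input_len;
    }

    // The pointer type and the function's callconv must agree exactly.
    const hook: *const fn ([*]const u8, usize) callconv(.c) void = &noopHook;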
src/codegen/spirv/Assembler.zig
@@ -276,7 +276,7 @@ fn todo(self: *Assembler, comptime fmt: []const u8, args: anytype) Error {
 fn processInstruction(self: *Assembler) !void {
     const result: AsmValue = switch (self.inst.opcode) {
         .OpEntryPoint => {
-            return self.fail(0, "cannot export entry points via OpEntryPoint, export the kernel using callconv(.Kernel)", .{});
+            return self.fail(0, "cannot export entry points via OpEntryPoint, export the kernel using callconv(.kernel)", .{});
         },
         .OpCapability => {
             try self.spv.addCapability(@enumFromInt(self.inst.operands.items[0].value));
src/link/NvPtx.zig
@@ -1,7 +1,7 @@
 //! NVidia PTX (Parallel Thread Execution)
 //! https://docs.nvidia.com/cuda/parallel-thread-execution/index.html
 //! For this we rely on the nvptx backend of LLVM
-//! Kernel functions need to be marked both as "export" and "callconv(.Kernel)"
+//! Kernel functions need to be marked both as "export" and "callconv(.kernel)"
 
 const NvPtx = @This();
 
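Like `.c`, the lowercase `.kernel` resolves through `std.builtin.CallingConvention` to the GPU entry-point convention of the target (nvptx, amdgcn, or spirv), replacing the `.Kernel` alias used here and in the SPIR-V assembler message above. A sketch of a kernel entry point; this compiles only for a GPU target such as nvptx64, and the name is illustrative:

    // "export" makes the kernel visible to the runtime; callconv(.kernel)
    // selects the device entry-point convention for the target.
    pub export fn fill(out: [*]i32, value: i32, i: usize) callconv(.kernel) void {
        out[i] = value;
    }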
test/behavior/async_fn.zig
@@ -137,13 +137,13 @@ test "@frameSize" {
     const S = struct {
         fn doTheTest() !void {
             {
-                var ptr = @as(fn (i32) callconv(.Async) void, @ptrCast(other));
+                var ptr = @as(fn (i32) callconv(.@"async") void, @ptrCast(other));
                 _ = &ptr;
                 const size = @frameSize(ptr);
                 try expect(size == @sizeOf(@Frame(other)));
             }
             {
-                var ptr = @as(fn () callconv(.Async) void, @ptrCast(first));
+                var ptr = @as(fn () callconv(.@"async") void, @ptrCast(first));
                 _ = &ptr;
                 const size = @frameSize(ptr);
                 try expect(size == @sizeOf(@Frame(first)));
@@ -220,7 +220,7 @@ test "coroutine suspend with block" {
 
 var a_promise: anyframe = undefined;
 var global_result = false;
-fn testSuspendBlock() callconv(.Async) void {
+fn testSuspendBlock() callconv(.@"async") void {
     suspend {
         comptime assert(@TypeOf(@frame()) == *@Frame(testSuspendBlock)) catch unreachable;
         a_promise = @frame();
@@ -249,14 +249,14 @@ test "coroutine await" {
     try expect(await_final_result == 1234);
     try expect(std.mem.eql(u8, &await_points, "abcdefghi"));
 }
-fn await_amain() callconv(.Async) void {
+fn await_amain() callconv(.@"async") void {
     await_seq('b');
     var p = async await_another();
     await_seq('e');
     await_final_result = await p;
     await_seq('h');
 }
-fn await_another() callconv(.Async) i32 {
+fn await_another() callconv(.@"async") i32 {
     await_seq('c');
     suspend {
         await_seq('d');
@@ -287,14 +287,14 @@ test "coroutine await early return" {
     try expect(early_final_result == 1234);
     try expect(std.mem.eql(u8, &early_points, "abcdef"));
 }
-fn early_amain() callconv(.Async) void {
+fn early_amain() callconv(.@"async") void {
     early_seq('b');
     var p = async early_another();
     early_seq('d');
     early_final_result = await p;
     early_seq('e');
 }
-fn early_another() callconv(.Async) i32 {
+fn early_another() callconv(.@"async") i32 {
     early_seq('c');
     return 1234;
 }
@@ -313,7 +313,7 @@ test "async function with dot syntax" {
 
     const S = struct {
         var y: i32 = 1;
-        fn foo() callconv(.Async) void {
+        fn foo() callconv(.@"async") void {
             y += 1;
             suspend {}
         }
@@ -329,7 +329,7 @@ test "async fn pointer in a struct field" {
 
     var data: i32 = 1;
     const Foo = struct {
-        bar: fn (*i32) callconv(.Async) void,
+        bar: fn (*i32) callconv(.@"async") void,
     };
     var foo = Foo{ .bar = simpleAsyncFn2 };
     _ = &foo;
@@ -346,7 +346,7 @@ test "async fn pointer in a struct field" {
 fn doTheAwait(f: anyframe->void) void {
     await f;
 }
-fn simpleAsyncFn2(y: *i32) callconv(.Async) void {
+fn simpleAsyncFn2(y: *i32) callconv(.@"async") void {
     defer y.* += 2;
     y.* += 1;
     suspend {}
@@ -357,10 +357,10 @@ test "@asyncCall with return type" {
     if (builtin.os.tag == .wasi) return error.SkipZigTest; // TODO
 
     const Foo = struct {
-        bar: fn () callconv(.Async) i32,
+        bar: fn () callconv(.@"async") i32,
 
         var global_frame: anyframe = undefined;
-        fn middle() callconv(.Async) i32 {
+        fn middle() callconv(.@"async") i32 {
             return afunc();
         }
 
@@ -396,7 +396,7 @@ test "async fn with inferred error set" {
             resume global_frame;
             try std.testing.expectError(error.Fail, result);
         }
-        fn middle() callconv(.Async) !void {
+        fn middle() callconv(.@"async") !void {
             var f = async middle2();
             return await f;
         }
@@ -441,11 +441,11 @@ fn nonFailing() (anyframe->anyerror!void) {
     Static.frame = async suspendThenFail();
     return &Static.frame;
 }
-fn suspendThenFail() callconv(.Async) anyerror!void {
+fn suspendThenFail() callconv(.@"async") anyerror!void {
     suspend {}
     return error.Fail;
 }
-fn printTrace(p: anyframe->(anyerror!void)) callconv(.Async) void {
+fn printTrace(p: anyframe->(anyerror!void)) callconv(.@"async") void {
     (await p) catch |e| {
         std.testing.expect(e == error.Fail) catch @panic("test failure");
         if (@errorReturnTrace()) |trace| {
@@ -466,7 +466,7 @@ test "break from suspend" {
     _ = p;
     try std.testing.expect(my_result == 2);
 }
-fn testBreakFromSuspend(my_result: *i32) callconv(.Async) void {
+fn testBreakFromSuspend(my_result: *i32) callconv(.@"async") void {
     suspend {
         resume @frame();
     }
@@ -949,7 +949,7 @@ test "cast fn to async fn when it is inferred to be async" {
         var ok = false;
 
         fn doTheTest() void {
-            var ptr: fn () callconv(.Async) i32 = undefined;
+            var ptr: fn () callconv(.@"async") i32 = undefined;
             ptr = func;
             var buf: [100]u8 align(16) = undefined;
             var result: i32 = undefined;
@@ -980,7 +980,7 @@ test "cast fn to async fn when it is inferred to be async, awaited directly" {
         var ok = false;
 
         fn doTheTest() void {
-            var ptr: fn () callconv(.Async) i32 = undefined;
+            var ptr: fn () callconv(.@"async") i32 = undefined;
             ptr = func;
             var buf: [100]u8 align(16) = undefined;
             var result: i32 = undefined;
@@ -1093,7 +1093,7 @@ test "@asyncCall with comptime-known function, but not awaited directly" {
             resume global_frame;
             try std.testing.expectError(error.Fail, result);
         }
-        fn middle() callconv(.Async) !void {
+        fn middle() callconv(.@"async") !void {
             var f = async middle2();
             return await f;
         }
@@ -1133,7 +1133,7 @@ test "@asyncCall using the result location inside the frame" {
     if (builtin.os.tag == .wasi) return error.SkipZigTest; // TODO
 
     const S = struct {
-        fn simple2(y: *i32) callconv(.Async) i32 {
+        fn simple2(y: *i32) callconv(.@"async") i32 {
             defer y.* += 2;
             y.* += 1;
             suspend {}
@@ -1145,7 +1145,7 @@ test "@asyncCall using the result location inside the frame" {
     };
     var data: i32 = 1;
     const Foo = struct {
-        bar: fn (*i32) callconv(.Async) i32,
+        bar: fn (*i32) callconv(.@"async") i32,
     };
     var foo = Foo{ .bar = S.simple2 };
     _ = &foo;
@@ -1271,7 +1271,7 @@ test "await used in expression and awaiting fn with no suspend but async calling
             const sum = (await f1) + (await f2);
             expect(sum == 10) catch @panic("test failure");
         }
-        fn add(a: i32, b: i32) callconv(.Async) i32 {
+        fn add(a: i32, b: i32) callconv(.@"async") i32 {
             return a + b;
         }
     };
@@ -1289,7 +1289,7 @@ test "await used in expression after a fn call" {
             sum = foo() + await f1;
             expect(sum == 8) catch @panic("test failure");
         }
-        fn add(a: i32, b: i32) callconv(.Async) i32 {
+        fn add(a: i32, b: i32) callconv(.@"async") i32 {
             return a + b;
         }
         fn foo() i32 {
@@ -1309,7 +1309,7 @@ test "async fn call used in expression after a fn call" {
             sum = foo() + add(3, 4);
             expect(sum == 8) catch @panic("test failure");
         }
-        fn add(a: i32, b: i32) callconv(.Async) i32 {
+        fn add(a: i32, b: i32) callconv(.@"async") i32 {
             return a + b;
         }
         fn foo() i32 {
@@ -1598,7 +1598,7 @@ test "async function call resolves target fn frame, runtime func" {
         fn foo() anyerror!void {
             const stack_size = 1000;
             var stack_frame: [stack_size]u8 align(std.Target.stack_align) = undefined;
-            var func: fn () callconv(.Async) anyerror!void = bar;
+            var func: fn () callconv(.@"async") anyerror!void = bar;
             _ = &func;
             return await @asyncCall(&stack_frame, {}, func, .{});
         }
@@ -1860,7 +1860,7 @@ test "@asyncCall with pass-by-value arguments" {
         pub const ST = struct { f0: usize, f1: usize };
         pub const AT = [5]u8;
 
-        pub fn f(_fill0: u64, s: ST, _fill1: u64, a: AT, _fill2: u64) callconv(.Async) void {
+        pub fn f(_fill0: u64, s: ST, _fill1: u64, a: AT, _fill2: u64) callconv(.@"async") void {
             _ = s;
             _ = a;
             // Check that the array and struct arguments passed by value don't
@@ -1893,7 +1893,7 @@ test "@asyncCall with arguments having non-standard alignment" {
     const F1: u64 = 0xf00df00df00df00d;
 
     const S = struct {
-        pub fn f(_fill0: u32, s: struct { x: u64 align(16) }, _fill1: u64) callconv(.Async) void {
+        pub fn f(_fill0: u32, s: struct { x: u64 align(16) }, _fill1: u64) callconv(.@"async") void {
             _ = s;
             // The compiler inserts extra alignment for s, check that the
             // generated code picks the right slot for fill1.
test/behavior/await_struct.zig
@@ -21,14 +21,14 @@ test "coroutine await struct" {
     try expect(await_final_result.x == 1234);
     try expect(std.mem.eql(u8, &await_points, "abcdefghi"));
 }
-fn await_amain() callconv(.Async) void {
+fn await_amain() callconv(.@"async") void {
     await_seq('b');
     var p = async await_another();
     await_seq('e');
     await_final_result = await p;
     await_seq('h');
 }
-fn await_another() callconv(.Async) Foo {
+fn await_another() callconv(.@"async") Foo {
     await_seq('c');
     suspend {
         await_seq('d');
test/c_abi/main.zig
@@ -5694,7 +5694,7 @@ test "Stdcall ABI big union" {
     stdcall_big_union(x);
 }
 
-extern fn c_explict_win64(ByRef) callconv(.Win64) ByRef;
+extern fn c_explict_win64(ByRef) callconv(.{ .x86_64_win = .{} }) ByRef;
 test "explicit SysV calling convention" {
     if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;
 
@@ -5702,7 +5702,7 @@ test "explicit SysV calling convention" {
     try expect(res.val == 42);
 }
 
-extern fn c_explict_sys_v(ByRef) callconv(.SysV) ByRef;
+extern fn c_explict_sys_v(ByRef) callconv(.{ .x86_64_sysv = .{} }) ByRef;
 test "explicit Win64 calling convention" {
     if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;
 
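On x86_64 the explicit conventions are the union fields `x86_64_sysv` and `x86_64_win`, and since `callconv` accepts any comptime `CallingConvention` value, the choice can also be computed. A sketch (x86_64 target assumed; names illustrative):

    const std = @import("std");
    const builtin = @import("builtin");

    // Pick the native convention by hand; `.c` performs this resolution
    // automatically for the target.
    const explicit_cc: std.builtin.CallingConvention = if (builtin.os.tag == .windows)
        .{ .x86_64_win = .{} }
    else
        .{ .x86_64_sysv = .{} };

    fn answer() callconv(explicit_cc) u64 {
        return 42;
    }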
test/cases/compile_errors/async/async_function_depends_on_its_own_frame.zig
@@ -1,7 +1,7 @@
 export fn entry() void {
     _ = async amain();
 }
-fn amain() callconv(.Async) void {
+fn amain() callconv(.@"async") void {
     var x: [@sizeOf(@Frame(amain))]u8 = undefined;
     _ = &x;
 }
test/cases/compile_errors/async/async_function_indirectly_depends_on_its_own_frame.zig
@@ -1,7 +1,7 @@
 export fn entry() void {
     _ = async amain();
 }
-fn amain() callconv(.Async) void {
+fn amain() callconv(.@"async") void {
     other();
 }
 fn other() void {
test/cases/compile_errors/async/bad_alignment_in_asynccall.zig
@@ -1,10 +1,10 @@
 export fn entry() void {
-    var ptr: fn () callconv(.Async) void = func;
+    var ptr: fn () callconv(.@"async") void = func;
     var bytes: [64]u8 = undefined;
     _ = @asyncCall(&bytes, {}, ptr, .{});
     _ = &ptr;
 }
-fn func() callconv(.Async) void {}
+fn func() callconv(.@"async") void {}
 
 // error
 // backend=stage1
test/cases/compile_errors/async/exported_async_function.zig
@@ -1,4 +1,4 @@
-export fn foo() callconv(.Async) void {}
+export fn foo() callconv(.@"async") void {}
 
 // error
 // backend=stage1
test/cases/compile_errors/async/returning_error_from_void_async_function.zig
@@ -1,7 +1,7 @@
 export fn entry() void {
     _ = async amain();
 }
-fn amain() callconv(.Async) void {
+fn amain() callconv(.@"async") void {
     return error.ShouldBeCompileError;
 }
 
test/cases/compile_errors/async/runtime-known_async_function_called.zig
@@ -6,7 +6,7 @@ fn amain() void {
     _ = ptr();
     _ = &ptr;
 }
-fn afunc() callconv(.Async) void {}
+fn afunc() callconv(.@"async") void {}
 
 // error
 // backend=stage1
test/cases/compile_errors/async/runtime-known_function_called_with_async_keyword.zig
@@ -4,7 +4,7 @@ export fn entry() void {
     _ = &ptr;
 }
 
-fn afunc() callconv(.Async) void {}
+fn afunc() callconv(.@"async") void {}
 
 // error
 // backend=stage1
test/cases/compile_errors/call_from_naked_func.zig
@@ -1,16 +1,16 @@
-export fn runtimeCall() callconv(.Naked) void {
+export fn runtimeCall() callconv(.naked) void {
     f();
 }
 
-export fn runtimeBuiltinCall() callconv(.Naked) void {
+export fn runtimeBuiltinCall() callconv(.naked) void {
     @call(.auto, f, .{});
 }
 
-export fn comptimeCall() callconv(.Naked) void {
+export fn comptimeCall() callconv(.naked) void {
     comptime f();
 }
 
-export fn comptimeBuiltinCall() callconv(.Naked) void {
+export fn comptimeBuiltinCall() callconv(.naked) void {
     @call(.compile_time, f, .{});
 }
 
test/cases/compile_errors/callconv_apcs_aapcs_aapcsvfp_on_unsupported_platform.zig
@@ -1,5 +1,5 @@
-export fn entry2() callconv(.AAPCS) void {}
-export fn entry3() callconv(.AAPCSVFP) void {}
+export fn entry2() callconv(.{ .arm_aapcs = .{} }) void {}
+export fn entry3() callconv(.{ .arm_aapcs_vfp = .{} }) void {}
 
 // error
 // target=x86_64-linux-none
test/cases/compile_errors/callconv_interrupt_on_unsupported_platform.zig
@@ -1,7 +1,11 @@
-export fn entry() callconv(.Interrupt) void {}
+export fn entry1() callconv(.{ .x86_64_interrupt = .{} }) void {}
+export fn entry2() callconv(.{ .x86_interrupt = .{} }) void {}
+export fn entry3() callconv(.avr_interrupt) void {}
 
 // error
 // backend=stage2
 // target=aarch64-linux-none
 //
-// :1:29: error: calling convention 'Interrupt' is only available on x86, x86_64, AVR, and MSP430, not aarch64
+// :1:30: error: calling convention 'x86_64_interrupt' only available on architectures 'x86_64'
+// :1:30: error: calling convention 'x86_interrupt' only available on architectures 'x86'
+// :1:30: error: calling convention 'avr_interrupt' only available on architectures 'avr'
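Together with the next test case, the expected errors spell out the shape rules for these handlers: first parameter a pointer to the interrupt frame, an optional second 64-bit error code, and a `void` or `noreturn` return type. A conforming sketch for an x86_64 target (handler name hypothetical):

    export fn pageFaultHandler(frame: *anyopaque, error_code: u64) callconv(.{ .x86_64_interrupt = .{} }) void {
        _ = frame;
        _ = error_code;
    }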
test/cases/compile_errors/closure_get_depends_on_failed_decl.zig
@@ -1,7 +1,7 @@
 pub inline fn instanceRequestAdapter() void {}
 
 pub inline fn requestAdapter(
-    comptime callbackArg: fn () callconv(.Inline) void,
+    comptime callbackArg: fn () callconv(.@"inline") void,
 ) void {
     _ = &(struct {
         pub fn callback() callconv(.c) void {
test/cases/compile_errors/invalid_func_for_callconv.zig
@@ -1,12 +1,12 @@
-export fn interrupt_param1(_: u32) callconv(.Interrupt) void {}
-export fn interrupt_param2(_: *anyopaque, _: u32) callconv(.Interrupt) void {}
-export fn interrupt_param3(_: *anyopaque, _: u64, _: u32) callconv(.Interrupt) void {}
-export fn interrupt_ret(_: *anyopaque, _: u64) callconv(.Interrupt) u32 {
+export fn interrupt_param1(_: u32) callconv(.{ .x86_64_interrupt = .{} }) void {}
+export fn interrupt_param2(_: *anyopaque, _: u32) callconv(.{ .x86_64_interrupt = .{} }) void {}
+export fn interrupt_param3(_: *anyopaque, _: u64, _: u32) callconv(.{ .x86_64_interrupt = .{} }) void {}
+export fn interrupt_ret(_: *anyopaque, _: u64) callconv(.{ .x86_64_interrupt = .{} }) u32 {
     return 0;
 }
 
-export fn signal_param(_: u32) callconv(.Signal) void {}
-export fn signal_ret() callconv(.Signal) noreturn {}
+export fn signal_param(_: u32) callconv(.avr_signal) void {}
+export fn signal_ret() callconv(.avr_signal) noreturn {}
 
 // error
 // target=x86_64-linux
@@ -14,6 +14,6 @@ export fn signal_ret() callconv(.Signal) noreturn {}
 // :1:28: error: first parameter of function with 'x86_64_interrupt' calling convention must be a pointer type
 // :2:43: error: second parameter of function with 'x86_64_interrupt' calling convention must be a 64-bit integer
 // :3:51: error: 'x86_64_interrupt' calling convention supports up to 2 parameters, found 3
-// :4:69: error: function with calling convention 'x86_64_interrupt' must return 'void' or 'noreturn'
+// :4:87: error: function with calling convention 'x86_64_interrupt' must return 'void' or 'noreturn'
 // :8:24: error: parameters are not allowed with 'avr_signal' calling convention
 // :9:34: error: calling convention 'avr_signal' only available on architectures 'avr'
test/cases/compile_errors/return_from_naked_function.zig
@@ -1,4 +1,4 @@
-fn foo() callconv(.Naked) void {
+fn foo() callconv(.naked) void {
     return;
 }
 
test/cases/compile_errors/stack_usage_in_naked_function.zig
@@ -1,4 +1,4 @@
-export fn a() callconv(.Naked) noreturn {
+export fn a() callconv(.naked) noreturn {
     var x: u32 = 10;
     _ = &x;
 
@@ -6,7 +6,7 @@ export fn a() callconv(.Naked) noreturn {
     _ = y;
 }
 
-export fn b() callconv(.Naked) noreturn {
+export fn b() callconv(.naked) noreturn {
     var x = @as(u32, 10);
     _ = &x;
 
@@ -15,7 +15,7 @@ export fn b() callconv(.Naked) noreturn {
     _ = &z;
 }
 
-export fn c() callconv(.Naked) noreturn {
+export fn c() callconv(.naked) noreturn {
     const Foo = struct {
         y: u32,
     };
@@ -24,7 +24,7 @@ export fn c() callconv(.Naked) noreturn {
     _ = &x;
 }
 
-export fn d() callconv(.Naked) noreturn {
+export fn d() callconv(.naked) noreturn {
     const Foo = struct {
         inline fn bar() void {
             var x: u32 = 10;
test/cases/compile_errors/unreachable_in_naked_func.zig
@@ -1,13 +1,13 @@
-fn runtimeSafetyDefault() callconv(.Naked) void {
+fn runtimeSafetyDefault() callconv(.naked) void {
     unreachable;
 }
 
-fn runtimeSafetyOn() callconv(.Naked) void {
+fn runtimeSafetyOn() callconv(.naked) void {
     @setRuntimeSafety(true);
     unreachable;
 }
 
-fn runtimeSafetyOff() callconv(.Naked) void {
+fn runtimeSafetyOff() callconv(.naked) void {
     @setRuntimeSafety(false);
     unreachable;
 }
test/cases/safety/@asyncCall with too small a frame.zig
@@ -18,7 +18,7 @@ pub fn main() !void {
     _ = &frame;
     return error.TestFailed;
 }
-fn other() callconv(.Async) void {
+fn other() callconv(.@"async") void {
     suspend {}
 }
 // run
test/cases/safety/error return trace across suspend points.zig
@@ -26,7 +26,7 @@ fn failing() anyerror!void {
     return second();
 }
 
-fn second() callconv(.Async) anyerror!void {
+fn second() callconv(.@"async") anyerror!void {
     return error.Fail;
 }
 
test/link/glibc_compat/glibc_runtime_check.zig
@@ -95,7 +95,7 @@ fn checkStrlcpy_v2_38() !void {
 }
 
 // atexit is part of libc_nonshared, so ensure it's linked in correctly
-fn forceExit0Callback() callconv(.C) void {
+fn forceExit0Callback() callconv(.c) void {
     std.c.exit(0); // Override the main() exit code
 }
 
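`forceExit0Callback` is registered with libc's `atexit`, which takes a C-callconv `void (*)(void)`, so the Zig pointer type must spell the convention. A sketch with the extern declared directly rather than through `std.c` (names illustrative):

    extern "c" fn atexit(callback: *const fn () callconv(.c) void) c_int;

    fn onExit() callconv(.c) void {}

    pub fn main() void {
        _ = atexit(&onExit);
    }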
test/link/elf.zig
@@ -931,7 +931,7 @@ fn testEmitStaticLib(b: *Build, opts: Options) *Step {
     const obj3 = addObject(b, opts, .{
         .name = "a_very_long_file_name_so_that_it_ends_up_in_strtab",
         .zig_source_bytes =
-        \\fn weakFoo() callconv(.C) usize {
+        \\fn weakFoo() callconv(.c) usize {
         \\    return 42;
         \\}
         \\export var strongBar: usize = 100;
test/link/macho.zig
@@ -199,7 +199,7 @@ fn testDuplicateDefinitions(b: *Build, opts: Options) *Step {
         \\var x: usize = 1;
         \\export fn strong() void { x += 1; }
         \\comptime { @export(&weakImpl, .{ .name = "weak", .linkage = .weak }); }
-        \\fn weakImpl() callconv(.C) void { x += 1; }
+        \\fn weakImpl() callconv(.c) void { x += 1; }
         \\extern fn weak() void;
         \\pub fn main() void {
         \\    weak();
@@ -937,7 +937,7 @@ fn testLinksection(b: *Build, opts: Options) *Step {
 
     const obj = addObject(b, opts, .{ .name = "main", .zig_source_bytes = 
         \\export var test_global: u32 linksection("__DATA,__TestGlobal") = undefined;
-        \\export fn testFn() linksection("__TEXT,__TestFn") callconv(.C) void {
+        \\export fn testFn() linksection("__TEXT,__TestFn") callconv(.c) void {
         \\    TestGenericFn("A").f();
         \\}
         \\fn TestGenericFn(comptime suffix: []const u8) type {
@@ -2667,7 +2667,7 @@ fn testUnresolvedError2(b: *Build, opts: Options) *Step {
     const exe = addExecutable(b, opts, .{ .name = "main", .zig_source_bytes = 
         \\pub fn main() !void {
         \\    const msg_send_fn = @extern(
-        \\        *const fn () callconv(.C) usize,
+        \\        *const fn () callconv(.c) usize,
         \\        .{ .name = "objc_msgSend$initWithContentRect:styleMask:backing:defer:screen:" },
         \\    );
         \\    _ = @call(
test/src/Debugger.zig
@@ -677,7 +677,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
                 \\    param6: u64,
                 \\    param7: u64,
                 \\    param8: u64,
-                \\) callconv(.C) void {
+                \\) callconv(.c) void {
                 \\    const local_comptime_val: u64 = global_const *% global_const;
                 \\    const local_comptime_ptr: struct { u64 } = .{ local_comptime_val *% local_comptime_val };
                 \\    const local_const: u64 = global_var ^ global_threadlocal1 ^ global_threadlocal2 ^
test/standalone/extern/main.zig
@@ -1,8 +1,8 @@
 const assert = @import("std").debug.assert;
 const testing = @import("std").testing;
 
-const updateHidden = @extern(*const fn (u32) callconv(.C) void, .{ .name = "updateHidden" });
-const getHidden = @extern(*const fn () callconv(.C) u32, .{ .name = "getHidden" });
+const updateHidden = @extern(*const fn (u32) callconv(.c) void, .{ .name = "updateHidden" });
+const getHidden = @extern(*const fn () callconv(.c) u32, .{ .name = "getHidden" });
 
 const T = extern struct { x: u32 };
 
test/standalone/install_raw_hex/main.zig
@@ -1,3 +1,3 @@
-export fn _start() callconv(.C) noreturn {
+export fn _start() callconv(.c) noreturn {
     while (true) {}
 }
test/standalone/issue_12706/main.zig
@@ -3,7 +3,7 @@ extern fn testFnPtr(n: c_int, ...) void;
 
 const val: c_int = 123;
 
-fn func(a: c_int) callconv(.C) void {
+fn func(a: c_int) callconv(.c) void {
     std.debug.assert(a == val);
 }
 
test/standalone/issue_8550/main.zig
@@ -1,4 +1,4 @@
-export fn main(r0: u32, r1: u32, atags: u32) callconv(.C) noreturn {
+export fn main(r0: u32, r1: u32, atags: u32) callconv(.c) noreturn {
     _ = r0;
     _ = r1;
     _ = atags;
test/standalone/load_dynamic_library/main.zig
@@ -11,7 +11,7 @@ pub fn main() !void {
     var lib = try std.DynLib.open(dynlib_name);
     defer lib.close();
 
-    const Add = *const fn (i32, i32) callconv(.C) i32;
+    const Add = *const fn (i32, i32) callconv(.c) i32;
     const addFn = lib.lookup(Add, "add") orelse return error.SymbolNotFound;
 
     const result = addFn(12, 34);
test/standalone/stack_iterator/shared_lib_unwind.zig
@@ -23,7 +23,7 @@ noinline fn frame3(expected: *[5]usize, unwound: *[5]usize) void {
     frame4(expected, unwound);
 }
 
-fn frame2(expected: *[5]usize, unwound: *[5]usize) callconv(.C) void {
+fn frame2(expected: *[5]usize, unwound: *[5]usize) callconv(.c) void {
     expected[2] = @returnAddress();
     frame3(expected, unwound);
 }
@@ -31,7 +31,7 @@ fn frame2(expected: *[5]usize, unwound: *[5]usize) callconv(.C) void {
 extern fn frame0(
     expected: *[5]usize,
     unwound: *[5]usize,
-    frame_2: *const fn (expected: *[5]usize, unwound: *[5]usize) callconv(.C) void,
+    frame_2: *const fn (expected: *[5]usize, unwound: *[5]usize) callconv(.c) void,
 ) void;
 
 pub fn main() !void {
test/standalone/stack_iterator/unwind_freestanding.zig
@@ -37,7 +37,7 @@ noinline fn frame0(expected: *[4]usize, unwound: *[4]usize) void {
 }
 
 // No-OS entrypoint
-export fn _start() callconv(.C) noreturn {
+export fn _start() callconv(.c) noreturn {
     var expected: [4]usize = undefined;
     var unwound: [4]usize = undefined;
     frame0(&expected, &unwound);
test/nvptx.zig
@@ -15,7 +15,7 @@ pub fn addCases(ctx: *Cases, b: *std.Build) !void {
             \\    return a + b;
             \\}
             \\
-            \\pub export fn add_and_substract(a: i32, out: *i32) callconv(.Kernel) void {
+            \\pub export fn add_and_substract(a: i32, out: *i32) callconv(.kernel) void {
             \\    const x = add(a, 7);
             \\    var y = add(2, 0);
             \\    y -= x;
@@ -34,7 +34,7 @@ pub fn addCases(ctx: *Cases, b: *std.Build) !void {
             \\    );
             \\}
             \\
-            \\pub export fn special_reg(a: []const i32, out: []i32) callconv(.Kernel) void {
+            \\pub export fn special_reg(a: []const i32, out: []i32) callconv(.kernel) void {
             \\    const i = threadIdX();
             \\    out[i] = a[i] + 7;
             \\}
@@ -47,7 +47,7 @@ pub fn addCases(ctx: *Cases, b: *std.Build) !void {
         case.addCompile(
             \\var x: i32 addrspace(.global) = 0;
             \\
-            \\pub export fn increment(out: *i32) callconv(.Kernel) void {
+            \\pub export fn increment(out: *i32) callconv(.kernel) void {
             \\    x += 1;
             \\    out.* = x;
             \\}
@@ -64,7 +64,7 @@ pub fn addCases(ctx: *Cases, b: *std.Build) !void {
             \\}
             \\
             \\ var _sdata: [1024]f32 addrspace(.shared) = undefined;
-            \\ pub export fn reduceSum(d_x: []const f32, out: *f32) callconv(.Kernel) void {
+            \\ pub export fn reduceSum(d_x: []const f32, out: *f32) callconv(.kernel) void {
             \\     var sdata: *addrspace(.generic) [1024]f32 = @addrSpaceCast(&_sdata);
             \\     const tid: u32 = threadIdX();
             \\     var sum = d_x[tid];
tools/gen_outline_atomics.zig
@@ -78,7 +78,7 @@ fn writeFunction(
     };
     const fn_sig = try std.fmt.allocPrint(
         arena,
-        "fn {[name]s}() align(16) callconv(.Naked) void {{",
+        "fn {[name]s}() align(16) callconv(.naked) void {{",
         .{ .name = name },
     );
     try w.writeAll(fn_sig);
tools/lldb_pretty_printers.py
@@ -599,8 +599,8 @@ type_tag_handlers = {
     'slice_const_u8_sentinel_0': lambda payload: '[:0]const u8',
     'fn_noreturn_no_args': lambda payload: 'fn() noreturn',
     'fn_void_no_args': lambda payload: 'fn() void',
-    'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.Naked) noreturn',
-    'fn_ccc_void_no_args': lambda payload: 'fn() callconv(.C) void',
+    'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.naked) noreturn',
+    'fn_ccc_void_no_args': lambda payload: 'fn() callconv(.c) void',
     'single_const_pointer_to_comptime_int': lambda payload: '*const comptime_int',
     'manyptr_u8': lambda payload: '[*]u8',
     'manyptr_const_u8': lambda payload: '[*]const u8',