//! Target capability queries shared by the compiler.
1const builtin = @import("builtin");
2const std = @import("std");
3const assert = std.debug.assert;
4
5const Type = @import("Type.zig");
6const AddressSpace = std.builtin.AddressSpace;
7const Alignment = @import("InternPool.zig").Alignment;
8const Compilation = @import("Compilation.zig");
9const Feature = @import("Zcu.zig").Feature;
10
/// Default value for the stack-protector buffer-size parameter
/// (cf. Clang's `ssp-buffer-size`) used when none is given explicitly.
pub const default_stack_protector_buffer_size = 4;
12
/// Whether dynamic linking is fundamentally impossible for this target.
pub fn cannotDynamicLink(target: *const std.Target) bool {
    if (target.os.tag == .freestanding) return true;
    return target.cpu.arch.isSpirV();
}
19
/// On Darwin, we always link libSystem which contains libc.
/// Similarly on FreeBSD and NetBSD we always link system libc
/// since this is the stable syscall interface.
/// (Delegates to `std.Target.requiresLibC`.)
pub fn osRequiresLibC(target: *const std.Target) bool {
    return target.requiresLibC();
}
26
/// Whether linking this target's libc additionally requires libunwind.
pub fn libCNeedsLibUnwind(target: *const std.Target, link_mode: std.builtin.LinkMode) bool {
    const static_glibc = link_mode == .static and target.isGnuLibC();
    return static_glibc;
}
30
/// Whether linking libc++ on this target additionally requires libunwind.
pub fn libCxxNeedsLibUnwind(target: *const std.Target) bool {
    switch (target.os.tag) {
        // Darwin platforms, freestanding, and WASI do not take libunwind here.
        // (Wasm/WASI currently doesn't offer support for libunwind.)
        .maccatalyst,
        .macos,
        .ios,
        .watchos,
        .tvos,
        .visionos,
        .freestanding,
        .wasi,
        => return false,
        .windows => return target.abi.isGnu(),
        else => return true,
    }
}
47
/// This function returns whether non-pic code is completely invalid on the given target.
pub fn requiresPIC(target: *const std.Target, linking_libc: bool) bool {
    if (target.abi.isAndroid()) return true;
    if (target.os.tag == .windows or target.os.tag == .uefi) return true;
    if (osRequiresLibC(target)) return true;
    return linking_libc and target.isGnuLibC();
}
55
/// The PIC level to use for this target.
pub fn picLevel(target: *const std.Target) u32 {
    // MIPS always uses PIC level 1; other platforms vary in their default PIC
    // levels, but they support both level 1 and 2, in which case we prefer 2.
    if (target.cpu.arch.isMIPS()) return 1;
    return 2;
}
61
/// This is not whether the target supports Position Independent Code, but whether the -fPIC
/// C compiler argument is valid to Clang.
pub fn supports_fpic(target: *const std.Target) bool {
    const tag = target.os.tag;
    if (tag == .windows or tag == .uefi) return target.abi == .gnu;
    return true;
}
72
/// Whether builds for this target must always be single-threaded.
/// Currently no target requires this; the parameter is kept for API symmetry
/// with `defaultSingleThreaded`.
pub fn alwaysSingleThreaded(target: *const std.Target) bool {
    _ = target;
    return false;
}
77
/// Whether builds for this target default to single-threaded.
pub fn defaultSingleThreaded(target: *const std.Target) bool {
    // WebAssembly and Haiku default to single-threaded builds.
    if (target.cpu.arch.isWasm()) return true;
    return target.os.tag == .haiku;
}
89
/// Whether emulated TLS should be used instead of native thread-local storage.
pub fn useEmulatedTls(target: *const std.Target) bool {
    // Android needs emulated TLS before API level 29.
    if (target.abi.isAndroid()) return target.os.version_range.linux.android < 29;
    if (target.abi.isOpenHarmony()) return true;
    return target.os.tag == .openbsd;
}
101
/// Whether the compiler can emit Valgrind client requests for this
/// target/backend combination.
pub fn hasValgrindSupport(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
    // We can't currently output the necessary Valgrind client request assembly when using the C
    // backend and compiling with an MSVC-like compiler.
    const ofmt_c_msvc = (target.abi == .msvc or target.abi == .itanium) and target.ofmt == .c;

    return switch (target.cpu.arch) {
        .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
            .linux => true,
            else => false,
        },
        .aarch64, .aarch64_be => switch (target.os.tag) {
            .linux, .freebsd => true,
            else => false,
        },
        .mips, .mipsel, .mips64, .mips64el => switch (target.os.tag) {
            .linux => true,
            else => false,
        },
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => switch (target.os.tag) {
            .linux => backend != .stage2_powerpc, // Insufficient inline assembly support in self-hosted.
            else => false,
        },
        .riscv64 => switch (target.os.tag) {
            .linux => backend != .stage2_riscv64, // Insufficient inline assembly support in self-hosted.
            else => false,
        },
        .s390x => switch (target.os.tag) {
            .linux => true,
            else => false,
        },
        .x86 => switch (target.os.tag) {
            .linux, .freebsd, .illumos => true,
            .windows => !ofmt_c_msvc,
            else => false,
        },
        .x86_64 => switch (target.os.tag) {
            // The x32 ABIs are excluded.
            .linux => target.abi != .gnux32 and target.abi != .muslx32,
            .freebsd, .illumos => true,
            .windows => !ofmt_c_msvc,
            else => false,
        },
        else => false,
    };
}
146
/// The set of targets that LLVM has non-experimental support for.
/// Used to select between LLVM backend and self-hosted backend when compiling in
/// release modes. Both the object format and the CPU architecture must be
/// supported for this to return true.
pub fn hasLlvmSupport(target: *const std.Target, ofmt: std.Target.ObjectFormat) bool {
    switch (ofmt) {
        // LLVM does not support these object formats:
        .c,
        .plan9,
        => return false,

        .coff,
        .elf,
        .hex,
        .macho,
        .spirv,
        .raw,
        .wasm,
        => {},
    }

    return switch (target.cpu.arch) {
        .arm,
        .armeb,
        .aarch64,
        .aarch64_be,
        .arc,
        .avr,
        .bpfel,
        .bpfeb,
        .hexagon,
        .loongarch32,
        .loongarch64,
        .m68k,
        .mips,
        .mipsel,
        .mips64,
        .mips64el,
        .msp430,
        .powerpc,
        .powerpcle,
        .powerpc64,
        .powerpc64le,
        .amdgcn,
        .riscv32,
        .riscv32be,
        .riscv64,
        .riscv64be,
        .sparc,
        .sparc64,
        .spirv32,
        .spirv64,
        .s390x,
        .thumb,
        .thumbeb,
        .x86,
        .x86_64,
        .xcore,
        .nvptx,
        .nvptx64,
        .lanai,
        .wasm32,
        .wasm64,
        .ve,
        => true,

        // LLVM backend exists but can produce neither assembly nor object files.
        .csky,
        .xtensa,
        => false,

        // No LLVM backend exists.
        .alpha,
        .arceb,
        .hppa,
        .hppa64,
        .kalimba,
        .kvx,
        .microblaze,
        .microblazeel,
        .or1k,
        .propeller,
        .sh,
        .sheb,
        .x86_16,
        .xtensaeb,
        => false,
    };
}
235
/// The set of object formats that Zig supports using LLD to link for.
pub fn hasLldSupport(ofmt: std.Target.ObjectFormat) bool {
    switch (ofmt) {
        .elf, .coff, .wasm => return true,
        else => return false,
    }
}
243
/// Whether the new self-hosted linker supports this object format and backend.
pub fn hasNewLinkerSupport(ofmt: std.Target.ObjectFormat, backend: std.builtin.CompilerBackend) bool {
    // Only the self-hosted x86_64 backend has new-linker support, and only
    // for ELF and COFF outputs.
    if (backend != .stage2_x86_64) return false;
    return switch (ofmt) {
        .elf, .coff => true,
        else => false,
    };
}
253
/// The set of targets that our own self-hosted backends have robust support for.
/// Used to select between LLVM backend and self-hosted backend when compiling in
/// debug mode. A given target should only return true here if it is passing greater
/// than or equal to the number of behavior tests as the respective LLVM backend.
pub fn selfHostedBackendIsAsRobustAsLlvm(target: *const std.Target) bool {
    // Note: this checks the *host* (compiler) endianness, not the target's.
    if (comptime builtin.cpu.arch.endian() == .big) return false; // https://github.com/ziglang/zig/issues/25961
    if (target.cpu.arch.isSpirV()) return true;
    // 64-bit pointer check excludes the x32 ABIs.
    if (target.cpu.arch == .x86_64 and target.ptrBitWidth() == 64) {
        if (target.os.tag == .illumos) {
            // https://github.com/ziglang/zig/issues/25699
            return false;
        }
        if (target.os.tag.isBSD()) {
            // Self-hosted linker needs work: https://github.com/ziglang/zig/issues/24341
            return false;
        }
        return switch (target.ofmt) {
            .elf, .macho => true,
            else => false,
        };
    }
    return false;
}
277
/// Whether the given backend can emit stack-probing prologues for this target.
pub fn supportsStackProbing(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
    switch (backend) {
        .stage2_aarch64, .stage2_x86_64 => return true,
        .stage2_llvm => {
            if (target.os.tag == .windows or target.os.tag == .uefi) return false;
            return target.cpu.arch == .x86 or target.cpu.arch == .x86_64;
        },
        else => return false,
    }
}
286
/// Whether the given backend can apply stack protection for this target.
pub fn supportsStackProtector(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
    if (target.os.tag == .plan9) return false;
    switch (target.cpu.arch) {
        .spirv32, .spirv64 => return false,
        else => {},
    }
    // Only the LLVM backend implements stack protectors.
    return backend == .stage2_llvm;
}
301
/// Whether Clang can apply stack protection for this target.
pub fn clangSupportsStackProtector(target: *const std.Target) bool {
    // Idiom fix: the switch is already the operand of `return`, so the arms
    // should yield values rather than contain redundant `return` statements.
    return switch (target.cpu.arch) {
        // Not supported for SPIR-V.
        .spirv32, .spirv64 => false,
        else => true,
    };
}
308
/// Whether the target's libc supplies the stack-protector runtime support.
pub fn libcProvidesStackProtector(target: *const std.Target) bool {
    if (target.isMinGW()) return false;
    if (target.os.tag == .wasi) return false;
    return !target.cpu.arch.isSpirV();
}
312
/// Returns true if `@returnAddress()` is supported by the target and has a
/// reasonably performant implementation for the requested optimization mode.
pub fn supportsReturnAddress(target: *const std.Target, optimize: std.builtin.OptimizeMode) bool {
    switch (target.cpu.arch) {
        // Emscripten implements `emscripten_return_address()` by calling out
        // into JavaScript and parsing a stack trace, which introduces significant
        // overhead that we would prefer to avoid in release builds.
        .wasm32, .wasm64 => return target.os.tag == .emscripten and optimize == .Debug,
        .bpfel, .bpfeb, .spirv32, .spirv64 => return false,
        else => return true,
    }
}
326
/// How a `-l` library name relates to the functionality provided by Zig's
/// bundled compiler-rt and libunwind.
pub const CompilerRtClassification = enum { none, only_compiler_rt, only_libunwind, both };
328
/// Classifies a requested `-l` name in terms of Zig-provided runtime libraries.
pub fn classifyCompilerRtLibName(name: []const u8) CompilerRtClassification {
    const eql = std.mem.eql;
    // libgcc_s includes exception handling functions, so if linking this library
    // is requested, zig needs to instead link libunwind. Otherwise we end up with
    // the linker unable to find `_Unwind_RaiseException` and other related symbols.
    if (eql(u8, name, "gcc_s")) return .both;
    if (eql(u8, name, "unwind") or eql(u8, name, "gcc_eh")) return .only_libunwind;
    if (eql(u8, name, "compiler_rt") or
        eql(u8, name, "gcc") or
        eql(u8, name, "atomic") or
        eql(u8, name, "ssp"))
    {
        return .only_compiler_rt;
    }
    return .none;
}
350
/// Whether debug information can be emitted for this target.
pub fn hasDebugInfo(target: *const std.Target) bool {
    return switch (target.cpu.arch) {
        // NVPTX requires PTX ISA 7.5+ for debug info.
        // TODO: We should make newer PTX versions depend on older ones so we'd just check `ptx75`.
        .nvptx, .nvptx64 => target.cpu.hasAny(.nvptx, &.{
            .ptx75,
            .ptx76,
            .ptx77,
            .ptx78,
            .ptx80,
            .ptx81,
            .ptx82,
            .ptx83,
            .ptx84,
            .ptx85,
            .ptx86,
            .ptx87,
        }),
        .bpfel, .bpfeb => false,
        else => true,
    };
}
372
/// The optimization mode compiler-rt is built with when the user does not choose one.
pub fn defaultCompilerRtOptimizeMode(target: *const std.Target) std.builtin.OptimizeMode {
    // Bare-metal wasm favors binary size; everything else favors speed.
    const bare_wasm = target.cpu.arch.isWasm() and target.os.tag == .freestanding;
    return if (bare_wasm) .ReleaseSmall else .ReleaseFast;
}
380
/// Whether Zig can build compiler-rt for this target, and with which backends.
pub fn canBuildLibCompilerRt(target: *const std.Target) enum { no, yes, llvm_only } {
    if (target.os.tag == .plan9) return .no;
    switch (target.cpu.arch) {
        .spirv32, .spirv64 => return .no,
        // Remove this once https://github.com/ziglang/zig/issues/23714 is fixed
        .amdgcn => return .no,
        else => {},
    }
    switch (zigBackend(target, false)) {
        .stage2_aarch64, .stage2_x86_64 => return .yes,
        else => return .llvm_only,
    }
}
397
/// Whether Zig can build ubsan-rt for this target, and with which backends/linkers.
pub fn canBuildLibUbsanRt(target: *const std.Target) enum { no, yes, llvm_only, llvm_lld_only } {
    switch (target.cpu.arch) {
        .spirv32, .spirv64 => return .no,
        // Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
        .nvptx, .nvptx64 => return .no,
        else => {},
    }
    switch (zigBackend(target, false)) {
        .stage2_wasm => return .llvm_lld_only,
        .stage2_x86_64 => return .yes,
        else => return .llvm_only,
    }
}
411
/// Whether this architecture's calling conventions provide a red zone.
pub fn hasRedZone(target: *const std.Target) bool {
    switch (target.cpu.arch) {
        .aarch64,
        .aarch64_be,
        .powerpc,
        .powerpcle,
        .powerpc64,
        .powerpc64le,
        .x86,
        .x86_64,
        => return true,
        else => return false,
    }
}
427
/// The full set of `-l` flags needed to link this target's libc and its
/// companion libraries.
pub fn libcFullLinkFlags(target: *const std.Target) []const []const u8 {
    // The linking order of these is significant and should match the order other
    // c compilers such as gcc or clang use.
    const result: []const []const u8 = switch (target.os.tag) {
        .dragonfly, .freebsd, .netbsd, .openbsd => &.{ "-lm", "-lpthread", "-lc", "-lutil" },
        .illumos => &.{ "-lm", "-lsocket", "-lnsl", "-lc" },
        .haiku => &.{ "-lm", "-lroot", "-lpthread", "-lc", "-lnetwork" },
        .linux => switch (target.abi) {
            // Bionic and the OpenHarmony libc have no separate libpthread/librt/libutil.
            .android, .androideabi, .ohos, .ohoseabi => &.{ "-lm", "-lc", "-ldl" },
            else => &.{ "-lm", "-lpthread", "-lc", "-ldl", "-lrt", "-lutil" },
        },
        // On SerenityOS libc includes libm, libpthread, libdl, and libssp.
        .serenity => &.{"-lc"},
        else => &.{},
    };
    return result;
}
445
/// Whether Clang may invoke an external (system) assembler for this target.
pub fn clangMightShellOutForAssembly(target: *const std.Target) bool {
    // Clang defaults to using the system assembler in some cases.
    const arch = target.cpu.arch;
    return arch.isNvptx() or arch == .xcore;
}
450
/// Each backend architecture in Clang has a different codepath which may or may not
/// support an -mcpu flag.
pub fn clangAssemblerSupportsMcpuArg(target: *const std.Target) bool {
    switch (target.cpu.arch) {
        .arm, .armeb, .thumb, .thumbeb => return true,
        else => return false,
    }
}
459
/// Some experimental or poorly-maintained LLVM targets do not properly process CPU models in their
/// Clang driver code. For these, we should omit the `-Xclang -target-cpu -Xclang <model>` flags.
pub fn clangSupportsTargetCpuArg(target: *const std.Target) bool {
    switch (target.cpu.arch) {
        .arc, .msp430, .ve, .xcore, .xtensa => return false,
        else => return true,
    }
}
473
/// Whether Clang accepts a float-ABI argument for this architecture.
pub fn clangSupportsFloatAbiArg(target: *const std.Target) bool {
    return switch (target.cpu.arch) {
        .arm,
        .armeb,
        .thumb,
        .thumbeb,
        .csky,
        .mips,
        .mipsel,
        .mips64,
        .mips64el,
        .powerpc,
        .powerpcle,
        .powerpc64,
        .powerpc64le,
        .s390x,
        .sparc,
        .sparc64,
        => true,
        // We use the target triple for LoongArch.
        .loongarch32, .loongarch64 => false,
        else => false,
    };
}
498
/// Whether Clang accepts the no-implicit-float argument for this architecture.
pub fn clangSupportsNoImplicitFloatArg(target: *const std.Target) bool {
    switch (target.cpu.arch) {
        .aarch64,
        .aarch64_be,
        .arm,
        .armeb,
        .thumb,
        .thumbeb,
        .riscv32,
        .riscv32be,
        .riscv64,
        .riscv64be,
        .x86,
        .x86_64,
        => return true,
        else => return false,
    }
}
517
/// The default unwind-table mode for this target given whether libunwind and/or
/// TSan are being linked.
pub fn defaultUnwindTables(target: *const std.Target, libunwind: bool, libtsan: bool) std.builtin.UnwindTables {
    if (target.os.tag == .windows) {
        // The old 32-bit x86 variant of SEH doesn't use tables.
        return if (target.cpu.arch == .x86) .none else .async;
    }
    const wants_tables = target.os.tag.isDarwin() or libunwind or libtsan or
        std.debug.Dwarf.supportsUnwinding(target);
    return if (wants_tables) .async else .none;
}
529
/// The address space used by default for the given kind of value on this target.
pub fn defaultAddressSpace(
    target: *const std.Target,
    context: enum {
        /// Query the default address space for global constant values.
        global_constant,
        /// Query the default address space for global mutable values.
        global_mutable,
        /// Query the default address space for function-local values.
        local,
        /// Query the default address space for functions themselves.
        function,
    },
) AddressSpace {
    // AVR functions default to the .flash address space so that fixups into
    // progmem are produced correctly.
    if (target.cpu.arch == .avr and context == .function) return .flash;
    return .generic;
}
548
/// Returns true if pointers in `from` can be converted to a pointer in `to`.
pub fn addrSpaceCastIsValid(
    target: *const std.Target,
    from: AddressSpace,
    to: AddressSpace,
) bool {
    switch (target.cpu.arch) {
        // On x86, any two address spaces the target supports are mutually convertible.
        .x86_64, .x86 => return target.supportsAddressSpace(from, null) and target.supportsAddressSpace(to, null),
        .nvptx64, .nvptx, .amdgcn => {
            // On these GPU targets one side of the cast must be the generic
            // address space, and the other side must be supported by the target.
            const to_generic = target.supportsAddressSpace(from, null) and to == .generic;
            const from_generic = target.supportsAddressSpace(to, null) and from == .generic;
            return to_generic or from_generic;
        },
        // Everything else: only the trivial generic -> generic cast is allowed.
        else => return from == .generic and to == .generic,
    }
}
565
/// Returns whether pointer operations (arithmetic, indexing, etc.) should be blocked
/// for the given address space on the target architecture.
///
/// Under SPIR-V with Vulkan
/// (a) all physical pointers (.physical_storage_buffer, .global) always support pointer operations,
/// (b) by default logical pointers (.constant, .input, .output, etc.) never support operations
/// (c) some logical pointers (.storage_buffer, .shared) do support operations when
/// the VariablePointers capability is enabled (which enables OpPtrAccessChain).
pub fn shouldBlockPointerOps(target: *const std.Target, as: AddressSpace) bool {
    // Only Vulkan restricts pointer operations.
    if (target.os.tag != .vulkan) return false;

    return switch (as) {
        // TODO: Vulkan doesn't support pointers in the generic address space; this
        // case should be removed, but that requires a change in defaultAddressSpace().
        .generic => true,
        // For now, all global pointers are represented using StorageBuffer or
        // CrossWorkgroup, so these are real (physical) pointers and always
        // support operations.
        .global, .physical_storage_buffer => false,
        // Logical pointers that support operations when the VariablePointers
        // capability is enabled.
        .shared,
        .storage_buffer,
        => !target.cpu.features.isEnabled(@intFromEnum(std.Target.spirv.Feature.variable_pointers)),
        // Logical pointers that never support operations.
        .constant,
        .local,
        .input,
        .output,
        .uniform,
        .push_constant,
        => true,
        else => unreachable,
    };
}
599
/// Whether `feature` is one of the AMDGCN target features (sramecc/xnack) that
/// is dynamic for the target's specific GPU model, per the model tables below.
pub fn isDynamicAMDGCNFeature(target: *const std.Target, feature: std.Target.Cpu.Feature) bool {
    if (target.cpu.arch != .amdgcn) return false;

    // Models where only sramecc is dynamic.
    const sramecc_only = &[_]*const std.Target.Cpu.Model{
        &std.Target.amdgcn.cpu.gfx1010,
        &std.Target.amdgcn.cpu.gfx1011,
        &std.Target.amdgcn.cpu.gfx1012,
        &std.Target.amdgcn.cpu.gfx1013,
    };
    // Models where both xnack and sramecc are dynamic.
    const xnack_or_sramecc = &[_]*const std.Target.Cpu.Model{
        &std.Target.amdgcn.cpu.gfx1030,
        &std.Target.amdgcn.cpu.gfx1031,
        &std.Target.amdgcn.cpu.gfx1032,
        &std.Target.amdgcn.cpu.gfx1033,
        &std.Target.amdgcn.cpu.gfx1034,
        &std.Target.amdgcn.cpu.gfx1035,
        &std.Target.amdgcn.cpu.gfx1036,
        &std.Target.amdgcn.cpu.gfx1100,
        &std.Target.amdgcn.cpu.gfx1101,
        &std.Target.amdgcn.cpu.gfx1102,
        &std.Target.amdgcn.cpu.gfx1103,
        &std.Target.amdgcn.cpu.gfx1150,
        &std.Target.amdgcn.cpu.gfx1151,
        &std.Target.amdgcn.cpu.gfx1152,
        &std.Target.amdgcn.cpu.gfx1153,
        &std.Target.amdgcn.cpu.gfx1200,
        &std.Target.amdgcn.cpu.gfx1201,
    };
    const feature_tag: std.Target.amdgcn.Feature = @enumFromInt(feature.index);

    if (feature_tag == .sramecc) {
        if (std.mem.indexOfScalar(
            *const std.Target.Cpu.Model,
            sramecc_only ++ xnack_or_sramecc,
            target.cpu.model,
        )) |_| return true;
    }
    if (feature_tag == .xnack) {
        if (std.mem.indexOfScalar(
            *const std.Target.Cpu.Model,
            xnack_or_sramecc,
            target.cpu.model,
        )) |_| return true;
    }

    return false;
}
647
/// Returns the machine ABI name to pass through to LLVM for this target, or
/// null to let LLVM pick its default for the triple.
pub fn llvmMachineAbi(target: *const std.Target) ?[:0]const u8 {
    return switch (target.cpu.arch) {
        .arm, .armeb, .thumb, .thumbeb => "aapcs",
        .loongarch64 => switch (target.abi) {
            .gnusf, .muslsf => "lp64s",
            .gnuf32, .muslf32 => "lp64f",
            else => "lp64d",
        },
        .loongarch32 => switch (target.abi) {
            .gnusf => "ilp32s",
            .gnuf32 => "ilp32f",
            else => "ilp32d",
        },
        .mips, .mipsel => "o32",
        .mips64, .mips64el => switch (target.abi) {
            .gnuabin32, .muslabin32 => "n32",
            else => "n64",
        },
        .powerpc64, .powerpc64le => "elfv2", // We do not support ELFv1.
        // For RISC-V, pick the richest float ABI the CPU's features allow.
        .riscv64, .riscv64be => if (target.cpu.has(.riscv, .e))
            "lp64e"
        else if (target.cpu.has(.riscv, .d))
            "lp64d"
        else if (target.cpu.has(.riscv, .f))
            "lp64f"
        else
            "lp64",
        .riscv32, .riscv32be => if (target.cpu.has(.riscv, .e))
            "ilp32e"
        else if (target.cpu.has(.riscv, .d))
            "ilp32d"
        else if (target.cpu.has(.riscv, .f))
            "ilp32f"
        else
            "ilp32",
        else => null,
    };
}
686
/// This function returns 1 if function alignment is not observable or settable. Note that this
/// value will not necessarily match the backend's default function alignment (e.g. for LLVM).
pub fn defaultFunctionAlignment(target: *const std.Target) Alignment {
    // Overrides of the minimum for performance.
    return switch (target.cpu.arch) {
        .csky,
        .thumb,
        .thumbeb,
        .xcore,
        => .@"4",
        .aarch64,
        .aarch64_be,
        .hexagon,
        .powerpc,
        .powerpcle,
        .powerpc64,
        .powerpc64le,
        .s390x,
        .x86,
        .x86_64,
        => .@"16",
        .loongarch32,
        .loongarch64,
        => .@"32",
        // Everything else: no override; use the architectural minimum.
        else => minFunctionAlignment(target),
    };
}
714
/// This function returns 1 if function alignment is not observable or settable.
pub fn minFunctionAlignment(target: *const std.Target) Alignment {
    return switch (target.cpu.arch) {
        // Compressed instructions (C/Zca) reduce the RISC-V minimum to 2.
        .riscv32,
        .riscv32be,
        .riscv64,
        .riscv64be,
        => if (target.cpu.hasAny(.riscv, &.{ .c, .zca })) .@"2" else .@"4",
        .thumb,
        .thumbeb,
        .csky,
        .m68k,
        .msp430,
        .sh,
        .sheb,
        .s390x,
        .xcore,
        => .@"2",
        .aarch64,
        .aarch64_be,
        .alpha,
        .arc,
        .arceb,
        .arm,
        .armeb,
        .hexagon,
        .hppa,
        .hppa64,
        .lanai,
        .loongarch32,
        .loongarch64,
        .microblaze,
        .microblazeel,
        .mips,
        .mipsel,
        .powerpc,
        .powerpcle,
        .powerpc64,
        .powerpc64le,
        .sparc,
        .sparc64,
        .xtensa,
        .xtensaeb,
        => .@"4",
        .bpfeb,
        .bpfel,
        .kvx,
        .mips64,
        .mips64el,
        => .@"8",
        .ve,
        => .@"16",
        else => .@"1",
    };
}
770
/// Whether function alignment is observable or settable on this target.
pub fn supportsFunctionAlignment(target: *const std.Target) bool {
    switch (target.cpu.arch) {
        .nvptx, .nvptx64, .spirv32, .spirv64, .wasm32, .wasm64 => return false,
        else => return true,
    }
}
783
/// Mask to apply to a function pointer to recover the code address, or null
/// when function pointers need no masking.
pub fn functionPointerMask(target: *const std.Target) ?u64 {
    // 32-bit Arm uses the LSB to mean that the target function contains Thumb code.
    // MIPS uses the LSB to mean that the target function contains MIPS16/microMIPS code.
    const arch = target.cpu.arch;
    if (arch.isArm() or arch.isMIPS32()) return ~@as(u32, 1);
    if (arch.isMIPS64()) return ~@as(u64, 1);
    return null;
}
794
/// Whether guaranteed tail calls are supported by the given backend for this target.
pub fn supportsTailCall(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
    switch (backend) {
        // LLVM support varies per target; defer to the LLVM codegen layer.
        .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
        // Tail calls are supported when outputting C.
        .stage2_c => return true,
        else => return false,
    }
}
802
/// Whether the given backend supports threads; currently target-independent.
pub fn supportsThreads(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
    _ = target;
    // The self-hosted aarch64 backend is the only one without thread support.
    return backend != .stage2_aarch64;
}
810
/// Symbol prefix used for libc float helpers of the given bit width.
pub fn libcFloatPrefix(float_bits: u16) []const u8 {
    switch (float_bits) {
        16, 80 => return "__",
        32, 64, 128 => return "",
        else => unreachable,
    }
}
818
/// Symbol suffix used for libc float helpers of the given bit width.
pub fn libcFloatSuffix(float_bits: u16) []const u8 {
    switch (float_bits) {
        16 => return "h", // Non-standard
        32 => return "f",
        64 => return "",
        80 => return "x", // Non-standard
        128 => return "q", // Non-standard (mimics convention in GCC libquadmath)
        else => unreachable,
    }
}
829
/// compiler-rt naming abbreviation for the given float bit width.
pub fn compilerRtFloatAbbrev(float_bits: u16) []const u8 {
    switch (float_bits) {
        16 => return "h",
        32 => return "s",
        64 => return "d",
        80 => return "x",
        128 => return "t",
        else => unreachable,
    }
}
840
/// compiler-rt naming abbreviation for the given integer bit width.
pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
    switch (bits) {
        16 => return "h",
        32 => return "s",
        64 => return "d",
        128 => return "t",
        else => unreachable,
    }
}
850
/// Whether functions with this calling convention may use Zig-only types in
/// their signatures.
pub fn fnCallConvAllowsZigTypes(cc: std.builtin.CallingConvention) bool {
    switch (cc) {
        .auto, .async, .@"inline" => return true,
        // For now we want to authorize PTX kernels to use zig objects, even if
        // we end up exposing the ABI. The goal is to experiment with more
        // integrated CPU/GPU code.
        .nvptx_kernel => return true,
        else => return false,
    }
}
861
/// Selects the compiler backend used for this target: LLVM when requested,
/// the C backend for `.c` object format, otherwise the self-hosted backend
/// for the architecture (or `.other` when none exists).
pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.CompilerBackend {
    if (use_llvm) return .stage2_llvm;
    if (target.ofmt == .c) return .stage2_c;
    return switch (target.cpu.arch) {
        .aarch64, .aarch64_be => .stage2_aarch64,
        .arm, .armeb, .thumb, .thumbeb => .stage2_arm,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => .stage2_powerpc,
        .riscv64 => .stage2_riscv64,
        .sparc64 => .stage2_sparc64,
        .spirv32, .spirv64 => .stage2_spirv,
        .wasm32, .wasm64 => .stage2_wasm,
        .x86 => .stage2_x86,
        .x86_64 => .stage2_x86_64,
        else => .other,
    };
}
878
/// Whether the given code-generation backend implements the compiler `feature`.
pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, comptime feature: Feature) bool {
    return switch (feature) {
        .panic_fn => switch (backend) {
            .stage2_aarch64,
            .stage2_c,
            .stage2_llvm,
            .stage2_x86_64,
            .stage2_riscv64,
            => true,
            else => false,
        },
        .error_return_trace => switch (backend) {
            .stage2_llvm, .stage2_x86_64 => true,
            else => false,
        },
        .is_named_enum_value => switch (backend) {
            .stage2_llvm, .stage2_x86_64 => true,
            else => false,
        },
        .error_set_has_value => switch (backend) {
            .stage2_llvm, .stage2_wasm, .stage2_x86_64 => true,
            else => false,
        },
        .field_reordering => switch (backend) {
            .stage2_aarch64, .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
            else => false,
        },
        .separate_thread => switch (backend) {
            // Supports a separate thread but does not support N separate
            // threads because they would all just be locking the same mutex to
            // protect Builder.
            .stage2_llvm => false,
            // Same problem. Frontend needs to allow this backend to run in the
            // linker thread.
            .stage2_spirv => false,
            // Please do not make any more exceptions. Backends must support
            // being run in a separate thread from now on.
            else => true,
        },
    };
}
919}