Commit d1d95294fd
Changed files (23)
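This commit removes the aarch64_32 tag from std.Target.Cpu.Arch and instead models ILP32 AArch64 through a new ilp32 ABI tag (alongside the existing gnuilp32), with LLVM's aarch64_32 arch name derived from the ABI during triple construction. A minimal sketch of how such a target is now requested, mirroring the queries added in test/llvm_targets.zig; the main wrapper is my own illustration, not part of the commit:

const std = @import("std");

// Sketch: an ILP32 AArch64 target is selected via the abi field rather than a
// dedicated cpu_arch tag (.aarch64_32 no longer exists after this commit).
pub fn main() void {
    const freestanding_ilp32: std.Target.Query = .{
        .cpu_arch = .aarch64,
        .os_tag = .freestanding,
        .abi = .ilp32,
    };
    const linux_gnuilp32: std.Target.Query = .{
        .cpu_arch = .aarch64,
        .os_tag = .linux,
        .abi = .gnuilp32,
    };
    std.debug.print("{s} / {s}\n", .{
        @tagName(freestanding_ilp32.abi.?),
        @tagName(linux_gnuilp32.abi.?),
    });
}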
lib/compiler/aro/aro/Attribute.zig
@@ -892,7 +892,7 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?),
},
.vectorcall => switch (p.comp.target.cpu.arch) {
- .x86, .aarch64, .aarch64_be, .aarch64_32 => try p.attr_application_buf.append(p.gpa, attr),
+ .x86, .aarch64, .aarch64_be => try p.attr_application_buf.append(p.gpa, attr),
else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?),
},
},
lib/compiler/aro/aro/Compilation.zig
@@ -663,7 +663,7 @@ fn generateBuiltinTypes(comp: *Compilation) !void {
.arm, .armeb, .thumb, .thumbeb => .{
.specifier = if (os != .windows and os != .netbsd and os != .openbsd) .uint else .int,
},
- .aarch64, .aarch64_be, .aarch64_32 => .{
+ .aarch64, .aarch64_be => .{
.specifier = if (!os.isDarwin() and os != .netbsd) .uint else .int,
},
.x86_64, .x86 => .{ .specifier = if (os == .windows) .ushort else .int },
lib/compiler/aro/aro/target.zig
@@ -132,7 +132,7 @@ pub fn int64Type(target: std.Target) Type {
pub fn defaultFunctionAlignment(target: std.Target) u8 {
return switch (target.cpu.arch) {
.arm, .armeb => 4,
- .aarch64, .aarch64_32, .aarch64_be => 4,
+ .aarch64, .aarch64_be => 4,
.sparc, .sparcel, .sparc64 => 4,
.riscv64 => 2,
else => 1,
@@ -322,7 +322,6 @@ pub const FPSemantics = enum {
pub fn halfPrecisionType(target: std.Target) ?FPSemantics {
switch (target.cpu.arch) {
.aarch64,
- .aarch64_32,
.aarch64_be,
.arm,
.armeb,
@@ -478,7 +477,6 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
.kalimba,
.lanai,
.wasm32,
- .aarch64_32,
.spirv32,
.loongarch32,
.dxil,
@@ -542,7 +540,6 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
.x86_64,
=> {}, // Already 64 bit
- .aarch64_32 => copy.cpu.arch = .aarch64,
.arm => copy.cpu.arch = .aarch64,
.armeb => copy.cpu.arch = .aarch64_be,
.loongarch32 => copy.cpu.arch = .loongarch64,
@@ -574,9 +571,8 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
- .aarch64 => "aarch64",
+ .aarch64 => if (target.abi == .ilp32) "aarch64_32" else "aarch64",
.aarch64_be => "aarch64_be",
- .aarch64_32 => "aarch64_32",
.arc => "arc",
.avr => "avr",
.bpfel => "bpfel",
@@ -687,7 +683,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
writer.writeByte('-') catch unreachable;
const llvm_abi = switch (target.abi) {
- .none => "unknown",
+ .none, .ilp32 => "unknown",
.gnu => "gnu",
.gnuabin32 => "gnuabin32",
.gnuabi64 => "gnuabi64",
lib/compiler_rt/clear_cache.zig
@@ -25,7 +25,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
else => false,
};
const arm64 = switch (arch) {
- .aarch64, .aarch64_be, .aarch64_32 => true,
+ .aarch64, .aarch64_be => true,
else => false,
};
const mips = switch (arch) {
lib/compiler_rt/common.zig
@@ -92,7 +92,7 @@ pub fn F16T(comptime OtherType: type) type {
}
else
u16,
- .aarch64, .aarch64_be, .aarch64_32 => f16,
+ .aarch64, .aarch64_be => f16,
.riscv64 => if (builtin.zig_backend == .stage1) u16 else f16,
.x86, .x86_64 => if (builtin.target.isDarwin()) switch (OtherType) {
// Starting with LLVM 16, Darwin uses different abi for f16
lib/std/zig/system/darwin/macos.zig
@@ -406,7 +406,7 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
const current_arch = builtin.cpu.arch;
switch (current_arch) {
- .aarch64, .aarch64_be, .aarch64_32 => {
+ .aarch64, .aarch64_be => {
const model = switch (cpu_family) {
.ARM_EVEREST_SAWTOOTH => &Target.aarch64.cpu.apple_a16,
.ARM_BLIZZARD_AVALANCHE => &Target.aarch64.cpu.apple_a15,
lib/std/zig/system/linux.zig
@@ -264,7 +264,7 @@ const ArmCpuinfoImpl = struct {
if (self.core_no == 0) return null;
const is_64bit = switch (arch) {
- .aarch64, .aarch64_be, .aarch64_32 => true,
+ .aarch64, .aarch64_be => true,
else => false,
};
@@ -391,7 +391,7 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
.arm, .armeb, .thumb, .thumbeb => {
return ArmCpuinfoParser.parse(current_arch, f.reader()) catch null;
},
- .aarch64, .aarch64_be, .aarch64_32 => {
+ .aarch64, .aarch64_be => {
const registers = [12]u64{
getAArch64CpuFeature("MIDR_EL1"),
getAArch64CpuFeature("ID_AA64PFR0_EL1"),
lib/std/zig/system/windows.zig
@@ -209,7 +209,7 @@ fn genericCpuAndNativeFeatures(arch: Target.Cpu.Arch) Target.Cpu {
};
switch (arch) {
- .aarch64, .aarch64_be, .aarch64_32 => {
+ .aarch64, .aarch64_be => {
const Feature = Target.aarch64.Feature;
// Override any features that are either present or absent
@@ -229,7 +229,7 @@ fn genericCpuAndNativeFeatures(arch: Target.Cpu.Arch) Target.Cpu {
pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
const current_arch = builtin.cpu.arch;
const cpu: ?Target.Cpu = switch (current_arch) {
- .aarch64, .aarch64_be, .aarch64_32 => blk: {
+ .aarch64, .aarch64_be => blk: {
var cores: [128]Target.Cpu = undefined;
const core_count = getCpuCount();
lib/std/zig/LibCDirs.zig
@@ -246,6 +246,7 @@ fn libCGenericName(target: std.Target) [:0]const u8 {
.code16,
.eabi,
.eabihf,
+ .ilp32,
.android,
.msvc,
.itanium,
lib/std/atomic.zig
@@ -388,7 +388,7 @@ pub inline fn spinLoopHint() void {
// on common aarch64 CPUs.
// https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8258604
// https://bugs.mysql.com/bug.php?id=100664
- .aarch64, .aarch64_be, .aarch64_32 => asm volatile ("isb" ::: "memory"),
+ .aarch64, .aarch64_be => asm volatile ("isb" ::: "memory"),
// `yield` was introduced in v6k but is also available on v6m.
// https://www.keil.com/support/man/docs/armasm/armasm_dom1361289926796.htm
lib/std/c.zig
@@ -6829,7 +6829,7 @@ pub const padded_pthread_spin_t = switch (native_os) {
pub const pthread_spin_t = switch (native_os) {
.netbsd => switch (builtin.cpu.arch) {
- .aarch64, .aarch64_be, .aarch64_32 => u8,
+ .aarch64, .aarch64_be => u8,
.mips, .mipsel, .mips64, .mips64el => u32,
.powerpc, .powerpc64, .powerpc64le => i32,
.x86, .x86_64 => u8,
lib/std/Target.zig
@@ -197,7 +197,7 @@ pub const Os = struct {
return switch (tag) {
.linux => switch (arch) {
.arm, .armeb, .thumb, .thumbeb => "arm",
- .aarch64, .aarch64_be, .aarch64_32 => "aarch64",
+ .aarch64, .aarch64_be => "aarch64",
.mips, .mipsel, .mips64, .mips64el => "mips",
.powerpc, .powerpcle, .powerpc64, .powerpc64le => "powerpc",
.riscv32, .riscv64 => "riscv",
@@ -631,6 +631,7 @@ pub const Abi = enum {
code16,
eabi,
eabihf,
+ ilp32,
android,
musl,
musleabi,
@@ -983,7 +984,6 @@ pub const Cpu = struct {
armeb,
aarch64,
aarch64_be,
- aarch64_32,
arc,
avr,
bpfel,
@@ -1031,6 +1031,7 @@ pub const Cpu = struct {
spu_2,
// LLVM tags deliberately omitted:
+ // - aarch64_32
// - r600
// - le32
// - le64
@@ -1058,7 +1059,7 @@ pub const Cpu = struct {
pub inline fn isAARCH64(arch: Arch) bool {
return switch (arch) {
- .aarch64, .aarch64_be, .aarch64_32 => true,
+ .aarch64, .aarch64_be => true,
else => false,
};
}
@@ -1172,7 +1173,6 @@ pub const Cpu = struct {
.kalimba => .CSR_KALIMBA,
.lanai => .LANAI,
.wasm32 => .NONE,
- .aarch64_32 => .AARCH64,
.aarch64 => .AARCH64,
.aarch64_be => .AARCH64,
.mips64 => .MIPS,
@@ -1226,7 +1226,6 @@ pub const Cpu = struct {
.kalimba => .Unknown,
.lanai => .Unknown,
.wasm32 => .Unknown,
- .aarch64_32 => .ARM64,
.aarch64 => .ARM64,
.aarch64_be => .ARM64,
.mips64 => .Unknown,
@@ -1258,7 +1257,6 @@ pub const Cpu = struct {
return switch (arch) {
.avr,
.arm,
- .aarch64_32,
.aarch64,
.amdgcn,
.bpfel,
@@ -1333,7 +1331,7 @@ pub const Cpu = struct {
pub fn genericName(arch: Arch) [:0]const u8 {
return switch (arch) {
.arm, .armeb, .thumb, .thumbeb => "arm",
- .aarch64, .aarch64_be, .aarch64_32 => "aarch64",
+ .aarch64, .aarch64_be => "aarch64",
.bpfel, .bpfeb => "bpf",
.loongarch32, .loongarch64 => "loongarch",
.mips, .mipsel, .mips64, .mips64el => "mips",
@@ -1354,7 +1352,7 @@ pub const Cpu = struct {
pub fn allFeaturesList(arch: Arch) []const Cpu.Feature {
return switch (arch) {
.arm, .armeb, .thumb, .thumbeb => &arm.all_features,
- .aarch64, .aarch64_be, .aarch64_32 => &aarch64.all_features,
+ .aarch64, .aarch64_be => &aarch64.all_features,
.arc => &arc.all_features,
.avr => &avr.all_features,
.bpfel, .bpfeb => &bpf.all_features,
@@ -1385,7 +1383,7 @@ pub const Cpu = struct {
return switch (arch) {
.arc => comptime allCpusFromDecls(arc.cpu),
.arm, .armeb, .thumb, .thumbeb => comptime allCpusFromDecls(arm.cpu),
- .aarch64, .aarch64_be, .aarch64_32 => comptime allCpusFromDecls(aarch64.cpu),
+ .aarch64, .aarch64_be => comptime allCpusFromDecls(aarch64.cpu),
.avr => comptime allCpusFromDecls(avr.cpu),
.bpfel, .bpfeb => comptime allCpusFromDecls(bpf.cpu),
.csky => comptime allCpusFromDecls(csky.cpu),
@@ -1471,7 +1469,7 @@ pub const Cpu = struct {
};
return switch (arch) {
.arm, .armeb, .thumb, .thumbeb => &arm.cpu.generic,
- .aarch64, .aarch64_be, .aarch64_32 => &aarch64.cpu.generic,
+ .aarch64, .aarch64_be => &aarch64.cpu.generic,
.avr => &avr.cpu.avr2,
.bpfel, .bpfeb => &bpf.cpu.generic,
.hexagon => &hexagon.cpu.generic,
@@ -1700,7 +1698,6 @@ pub const DynamicLinker = struct {
.aarch64 => init("/lib/ld-linux-aarch64.so.1"),
.aarch64_be => init("/lib/ld-linux-aarch64_be.so.1"),
- .aarch64_32 => init("/lib/ld-linux-aarch64_32.so.1"),
.arm,
.armeb,
@@ -1835,7 +1832,7 @@ pub fn standardDynamicLinkerPath(target: Target) DynamicLinker {
pub fn ptrBitWidth_cpu_abi(cpu: Cpu, abi: Abi) u16 {
switch (abi) {
- .gnux32, .muslx32, .gnuabin32, .gnuilp32 => return 32,
+ .gnux32, .muslx32, .gnuabin32, .gnuilp32, .ilp32 => return 32,
.gnuabi64 => return 64,
else => {},
}
@@ -1866,7 +1863,6 @@ pub fn ptrBitWidth_cpu_abi(cpu: Cpu, abi: Abi) u16 {
.kalimba,
.lanai,
.wasm32,
- .aarch64_32,
.spirv32,
.loongarch32,
.dxil,
@@ -1923,7 +1919,6 @@ pub fn stackAlignment(target: Target) u16 {
=> 8,
.aarch64,
.aarch64_be,
- .aarch64_32,
.bpfeb,
.bpfel,
.mips64,
@@ -1954,7 +1949,6 @@ pub fn stackAlignment(target: Target) u16 {
pub fn charSignedness(target: Target) std.builtin.Signedness {
switch (target.cpu.arch) {
.aarch64,
- .aarch64_32,
.aarch64_be,
.arm,
.armeb,
@@ -2079,7 +2073,6 @@ pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
.riscv64,
.aarch64,
.aarch64_be,
- .aarch64_32,
.s390x,
.sparc,
.sparc64,
@@ -2183,7 +2176,6 @@ pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
.riscv64,
.aarch64,
.aarch64_be,
- .aarch64_32,
.s390x,
.mips64,
.mips64el,
@@ -2207,7 +2199,7 @@ pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
.long, .ulong => return 32,
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
+ .gnu, .gnuilp32, .ilp32, .cygnus => return 80,
else => return 64,
},
},
@@ -2221,7 +2213,7 @@ pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
},
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
+ .gnu, .gnuilp32, .ilp32, .cygnus => return 80,
else => return 64,
},
},
@@ -2240,7 +2232,7 @@ pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => switch (target.cpu.arch) {
- .x86, .arm, .aarch64_32 => return 32,
+ .x86, .arm => return 32,
.x86_64 => switch (target.abi) {
.gnux32, .muslx32 => return 32,
else => return 64,
@@ -2326,7 +2318,7 @@ pub fn c_type_alignment(target: Target, c_type: CType) u16 {
.windows, .uefi => switch (c_type) {
.longlong, .ulonglong, .double => return 8,
.longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
+ .gnu, .gnuilp32, .ilp32, .cygnus => return 4,
else => return 8,
},
else => {},
@@ -2375,7 +2367,6 @@ pub fn c_type_alignment(target: Target, c_type: CType) u16 {
.xtensa,
=> 4,
- .aarch64_32,
.amdgcn,
.bpfel,
.bpfeb,
@@ -2453,7 +2444,7 @@ pub fn c_type_preferred_alignment(target: Target, c_type: CType) u16 {
.x86 => switch (target.os.tag) {
.windows, .uefi => switch (c_type) {
.longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
+ .gnu, .gnuilp32, .ilp32, .cygnus => return 4,
else => return 8,
},
else => {},
@@ -2490,7 +2481,6 @@ pub fn c_type_preferred_alignment(target: Target, c_type: CType) u16 {
.avr,
.thumb,
.thumbeb,
- .aarch64_32,
.amdgcn,
.bpfel,
.bpfeb,
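The pointer-width consequence of the new ABI tag: ptrBitWidth_cpu_abi now reports 32 for ilp32 even on a plain aarch64 CPU. A minimal test sketch of my own (the generic CPU construction is an assumption, not part of the commit):

const std = @import("std");

test "ilp32 ABI selects 32-bit pointers on aarch64" {
    // Hypothetical generic aarch64 CPU; ptrBitWidth_cpu_abi consults the ABI
    // for ILP32-style ABIs before falling back to the architecture.
    const generic_aarch64: std.Target.Cpu = .{
        .arch = .aarch64,
        .model = &std.Target.aarch64.cpu.generic,
        .features = std.Target.aarch64.cpu.generic.features,
    };
    try std.testing.expectEqual(@as(u16, 32), std.Target.ptrBitWidth_cpu_abi(generic_aarch64, .ilp32));
    try std.testing.expectEqual(@as(u16, 64), std.Target.ptrBitWidth_cpu_abi(generic_aarch64, .none));
}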
lib/std/Thread.zig
@@ -1117,7 +1117,7 @@ const LinuxThreadImpl = struct {
[len] "r" (self.mapped.len),
: "memory"
),
- .aarch64, .aarch64_be, .aarch64_32 => asm volatile (
+ .aarch64, .aarch64_be => asm volatile (
\\ mov x8, #215
\\ mov x0, %[ptr]
\\ mov x1, %[len]
src/codegen/llvm.zig
@@ -43,9 +43,8 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
- .aarch64 => "aarch64",
+ .aarch64 => if (target.abi == .ilp32) "aarch64_32" else "aarch64",
.aarch64_be => "aarch64_be",
- .aarch64_32 => "aarch64_32",
.arc => "arc",
.avr => "avr",
.bpfel => "bpfel",
@@ -157,7 +156,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
try llvm_triple.append('-');
const llvm_abi = switch (target.abi) {
- .none => "unknown",
+ .none, .ilp32 => "unknown",
.gnu => "gnu",
.gnuabin32 => "gnuabin32",
.gnuabi64 => "gnuabi64",
@@ -259,7 +258,6 @@ pub fn targetArch(arch_tag: std.Target.Cpu.Arch) llvm.ArchType {
.armeb => .armeb,
.aarch64 => .aarch64,
.aarch64_be => .aarch64_be,
- .aarch64_32 => .aarch64_32,
.arc => .arc,
.avr => .avr,
.bpfel => .bpfel,
@@ -393,7 +391,6 @@ const DataLayoutBuilder = struct {
.pref = pref,
.idx = idx,
};
- if (self.target.cpu.arch == .aarch64_32) continue;
if (!info.force_in_data_layout and matches_default and
self.target.cpu.arch != .riscv64 and
self.target.cpu.arch != .loongarch64 and
@@ -483,7 +480,6 @@ const DataLayoutBuilder = struct {
=> &.{32},
.aarch64,
.aarch64_be,
- .aarch64_32,
.amdgcn,
.bpfeb,
.bpfel,
@@ -587,7 +583,6 @@ const DataLayoutBuilder = struct {
switch (self.target.cpu.arch) {
.aarch64,
.aarch64_be,
- .aarch64_32,
=> if (size == 128) {
abi = size;
pref = size;
@@ -705,7 +700,7 @@ const DataLayoutBuilder = struct {
force_pref = true;
},
.float => switch (self.target.cpu.arch) {
- .aarch64_32, .amdgcn => if (size == 128) {
+ .amdgcn => if (size == 128) {
abi = size;
pref = size;
},
@@ -10860,7 +10855,7 @@ pub const FuncGen = struct {
,
.constraints = "={rdx},{rax},0,~{cc},~{memory}",
},
- .aarch64, .aarch64_32, .aarch64_be => .{
+ .aarch64, .aarch64_be => .{
.template =
\\ror x12, x12, #3 ; ror x12, x12, #13
\\ror x12, x12, #51 ; ror x12, x12, #61
@@ -10932,7 +10927,7 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) Builder
.Fastcall => .x86_fastcallcc,
.Vectorcall => return switch (target.cpu.arch) {
.x86, .x86_64 => .x86_vectorcallcc,
- .aarch64, .aarch64_be, .aarch64_32 => .aarch64_vector_pcs,
+ .aarch64, .aarch64_be => .aarch64_vector_pcs,
else => unreachable,
},
.Thiscall => .x86_thiscallcc,
@@ -11929,7 +11924,7 @@ fn constraintAllowsRegister(constraint: []const u8) bool {
pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
switch (arch) {
- .aarch64, .aarch64_be, .aarch64_32 => {
+ .aarch64, .aarch64_be => {
llvm.LLVMInitializeAArch64Target();
llvm.LLVMInitializeAArch64TargetInfo();
llvm.LLVMInitializeAArch64TargetMC();
src/mingw.zig
@@ -224,7 +224,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const target_defines = switch (target.cpu.arch) {
.x86 => "#define DEF_I386\n",
.x86_64 => "#define DEF_X64\n",
- .arm, .armeb, .thumb, .thumbeb, .aarch64_32 => "#define DEF_ARM32\n",
+ .arm, .armeb, .thumb, .thumbeb => "#define DEF_ARM32\n",
.aarch64, .aarch64_be => "#define DEF_ARM64\n",
else => unreachable,
};
@@ -323,7 +323,7 @@ fn findDef(
const lib_path = switch (target.cpu.arch) {
.x86 => "lib32",
.x86_64 => "lib64",
- .arm, .armeb, .thumb, .thumbeb, .aarch64_32 => "libarm32",
+ .arm, .armeb, .thumb, .thumbeb => "libarm32",
.aarch64, .aarch64_be => "libarm64",
else => unreachable,
};
src/Sema.zig
@@ -10036,11 +10036,11 @@ fn finishFunc(
else => "x86",
},
.Vectorcall => switch (arch) {
- .x86, .aarch64, .aarch64_be, .aarch64_32 => null,
+ .x86, .aarch64, .aarch64_be => null,
else => "x86 and AArch64",
},
.APCS, .AAPCS, .AAPCSVFP => switch (arch) {
- .arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
+ .arm, .armeb, .aarch64, .aarch64_be, .thumb, .thumbeb => null,
else => "ARM",
},
.SysV, .Win64 => switch (arch) {
src/target.zig
@@ -78,7 +78,6 @@ pub fn hasValgrindSupport(target: std.Target) bool {
.x86,
.x86_64,
.aarch64,
- .aarch64_32,
.aarch64_be,
=> {
return target.os.tag == .linux or target.os.tag == .solaris or target.os.tag == .illumos or
@@ -115,7 +114,6 @@ pub fn hasLlvmSupport(target: std.Target, ofmt: std.Target.ObjectFormat) bool {
.armeb,
.aarch64,
.aarch64_be,
- .aarch64_32,
.arc,
.avr,
.bpfel,
@@ -268,7 +266,6 @@ pub fn hasRedZone(target: std.Target) bool {
.x86,
.aarch64,
.aarch64_be,
- .aarch64_32,
=> true,
else => false,
@@ -412,7 +409,7 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
pub fn defaultFunctionAlignment(target: std.Target) Alignment {
return switch (target.cpu.arch) {
.arm, .armeb => .@"4",
- .aarch64, .aarch64_32, .aarch64_be => .@"4",
+ .aarch64, .aarch64_be => .@"4",
.sparc, .sparcel, .sparc64 => .@"4",
.riscv64 => .@"2",
else => .@"1",
@@ -424,7 +421,6 @@ pub fn minFunctionAlignment(target: std.Target) Alignment {
.arm,
.armeb,
.aarch64,
- .aarch64_32,
.aarch64_be,
.riscv32,
.riscv64,
@@ -517,7 +513,7 @@ pub fn zigBackend(target: std.Target, use_llvm: bool) std.builtin.CompilerBacken
.arm, .armeb, .thumb, .thumbeb => .stage2_arm,
.x86_64 => .stage2_x86_64,
.x86 => .stage2_x86,
- .aarch64, .aarch64_be, .aarch64_32 => .stage2_aarch64,
+ .aarch64, .aarch64_be => .stage2_aarch64,
.riscv64 => .stage2_riscv64,
.sparc64 => .stage2_sparc64,
.spirv64 => .stage2_spirv64,
src/Type.zig
@@ -1634,7 +1634,6 @@ pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 {
.x86,
.aarch64,
.aarch64_be,
- .aarch64_32,
.riscv64,
.bpfel,
.bpfeb,
src/Zcu.zig
@@ -3284,7 +3284,6 @@ pub fn atomicPtrAlignment(
.aarch64,
.aarch64_be,
- .aarch64_32,
=> 128,
.x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64,
test/behavior/align.zig
@@ -175,7 +175,6 @@ test "alignment and size of structs with 128-bit fields" {
.x86,
.aarch64,
.aarch64_be,
- .aarch64_32,
.riscv64,
.bpfel,
.bpfeb,
test/behavior/atomics.zig
@@ -5,7 +5,7 @@ const expectEqual = std.testing.expectEqual;
const supports_128_bit_atomics = switch (builtin.cpu.arch) {
// TODO: Ideally this could be sync'd with the logic in Sema.
- .aarch64, .aarch64_be, .aarch64_32 => true,
+ .aarch64, .aarch64_be => true,
.x86_64 => std.Target.x86.featureSetHas(builtin.cpu.features, .cx16),
else => false,
};
test/behavior/vector.zig
@@ -742,7 +742,6 @@ test "vector shift operators" {
switch (builtin.target.cpu.arch) {
.aarch64_be,
- .aarch64_32,
.armeb,
.thumb,
.thumbeb,
test/llvm_targets.zig
@@ -3,9 +3,11 @@ const Cases = @import("src/Cases.zig");
const targets = [_]std.Target.Query{
.{ .cpu_arch = .aarch64, .os_tag = .freestanding, .abi = .none },
+ .{ .cpu_arch = .aarch64, .os_tag = .freestanding, .abi = .ilp32 },
.{ .cpu_arch = .aarch64, .os_tag = .ios, .abi = .none },
.{ .cpu_arch = .aarch64, .os_tag = .ios, .abi = .simulator },
.{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .none },
+ .{ .cpu_arch = .aarch64, .os_tag = .linux, .abi = .gnuilp32 },
.{ .cpu_arch = .aarch64, .os_tag = .macos, .abi = .none },
.{ .cpu_arch = .aarch64, .os_tag = .watchos, .abi = .none },
.{ .cpu_arch = .aarch64, .os_tag = .watchos, .abi = .simulator },
@@ -18,8 +20,6 @@ const targets = [_]std.Target.Query{
.{ .cpu_arch = .aarch64, .os_tag = .windows, .abi = .msvc },
.{ .cpu_arch = .aarch64_be, .os_tag = .freestanding, .abi = .none },
.{ .cpu_arch = .aarch64_be, .os_tag = .linux, .abi = .none },
- .{ .cpu_arch = .aarch64_32, .os_tag = .freestanding, .abi = .none },
- .{ .cpu_arch = .aarch64_32, .os_tag = .linux, .abi = .none },
.{ .cpu_arch = .amdgcn, .os_tag = .amdhsa, .abi = .none },
.{ .cpu_arch = .amdgcn, .os_tag = .amdpal, .abi = .none },
.{ .cpu_arch = .amdgcn, .os_tag = .linux, .abi = .none },