Commit 9ae78a5890
src/arch/arm/abi.zig
@@ -2,6 +2,149 @@ const std = @import("std");
const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
+const Type = @import("../../type.zig").Type;
+
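+/// How a value is lowered for the 32-bit ARM C calling convention: passed
+/// directly (`byval`), indirectly through memory, or as an array of 32- or
+/// 64-bit integer chunks. `none` marks types with no runtime bits.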
+pub const Class = union(enum) {
+ memory,
+ byval,
+ none,
+ i32_array: u8,
+ i64_array: u8,
+
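+ /// Split `total_size` bits into `arr_size`-bit chunks, rounding up, and
+ /// return the matching array classification.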
+ fn arrSize(total_size: u64, arr_size: u64) Class {
+ const count = @intCast(u8, std.mem.alignForward(total_size, arr_size) / arr_size);
+ if (arr_size == 32) {
+ return .{ .i32_array = count };
+ } else {
+ return .{ .i64_array = count };
+ }
+ }
+};
+
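+/// Classify how a value of type `ty` is passed or returned under the 32-bit
+/// ARM C calling convention: small scalars and homogeneous float aggregates
+/// of at most four members go by value, oversized values go through memory,
+/// and remaining composites are split into 32- or 64-bit integer chunks.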
+pub fn classifyType(ty: Type, target: std.Target) Class {
+ if (!ty.hasRuntimeBitsIgnoreComptime()) return .none;
+
+ var maybe_float_bits: ?u16 = null;
+ const max_byval_size = 512;
+ switch (ty.zigTypeTag()) {
+ .Struct => {
+ const bit_size = ty.bitSize(target);
+ if (ty.containerLayout() == .Packed) {
+ if (bit_size > 64) return .memory;
+ return .byval;
+ }
+ if (bit_size > max_byval_size) return .memory;
+ const float_count = countFloats(ty, target, &maybe_float_bits);
+ if (float_count <= byval_float_count) return .byval;
+
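+ // Any field whose size or alignment exceeds 32 forces 64-bit chunks;
+ // otherwise 32-bit chunks suffice.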
+ const fields = ty.structFieldCount();
+ var i: u32 = 0;
+ while (i < fields) : (i += 1) {
+ const field_ty = ty.structFieldType(i);
+ const field_alignment = ty.structFieldAlign(i, target);
+ const field_size = field_ty.bitSize(target);
+ if (field_size > 32 or field_alignment > 32) {
+ return Class.arrSize(bit_size, 64);
+ }
+ }
+ return Class.arrSize(bit_size, 32);
+ },
+ .Union => {
+ const bit_size = ty.bitSize(target);
+ if (ty.containerLayout() == .Packed) {
+ if (bit_size > 64) return .memory;
+ return .byval;
+ }
+ if (bit_size > max_byval_size) return .memory;
+ const float_count = countFloats(ty, target, &maybe_float_bits);
+ if (float_count <= byval_float_count) return .byval;
+
+ for (ty.unionFields().values()) |field| {
+ if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) {
+ return Class.arrSize(bit_size, 64);
+ }
+ }
+ return Class.arrSize(bit_size, 32);
+ },
+ .Int, .Enum => {
+ const bit_size = ty.bitSize(target);
+ if (bit_size > 64) return .memory;
+ return .byval;
+ },
+ .ErrorSet, .Vector, .Float, .Bool => {
+ const bit_size = ty.bitSize(target);
+ if (bit_size > 128) return .memory;
+ return .byval;
+ },
+ .Optional => {
+ std.debug.assert(ty.isPtrLikeOptional());
+ return .byval;
+ },
+ .Pointer => {
+ std.debug.assert(!ty.isSlice());
+ return .byval;
+ },
+ .ErrorUnion,
+ .Frame,
+ .AnyFrame,
+ .NoReturn,
+ .Void,
+ .Type,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .BoundFn,
+ .Fn,
+ .Opaque,
+ .EnumLiteral,
+ .Array,
+ => unreachable,
+ }
+}
+
+const byval_float_count = 4;
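+/// Recursively count the float members of `ty`, requiring them all to share
+/// one width (f32 or f64). Returns `maxInt(u32)` when the type is not such a
+/// homogeneous float aggregate or holds more than `byval_float_count` members.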
+fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
+ const invalid = std.math.maxInt(u32);
+ switch (ty.zigTypeTag()) {
+ .Union => {
+ const fields = ty.unionFields();
+ var max_count: u32 = 0;
+ for (fields.values()) |field| {
+ const field_count = countFloats(field.ty, target, maybe_float_bits);
+ if (field_count == invalid) return invalid;
+ if (field_count > max_count) max_count = field_count;
+ if (max_count > byval_float_count) return invalid;
+ }
+ return max_count;
+ },
+ .Struct => {
+ const fields_len = ty.structFieldCount();
+ var count: u32 = 0;
+ var i: u32 = 0;
+ while (i < fields_len) : (i += 1) {
+ const field_ty = ty.structFieldType(i);
+ const field_count = countFloats(field_ty, target, maybe_float_bits);
+ if (field_count == invalid) return invalid;
+ count += field_count;
+ if (count > byval_float_count) return invalid;
+ }
+ return count;
+ },
+ .Float => {
+ const float_bits = maybe_float_bits.* orelse {
+ const float_bits = ty.floatBits(target);
+ if (float_bits != 32 and float_bits != 64) return invalid;
+ maybe_float_bits.* = float_bits;
+ return 1;
+ };
+ if (ty.floatBits(target) == float_bits) return 1;
+ return invalid;
+ },
+ .Void => return 0,
+ else => return invalid,
+ }
+}
pub const callee_preserved_regs = [_]Register{ .r4, .r5, .r6, .r7, .r8, .r10 };
pub const caller_preserved_regs = [_]Register{ .r0, .r1, .r2, .r3 };
src/codegen/llvm.zig
@@ -24,6 +24,7 @@ const CType = @import("../type.zig").CType;
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
+const arm_c_abi = @import("../arch/arm/abi.zig");
const Error = error{ OutOfMemory, CodegenFail };
@@ -1130,6 +1131,25 @@ pub const Object = struct {
const casted_ptr = builder.buildBitCast(arg_ptr, param.typeOf().pointerType(0), "");
_ = builder.buildStore(param, casted_ptr);
+ if (isByRef(param_ty)) {
+ try args.append(arg_ptr);
+ } else {
+ const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
+ load_inst.setAlignment(alignment);
+ try args.append(load_inst);
+ }
+ },
+ .i32_array, .i64_array => {
+ const param_ty = fn_info.param_types[it.zig_index - 1];
+ const param_llvm_ty = try dg.lowerType(param_ty);
+ const param = llvm_func.getParam(llvm_arg_i);
+ llvm_arg_i += 1;
+
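+ // The argument arrives as an [N x i32] or [N x i64] chunk array; spill it
+ // into a stack slot typed and aligned for the Zig parameter type.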
+ const alignment = param_ty.abiAlignment(target);
+ const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty, alignment, target);
+ const casted_ptr = builder.buildBitCast(arg_ptr, param.typeOf().pointerType(0), "");
+ _ = builder.buildStore(param, casted_ptr);
+
if (isByRef(param_ty)) {
try args.append(arg_ptr);
} else {
@@ -2578,6 +2598,8 @@ pub const DeclGen = struct {
.multiple_llvm_float,
.as_u16,
.float_array,
+ .i32_array,
+ .i64_array,
=> continue,
.slice => unreachable, // extern functions do not support slice types.
@@ -3132,6 +3154,11 @@ pub const DeclGen = struct {
const arr_ty = float_ty.arrayType(field_count);
try llvm_params.append(arr_ty);
},
+ .i32_array, .i64_array => |arr_len| {
+ const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
+ const arr_ty = dg.context.intType(elem_size).arrayType(arr_len);
+ try llvm_params.append(arr_ty);
+ },
};
return llvm.functionType(
@@ -4821,6 +4848,25 @@ pub const FuncGen = struct {
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
},
+ .i32_array, .i64_array => |arr_len| {
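+ // Pass the aggregate as a chunk array: make sure it sits in memory, then
+ // reinterpret that memory as [N x i32] or [N x i64] and load it.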
+ const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
+ const arg = args[it.zig_index - 1];
+ const arg_ty = self.air.typeOf(arg);
+ var llvm_arg = try self.resolveInst(arg);
+ if (!isByRef(arg_ty)) {
+ const p = self.buildAlloca(llvm_arg.typeOf(), null);
+ const store_inst = self.builder.buildStore(llvm_arg, p);
+ store_inst.setAlignment(arg_ty.abiAlignment(target));
+ llvm_arg = p;
+ }
+
+ const array_llvm_ty = self.dg.context.intType(elem_size).arrayType(arr_len);
+ const casted = self.builder.buildBitCast(llvm_arg, array_llvm_ty.pointerType(0), "");
+ const alignment = arg_ty.abiAlignment(target);
+ const load_inst = self.builder.buildLoad(array_llvm_ty, casted, "");
+ load_inst.setAlignment(alignment);
+ try llvm_args.append(load_inst);
+ },
};
const call = self.builder.buildCall(
@@ -10068,6 +10114,11 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
},
.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target)[0] == .memory,
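+ // On 32-bit ARM, only values passed by value or reduced to a single 32-bit
+ // chunk are returned directly; everything else is returned through an sret
+ // pointer.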
+ .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target)) {
+ .memory, .i64_array => return true,
+ .i32_array => |size| return size != 1,
+ .none, .byval => return false,
+ },
else => return false, // TODO investigate C ABI for other architectures
},
else => return false,
@@ -10195,6 +10246,18 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
return dg.context.intType(64).arrayType(2);
},
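+ // Mirrors firstParamSRet: results returned through the sret pointer make
+ // the function return void; a single 32-bit chunk is returned as i32.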
+ .arm, .armeb => {
+ switch (arm_c_abi.classifyType(fn_info.return_type, target)) {
+ .memory, .i64_array => return dg.context.voidType(),
+ .i32_array => |len| if (len == 1) {
+ return dg.context.intType(32);
+ } else {
+ return dg.context.voidType();
+ },
+ .byval => return dg.lowerType(fn_info.return_type),
+ .none => unreachable,
+ }
+ },
// TODO investigate C ABI for other architectures
else => return dg.lowerType(fn_info.return_type),
}
@@ -10223,6 +10286,8 @@ const ParamTypeIterator = struct {
slice,
as_u16,
float_array: u8,
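+ // Composites split into N 32-bit or 64-bit integer chunks (32-bit ARM).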
+ i32_array: u8,
+ i64_array: u8,
};
pub fn next(it: *ParamTypeIterator) ?Lowering {
@@ -10410,6 +10475,20 @@ const ParamTypeIterator = struct {
it.llvm_types_buffer[1] = 64;
return .multiple_llvm_ints;
},
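+ // Translate the ARM classification into the generic lowering: memory
+ // becomes a byref pointer carrying the byval attribute, and split
+ // composites become i32/i64 chunk arrays.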
+ .arm, .armeb => {
+ it.zig_index += 1;
+ it.llvm_index += 1;
+ switch (arm_c_abi.classifyType(ty, it.target)) {
+ .none => unreachable,
+ .memory => {
+ it.byval_attr = true;
+ return .byref;
+ },
+ .byval => return .byval,
+ .i32_array => |size| return Lowering{ .i32_array = size },
+ .i64_array => |size| return Lowering{ .i64_array = size },
+ }
+ },
// TODO investigate C ABI for other architectures
else => {
it.zig_index += 1;
test/c_abi/cfuncs.c
@@ -32,6 +32,10 @@ static void assert_or_panic(bool ok) {
# define ZIG_NO_COMPLEX
#endif
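+/* Skip the complex-number C ABI tests on 32-bit ARM, matching
+   complex_abi_compatible in test/c_abi/main.zig. */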
+#ifdef __arm__
+# define ZIG_NO_COMPLEX
+#endif
+
#ifndef ZIG_NO_I128
struct i128 {
__int128 value;
test/c_abi/main.zig
@@ -167,7 +167,8 @@ extern fn c_cmultd_comp(a_r: f64, a_i: f64, b_r: f64, b_i: f64) ComplexDouble;
extern fn c_cmultf(a: ComplexFloat, b: ComplexFloat) ComplexFloat;
extern fn c_cmultd(a: ComplexDouble, b: ComplexDouble) ComplexDouble;
-const complex_abi_compatible = builtin.cpu.arch != .i386 and !builtin.cpu.arch.isMIPS();
+const complex_abi_compatible = builtin.cpu.arch != .i386 and !builtin.cpu.arch.isMIPS() and
+ !builtin.cpu.arch.isARM();
test "C ABI complex float" {
if (!complex_abi_compatible) return error.SkipZigTest;
@@ -320,7 +321,6 @@ extern fn c_ret_med_struct_mixed() MedStructMixed;
test "C ABI medium struct of ints and floats" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
- if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
@@ -353,7 +353,6 @@ extern fn c_ret_small_struct_ints() SmallStructInts;
test "C ABI small struct of ints" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
- if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
@@ -435,7 +434,6 @@ extern fn c_split_struct_ints(SplitStructInt) void;
test "C ABI split struct of ints" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
- if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
@@ -463,7 +461,6 @@ extern fn c_ret_split_struct_mixed() SplitStructMixed;
test "C ABI split struct of ints and floats" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
- if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
@@ -543,7 +540,6 @@ const Vector5 = extern struct {
extern fn c_big_struct_floats(Vector5) void;
test "C ABI structs of floats as parameter" {
- if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
@@ -725,7 +721,6 @@ extern fn c_ret_struct_with_array() StructWithArray;
test "Struct with array as padding." {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
- if (comptime builtin.cpu.arch.isARM()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;