Commit 9bb1104e37
Changed files (21)
doc/langref.html.in
@@ -8088,6 +8088,35 @@ test "main" {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#}
{#header_close#}
+ {#header_open|@cVaArg#}
+ <pre>{#syntax#}@cVaArg(operand: *std.builtin.VaList, comptime T: type) T{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_arg{#endsyntax#}.
+ </p>
+ {#see_also|@cVaCopy|@cVaEnd|@cVaStart#}
+ {#header_close#}
+ {#header_open|@cVaCopy#}
+ <pre>{#syntax#}@cVaCopy(src: *std.builtin.VaList) std.builtin.VaList{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_copy{#endsyntax#}.
+ </p>
+ {#see_also|@cVaArg|@cVaEnd|@cVaStart#}
+ {#header_close#}
+ {#header_open|@cVaEnd#}
+ <pre>{#syntax#}@cVaEnd(src: *std.builtin.VaList) void{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_end{#endsyntax#}.
+ </p>
+ {#see_also|@cVaArg|@cVaCopy|@cVaStart#}
+ {#header_close#}
+ {#header_open|@cVaStart#}
+ <pre>{#syntax#}@cVaStart() std.builtin.VaList{#endsyntax#}</pre>
+ <p>
+ Implements the C macro {#syntax#}va_start{#endsyntax#}. Only valid inside a variadic function.
+ </p>
+ {#see_also|@cVaArg|@cVaCopy|@cVaEnd#}
+ {#header_close#}
+
{#header_open|@divExact#}
<pre>{#syntax#}@divExact(numerator: T, denominator: T) T{#endsyntax#}</pre>
<p>
@@ -10802,14 +10831,32 @@ test "variadic function" {
}
{#code_end#}
<p>
- Non extern variadic functions are currently not implemented, but there
- is an accepted proposal. See <a href="https://github.com/ziglang/zig/issues/515">#515</a>.
+ Variadic functions can be implemented using {#link|@cVaStart#}, {#link|@cVaEnd#}, {#link|@cVaArg#}, and {#link|@cVaCopy#}.
</p>
- {#code_begin|obj_err|non-extern function is variadic#}
-export fn printf(format: [*:0]const u8, ...) c_int {
- _ = format;
+ {#code_begin|test|defining_variadic_function#}
+const std = @import("std");
+const testing = std.testing;
+const builtin = @import("builtin");
- return 0;
+fn add(count: c_int, ...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ var i: usize = 0;
+ var sum: c_int = 0;
+ while (i < count) : (i += 1) {
+ sum += @cVaArg(&ap, c_int);
+ }
+ return sum;
+}
+
+test "defining a variadic function" {
+ // Variadic functions are currently disabled on some targets due to miscompilations.
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .windows and builtin.os.tag != .macos) return error.SkipZigTest;
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest;
+
+ try std.testing.expectEqual(@as(c_int, 0), add(0));
+ try std.testing.expectEqual(@as(c_int, 1), add(1, @as(c_int, 1)));
+ try std.testing.expectEqual(@as(c_int, 3), add(2, @as(c_int, 1), @as(c_int, 2)));
}
{#code_end#}
{#header_close#}
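For reference, a minimal sketch (not part of this commit) of the new builtins with a non-integer argument type; `sumF64` is a hypothetical name, and the same per-target skips shown in the documentation test above would apply:

const std = @import("std");

// Hypothetical helper: sums `count` f64 arguments passed through `...`.
fn sumF64(count: c_int, ...) callconv(.C) f64 {
    var ap = @cVaStart();
    defer @cVaEnd(&ap);
    var i: c_int = 0;
    var total: f64 = 0;
    while (i < count) : (i += 1) {
        total += @cVaArg(&ap, f64);
    }
    return total;
}

test "sumF64" {
    try std.testing.expectEqual(@as(f64, 6.0), sumF64(3, @as(f64, 1.0), @as(f64, 2.0), @as(f64, 3.0)));
}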
lib/std/builtin.zig
@@ -623,6 +623,87 @@ pub const CallModifier = enum {
compile_time,
};
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListAarch64 = extern struct {
+ __stack: *anyopaque,
+ __gr_top: *anyopaque,
+ __vr_top: *anyopaque,
+ __gr_offs: c_int,
+ __vr_offs: c_int,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListHexagon = extern struct {
+ __gpr: c_long,
+ __fpr: c_long,
+ __overflow_arg_area: *anyopaque,
+ __reg_save_area: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListPowerPc = extern struct {
+ gpr: u8,
+ fpr: u8,
+ reserved: c_ushort,
+ overflow_arg_area: *anyopaque,
+ reg_save_area: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListS390x = extern struct {
+ __current_saved_reg_area_pointer: *anyopaque,
+ __saved_reg_area_end_pointer: *anyopaque,
+ __overflow_area_pointer: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaListX86_64 = extern struct {
+ gp_offset: c_uint,
+ fp_offset: c_uint,
+ overflow_arg_area: *anyopaque,
+ reg_save_area: *anyopaque,
+};
+
+/// This data structure is used by the Zig language code generation and
+/// therefore must be kept in sync with the compiler implementation.
+pub const VaList = switch (builtin.cpu.arch) {
+ .aarch64 => switch (builtin.os.tag) {
+ .windows => *u8,
+ .ios, .macos, .tvos, .watchos => *u8,
+ else => @compileError("disabled due to miscompilations"), // VaListAarch64,
+ },
+ .arm => switch (builtin.os.tag) {
+ .ios, .macos, .tvos, .watchos => *u8,
+ else => *anyopaque,
+ },
+ .amdgcn => *u8,
+ .avr => *anyopaque,
+ .bpfel, .bpfeb => *anyopaque,
+ .hexagon => if (builtin.target.isMusl()) VaListHexagon else *u8,
+ .mips, .mipsel, .mips64, .mips64el => *anyopaque,
+ .riscv32, .riscv64 => *anyopaque,
+ .powerpc, .powerpcle => switch (builtin.os.tag) {
+ .ios, .macos, .tvos, .watchos, .aix => *u8,
+ else => VaListPowerPc,
+ },
+ .powerpc64, .powerpc64le => *u8,
+ .sparc, .sparcel, .sparc64 => *anyopaque,
+ .spirv32, .spirv64 => *anyopaque,
+ .s390x => VaListS390x,
+ .wasm32, .wasm64 => *anyopaque,
+ .x86 => *u8,
+ .x86_64 => switch (builtin.os.tag) {
+ .windows => @compileError("disabled due to miscompilations"), // *u8,
+ else => VaListX86_64,
+ },
+ else => @compileError("VaList not supported for this target yet"),
+};
+
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const PrefetchOptions = struct {
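A quick way to see which representation a given target selects (editorial sketch, not in the commit; the x86_64-linux expectation is an assumption read off the switch above):

const std = @import("std");
const builtin = @import("builtin");

// Sketch: on x86_64-linux, VaList should be the System V register-save-area
// struct rather than an opaque or byte pointer.
comptime {
    if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux) {
        if (std.builtin.VaList != std.builtin.VaListX86_64) {
            @compileError("unexpected VaList representation");
        }
    }
}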
src/arch/aarch64/CodeGen.zig
@@ -875,6 +875,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
src/arch/arm/CodeGen.zig
@@ -785,6 +785,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
src/arch/riscv64/CodeGen.zig
@@ -699,6 +699,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
src/arch/sparc64/CodeGen.zig
@@ -716,6 +716,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_set_has_value => @panic("TODO implement error_set_has_value"),
.vector_store_elem => @panic("TODO implement vector_store_elem"),
+ .c_va_arg => @panic("TODO implement c_va_arg"),
+ .c_va_copy => @panic("TODO implement c_va_copy"),
+ .c_va_end => @panic("TODO implement c_va_end"),
+ .c_va_start => @panic("TODO implement c_va_start"),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
src/arch/wasm/CodeGen.zig
@@ -1972,6 +1972,10 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.error_set_has_value,
.addrspace_cast,
.vector_store_elem,
+ .c_va_arg,
+ .c_va_copy,
+ .c_va_end,
+ .c_va_start,
=> |tag| return func.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
.add_optimized,
src/arch/x86_64/CodeGen.zig
@@ -787,6 +787,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
+ .c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return self.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return self.fail("TODO implement c_va_start", .{}),
+
.wasm_memory_size => unreachable,
.wasm_memory_grow => unreachable,
// zig fmt: on
src/codegen/llvm/bindings.zig
@@ -965,6 +965,9 @@ pub const Builder = opaque {
pub const buildAllocaInAddressSpace = ZigLLVMBuildAllocaInAddressSpace;
extern fn ZigLLVMBuildAllocaInAddressSpace(B: *Builder, Ty: *Type, AddressSpace: c_uint, Name: [*:0]const u8) *Value;
+
+ pub const buildVAArg = LLVMBuildVAArg;
+ extern fn LLVMBuildVAArg(*Builder, List: *Value, Ty: *Type, Name: [*:0]const u8) *Value;
};
pub const MDString = opaque {
src/codegen/c.zig
@@ -2909,6 +2909,11 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
.error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
.vector_store_elem => return f.fail("TODO: C backend: implement vector_store_elem", .{}),
+
+ .c_va_arg => return f.fail("TODO implement c_va_arg", .{}),
+ .c_va_copy => return f.fail("TODO implement c_va_copy", .{}),
+ .c_va_end => return f.fail("TODO implement c_va_end", .{}),
+ .c_va_start => return f.fail("TODO implement c_va_start", .{}),
// zig fmt: on
};
if (result_value == .local) {
src/codegen/llvm.zig
@@ -4699,6 +4699,11 @@ pub const FuncGen = struct {
.dbg_block_end => try self.airDbgBlockEnd(),
.dbg_var_ptr => try self.airDbgVarPtr(inst),
.dbg_var_val => try self.airDbgVarVal(inst),
+
+ .c_va_arg => try self.airCVaArg(inst),
+ .c_va_copy => try self.airCVaCopy(inst),
+ .c_va_end => try self.airCVaEnd(inst),
+ .c_va_start => try self.airCVaStart(inst),
// zig fmt: on
};
if (opt_value) |val| {
@@ -5136,6 +5141,94 @@ pub const FuncGen = struct {
return null;
}
+ fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const list = try self.resolveInst(ty_op.operand);
+ const arg_ty = self.air.getRefType(ty_op.ty);
+ const llvm_arg_ty = try self.dg.lowerType(arg_ty);
+
+ return self.builder.buildVAArg(list, llvm_arg_ty, "");
+ }
+
+ fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const src_list = try self.resolveInst(ty_op.operand);
+ const va_list_ty = self.air.getRefType(ty_op.ty);
+ const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+
+ const target = self.dg.module.getTarget();
+ const result_alignment = va_list_ty.abiAlignment(target);
+ const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);
+
+ const llvm_fn_name = "llvm.va_copy";
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const param_types = [_]*llvm.Type{
+ self.dg.context.intType(8).pointerType(0),
+ self.dg.context.intType(8).pointerType(0),
+ };
+ const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+
+ const args: [2]*llvm.Value = .{ dest_list, src_list };
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+
+ if (isByRef(va_list_ty)) {
+ return dest_list;
+ } else {
+ const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
+ loaded.setAlignment(result_alignment);
+ return loaded;
+ }
+ }
+
+ fn airCVaEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const un_op = self.air.instructions.items(.data)[inst].un_op;
+ const list = try self.resolveInst(un_op);
+
+ const llvm_fn_name = "llvm.va_end";
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const param_types = [_]*llvm.Type{self.dg.context.intType(8).pointerType(0)};
+ const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+ const args: [1]*llvm.Value = .{list};
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+ return null;
+ }
+
+ fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const va_list_ty = self.air.typeOfIndex(inst);
+ const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+
+ const target = self.dg.module.getTarget();
+ const result_alignment = va_list_ty.abiAlignment(target);
+ const list = self.buildAlloca(llvm_va_list_ty, result_alignment);
+
+ const llvm_fn_name = "llvm.va_start";
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const param_types = [_]*llvm.Type{self.dg.context.intType(8).pointerType(0)};
+ const fn_type = llvm.functionType(self.context.voidType(), &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+ const args: [1]*llvm.Value = .{list};
+ _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
+
+ if (isByRef(va_list_ty)) {
+ return list;
+ } else {
+ const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
+ loaded.setAlignment(result_alignment);
+ return loaded;
+ }
+ }
+
fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
src/Air.zig
@@ -741,6 +741,19 @@ pub const Inst = struct {
/// Uses the `vector_store_elem` field.
vector_store_elem,
+ /// Implements @cVaArg builtin.
+ /// Uses the `ty_op` field.
+ c_va_arg,
+ /// Implements @cVaCopy builtin.
+ /// Uses the `ty_op` field.
+ c_va_copy,
+ /// Implements @cVaEnd builtin.
+ /// Uses the `un_op` field.
+ c_va_end,
+ /// Implements @cVaStart builtin.
+ /// Uses the `ty` field.
+ c_va_start,
+
pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag {
switch (op) {
.lt => return if (optimized) .cmp_lt_optimized else .cmp_lt,
@@ -1092,6 +1105,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ret_ptr,
.arg,
.err_return_trace,
+ .c_va_start,
=> return datas[inst].ty,
.assembly,
@@ -1156,6 +1170,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.byte_swap,
.bit_reverse,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> return air.getRefType(datas[inst].ty_op.ty),
.loop,
@@ -1187,6 +1203,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.prefetch,
.set_err_return_trace,
.vector_store_elem,
+ .c_va_end,
=> return Type.void,
.ptrtoint,
src/AstGen.zig
@@ -42,6 +42,7 @@ string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.d
compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
/// The topmost block of the current function.
fn_block: ?*GenZir = null,
+fn_var_args: bool = false,
/// Maps string table indexes to the first `@import` ZIR instruction
/// that uses this string as the operand.
imports: std.AutoArrayHashMapUnmanaged(u32, Ast.TokenIndex) = .{},
@@ -3892,10 +3893,6 @@ fn fnDecl(
.noalias_bits = noalias_bits,
});
} else func: {
- if (is_var_args) {
- return astgen.failTok(fn_proto.ast.fn_token, "non-extern function is variadic", .{});
- }
-
// as a scope, fn_gz encloses ret_gz, but for instruction list, fn_gz stacks on ret_gz
fn_gz.instructions_top = ret_gz.instructions.items.len;
@@ -3903,6 +3900,10 @@ fn fnDecl(
astgen.fn_block = &fn_gz;
defer astgen.fn_block = prev_fn_block;
+ const prev_var_args = astgen.fn_var_args;
+ astgen.fn_var_args = is_var_args;
+ defer astgen.fn_var_args = prev_var_args;
+
astgen.advanceSourceCursorToNode(body_node);
const lbrace_line = astgen.source_line - decl_gz.decl_line;
const lbrace_column = astgen.source_column;
@@ -8384,6 +8385,46 @@ fn builtinCall(
});
return rvalue(gz, ri, result, node);
},
+ .c_va_arg => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaArg' outside function scope", .{});
+ }
+ const result = try gz.addExtendedPayload(.c_va_arg, Zir.Inst.BinNode{
+ .node = gz.nodeIndexToRelative(node),
+ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
+ .rhs = try typeExpr(gz, scope, params[1]),
+ });
+ return rvalue(gz, ri, result, node);
+ },
+ .c_va_copy => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaCopy' outside function scope", .{});
+ }
+ const result = try gz.addExtendedPayload(.c_va_copy, Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = try expr(gz, scope, .{ .rl = .none }, params[0]),
+ });
+ return rvalue(gz, ri, result, node);
+ },
+ .c_va_end => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaEnd' outside function scope", .{});
+ }
+ const result = try gz.addExtendedPayload(.c_va_end, Zir.Inst.UnNode{
+ .node = gz.nodeIndexToRelative(node),
+ .operand = try expr(gz, scope, .{ .rl = .none }, params[0]),
+ });
+ return rvalue(gz, ri, result, node);
+ },
+ .c_va_start => {
+ if (astgen.fn_block == null) {
+ return astgen.failNode(node, "'@cVaStart' outside function scope", .{});
+ }
+ if (!astgen.fn_var_args) {
+ return astgen.failNode(node, "'@cVaStart' in a non-variadic function", .{});
+ }
+ return rvalue(gz, ri, try gz.addNodeExtended(.c_va_start, node), node);
+ },
}
}
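The new `fn_var_args` flag is what makes source like the following hypothetical sketch (not in the commit) fail with the error emitted above:

// Rejected by AstGen: '@cVaStart' in a non-variadic function.
fn notVariadic() callconv(.C) c_int {
    var ap = @cVaStart();
    defer @cVaEnd(&ap);
    return @cVaArg(&ap, c_int);
}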
src/BuiltinFn.zig
@@ -30,6 +30,10 @@ pub const Tag = enum {
compile_log,
ctz,
c_undef,
+ c_va_arg,
+ c_va_copy,
+ c_va_end,
+ c_va_start,
div_exact,
div_floor,
div_trunc,
@@ -354,6 +358,30 @@ pub const list = list: {
.param_count = 1,
},
},
+ .{
+ "@cVaArg", .{
+ .tag = .c_va_arg,
+ .param_count = 2,
+ },
+ },
+ .{
+ "@cVaCopy", .{
+ .tag = .c_va_copy,
+ .param_count = 1,
+ },
+ },
+ .{
+ "@cVaEnd", .{
+ .tag = .c_va_end,
+ .param_count = 1,
+ },
+ },
+ .{
+ "@cVaStart", .{
+ .tag = .c_va_start,
+ .param_count = 0,
+ },
+ },
.{
"@divExact",
.{
src/Liveness.zig
@@ -238,6 +238,7 @@ pub fn categorizeOperand(
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
+ .c_va_start,
=> return .none,
.fence => return .write,
@@ -279,6 +280,8 @@ pub fn categorizeOperand(
.splat,
.error_set_has_value,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> {
const o = air_datas[inst].ty_op;
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
@@ -322,6 +325,7 @@ pub fn categorizeOperand(
.trunc_float,
.neg,
.cmp_lt_errors_len,
+ .c_va_end,
=> {
const o = air_datas[inst].un_op;
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
@@ -857,6 +861,7 @@ fn analyzeInst(
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
+ .c_va_start,
=> return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }),
.not,
@@ -898,6 +903,8 @@ fn analyzeInst(
.splat,
.error_set_has_value,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> {
const o = inst_datas[inst].ty_op;
return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none });
@@ -936,6 +943,7 @@ fn analyzeInst(
.neg_optimized,
.cmp_lt_errors_len,
.set_err_return_trace,
+ .c_va_end,
=> {
const operand = inst_datas[inst].un_op;
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
src/print_air.zig
@@ -191,6 +191,7 @@ const Writer = struct {
.neg_optimized,
.cmp_lt_errors_len,
.set_err_return_trace,
+ .c_va_end,
=> try w.writeUnOp(s, inst),
.breakpoint,
@@ -205,6 +206,7 @@ const Writer = struct {
.ret_ptr,
.arg,
.err_return_trace,
+ .c_va_start,
=> try w.writeTy(s, inst),
.not,
@@ -246,6 +248,8 @@ const Writer = struct {
.bit_reverse,
.error_set_has_value,
.addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
=> try w.writeTyOp(s, inst),
.block,
src/print_zir.zig
@@ -465,6 +465,7 @@ const Writer = struct {
.frame,
.frame_address,
.breakpoint,
+ .c_va_start,
=> try self.writeExtNode(stream, extended),
.builtin_src => {
@@ -504,6 +505,8 @@ const Writer = struct {
.error_to_int,
.int_to_error,
.reify,
+ .c_va_copy,
+ .c_va_end,
=> {
const inst_data = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(inst_data.node);
@@ -518,6 +521,7 @@ const Writer = struct {
.wasm_memory_grow,
.prefetch,
.addrspace_cast,
+ .c_va_arg,
=> {
const inst_data = self.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(inst_data.node);
src/Sema.zig
@@ -1148,6 +1148,10 @@ fn analyzeBodyInner(
.builtin_async_call => try sema.zirBuiltinAsyncCall( block, extended),
.cmpxchg => try sema.zirCmpxchg( block, extended),
.addrspace_cast => try sema.zirAddrSpaceCast( block, extended),
+ .c_va_arg => try sema.zirCVaArg( block, extended),
+ .c_va_copy => try sema.zirCVaCopy( block, extended),
+ .c_va_end => try sema.zirCVaEnd( block, extended),
+ .c_va_start => try sema.zirCVaStart( block, extended),
// zig fmt: on
.fence => {
@@ -6427,6 +6431,11 @@ fn analyzeCall(
else => unreachable,
};
if (!is_comptime_call and module_fn.state == .sema_failure) return error.AnalysisFail;
+ if (func_ty_info.is_var_args) {
+ return sema.fail(block, call_src, "{s} call of variadic function", .{
+ @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
+ });
+ }
// Analyze the ZIR. The same ZIR gets analyzed into a runtime function
// or an inlined call depending on what union tag the `label` field is
@@ -8404,6 +8413,7 @@ fn funcCommon(
) CompileError!Air.Inst.Ref {
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
+ const func_src = LazySrcLoc.nodeOffset(src_node_offset);
var is_generic = bare_return_type.tag() == .generic_poison or
alignment == null or
@@ -8411,6 +8421,15 @@ fn funcCommon(
section == .generic or
cc == null;
+ if (var_args) {
+ if (is_generic) {
+ return sema.fail(block, func_src, "generic function cannot be variadic", .{});
+ }
+ if (cc.? != .C) {
+ return sema.fail(block, cc_src, "variadic function must have 'C' calling convention", .{});
+ }
+ }
+
var destroy_fn_on_error = false;
const new_func: *Module.Fn = new_func: {
if (!has_body) break :new_func undefined;
@@ -19054,6 +19073,79 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
});
}
+fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
+ const va_list_ty = try sema.getBuiltinType("VaList");
+ const va_list_ptr = try Type.ptr(sema.arena, sema.mod, .{
+ .pointee_type = va_list_ty,
+ .mutable = true,
+ .@"addrspace" = .generic,
+ });
+
+ const inst = try sema.resolveInst(zir_ref);
+ return sema.coerce(block, va_list_ptr, inst, src);
+}
+
+fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
+ const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+ const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
+
+ const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.lhs);
+ const arg_ty = try sema.resolveType(block, ty_src, extra.rhs);
+
+ if (!try sema.validateExternType(arg_ty, .param_ty)) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, ty_src, "cannot get '{}' from variadic argument", .{arg_ty.fmt(sema.mod)});
+ errdefer msg.destroy(sema.gpa);
+
+ const src_decl = sema.mod.declPtr(block.src_decl);
+ try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), arg_ty, .param_ty);
+
+ try sema.addDeclaredHereNote(msg, arg_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addTyOp(.c_va_arg, arg_ty, va_list_ref);
+}
+
+fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
+ const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+
+ const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
+ const va_list_ty = try sema.getBuiltinType("VaList");
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
+}
+
+fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
+ const src = LazySrcLoc.nodeOffset(extra.node);
+ const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
+
+ const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addUnOp(.c_va_end, va_list_ref);
+}
+
+fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand));
+
+ const va_list_ty = try sema.getBuiltinType("VaList");
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addInst(.{
+ .tag = .c_va_start,
+ .data = .{ .ty = va_list_ty },
+ });
+}
+
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -21558,7 +21650,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
else => |e| return e,
};
break :blk cc_tv.val.toEnum(std.builtin.CallingConvention);
- } else std.builtin.CallingConvention.Unspecified;
+ } else if (sema.owner_decl.is_exported and has_body)
+ .C
+ else
+ .Unspecified;
const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: {
const body_len = sema.code.extra[extra_index];
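Because `zirCVaArg` runs the requested type through `validateExternType`, source like the following hypothetical sketch should be rejected; the specific type in the message is an assumption:

fn badArg(count: c_int, ...) callconv(.C) usize {
    _ = count;
    var ap = @cVaStart();
    defer @cVaEnd(&ap);
    // expected: error: cannot get '[]const u8' from variadic argument
    const s = @cVaArg(&ap, []const u8);
    return s.len;
}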
src/Zir.zig
@@ -1993,6 +1993,18 @@ pub const Inst = struct {
/// Implement the builtin `@addrSpaceCast`
/// `Operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
addrspace_cast,
+ /// Implement builtin `@cVaArg`.
+ /// `operand` is payload index to `BinNode`.
+ c_va_arg,
+ /// Implement builtin `@cVaCopy`.
+ /// `operand` is payload index to `UnNode`.
+ c_va_copy,
+ /// Implement builtin `@cVaEnd`.
+ /// `operand` is payload index to `UnNode`.
+ c_va_end,
+ /// Implement builtin `@cVaStart`.
+ /// `operand` is `src_node: i32`.
+ c_va_start,
pub const InstData = struct {
opcode: Extended,
test/behavior/var_args.zig
@@ -1,5 +1,6 @@
const builtin = @import("builtin");
-const expect = @import("std").testing.expect;
+const std = @import("std");
+const expect = std.testing.expect;
fn add(args: anytype) i32 {
var sum = @as(i32, 0);
@@ -91,3 +92,109 @@ test "pass zero length array to var args param" {
fn doNothingWithFirstArg(args: anytype) void {
_ = args[0];
}
+
+test "simple variadic function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .windows and builtin.os.tag != .macos) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn simple(...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ return @cVaArg(&ap, c_int);
+ }
+
+ fn add(count: c_int, ...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ var i: usize = 0;
+ var sum: c_int = 0;
+ while (i < count) : (i += 1) {
+ sum += @cVaArg(&ap, c_int);
+ }
+ return sum;
+ }
+ };
+
+ try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0)));
+ try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024)));
+ try std.testing.expectEqual(@as(c_int, 0), S.add(0));
+ try std.testing.expectEqual(@as(c_int, 1), S.add(1, @as(c_int, 1)));
+ try std.testing.expectEqual(@as(c_int, 3), S.add(2, @as(c_int, 1), @as(c_int, 2)));
+}
+
+test "variadic functions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .windows and builtin.os.tag != .macos) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn printf(list_ptr: *std.ArrayList(u8), format: [*:0]const u8, ...) callconv(.C) void {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ vprintf(list_ptr, format, &ap);
+ }
+
+ fn vprintf(
+ list: *std.ArrayList(u8),
+ format: [*:0]const u8,
+ ap: *std.builtin.VaList,
+ ) callconv(.C) void {
+ for (std.mem.span(format)) |c| switch (c) {
+ 's' => {
+ const arg = @cVaArg(ap, [*:0]const u8);
+ list.writer().print("{s}", .{arg}) catch return;
+ },
+ 'd' => {
+ const arg = @cVaArg(ap, c_int);
+ list.writer().print("{d}", .{arg}) catch return;
+ },
+ else => unreachable,
+ };
+ }
+ };
+
+ var list = std.ArrayList(u8).init(std.testing.allocator);
+ defer list.deinit();
+ S.printf(&list, "dsd", @as(c_int, 1), @as([*:0]const u8, "hello"), @as(c_int, 5));
+ try std.testing.expectEqualStrings("1hello5", list.items);
+}
+
+test "copy VaList" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .windows and builtin.os.tag != .macos) return error.SkipZigTest; // TODO
+ if (builtin.cpu.arch == .x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+
+ const S = struct {
+ fn add(count: c_int, ...) callconv(.C) c_int {
+ var ap = @cVaStart();
+ defer @cVaEnd(&ap);
+ var copy = @cVaCopy(&ap);
+ defer @cVaEnd(&copy);
+ var i: usize = 0;
+ var sum: c_int = 0;
+ while (i < count) : (i += 1) {
+ sum += @cVaArg(&ap, c_int);
+ sum += @cVaArg(&copy, c_int) * 2;
+ }
+ return sum;
+ }
+ };
+
+ try std.testing.expectEqual(@as(c_int, 0), S.add(0));
+ try std.testing.expectEqual(@as(c_int, 3), S.add(1, @as(c_int, 1)));
+ try std.testing.expectEqual(@as(c_int, 9), S.add(2, @as(c_int, 1), @as(c_int, 2)));
+}
test/cases/compile_errors/invalid_variadic_function.zig
@@ -0,0 +1,12 @@
+fn foo(...) void {}
+fn bar(a: anytype, ...) callconv(a) void {}
+
+comptime { _ = foo; }
+comptime { _ = bar; }
+
+// error
+// backend=stage2
+// target=native
+//
+// :1:1: error: variadic function must have 'C' calling convention
+// :2:1: error: generic function cannot be variadic
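For contrast with the rejected declarations above, a sketch of forms that should still be accepted under these rules (hypothetical, not part of the test file):

// Accepted: explicit 'C' calling convention, no generic parameters.
fn ok(...) callconv(.C) void {}
comptime { _ = ok; }

// Unchanged by this commit: extern prototypes may still be variadic.
extern fn printf(format: [*:0]const u8, ...) c_int;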