Commit 42aa1ea115
Changed files (14)
src/codegen/llvm/bindings.zig
@@ -632,6 +632,25 @@ pub const Builder = opaque {
DestTy: *const Type,
Name: [*:0]const u8,
) *const Value;
+
+ pub const buildMemSet = LLVMBuildMemSet;
+ extern fn LLVMBuildMemSet(
+ B: *const Builder,
+ Ptr: *const Value,
+ Val: *const Value,
+ Len: *const Value,
+ Align: c_uint,
+ ) *const Value;
+
+ pub const buildMemCpy = LLVMBuildMemCpy;
+ extern fn LLVMBuildMemCpy(
+ B: *const Builder,
+ Dst: *const Value,
+ DstAlign: c_uint,
+ Src: *const Value,
+ SrcAlign: c_uint,
+ Size: *const Value,
+ ) *const Value;
};
pub const IntPredicate = enum(c_uint) {
src/codegen/c.zig
@@ -953,6 +953,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.cmpxchg_strong => try airCmpxchg(f, inst, "strong"),
.atomic_rmw => try airAtomicRmw(f, inst),
.atomic_load => try airAtomicLoad(f, inst),
+ .memset => try airMemset(f, inst),
+ .memcpy => try airMemcpy(f, inst),
.int_to_float,
.float_to_int,
@@ -2005,8 +2007,12 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const atomic_load = f.air.instructions.items(.data)[inst].atomic_load;
- const inst_ty = f.air.typeOfIndex(inst);
const ptr = try f.resolveInst(atomic_load.ptr);
+ const ptr_ty = f.air.typeOf(atomic_load.ptr);
+ if (!ptr_ty.isVolatilePtr() and f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const inst_ty = f.air.typeOfIndex(inst);
const local = try f.allocLocal(inst_ty, .Const);
const writer = f.object.writer();
@@ -2036,6 +2042,44 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
return local;
}
+fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try f.resolveInst(pl_op.operand);
+ const value = try f.resolveInst(extra.lhs);
+ const len = try f.resolveInst(extra.rhs);
+ const writer = f.object.writer();
+
+ try writer.writeAll("memset(");
+ try f.writeCValue(writer, dest_ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, value);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, len);
+ try writer.writeAll(");\n");
+
+ return CValue.none;
+}
+
+fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
+ const pl_op = f.air.instructions.items(.data)[inst].pl_op;
+ const extra = f.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try f.resolveInst(pl_op.operand);
+ const src_ptr = try f.resolveInst(extra.lhs);
+ const len = try f.resolveInst(extra.rhs);
+ const writer = f.object.writer();
+
+ try writer.writeAll("memcpy(");
+ try f.writeCValue(writer, dest_ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, src_ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, len);
+ try writer.writeAll(");\n");
+
+ return CValue.none;
+}
+
fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
return switch (order) {
.Unordered => "memory_order_relaxed",
src/codegen/llvm.zig
@@ -1279,6 +1279,8 @@ pub const FuncGen = struct {
.fence => try self.airFence(inst),
.atomic_rmw => try self.airAtomicRmw(inst),
.atomic_load => try self.airAtomicLoad(inst),
+ .memset => try self.airMemset(inst),
+ .memcpy => try self.airMemcpy(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -2426,6 +2428,8 @@ pub const FuncGen = struct {
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.air.typeOf(atomic_load.ptr);
+ if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst))
+ return null;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const operand_ty = ptr_ty.elemType();
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
@@ -2468,6 +2472,55 @@ pub const FuncGen = struct {
return null;
}
+ fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try self.resolveInst(pl_op.operand);
+ const ptr_ty = self.air.typeOf(pl_op.operand);
+ const value = try self.resolveInst(extra.lhs);
+ const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndef() else false;
+ const len = try self.resolveInst(extra.rhs);
+ const u8_llvm_ty = self.context.intType(8);
+ const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
+ const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
+ const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value;
+ const target = self.dg.module.getTarget();
+ const dest_ptr_align = ptr_ty.ptrAlignment(target);
+ const memset = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align);
+ memset.setVolatile(llvm.Bool.fromBool(ptr_ty.isVolatilePtr()));
+
+ if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) {
+ // TODO generate valgrind client request to mark byte range as undefined
+ // see gen_valgrind_undef() in codegen.cpp
+ }
+ return null;
+ }
+
+ fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
+ const dest_ptr = try self.resolveInst(pl_op.operand);
+ const dest_ptr_ty = self.air.typeOf(pl_op.operand);
+ const src_ptr = try self.resolveInst(extra.lhs);
+ const src_ptr_ty = self.air.typeOf(extra.lhs);
+ const len = try self.resolveInst(extra.rhs);
+ const u8_llvm_ty = self.context.intType(8);
+ const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0);
+ const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, "");
+ const src_ptr_u8 = self.builder.buildBitCast(src_ptr, ptr_u8_llvm_ty, "");
+ const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
+ const target = self.dg.module.getTarget();
+ const memcpy = self.builder.buildMemCpy(
+ dest_ptr_u8,
+ dest_ptr_ty.ptrAlignment(target),
+ src_ptr_u8,
+ src_ptr_ty.ptrAlignment(target),
+ len,
+ );
+ memcpy.setVolatile(llvm.Bool.fromBool(is_volatile));
+ return null;
+ }
+
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
src/link/C/zig.h
@@ -126,6 +126,7 @@
#define int128_t __int128
#define uint128_t unsigned __int128
ZIG_EXTERN_C void *memcpy (void *ZIG_RESTRICT, const void *ZIG_RESTRICT, size_t);
+ZIG_EXTERN_C void *memset (void *, int, size_t);
static inline uint8_t zig_addw_u8(uint8_t lhs, uint8_t rhs, uint8_t max) {
uint8_t thresh = max - rhs;
src/Air.zig
@@ -321,6 +321,19 @@ pub const Inst = struct {
/// Uses the `ty_op` field.
int_to_float,
+ /// Given dest ptr, value, and len, set all elements at dest to value.
+ /// Result type is always void.
+ /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
+ /// value, `rhs` is the length.
+ /// The element type may be any type, not just u8.
+ memset,
+ /// Given dest ptr, src ptr, and len, copy len elements from src to dest.
+ /// Result type is always void.
+ /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
+ /// src ptr, `rhs` is the length.
+ /// The element type may be any type, not just u8.
+ memcpy,
+
/// Uses the `ty_pl` field with payload `Cmpxchg`.
cmpxchg_weak,
/// Uses the `ty_pl` field with payload `Cmpxchg`.
@@ -628,6 +641,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
+ .memset,
+ .memcpy,
=> return Type.initTag(.void),
.ptrtoint,
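
Note: the doc comments above describe the encoding directly; a call like the one below is lowered by zirMemcpy/zirMemset in Sema.zig (further down in this commit) to a `memset` AIR instruction with this layout, where `pl_op.operand` is the coerced destination pointer, `Bin.lhs` the fill value, and `Bin.rhs` the length. (Standalone illustration only; the commit's own coverage is the behavior test at the end.)

    test "memset AIR instruction encoding" {
        var buf: [8]u8 = undefined;
        // operand = &buf (coerced to [*]u8), lhs = the fill byte 0, rhs = buf.len
        @memset(&buf, 0, buf.len);
        try @import("std").testing.expect(buf[3] == 0);
    }
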
src/AstGen.zig
@@ -2149,8 +2149,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.field_ptr_type,
.field_parent_ptr,
.maximum,
- .memcpy,
- .memset,
.minimum,
.builtin_async_call,
.c_import,
@@ -2204,6 +2202,8 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.set_float_mode,
.set_runtime_safety,
.closure_capture,
+ .memcpy,
+ .memset,
=> break :b true,
}
} else switch (maybe_unused_result) {
@@ -7576,17 +7576,17 @@ fn builtinCall(
},
.memcpy => {
const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
- .dest = try expr(gz, scope, .{ .ty = .manyptr_u8_type }, params[0]),
- .source = try expr(gz, scope, .{ .ty = .manyptr_const_u8_type }, params[1]),
- .byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]),
+ .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]),
+ .source = try expr(gz, scope, .{ .coerced_ty = .manyptr_const_u8_type }, params[1]),
+ .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]),
});
return rvalue(gz, rl, result, node);
},
.memset => {
const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
- .dest = try expr(gz, scope, .{ .ty = .manyptr_u8_type }, params[0]),
- .byte = try expr(gz, scope, .{ .ty = .u8_type }, params[1]),
- .byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]),
+ .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]),
+ .byte = try expr(gz, scope, .{ .coerced_ty = .u8_type }, params[1]),
+ .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]),
});
return rvalue(gz, rl, result, node);
},
src/codegen.zig
@@ -887,6 +887,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.cmpxchg_weak => try self.airCmpxchg(inst),
.atomic_rmw => try self.airAtomicRmw(inst),
.atomic_load => try self.airAtomicLoad(inst),
+ .memcpy => try self.airMemcpy(inst),
+ .memset => try self.airMemset(inst),
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
@@ -4883,6 +4885,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch});
}
+ fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch});
+ }
+
+ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
+ _ = inst;
+ return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch});
+ }
+
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
src/Liveness.zig
@@ -361,6 +361,11 @@ fn analyzeInst(
const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none });
},
+ .memset, .memcpy => {
+ const pl_op = inst_datas[inst].pl_op;
+ const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
+ return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });
+ },
.br => {
const br = inst_datas[inst].br;
return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none });
src/print_air.zig
@@ -202,6 +202,8 @@ const Writer = struct {
.atomic_store_release => try w.writeAtomicStore(s, inst, .Release),
.atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst),
.atomic_rmw => try w.writeAtomicRmw(s, inst),
+ .memcpy => try w.writeMemcpy(s, inst),
+ .memset => try w.writeMemset(s, inst),
}
}
@@ -322,6 +324,28 @@ const Writer = struct {
try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
}
+ fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const pl_op = w.air.instructions.items(.data)[inst].pl_op;
+ const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
+
+ try w.writeOperand(s, inst, 0, pl_op.operand);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 1, extra.lhs);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 2, extra.rhs);
+ }
+
+ fn writeMemcpy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const pl_op = w.air.instructions.items(.data)[inst].pl_op;
+ const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
+
+ try w.writeOperand(s, inst, 0, pl_op.operand);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 1, extra.lhs);
+ try s.writeAll(", ");
+ try w.writeOperand(s, inst, 2, extra.rhs);
+ }
+
fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const val = w.air.values[ty_pl.payload];
src/print_zir.zig
@@ -210,8 +210,6 @@ const Writer = struct {
.mul_add,
.builtin_call,
.field_parent_ptr,
- .memcpy,
- .memset,
.builtin_async_call,
=> try self.writePlNode(stream, inst),
@@ -222,6 +220,8 @@ const Writer = struct {
.cmpxchg_strong, .cmpxchg_weak => try self.writeCmpxchg(stream, inst),
.atomic_store => try self.writeAtomicStore(stream, inst),
.atomic_rmw => try self.writeAtomicRmw(stream, inst),
+ .memcpy => try self.writeMemcpy(stream, inst),
+ .memset => try self.writeMemset(stream, inst),
.struct_init_anon,
.struct_init_anon_ref,
@@ -692,6 +692,32 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
+ fn writeMemcpy(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.dest);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.source);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.byte_count);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
+ fn writeMemset(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+ const extra = self.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
+
+ try self.writeInstRef(stream, extra.dest);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.byte);
+ try stream.writeAll(", ");
+ try self.writeInstRef(stream, extra.byte_count);
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
+ }
+
fn writeStructInitAnon(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
src/Sema.zig
@@ -341,8 +341,6 @@ pub fn analyzeBody(
.field_ptr_type => try sema.zirFieldPtrType(block, inst),
.field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
.maximum => try sema.zirMaximum(block, inst),
- .memcpy => try sema.zirMemcpy(block, inst),
- .memset => try sema.zirMemset(block, inst),
.minimum => try sema.zirMinimum(block, inst),
.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst),
.@"resume" => try sema.zirResume(block, inst),
@@ -526,6 +524,16 @@ pub fn analyzeBody(
i += 1;
continue;
},
+ .memcpy => {
+ try sema.zirMemcpy(block, inst);
+ i += 1;
+ continue;
+ },
+ .memset => {
+ try sema.zirMemset(block, inst);
+ i += 1;
+ continue;
+ },
// Special case instructions to handle comptime control flow.
.@"break" => {
@@ -8422,16 +8430,119 @@ fn zirMaximum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
return sema.mod.fail(&block.base, src, "TODO: Sema.zirMaximum", .{});
}
-fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{});
+ const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+ const dest_ptr = sema.resolveInst(extra.dest);
+ const dest_ptr_ty = sema.typeOf(dest_ptr);
+
+ if (dest_ptr_ty.zigTypeTag() != .Pointer) {
+ return sema.mod.fail(&block.base, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
+ }
+ if (dest_ptr_ty.isConstPtr()) {
+ return sema.mod.fail(&block.base, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
+ }
+
+ const uncasted_src_ptr = sema.resolveInst(extra.source);
+ const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr);
+ if (uncasted_src_ptr_ty.zigTypeTag() != .Pointer) {
+ return sema.mod.fail(&block.base, src_src, "expected pointer, found '{}'", .{
+ uncasted_src_ptr_ty,
+ });
+ }
+ const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data;
+ const wanted_src_ptr_ty = try Module.ptrType(
+ sema.arena,
+ dest_ptr_ty.elemType2(),
+ null,
+ src_ptr_info.@"align",
+ src_ptr_info.@"addrspace",
+ 0,
+ 0,
+ false,
+ src_ptr_info.@"allowzero",
+ src_ptr_info.@"volatile",
+ .Many,
+ );
+ const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src);
+ const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src);
+
+ const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
+ const maybe_src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr);
+ const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len);
+
+ const runtime_src = if (maybe_dest_ptr_val) |dest_ptr_val| rs: {
+ if (maybe_src_ptr_val) |src_ptr_val| {
+ if (maybe_len_val) |len_val| {
+ _ = dest_ptr_val;
+ _ = src_ptr_val;
+ _ = len_val;
+ return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy at comptime", .{});
+ } else break :rs len_src;
+ } else break :rs src_src;
+ } else dest_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ _ = try block.addInst(.{
+ .tag = .memcpy,
+ .data = .{ .pl_op = .{
+ .operand = dest_ptr,
+ .payload = try sema.addExtra(Air.Bin{
+ .lhs = src_ptr,
+ .rhs = len,
+ }),
+ } },
+ });
}
-fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
const src = inst_data.src();
- return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{});
+ const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
+ const dest_ptr = sema.resolveInst(extra.dest);
+ const dest_ptr_ty = sema.typeOf(dest_ptr);
+ if (dest_ptr_ty.zigTypeTag() != .Pointer) {
+ return sema.mod.fail(&block.base, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
+ }
+ if (dest_ptr_ty.isConstPtr()) {
+ return sema.mod.fail(&block.base, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
+ }
+ const elem_ty = dest_ptr_ty.elemType2();
+ const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src);
+ const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src);
+
+ const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
+ const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len);
+
+ const runtime_src = if (maybe_dest_ptr_val) |ptr_val| rs: {
+ if (maybe_len_val) |len_val| {
+ if (try sema.resolveMaybeUndefVal(block, value_src, value)) |val| {
+ _ = ptr_val;
+ _ = len_val;
+ _ = val;
+ return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset at comptime", .{});
+ } else break :rs value_src;
+ } else break :rs len_src;
+ } else dest_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ _ = try block.addInst(.{
+ .tag = .memset,
+ .data = .{ .pl_op = .{
+ .operand = dest_ptr,
+ .payload = try sema.addExtra(Air.Bin{
+ .lhs = value,
+ .rhs = len,
+ }),
+ } },
+ });
}
fn zirMinimum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -10090,7 +10201,8 @@ fn coerceArrayPtrToMany(
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_type, val);
}
- return sema.mod.fail(&block.base, inst_src, "TODO implement coerceArrayPtrToMany runtime instruction", .{});
+ try sema.requireRuntimeBlock(block, inst_src);
+ return sema.bitcast(block, dest_type, inst, inst_src);
}
fn analyzeDeclVal(
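
Note: the coerceArrayPtrToMany change above is what makes the new behavior test work at runtime: `&foo` has type `*[20]u8` and must coerce to the builtin's `[*]u8` destination parameter, which now lowers to a runtime bitcast instead of hitting the old TODO error. A standalone user-level sketch of that coercion (illustrative test, not part of this commit):

    const expect = @import("std").testing.expect;

    test "array pointer coerces to many-item pointer at runtime" {
        var buf: [4]u8 = undefined;
        const dest: [*]u8 = &buf; // *[4]u8 -> [*]u8, the coercion @memset's dest argument relies on
        dest[0] = 'A';
        try expect(buf[0] == 'A');
    }
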
src/type.zig
@@ -2391,12 +2391,11 @@ pub const Type = extern union {
};
}
- /// Asserts the type is a pointer or array type.
- pub fn elemType(self: Type) Type {
- return switch (self.tag()) {
- .vector => self.castTag(.vector).?.data.elem_type,
- .array => self.castTag(.array).?.data.elem_type,
- .array_sentinel => self.castTag(.array_sentinel).?.data.elem_type,
+ pub fn childType(ty: Type) Type {
+ return switch (ty.tag()) {
+ .vector => ty.castTag(.vector).?.data.elem_type,
+ .array => ty.castTag(.array).?.data.elem_type,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
@@ -2405,7 +2404,48 @@ pub const Type = extern union {
.c_mut_pointer,
.const_slice,
.mut_slice,
- => self.castPointer().?.data,
+ => ty.castPointer().?.data,
+
+ .array_u8,
+ .array_u8_sentinel_0,
+ .const_slice_u8,
+ .manyptr_u8,
+ .manyptr_const_u8,
+ => Type.initTag(.u8),
+
+ .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int),
+ .pointer => ty.castTag(.pointer).?.data.pointee_type,
+
+ else => unreachable,
+ };
+ }
+
+ /// Asserts the type is a pointer or array type.
+ /// TODO this is deprecated in favor of `childType`.
+ pub const elemType = childType;
+
+ /// For *[N]T, returns T.
+ /// For ?*T, returns T.
+ /// For ?*[N]T, returns T.
+ /// For ?[*]T, returns T.
+ /// For *T, returns T.
+ /// For [*]T, returns T.
+ pub fn elemType2(ty: Type) Type {
+ return switch (ty.tag()) {
+ .vector => ty.castTag(.vector).?.data.elem_type,
+ .array => ty.castTag(.array).?.data.elem_type,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .c_const_pointer,
+ .c_mut_pointer,
+ .const_slice,
+ .mut_slice,
+ => ty.castPointer().?.data,
+
+ .single_const_pointer,
+ .single_mut_pointer,
+ => ty.castPointer().?.data.shallowElemType(),
.array_u8,
.array_u8_sentinel_0,
@@ -2415,12 +2455,29 @@ pub const Type = extern union {
=> Type.initTag(.u8),
.single_const_pointer_to_comptime_int => Type.initTag(.comptime_int),
- .pointer => self.castTag(.pointer).?.data.pointee_type,
+ .pointer => {
+ const info = ty.castTag(.pointer).?.data;
+ const child_ty = info.pointee_type;
+ if (info.size == .One) {
+ return child_ty.shallowElemType();
+ } else {
+ return child_ty;
+ }
+ },
+
+ // TODO handle optionals
else => unreachable,
};
}
+ fn shallowElemType(child_ty: Type) Type {
+ return switch (child_ty.zigTypeTag()) {
+ .Array, .Vector => child_ty.childType(),
+ else => child_ty,
+ };
+ }
+
/// Asserts that the type is an optional.
/// Resulting `Type` will have inner memory referencing `buf`.
pub fn optionalChild(self: Type, buf: *Payload.ElemType) Type {
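
Note: `elemType2` differs from `childType` by looking through a single-item pointer to an array: for `*[N]T` it returns `T`, where `childType` returns `[N]T`. A user-level analogue of that rule via `@typeInfo` (illustrative only; the compiler-internal `Type` values are not constructed from user code like this):

    const expect = @import("std").testing.expect;

    test "element type seen through *[N]T" {
        const P = *[20]u8;
        const child = @typeInfo(P).Pointer.child; // childType-style view: [20]u8
        try expect(child == [20]u8);
        const elem = @typeInfo(child).Array.child; // elemType2-style view: u8
        try expect(elem == u8);
    }
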
test/behavior/basic.zig
@@ -170,3 +170,21 @@ test "string concatenation" {
test "array mult operator" {
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
}
+
+test "memcpy and memset intrinsics" {
+ try testMemcpyMemset();
+ // TODO add comptime test coverage
+ //comptime try testMemcpyMemset();
+}
+
+fn testMemcpyMemset() !void {
+ var foo: [20]u8 = undefined;
+ var bar: [20]u8 = undefined;
+
+ @memset(&foo, 'A', foo.len);
+ @memcpy(&bar, &foo, bar.len);
+
+ try expect(bar[0] == 'A');
+ try expect(bar[11] == 'A');
+ try expect(bar[19] == 'A');
+}
test/behavior/misc.zig
@@ -5,16 +5,6 @@ const expectEqualStrings = std.testing.expectEqualStrings;
const mem = std.mem;
const builtin = @import("builtin");
-test "memcpy and memset intrinsics" {
- var foo: [20]u8 = undefined;
- var bar: [20]u8 = undefined;
-
- @memset(&foo, 'A', foo.len);
- @memcpy(&bar, &foo, bar.len);
-
- if (bar[11] != 'A') unreachable;
-}
-
test "slicing" {
var array: [20]i32 = undefined;