Commit 71d16106ad
Changed files (5)
src/Sema.zig
src/Type.zig
src/Value.zig
test/cases/compile_errors/memcpy_alias.zig
test/cases/compile_errors/memcpy_bad_type.zig
src/Sema.zig
@@ -25793,7 +25793,6 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
const pt = sema.pt;
const zcu = pt.zcu;
- const target = zcu.getTarget();
if (dest_ty.isConstPtr(zcu)) {
return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
@@ -25814,6 +25813,30 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
return sema.failWithOwnedErrorMsg(block, msg);
}
+ const dest_elem_ty = dest_ty.indexablePtrElem(zcu);
+ const src_elem_ty = src_ty.indexablePtrElem(zcu);
+
+ const imc = try sema.coerceInMemoryAllowed(
+ block,
+ dest_elem_ty,
+ src_elem_ty,
+ false,
+ zcu.getTarget(),
+ dest_src,
+ src_src,
+ null,
+ );
+ if (imc != .ok) return sema.failWithOwnedErrorMsg(block, msg: {
+ const msg = try sema.errMsg(
+ src,
+ "pointer element type '{}' cannot coerce into element type '{}'",
+ .{ src_elem_ty.fmt(pt), dest_elem_ty.fmt(pt) },
+ );
+ errdefer msg.destroy(sema.gpa);
+ try imc.report(sema, src, msg);
+ break :msg msg;
+ });
+
var len_val: ?Value = null;
if (dest_len != .none and src_len != .none) check: {
@@ -25855,61 +25878,52 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
}
}
- const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
+ const runtime_src = rs: {
+ const dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src;
+ const src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr) orelse break :rs src_src;
+
+ const raw_dest_ptr = if (dest_ty.isSlice(zcu)) dest_ptr_val.slicePtr(zcu) else dest_ptr_val;
+ const raw_src_ptr = if (src_ty.isSlice(zcu)) src_ptr_val.slicePtr(zcu) else src_ptr_val;
+
+ const len_u64 = try len_val.?.toUnsignedIntSema(pt);
+
+ if (Value.doPointersOverlap(
+ raw_src_ptr,
+ raw_dest_ptr,
+ len_u64,
+ zcu,
+ )) return sema.fail(block, src, "'@memcpy' arguments alias", .{});
+
if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src;
- if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
- const len_u64 = try len_val.?.toUnsignedIntSema(pt);
- const len = try sema.usizeCast(block, dest_src, len_u64);
- for (0..len) |i| {
- const elem_index = try pt.intRef(Type.usize, i);
- const dest_elem_ptr = try sema.elemPtrOneLayerOnly(
- block,
- src,
- dest_ptr,
- elem_index,
- src,
- true, // init
- false, // oob_safety
- );
- const src_elem_ptr = try sema.elemPtrOneLayerOnly(
- block,
- src,
- src_ptr,
- elem_index,
- src,
- false, // init
- false, // oob_safety
- );
- const uncoerced_elem = try sema.analyzeLoad(block, src, src_elem_ptr, src_src);
- try sema.storePtr2(
- block,
- src,
- dest_elem_ptr,
- dest_src,
- uncoerced_elem,
- src_src,
- .store,
- );
- }
- return;
- } else break :rs src_src;
- } else dest_src;
- // If in-memory coercion is not allowed, explode this memcpy call into a
- // for loop that copies element-wise.
- // Likewise if this is an iterable rather than a pointer, do the same
- // lowering. The AIR instruction requires pointers with element types of
- // equal ABI size.
+ // Because comptime pointer access is a somewhat expensive operation, we implement @memcpy
+ // as one load and store of an array, rather than N loads and stores of individual elements.
- if (dest_ty.zigTypeTag(zcu) != .pointer or src_ty.zigTypeTag(zcu) != .pointer) {
- return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
- }
+ const array_ty = try pt.arrayType(.{
+ .child = dest_elem_ty.toIntern(),
+ .len = len_u64,
+ });
- const dest_elem_ty = dest_ty.elemType2(zcu);
- const src_elem_ty = src_ty.elemType2(zcu);
- if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src, null)) {
- return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
- }
+ const dest_array_ptr_ty = try pt.ptrType(info: {
+ var info = dest_ty.ptrInfo(zcu);
+ info.flags.size = .one;
+ info.child = array_ty.toIntern();
+ break :info info;
+ });
+ const src_array_ptr_ty = try pt.ptrType(info: {
+ var info = src_ty.ptrInfo(zcu);
+ info.flags.size = .one;
+ info.child = array_ty.toIntern();
+ break :info info;
+ });
+
+ const coerced_dest_ptr = try pt.getCoerced(raw_dest_ptr, dest_array_ptr_ty);
+ const coerced_src_ptr = try pt.getCoerced(raw_src_ptr, src_array_ptr_ty);
+
+ const array_val = try sema.pointerDeref(block, src_src, coerced_src_ptr, src_array_ptr_ty) orelse break :rs src_src;
+ try sema.storePtrVal(block, dest_src, coerced_dest_ptr, array_val, array_ty);
+ return;
+ };
// If the length is comptime-known, then upgrade src and destination types
// into pointer-to-array. At this point we know they are both pointers
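Illustrative note (not part of the commit): the rewritten comptime path above services a comptime-known `@memcpy` with a single array load and store rather than a per-element loop. A minimal snippet that would exercise that path, assuming a comptime-mutable destination:

```zig
// Hypothetical example, not taken from the commit's test suite.
comptime {
    var dest: [4]u16 = undefined;
    const src = [4]u16{ 1, 2, 3, 4 };
    // Copied at comptime as one load/store of a [4]u16 array value.
    @memcpy(&dest, &src);
    if (dest[2] != 3) @compileError("unexpected value");
}
```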
src/Type.zig
@@ -2057,6 +2057,22 @@ pub fn elemType2(ty: Type, zcu: *const Zcu) Type {
};
}
+/// Given that `ty` is an indexable pointer, returns its element type. Specifically:
+/// * for `*[n]T`, returns `T`
+/// * for `[]T`, returns `T`
+/// * for `[*]T`, returns `T`
+/// * for `[*c]T`, returns `T`
+pub fn indexablePtrElem(ty: Type, zcu: *const Zcu) Type {
+ const ip = &zcu.intern_pool;
+ const ptr_type = ip.indexToKey(ty.toIntern()).ptr_type;
+ switch (ptr_type.flags.size) {
+ .many, .slice, .c => return .fromInterned(ptr_type.child),
+ .one => {},
+ }
+ const array_type = ip.indexToKey(ptr_type.child).array_type;
+ return .fromInterned(array_type.child);
+}
+
fn shallowElemType(child_ty: Type, zcu: *const Zcu) Type {
return switch (child_ty.zigTypeTag(zcu)) {
.array, .vector => child_ty.childType(zcu),
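For reference, the four pointer shapes the new `indexablePtrElem` helper accepts, written as user-level types (illustrative only, not compiler code):

```zig
const P1 = *[8]u8; // single pointer to array -> element type u8
const P2 = []u8;   // slice                   -> element type u8
const P3 = [*]u8;  // many-item pointer       -> element type u8
const P4 = [*c]u8; // C pointer               -> element type u8
```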
src/Value.zig
@@ -4755,3 +4755,70 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
},
};
}
+
+/// Returns whether `ptr_val_a[0..elem_count]` and `ptr_val_b[0..elem_count]` overlap.
+/// `ptr_val_a` and `ptr_val_b` are indexable pointers (not slices) whose element types are in-memory coercible.
+pub fn doPointersOverlap(ptr_val_a: Value, ptr_val_b: Value, elem_count: u64, zcu: *const Zcu) bool {
+ const ip = &zcu.intern_pool;
+
+ const a_elem_ty = ptr_val_a.typeOf(zcu).indexablePtrElem(zcu);
+ const b_elem_ty = ptr_val_b.typeOf(zcu).indexablePtrElem(zcu);
+
+ const a_ptr = ip.indexToKey(ptr_val_a.toIntern()).ptr;
+ const b_ptr = ip.indexToKey(ptr_val_b.toIntern()).ptr;
+
+ // If `a_elem_ty` is not comptime-only, then overlapping pointers have identical
+ // `base_addr`, and we just need to look at the byte offset. If it *is* comptime-only,
+ // then `base_addr` may be an `arr_elem`, and we'll have to consider the element index.
+ if (a_elem_ty.comptimeOnly(zcu)) {
+ assert(a_elem_ty.toIntern() == b_elem_ty.toIntern()); // IMC comptime-only types are equivalent
+
+ const a_base_addr: InternPool.Key.Ptr.BaseAddr, const a_idx: u64 = switch (a_ptr.base_addr) {
+ else => .{ a_ptr.base_addr, 0 },
+ .arr_elem => |arr_elem| a: {
+ const base_ptr = Value.fromInterned(arr_elem.base);
+ const base_child_ty = base_ptr.typeOf(zcu).childType(zcu);
+ if (base_child_ty.toIntern() == a_elem_ty.toIntern()) {
+ // This `arr_elem` is indexing into the element type we want.
+ const base_ptr_info = ip.indexToKey(base_ptr.toIntern()).ptr;
+ if (base_ptr_info.byte_offset != 0) {
+ return false; // this pointer is invalid, just let the access fail
+ }
+ break :a .{ base_ptr_info.base_addr, arr_elem.index };
+ }
+ break :a .{ a_ptr.base_addr, 0 };
+ },
+ };
+ const b_base_addr: InternPool.Key.Ptr.BaseAddr, const b_idx: u64 = switch (b_ptr.base_addr) {
+ else => .{ b_ptr.base_addr, 0 },
+ .arr_elem => |arr_elem| b: {
+ const base_ptr = Value.fromInterned(arr_elem.base);
+ const base_child_ty = base_ptr.typeOf(zcu).childType(zcu);
+ if (base_child_ty.toIntern() == b_elem_ty.toIntern()) {
+ // This `arr_elem` is indexing into the element type we want.
+ const base_ptr_info = ip.indexToKey(base_ptr.toIntern()).ptr;
+ if (base_ptr_info.byte_offset != 0) {
+ return false; // this pointer is invalid, just let the access fail
+ }
+ break :b .{ base_ptr_info.base_addr, arr_elem.index };
+ }
+ break :b .{ b_ptr.base_addr, 0 };
+ },
+ };
+ if (!std.meta.eql(a_base_addr, b_base_addr)) return false;
+ const diff = if (a_idx >= b_idx) a_idx - b_idx else b_idx - a_idx;
+ return diff < elem_count;
+ } else {
+ assert(a_elem_ty.abiSize(zcu) == b_elem_ty.abiSize(zcu));
+
+ if (!std.meta.eql(a_ptr.base_addr, b_ptr.base_addr)) return false;
+
+ const bytes_diff = if (a_ptr.byte_offset >= b_ptr.byte_offset)
+ a_ptr.byte_offset - b_ptr.byte_offset
+ else
+ b_ptr.byte_offset - a_ptr.byte_offset;
+
+ const need_bytes_diff = elem_count * a_elem_ty.abiSize(zcu);
+ return bytes_diff < need_bytes_diff;
+ }
+}
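As a sanity check of the non-comptime-only branch above: two regions overlap when their byte offsets differ by less than the copy length in bytes. A standalone sketch of that arithmetic (hypothetical helper, not part of the commit), using the numbers from memcpy_alias.zig below, where `arr[0..6]` and `arr[4..10]` of `u64` differ by 32 bytes while the copy spans 6 * 8 = 48 bytes:

```zig
const std = @import("std");

// Hypothetical helper mirroring the byte-offset comparison in Value.doPointersOverlap.
fn overlapsByBytes(off_a: u64, off_b: u64, elem_count: u64, elem_abi_size: u64) bool {
    const bytes_diff = if (off_a >= off_b) off_a - off_b else off_b - off_a;
    return bytes_diff < elem_count * elem_abi_size;
}

test "arr[0..6] vs arr[4..10] of u64 overlap" {
    // dest starts at byte 0, src at byte 4 * 8 = 32; the copy covers 6 * 8 = 48 bytes.
    try std.testing.expect(overlapsByBytes(0, 32, 6, 8));
    // Disjoint halves, e.g. arr[0..5] vs arr[5..10], do not overlap.
    try std.testing.expect(!overlapsByBytes(0, 40, 5, 8));
}
```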
test/cases/compile_errors/memcpy_alias.zig
@@ -0,0 +1,14 @@
+var arr: [10]u64 = undefined;
+export fn foo() void {
+ @memcpy(arr[0..6], arr[4..10]);
+}
+
+comptime {
+ var types: [4]type = .{ u8, u16, u32, u64 };
+ @memcpy(types[2..4], types[1..3]);
+}
+
+// error
+//
+// :3:5: error: '@memcpy' arguments alias
+// :8:5: error: '@memcpy' arguments alias
test/cases/compile_errors/memcpy_bad_type.zig
@@ -0,0 +1,10 @@
+const src: [10]u8 = @splat(0);
+var dest: [10]u16 = undefined;
+
+export fn foo() void {
+ @memcpy(&dest, &src);
+}
+
+// error
+//
+// :5:5: error: pointer element type 'u8' cannot coerce into element type 'u16'
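For contrast with memcpy_bad_type.zig, a case the new element-type check still accepts, since identical element types are trivially in-memory coercible (illustrative snippet, not part of the commit's test suite):

```zig
const src: [10]u8 = @splat(0);
var dest: [10]u8 = undefined;

export fn ok() void {
    @memcpy(&dest, &src); // u8 -> u8: compiles
}
```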