Commit a5c910adb6

Andrew Kelley <andrew@ziglang.org>
2023-04-14 06:44:40
change semantics of `@memcpy` and `@memset`
Now they use slices or array pointers with any element type instead of requiring byte pointers. This is a breaking enhancement to the language. The safety check for overlapping pointers will be implemented in a future commit. closes #14040
1 parent 8d88dcd
doc/langref.html.in
@@ -8681,40 +8681,28 @@ test "integer cast panic" {
       {#header_close#}
 
       {#header_open|@memcpy#}
-      <pre>{#syntax#}@memcpy(noalias dest: [*]u8, noalias source: [*]const u8, byte_count: usize) void{#endsyntax#}</pre>
-      <p>
-      This function copies bytes from one region of memory to another. {#syntax#}dest{#endsyntax#} and
-          {#syntax#}source{#endsyntax#} are both pointers and must not overlap.
-      </p>
-      <p>
-      This function is a low level intrinsic with no safety mechanisms. Most code
-      should not use this function, instead using something like this:
-      </p>
-      <pre>{#syntax#}for (dest, source[0..byte_count]) |*d, s| d.* = s;{#endsyntax#}</pre>
-      <p>
-      The optimizer is intelligent enough to turn the above snippet into a memcpy.
-      </p>
-      <p>There is also a standard library function for this:</p>
-      <pre>{#syntax#}const mem = @import("std").mem;
-mem.copy(u8, dest[0..byte_count], source[0..byte_count]);{#endsyntax#}</pre>
+      <pre>{#syntax#}@memcpy(noalias dest, noalias source) void{#endsyntax#}</pre>
+      <p>This function copies elements from one region of memory to another.</p>
+      <p>{#syntax#}dest{#endsyntax#} must be a mutable slice, or a mutable pointer to an array.
+        It may have any alignment, and it may have any element type.</p>
+      <p>{#syntax#}source{#endsyntax#} must be an array, a pointer, or a slice
+        with the same element type as {#syntax#}dest{#endsyntax#}. It may have
+        any alignment. Only {#syntax#}const{#endsyntax#} access is required. It
+        is sliced from 0 to the same length as
+        {#syntax#}dest{#endsyntax#}, triggering the same set of safety checks and
+        possible compile errors as
+        {#syntax#}source[0..dest.len]{#endsyntax#}.</p>
+      <p>It is illegal for {#syntax#}dest{#endsyntax#} and
+        {#syntax#}source[0..dest.len]{#endsyntax#} to overlap. If safety
+        checks are enabled, there will be a runtime check for such overlapping.</p>
       {#header_close#}
 
       {#header_open|@memset#}
-      <pre>{#syntax#}@memset(dest: [*]u8, c: u8, byte_count: usize) void{#endsyntax#}</pre>
-      <p>
-      This function sets a region of memory to {#syntax#}c{#endsyntax#}. {#syntax#}dest{#endsyntax#} is a pointer.
-      </p>
-      <p>
-      This function is a low level intrinsic with no safety mechanisms. Most
-      code should not use this function, instead using something like this:
-      </p>
-      <pre>{#syntax#}for (dest[0..byte_count]) |*b| b.* = c;{#endsyntax#}</pre>
-      <p>
-      The optimizer is intelligent enough to turn the above snippet into a memset.
-      </p>
-      <p>There is also a standard library function for this:</p>
-      <pre>{#syntax#}const mem = @import("std").mem;
-mem.set(u8, dest, c);{#endsyntax#}</pre>
+      <pre>{#syntax#}@memset(dest, elem) void{#endsyntax#}</pre>
+      <p>This function sets all the elements of a memory region to {#syntax#}elem{#endsyntax#}.</p>
+      <p>{#syntax#}dest{#endsyntax#} must be a mutable slice or a mutable pointer to an array.
+      It may have any alignment, and it may have any element type.</p>
+      <p>{#syntax#}elem{#endsyntax#} is coerced to the element type of {#syntax#}dest{#endsyntax#}.</p>
       {#header_close#}
 
       {#header_open|@min#}
lib/compiler_rt/atomics.zig
@@ -121,22 +121,22 @@ fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) vo
     _ = model;
     var sl = spinlocks.get(@ptrToInt(src));
     defer sl.release();
-    @memcpy(dest, src, size);
+    @memcpy(dest[0..size], src);
 }
 
 fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
     _ = model;
     var sl = spinlocks.get(@ptrToInt(dest));
     defer sl.release();
-    @memcpy(dest, src, size);
+    @memcpy(dest[0..size], src);
 }
 
 fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
     _ = model;
     var sl = spinlocks.get(@ptrToInt(ptr));
     defer sl.release();
-    @memcpy(old, ptr, size);
-    @memcpy(ptr, val, size);
+    @memcpy(old[0..size], ptr);
+    @memcpy(ptr[0..size], val);
 }
 
 fn __atomic_compare_exchange(
@@ -155,10 +155,10 @@ fn __atomic_compare_exchange(
         if (expected[i] != b) break;
     } else {
         // The two objects, ptr and expected, are equal
-        @memcpy(ptr, desired, size);
+        @memcpy(ptr[0..size], desired);
         return 1;
     }
-    @memcpy(expected, ptr, size);
+    @memcpy(expected[0..size], ptr);
     return 0;
 }
 
lib/compiler_rt/emutls.zig
@@ -139,10 +139,10 @@ const ObjectArray = struct {
 
             if (control.default_value) |value| {
                 // default value: copy the content to newly allocated object.
-                @memcpy(data, @ptrCast([*]const u8, value), size);
+                @memcpy(data[0..size], @ptrCast([*]const u8, value));
             } else {
                 // no default: return zeroed memory.
-                @memset(data, 0, size);
+                @memset(data[0..size], 0);
             }
 
             self.slots[index] = @ptrCast(*anyopaque, data);
lib/std/c/darwin.zig
@@ -3670,7 +3670,7 @@ pub const MachTask = extern struct {
                 else => |err| return unexpectedKernError(err),
             }
 
-            @memcpy(out_buf[0..].ptr, @intToPtr([*]const u8, vm_memory), curr_bytes_read);
+            @memcpy(out_buf[0..curr_bytes_read], @intToPtr([*]const u8, vm_memory));
             _ = vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
 
             out_buf = out_buf[curr_bytes_read..];
lib/std/crypto/tls/Client.zig
@@ -531,7 +531,7 @@ pub fn init(stream: anytype, ca_bundle: Certificate.Bundle, host: []const u8) In
                                     const pub_key = subject.pubKey();
                                     if (pub_key.len > main_cert_pub_key_buf.len)
                                         return error.CertificatePublicKeyInvalid;
-                                    @memcpy(&main_cert_pub_key_buf, pub_key.ptr, pub_key.len);
+                                    @memcpy(main_cert_pub_key_buf[0..pub_key.len], pub_key);
                                     main_cert_pub_key_len = @intCast(@TypeOf(main_cert_pub_key_len), pub_key.len);
                                 } else {
                                     try prev_cert.verify(subject, now_sec);
lib/std/crypto/aes_gcm.zig
@@ -91,7 +91,7 @@ fn AesGcm(comptime Aes: anytype) type {
                 acc |= (computed_tag[p] ^ tag[p]);
             }
             if (acc != 0) {
-                @memset(m.ptr, undefined, m.len);
+                @memset(m, undefined);
                 return error.AuthenticationFailed;
             }
 
lib/std/crypto/utils.zig
@@ -135,11 +135,11 @@ pub fn timingSafeSub(comptime T: type, a: []const T, b: []const T, result: []T,
 /// Sets a slice to zeroes.
 /// Prevents the store from being optimized out.
 pub fn secureZero(comptime T: type, s: []T) void {
-    // NOTE: We do not use a volatile slice cast here since LLVM cannot
-    // see that it can be replaced by a memset.
+    // TODO: implement `@memset` for non-byte-sized element type in the llvm backend
+    //@memset(@as([]volatile T, s), 0);
     const ptr = @ptrCast([*]volatile u8, s.ptr);
     const length = s.len * @sizeOf(T);
-    @memset(ptr, 0, length);
+    @memset(ptr[0..length], 0);
 }
 
 test "crypto.utils.timingSafeEql" {
lib/std/hash/murmur.zig
@@ -115,7 +115,7 @@ pub const Murmur2_64 = struct {
         const offset = len - rest;
         if (rest > 0) {
             var k1: u64 = 0;
-            @memcpy(@ptrCast([*]u8, &k1), @ptrCast([*]const u8, &str[@intCast(usize, offset)]), @intCast(usize, rest));
+            @memcpy(@ptrCast([*]u8, &k1)[0..@intCast(usize, rest)], @ptrCast([*]const u8, &str[@intCast(usize, offset)]));
             if (native_endian == .Big)
                 k1 = @byteSwap(k1);
             h1 ^= k1;
@@ -282,13 +282,8 @@ pub const Murmur3_32 = struct {
 
 fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
     const hashbytes = hashbits / 8;
-    var key: [256]u8 = undefined;
-    var hashes: [hashbytes * 256]u8 = undefined;
-    var final: [hashbytes]u8 = undefined;
-
-    @memset(@ptrCast([*]u8, &key[0]), 0, @sizeOf(@TypeOf(key)));
-    @memset(@ptrCast([*]u8, &hashes[0]), 0, @sizeOf(@TypeOf(hashes)));
-    @memset(@ptrCast([*]u8, &final[0]), 0, @sizeOf(@TypeOf(final)));
+    var key: [256]u8 = [1]u8{0} ** 256;
+    var hashes: [hashbytes * 256]u8 = [1]u8{0} ** (hashbytes * 256);
 
     var i: u32 = 0;
     while (i < 256) : (i += 1) {
@@ -297,7 +292,7 @@ fn SMHasherTest(comptime hash_fn: anytype, comptime hashbits: u32) u32 {
         var h = hash_fn(key[0..i], 256 - i);
         if (native_endian == .Big)
             h = @byteSwap(h);
-        @memcpy(@ptrCast([*]u8, &hashes[i * hashbytes]), @ptrCast([*]u8, &h), hashbytes);
+        @memcpy(hashes[i * hashbytes..][0..hashbytes], @ptrCast([*]u8, &h));
     }
 
     return @truncate(u32, hash_fn(&hashes, 0));
lib/std/heap/general_purpose_allocator.zig
@@ -759,7 +759,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
             if (new_size_class <= size_class) {
                 if (old_mem.len > new_size) {
-                    @memset(old_mem.ptr + new_size, undefined, old_mem.len - new_size);
+                    @memset(old_mem[new_size..], undefined);
                 }
                 if (config.verbose_log) {
                     log.info("small resize {d} bytes at {*} to {d}", .{
@@ -911,7 +911,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                     self.empty_buckets = bucket;
                 }
             } else {
-                @memset(old_mem.ptr, undefined, old_mem.len);
+                @memset(old_mem, undefined);
             }
             if (config.safety) {
                 assert(self.small_allocations.remove(@ptrToInt(old_mem.ptr)));
@@ -1011,7 +1011,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             };
             self.buckets[bucket_index] = ptr;
             // Set the used bits to all zeroes
-            @memset(@as(*[1]u8, ptr.usedBits(0)), 0, usedBitsCount(size_class));
+            @memset(@as([*]u8, @as(*[1]u8, ptr.usedBits(0)))[0..usedBitsCount(size_class)], 0);
             return ptr;
         }
     };
@@ -1412,3 +1412,4 @@ test "bug 9995 fix, large allocs count requested size not backing size" {
     buf = try allocator.realloc(buf, 2);
     try std.testing.expect(gpa.total_requested_bytes == 2);
 }
+
lib/std/math/big/int_test.zig
@@ -2756,7 +2756,7 @@ test "big int conversion read twos complement with padding" {
 
     var buffer1 = try testing.allocator.alloc(u8, 16);
     defer testing.allocator.free(buffer1);
-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);
 
     // writeTwosComplement:
     // (1) should not write beyond buffer[0..abi_size]
@@ -2773,7 +2773,7 @@ test "big int conversion read twos complement with padding" {
     a.toConst().writeTwosComplement(buffer1[0..16], .Big);
     try testing.expect(std.mem.eql(u8, buffer1, &[_]u8{ 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd }));
 
-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);
     try a.set(-0x01_02030405_06070809_0a0b0c0d);
     bit_count = 12 * 8 + 2;
 
@@ -2794,7 +2794,7 @@ test "big int write twos complement +/- zero" {
 
     var buffer1 = try testing.allocator.alloc(u8, 16);
     defer testing.allocator.free(buffer1);
-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);
 
     // Test zero
 
@@ -2807,7 +2807,7 @@ test "big int write twos complement +/- zero" {
     m.toConst().writeTwosComplement(buffer1[0..16], .Big);
     try testing.expect(std.mem.eql(u8, buffer1, &(([_]u8{0} ** 16))));
 
-    @memset(buffer1.ptr, 0xaa, buffer1.len);
+    @memset(buffer1, 0xaa);
     m.positive = false;
 
     // Test negative zero
lib/std/mem/Allocator.zig
@@ -215,7 +215,7 @@ pub fn allocAdvancedWithRetAddr(
     const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
     const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory;
     // TODO: https://github.com/ziglang/zig/issues/4298
-    @memset(byte_ptr, undefined, byte_count);
+    @memset(byte_ptr[0..byte_count], undefined);
     const byte_slice = byte_ptr[0..byte_count];
     return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
 }
@@ -282,9 +282,9 @@ pub fn reallocAdvanced(
 
     const new_mem = self.rawAlloc(byte_count, log2a(Slice.alignment), return_address) orelse
         return error.OutOfMemory;
-    @memcpy(new_mem, old_byte_slice.ptr, @min(byte_count, old_byte_slice.len));
+    @memcpy(new_mem[0..@min(byte_count, old_byte_slice.len)], old_byte_slice);
     // TODO https://github.com/ziglang/zig/issues/4298
-    @memset(old_byte_slice.ptr, undefined, old_byte_slice.len);
+    @memset(old_byte_slice, undefined);
     self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);
 
     return mem.bytesAsSlice(T, @alignCast(Slice.alignment, new_mem[0..byte_count]));
@@ -299,7 +299,7 @@ pub fn free(self: Allocator, memory: anytype) void {
     if (bytes_len == 0) return;
     const non_const_ptr = @constCast(bytes.ptr);
     // TODO: https://github.com/ziglang/zig/issues/4298
-    @memset(non_const_ptr, undefined, bytes_len);
+    @memset(non_const_ptr[0..bytes_len], undefined);
     self.rawFree(non_const_ptr[0..bytes_len], log2a(Slice.alignment), @returnAddress());
 }
 
lib/std/os/linux.zig
@@ -1184,7 +1184,7 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
             .mask = undefined,
             .restorer = @ptrCast(k_sigaction_funcs.restorer, restorer_fn),
         };
-        @memcpy(@ptrCast([*]u8, &ksa.mask), @ptrCast([*]const u8, &new.mask), mask_size);
+        @memcpy(@ptrCast([*]u8, &ksa.mask)[0..mask_size], @ptrCast([*]const u8, &new.mask));
     }
 
     const ksa_arg = if (act != null) @ptrToInt(&ksa) else 0;
@@ -1200,7 +1200,7 @@ pub fn sigaction(sig: u6, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
     if (oact) |old| {
         old.handler.handler = oldksa.handler;
         old.flags = @truncate(c_uint, oldksa.flags);
-        @memcpy(@ptrCast([*]u8, &old.mask), @ptrCast([*]const u8, &oldksa.mask), mask_size);
+        @memcpy(@ptrCast([*]u8, &old.mask)[0..mask_size], @ptrCast([*]const u8, &oldksa.mask));
     }
 
     return 0;
@@ -1515,7 +1515,7 @@ pub fn sched_yield() usize {
 pub fn sched_getaffinity(pid: pid_t, size: usize, set: *cpu_set_t) usize {
     const rc = syscall3(.sched_getaffinity, @bitCast(usize, @as(isize, pid)), size, @ptrToInt(set));
     if (@bitCast(isize, rc) < 0) return rc;
-    if (rc < size) @memset(@ptrCast([*]u8, set) + rc, 0, size - rc);
+    if (rc < size) @memset(@ptrCast([*]u8, set)[rc..size], 0);
     return 0;
 }
 
lib/std/os/windows.zig
@@ -755,9 +755,9 @@ pub fn CreateSymbolicLink(
     };
 
     std.mem.copy(u8, buffer[0..], std.mem.asBytes(&symlink_data));
-    @memcpy(buffer[@sizeOf(SYMLINK_DATA)..], @ptrCast([*]const u8, target_path), target_path.len * 2);
+    @memcpy(buffer[@sizeOf(SYMLINK_DATA)..][0..target_path.len * 2], @ptrCast([*]const u8, target_path));
     const paths_start = @sizeOf(SYMLINK_DATA) + target_path.len * 2;
-    @memcpy(buffer[paths_start..].ptr, @ptrCast([*]const u8, target_path), target_path.len * 2);
+    @memcpy(buffer[paths_start..][0..target_path.len * 2], @ptrCast([*]const u8, target_path));
     _ = try DeviceIoControl(symlink_handle, FSCTL_SET_REPARSE_POINT, buffer[0..buf_len], null);
 }
 
@@ -1179,7 +1179,7 @@ pub fn GetFinalPathNameByHandle(
             var input_struct = @ptrCast(*MOUNTMGR_MOUNT_POINT, &input_buf[0]);
             input_struct.DeviceNameOffset = @sizeOf(MOUNTMGR_MOUNT_POINT);
             input_struct.DeviceNameLength = @intCast(USHORT, volume_name_u16.len * 2);
-            @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..], @ptrCast([*]const u8, volume_name_u16.ptr), volume_name_u16.len * 2);
+            @memcpy(input_buf[@sizeOf(MOUNTMGR_MOUNT_POINT)..][0..volume_name_u16.len * 2], @ptrCast([*]const u8, volume_name_u16.ptr));
 
             DeviceIoControl(mgmt_handle, IOCTL_MOUNTMGR_QUERY_POINTS, &input_buf, &output_buf) catch |err| switch (err) {
                 error.AccessDenied => unreachable,
lib/std/zig/c_builtins.zig
@@ -152,7 +152,7 @@ pub inline fn __builtin___memset_chk(
 
 pub inline fn __builtin_memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque {
     const dst_cast = @ptrCast([*c]u8, dst);
-    @memset(dst_cast, @bitCast(u8, @truncate(i8, val)), len);
+    @memset(dst_cast[0..len], @bitCast(u8, @truncate(i8, val)));
     return dst;
 }
 
@@ -174,7 +174,7 @@ pub inline fn __builtin_memcpy(
     const dst_cast = @ptrCast([*c]u8, dst);
     const src_cast = @ptrCast([*c]const u8, src);
 
-    @memcpy(dst_cast, src_cast, len);
+    @memcpy(dst_cast[0..len], src_cast);
     return dst;
 }
 
lib/std/array_hash_map.zig
@@ -1893,7 +1893,7 @@ const IndexHeader = struct {
         const index_size = hash_map.capacityIndexSize(new_bit_index);
         const nbytes = @sizeOf(IndexHeader) + index_size * len;
         const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes);
-        @memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader));
+        @memset(bytes[@sizeOf(IndexHeader)..], 0xff);
         const result = @ptrCast(*IndexHeader, bytes.ptr);
         result.* = .{
             .bit_index = new_bit_index,
@@ -1914,7 +1914,7 @@ const IndexHeader = struct {
         const index_size = hash_map.capacityIndexSize(header.bit_index);
         const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
         const nbytes = @sizeOf(IndexHeader) + header.length() * index_size;
-        @memset(ptr + @sizeOf(IndexHeader), 0xff, nbytes - @sizeOf(IndexHeader));
+        @memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff);
     }
 
     // Verify that the header has sufficient alignment to produce aligned arrays.
lib/std/array_list.zig
@@ -121,7 +121,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
 
             const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
             mem.copy(T, new_memory, self.items);
-            @memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
+            @memset(self.items, undefined);
             self.clearAndFree();
             return new_memory;
         }
@@ -281,11 +281,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             const new_len = old_len + items.len;
             assert(new_len <= self.capacity);
             self.items.len = new_len;
-            @memcpy(
-                @ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
-                @ptrCast([*]const u8, items.ptr),
-                items.len * @sizeOf(T),
-            );
+            @memcpy(self.items[old_len..][0..items.len], items);
         }
 
         pub const Writer = if (T != u8)
@@ -601,7 +597,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 
             const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
             mem.copy(T, new_memory, self.items);
-            @memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
+            @memset(self.items, undefined);
             self.clearAndFree(allocator);
             return new_memory;
         }
@@ -740,11 +736,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
             const new_len = old_len + items.len;
             assert(new_len <= self.capacity);
             self.items.len = new_len;
-            @memcpy(
-                @ptrCast([*]align(@alignOf(T)) u8, self.items.ptr + old_len),
-                @ptrCast([*]const u8, items.ptr),
-                items.len * @sizeOf(T),
-            );
+            @memcpy(self.items[old_len..][0..items.len], items);
         }
 
         pub const WriterContext = struct {
lib/std/fifo.zig
@@ -104,7 +104,7 @@ pub fn LinearFifo(
             }
             { // set unused area to undefined
                 const unused = mem.sliceAsBytes(self.buf[self.count..]);
-                @memset(unused.ptr, undefined, unused.len);
+                @memset(unused, undefined);
             }
         }
 
@@ -182,12 +182,12 @@ pub fn LinearFifo(
                 const slice = self.readableSliceMut(0);
                 if (slice.len >= count) {
                     const unused = mem.sliceAsBytes(slice[0..count]);
-                    @memset(unused.ptr, undefined, unused.len);
+                    @memset(unused, undefined);
                 } else {
                     const unused = mem.sliceAsBytes(slice[0..]);
-                    @memset(unused.ptr, undefined, unused.len);
+                    @memset(unused, undefined);
                     const unused2 = mem.sliceAsBytes(self.readableSliceMut(slice.len)[0 .. count - slice.len]);
-                    @memset(unused2.ptr, undefined, unused2.len);
+                    @memset(unused2, undefined);
                 }
             }
             if (autoalign and self.count == count) {
lib/std/hash_map.zig
@@ -1449,7 +1449,7 @@ pub fn HashMapUnmanaged(
         }
 
         fn initMetadatas(self: *Self) void {
-            @memset(@ptrCast([*]u8, self.metadata.?), 0, @sizeOf(Metadata) * self.capacity());
+            @memset(@ptrCast([*]u8, self.metadata.?)[0..@sizeOf(Metadata) * self.capacity()], 0);
         }
 
         // This counts the number of occupied slots (not counting tombstones), which is
lib/std/multi_array_list.zig
@@ -360,11 +360,10 @@ pub fn MultiArrayList(comptime T: type) type {
                     if (@sizeOf(field_info.type) != 0) {
                         const field = @intToEnum(Field, i);
                         const dest_slice = self_slice.items(field)[new_len..];
-                        const byte_count = dest_slice.len * @sizeOf(field_info.type);
                         // We use memset here for more efficient codegen in safety-checked,
                         // valgrind-enabled builds. Otherwise the valgrind client request
                         // will be repeated for every element.
-                        @memset(@ptrCast([*]u8, dest_slice.ptr), undefined, byte_count);
+                        @memset(dest_slice, undefined);
                     }
                 }
                 self.len = new_len;
lib/std/net.zig
@@ -1020,7 +1020,7 @@ fn linuxLookupName(
     for (addrs.items, 0..) |*addr, i| {
         var key: i32 = 0;
         var sa6: os.sockaddr.in6 = undefined;
-        @memset(@ptrCast([*]u8, &sa6), 0, @sizeOf(os.sockaddr.in6));
+        @memset(@ptrCast([*]u8, &sa6)[0..@sizeOf(os.sockaddr.in6)], 0);
         var da6 = os.sockaddr.in6{
             .family = os.AF.INET6,
             .scope_id = addr.addr.in6.sa.scope_id,
@@ -1029,7 +1029,7 @@ fn linuxLookupName(
             .addr = [1]u8{0} ** 16,
         };
         var sa4: os.sockaddr.in = undefined;
-        @memset(@ptrCast([*]u8, &sa4), 0, @sizeOf(os.sockaddr.in));
+        @memset(@ptrCast([*]u8, &sa4)[0..@sizeOf(os.sockaddr.in)], 0);
         var da4 = os.sockaddr.in{
             .family = os.AF.INET,
             .port = 65535,
@@ -1577,7 +1577,7 @@ fn resMSendRc(
 
     // Get local address and open/bind a socket
     var sa: Address = undefined;
-    @memset(@ptrCast([*]u8, &sa), 0, @sizeOf(Address));
+    @memset(@ptrCast([*]u8, &sa)[0..@sizeOf(Address)], 0);
     sa.any.family = family;
     try os.bind(fd, &sa.any, sl);
 
lib/std/os.zig
@@ -5217,7 +5217,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
         .macos, .ios, .watchos, .tvos => {
             // On macOS, we can use F.GETPATH fcntl command to query the OS for
             // the path to the file descriptor.
-            @memset(out_buffer, 0, MAX_PATH_BYTES);
+            @memset(out_buffer[0..MAX_PATH_BYTES], 0);
             switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                 .SUCCESS => {},
                 .BADF => return error.FileNotFound,
@@ -5308,7 +5308,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
             if (comptime builtin.os.version_range.semver.max.order(.{ .major = 6, .minor = 0 }) == .lt) {
                 @compileError("querying for canonical path of a handle is unsupported on this host");
             }
-            @memset(out_buffer, 0, MAX_PATH_BYTES);
+            @memset(out_buffer[0..MAX_PATH_BYTES], 0);
             switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                 .SUCCESS => {},
                 .BADF => return error.FileNotFound,
@@ -5322,7 +5322,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
             if (comptime builtin.os.version_range.semver.max.order(.{ .major = 10, .minor = 0 }) == .lt) {
                 @compileError("querying for canonical path of a handle is unsupported on this host");
             }
-            @memset(out_buffer, 0, MAX_PATH_BYTES);
+            @memset(out_buffer[0..MAX_PATH_BYTES], 0);
             switch (errno(system.fcntl(fd, F.GETPATH, out_buffer))) {
                 .SUCCESS => {},
                 .ACCES => return error.AccessDenied,
@@ -5720,7 +5720,7 @@ pub fn res_mkquery(
 
     // Construct query template - ID will be filled later
     var q: [280]u8 = undefined;
-    @memset(&q, 0, n);
+    @memset(q[0..n], 0);
     q[2] = @as(u8, op) * 8 + 1;
     q[5] = 1;
     mem.copy(u8, q[13..], name);
src/codegen/llvm.zig
@@ -5776,6 +5776,36 @@ pub const FuncGen = struct {
         return result;
     }
 
+    fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
+        switch (ty.ptrSize()) {
+            .Slice => return fg.builder.buildExtractValue(ptr, 0, ""),
+            .One => return ptr,
+            .Many, .C => unreachable,
+        }
+    }
+
+    fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
+        const target = fg.dg.module.getTarget();
+        const llvm_usize_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
+        switch (ty.ptrSize()) {
+            .Slice => {
+                const len = fg.builder.buildExtractValue(ptr, 1, "");
+                const elem_ty = ty.childType();
+                const abi_size = elem_ty.abiSize(target);
+                if (abi_size == 1) return len;
+                const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
+                return fg.builder.buildMul(len, abi_size_llvm_val, "");
+            },
+            .One => {
+                const array_ty = ty.childType();
+                const elem_ty = array_ty.childType();
+                const abi_size = elem_ty.abiSize(target);
+                return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
+            },
+            .Many, .C => unreachable,
+        }
+    }
+
     fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
@@ -8374,18 +8404,24 @@ pub const FuncGen = struct {
     }
 
     fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-        const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
-        const dest_ptr = try self.resolveInst(pl_op.operand);
-        const ptr_ty = self.air.typeOf(pl_op.operand);
-        const value = try self.resolveInst(extra.lhs);
-        const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndefDeep() else false;
-        const len = try self.resolveInst(extra.rhs);
-        const u8_llvm_ty = self.context.intType(8);
-        const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value;
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        const dest_slice = try self.resolveInst(bin_op.lhs);
+        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const value = try self.resolveInst(bin_op.rhs);
+        const elem_ty = self.air.typeOf(bin_op.rhs);
         const target = self.dg.module.getTarget();
+        const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+        const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
+        const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
+        const u8_llvm_ty = self.context.intType(8);
+        const fill_byte = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else b: {
+            if (elem_ty.abiSize(target) != 1) {
+                return self.dg.todo("implement @memset for non-byte-sized element type", .{});
+            }
+            break :b self.builder.buildBitCast(value, u8_llvm_ty, "");
+        };
         const dest_ptr_align = ptr_ty.ptrAlignment(target);
-        _ = self.builder.buildMemSet(dest_ptr, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr());
+        _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
 
         if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) {
             self.valgrindMarkUndef(dest_ptr, len);
@@ -8394,13 +8430,14 @@ pub const FuncGen = struct {
     }
 
     fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-        const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
-        const dest_ptr = try self.resolveInst(pl_op.operand);
-        const dest_ptr_ty = self.air.typeOf(pl_op.operand);
-        const src_ptr = try self.resolveInst(extra.lhs);
-        const src_ptr_ty = self.air.typeOf(extra.lhs);
-        const len = try self.resolveInst(extra.rhs);
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        const dest_slice = try self.resolveInst(bin_op.lhs);
+        const dest_ptr_ty = self.air.typeOf(bin_op.lhs);
+        const src_slice = try self.resolveInst(bin_op.rhs);
+        const src_ptr_ty = self.air.typeOf(bin_op.rhs);
+        const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
+        const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
+        const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
         const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
         const target = self.dg.module.getTarget();
         _ = self.builder.buildMemCpy(
src/Air.zig
@@ -632,17 +632,20 @@ pub const Inst = struct {
         /// Uses the `pl_op` field with `pred` as operand, and payload `Bin`.
         select,
 
-        /// Given dest ptr, value, and len, set all elements at dest to value.
+        /// Given dest pointer and value, set all elements at dest to value.
+        /// Dest pointer is either a slice or a pointer to array.
+        /// The element type may be any type, and the slice may have any alignment.
         /// Result type is always void.
-        /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
-        /// value, `rhs` is the length.
-        /// The element type may be any type, not just u8.
+        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the element value.
         memset,
-        /// Given dest ptr, src ptr, and len, copy len elements from src to dest.
+        /// Given dest pointer and source pointer, copy elements from source to dest.
+        /// Dest pointer is either a slice or a pointer to array.
+        /// The dest element type may be any type.
+        /// Source pointer must have same element type as dest element type.
+        /// Dest slice may have any alignment; source pointer may have any alignment.
+        /// The two memory regions must not overlap.
         /// Result type is always void.
-        /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the
-        /// src ptr, `rhs` is the length.
-        /// The element type may be any type, not just u8.
+        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the source pointer.
         memcpy,
 
         /// Uses the `ty_pl` field with payload `Cmpxchg`.
src/AstGen.zig
@@ -8453,18 +8453,16 @@ fn builtinCall(
             return rvalue(gz, ri, result, node);
         },
         .memcpy => {
-            _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{
-                .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
-                .source = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_const_u8_type } }, params[1]),
-                .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
+            _ = try gz.addPlNode(.memcpy, node, Zir.Inst.Bin{
+                .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
+                .rhs = try expr(gz, scope, .{ .rl = .ref }, params[1]),
             });
             return rvalue(gz, ri, .void_value, node);
         },
         .memset => {
-            _ = try gz.addPlNode(.memset, node, Zir.Inst.Memset{
-                .dest = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .manyptr_u8_type } }, params[0]),
-                .byte = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .u8_type } }, params[1]),
-                .byte_count = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, params[2]),
+            _ = try gz.addPlNode(.memset, node, Zir.Inst.Bin{
+                .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
+                .rhs = try expr(gz, scope, .{ .rl = .none }, params[1]),
             });
             return rvalue(gz, ri, .void_value, node);
         },
src/BuiltinFn.zig
@@ -615,14 +615,14 @@ pub const list = list: {
             "@memcpy",
             .{
                 .tag = .memcpy,
-                .param_count = 3,
+                .param_count = 2,
             },
         },
         .{
             "@memset",
             .{
                 .tag = .memset,
-                .param_count = 3,
+                .param_count = 2,
             },
         },
         .{
src/Liveness.zig
@@ -304,6 +304,8 @@ pub fn categorizeOperand(
         .atomic_store_release,
         .atomic_store_seq_cst,
         .set_union_tag,
+        .memset,
+        .memcpy,
         => {
             const o = air_datas[inst].bin_op;
             if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
@@ -597,16 +599,6 @@ pub fn categorizeOperand(
             if (extra.operand == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
             return .write;
         },
-        .memset,
-        .memcpy,
-        => {
-            const pl_op = air_datas[inst].pl_op;
-            const extra = air.extraData(Air.Bin, pl_op.payload).data;
-            if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
-            if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
-            if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .write);
-            return .write;
-        },
 
         .br => {
             const br = air_datas[inst].br;
@@ -987,6 +979,8 @@ fn analyzeInst(
         .set_union_tag,
         .min,
         .max,
+        .memset,
+        .memcpy,
         => {
             const o = inst_datas[inst].bin_op;
             return analyzeOperands(a, pass, data, inst, .{ o.lhs, o.rhs, .none });
@@ -1234,13 +1228,6 @@ fn analyzeInst(
             const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data;
             return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.operand, .none });
         },
-        .memset,
-        .memcpy,
-        => {
-            const pl_op = inst_datas[inst].pl_op;
-            const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
-            return analyzeOperands(a, pass, data, inst, .{ pl_op.operand, extra.lhs, extra.rhs });
-        },
 
         .br => return analyzeInstBr(a, pass, data, inst),
 
src/print_air.zig
@@ -169,6 +169,8 @@ const Writer = struct {
             .cmp_gte_optimized,
             .cmp_gt_optimized,
             .cmp_neq_optimized,
+            .memcpy,
+            .memset,
             => try w.writeBinOp(s, inst),
 
             .is_null,
@@ -315,8 +317,6 @@ const Writer = struct {
             .atomic_store_release => try w.writeAtomicStore(s, inst, .Release),
             .atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst),
             .atomic_rmw => try w.writeAtomicRmw(s, inst),
-            .memcpy => try w.writeMemcpy(s, inst),
-            .memset => try w.writeMemset(s, inst),
             .field_parent_ptr => try w.writeFieldParentPtr(s, inst),
             .wasm_memory_size => try w.writeWasmMemorySize(s, inst),
             .wasm_memory_grow => try w.writeWasmMemoryGrow(s, inst),
@@ -591,17 +591,6 @@ const Writer = struct {
         try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
     }
 
-    fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
-        const pl_op = w.air.instructions.items(.data)[inst].pl_op;
-        const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
-
-        try w.writeOperand(s, inst, 0, pl_op.operand);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 1, extra.lhs);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 2, extra.rhs);
-    }
-
     fn writeFieldParentPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
         const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
         const extra = w.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -610,17 +599,6 @@ const Writer = struct {
         try s.print(", {d}", .{extra.field_index});
     }
 
-    fn writeMemcpy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
-        const pl_op = w.air.instructions.items(.data)[inst].pl_op;
-        const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
-
-        try w.writeOperand(s, inst, 0, pl_op.operand);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 1, extra.lhs);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 2, extra.rhs);
-    }
-
     fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
         const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
         const val = w.air.values[ty_pl.payload];
src/print_zir.zig
@@ -277,8 +277,6 @@ const Writer = struct {
             .atomic_load => try self.writeAtomicLoad(stream, inst),
             .atomic_store => try self.writeAtomicStore(stream, inst),
             .atomic_rmw => try self.writeAtomicRmw(stream, inst),
-            .memcpy => try self.writeMemcpy(stream, inst),
-            .memset => try self.writeMemset(stream, inst),
             .shuffle => try self.writeShuffle(stream, inst),
             .mul_add => try self.writeMulAdd(stream, inst),
             .field_parent_ptr => try self.writeFieldParentPtr(stream, inst),
@@ -346,6 +344,8 @@ const Writer = struct {
             .vector_type,
             .max,
             .min,
+            .memcpy,
+            .memset,
             .elem_ptr_node,
             .elem_val_node,
             .elem_ptr,
@@ -1000,32 +1000,6 @@ const Writer = struct {
         try self.writeSrc(stream, inst_data.src());
     }
 
-    fn writeMemcpy(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
-        const inst_data = self.code.instructions.items(.data)[inst].pl_node;
-        const extra = self.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
-
-        try self.writeInstRef(stream, extra.dest);
-        try stream.writeAll(", ");
-        try self.writeInstRef(stream, extra.source);
-        try stream.writeAll(", ");
-        try self.writeInstRef(stream, extra.byte_count);
-        try stream.writeAll(") ");
-        try self.writeSrc(stream, inst_data.src());
-    }
-
-    fn writeMemset(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
-        const inst_data = self.code.instructions.items(.data)[inst].pl_node;
-        const extra = self.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
-
-        try self.writeInstRef(stream, extra.dest);
-        try stream.writeAll(", ");
-        try self.writeInstRef(stream, extra.byte);
-        try stream.writeAll(", ");
-        try self.writeInstRef(stream, extra.byte_count);
-        try stream.writeAll(") ");
-        try self.writeSrc(stream, inst_data.src());
-    }
-
     fn writeStructInitAnon(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
         const inst_data = self.code.instructions.items(.data)[inst].pl_node;
         const extra = self.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
src/Sema.zig
@@ -9861,8 +9861,11 @@ fn zirSliceStart(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data;
     const array_ptr = try sema.resolveInst(extra.lhs);
     const start = try sema.resolveInst(extra.start);
+    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
+    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
+    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
 
-    return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded);
+    return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded, ptr_src, start_src, end_src);
 }
 
 fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9875,8 +9878,11 @@ fn zirSliceEnd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const array_ptr = try sema.resolveInst(extra.lhs);
     const start = try sema.resolveInst(extra.start);
     const end = try sema.resolveInst(extra.end);
+    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
+    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
+    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
 
-    return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded);
+    return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded, ptr_src, start_src, end_src);
 }
 
 fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9891,8 +9897,11 @@ fn zirSliceSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
     const start = try sema.resolveInst(extra.start);
     const end = try sema.resolveInst(extra.end);
     const sentinel = try sema.resolveInst(extra.sentinel);
+    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
+    const start_src: LazySrcLoc = .{ .node_offset_slice_start = inst_data.src_node };
+    const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
 
-    return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src);
+    return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src, ptr_src, start_src, end_src);
 }
 
 fn zirSwitchCapture(
@@ -20393,6 +20402,22 @@ fn checkPtrType(
     return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
 }
 
+fn checkSliceOrArrayType(
+    sema: *Sema,
+    block: *Block,
+    ty_src: LazySrcLoc,
+    ty: Type,
+) CompileError!void {
+    if (ty.zigTypeTag() == .Pointer) {
+        switch (ty.ptrSize()) {
+            .Slice => return,
+            .One => if (ty.childType().zigTypeTag() == .Array) return,
+            else => {},
+        }
+    }
+    return sema.fail(block, ty_src, "expected slice or array pointer, found '{}'", .{ty.fmt(sema.mod)});
+}
+
 fn checkVectorElemType(
     sema: *Sema,
     block: *Block,
@@ -21750,88 +21775,64 @@ fn analyzeMinMax(
 
 fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
-    const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data;
+    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
     const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
-    const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
-    const uncasted_dest_ptr = try sema.resolveInst(extra.dest);
-
-    // TODO AstGen's coerced_ty cannot handle volatile here
-    var dest_ptr_info = Type.initTag(.manyptr_u8).ptrInfo().data;
-    dest_ptr_info.@"volatile" = sema.typeOf(uncasted_dest_ptr).isVolatilePtr();
-    const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
-    const dest_ptr = try sema.coerce(block, dest_ptr_ty, uncasted_dest_ptr, dest_src);
-
-    const uncasted_src_ptr = try sema.resolveInst(extra.source);
-    var src_ptr_info = Type.initTag(.manyptr_const_u8).ptrInfo().data;
-    src_ptr_info.@"volatile" = sema.typeOf(uncasted_src_ptr).isVolatilePtr();
-    const src_ptr_ty = try Type.ptr(sema.arena, sema.mod, src_ptr_info);
-    const src_ptr = try sema.coerce(block, src_ptr_ty, uncasted_src_ptr, src_src);
-    const len = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.byte_count), len_src);
+    const dest_ptr = try sema.resolveInst(extra.lhs);
+    const src_ptr_ptr = try sema.resolveInst(extra.rhs);
+    const dest_ptr_ty = sema.typeOf(dest_ptr);
+    try checkSliceOrArrayType(sema, block, dest_src, dest_ptr_ty);
+
+    const dest_len = try sema.fieldVal(block, dest_src, dest_ptr, "len", dest_src);
+    const src_ptr = try sema.analyzeSlice(block, src_src, src_ptr_ptr, .zero_usize, dest_len, .none, .unneeded, src_src, src_src, src_src);
 
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
         if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src;
         if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |src_ptr_val| {
             if (!src_ptr_val.isComptimeMutablePtr()) break :rs src_src;
-            if (try sema.resolveDefinedValue(block, len_src, len)) |len_val| {
-                _ = len_val;
-                return sema.fail(block, src, "TODO: Sema.zirMemcpy at comptime", .{});
-            } else break :rs len_src;
+            return sema.fail(block, src, "TODO: @memcpy at comptime", .{});
         } else break :rs src_src;
     } else dest_src;
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
     _ = try block.addInst(.{
         .tag = .memcpy,
-        .data = .{ .pl_op = .{
-            .operand = dest_ptr,
-            .payload = try sema.addExtra(Air.Bin{
-                .lhs = src_ptr,
-                .rhs = len,
-            }),
+        .data = .{ .bin_op = .{
+            .lhs = dest_ptr,
+            .rhs = src_ptr,
         } },
     });
 }
 
 fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
-    const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data;
+    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
     const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
-    const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
-    const uncasted_dest_ptr = try sema.resolveInst(extra.dest);
+    const dest_ptr = try sema.resolveInst(extra.lhs);
+    const uncoerced_elem = try sema.resolveInst(extra.rhs);
+    const dest_ptr_ty = sema.typeOf(dest_ptr);
+    try checkSliceOrArrayType(sema, block, dest_src, dest_ptr_ty);
 
-    // TODO AstGen's coerced_ty cannot handle volatile here
-    var ptr_info = Type.initTag(.manyptr_u8).ptrInfo().data;
-    ptr_info.@"volatile" = sema.typeOf(uncasted_dest_ptr).isVolatilePtr();
-    const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
-    const dest_ptr = try sema.coerce(block, dest_ptr_ty, uncasted_dest_ptr, dest_src);
-
-    const value = try sema.coerce(block, Type.u8, try sema.resolveInst(extra.byte), value_src);
-    const len = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.byte_count), len_src);
+    const elem_ty = dest_ptr_ty.elemType2();
+    const elem = try sema.coerce(block, elem_ty, uncoerced_elem, value_src);
 
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: {
         if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src;
-        if (try sema.resolveDefinedValue(block, len_src, len)) |len_val| {
-            if (try sema.resolveMaybeUndefVal(value)) |val| {
-                _ = len_val;
-                _ = val;
-                return sema.fail(block, src, "TODO: Sema.zirMemset at comptime", .{});
-            } else break :rs value_src;
-        } else break :rs len_src;
+        if (try sema.resolveMaybeUndefVal(elem)) |elem_val| {
+            _ = elem_val;
+            return sema.fail(block, src, "TODO: @memset at comptime", .{});
+        } else break :rs value_src;
     } else dest_src;
 
     try sema.requireRuntimeBlock(block, src, runtime_src);
     _ = try block.addInst(.{
         .tag = .memset,
-        .data = .{ .pl_op = .{
-            .operand = dest_ptr,
-            .payload = try sema.addExtra(Air.Bin{
-                .lhs = value,
-                .rhs = len,
-            }),
+        .data = .{ .bin_op = .{
+            .lhs = dest_ptr,
+            .rhs = elem,
         } },
     });
 }
@@ -28753,10 +28754,10 @@ fn analyzeSlice(
     uncasted_end_opt: Air.Inst.Ref,
     sentinel_opt: Air.Inst.Ref,
     sentinel_src: LazySrcLoc,
+    ptr_src: LazySrcLoc,
+    start_src: LazySrcLoc,
+    end_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
-    const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = src.node_offset.x };
-    const start_src: LazySrcLoc = .{ .node_offset_slice_start = src.node_offset.x };
-    const end_src: LazySrcLoc = .{ .node_offset_slice_end = src.node_offset.x };
     // Slice expressions can operate on a variable whose type is an array. This requires
     // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
     const ptr_ptr_ty = sema.typeOf(ptr_ptr);
src/Zir.zig
@@ -922,10 +922,10 @@ pub const Inst = struct {
         /// Uses the `pl_node` union field with payload `FieldParentPtr`.
         field_parent_ptr,
         /// Implements the `@memcpy` builtin.
-        /// Uses the `pl_node` union field with payload `Memcpy`.
+        /// Uses the `pl_node` union field with payload `Bin`.
         memcpy,
         /// Implements the `@memset` builtin.
-        /// Uses the `pl_node` union field with payload `Memset`.
+        /// Uses the `pl_node` union field with payload `Bin`.
         memset,
         /// Implements the `@min` builtin.
         /// Uses the `pl_node` union field with payload `Bin`
@@ -3501,18 +3501,6 @@ pub const Inst = struct {
         field_ptr: Ref,
     };
 
-    pub const Memcpy = struct {
-        dest: Ref,
-        source: Ref,
-        byte_count: Ref,
-    };
-
-    pub const Memset = struct {
-        dest: Ref,
-        byte: Ref,
-        byte_count: Ref,
-    };
-
     pub const Shuffle = struct {
         elem_type: Ref,
         a: Ref,
test/behavior/bugs/718.zig
@@ -14,7 +14,7 @@ test "zero keys with @memset" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
-    @memset(@ptrCast([*]u8, &keys), 0, @sizeOf(@TypeOf(keys)));
+    @memset(@ptrCast([*]u8, &keys)[0..@sizeOf(@TypeOf(keys))], 0);
     try expect(!keys.up);
     try expect(!keys.down);
     try expect(!keys.left);
test/behavior/basic.zig
@@ -367,8 +367,8 @@ fn testMemcpyMemset() !void {
     var foo: [20]u8 = undefined;
     var bar: [20]u8 = undefined;
 
-    @memset(&foo, 'A', foo.len);
-    @memcpy(&bar, &foo, bar.len);
+    @memset(&foo, 'A');
+    @memcpy(&bar, &foo);
 
     try expect(bar[0] == 'A');
     try expect(bar[11] == 'A');
test/behavior/struct.zig
@@ -91,7 +91,7 @@ test "structs" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     var foo: StructFoo = undefined;
-    @memset(@ptrCast([*]u8, &foo), 0, @sizeOf(StructFoo));
+    @memset(@ptrCast([*]u8, &foo)[0..@sizeOf(StructFoo)], 0);
     foo.a += 1;
     foo.b = foo.a == 1;
     try testFoo(foo);
@@ -498,7 +498,7 @@ test "packed struct fields are ordered from LSB to MSB" {
 
     var all: u64 = 0x7765443322221111;
     var bytes: [8]u8 align(@alignOf(Bitfields)) = undefined;
-    @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
+    @memcpy(bytes[0..8], @ptrCast([*]u8, &all));
     var bitfields = @ptrCast(*Bitfields, &bytes).*;
 
     try expect(bitfields.f1 == 0x1111);