Commit 0d8166be3f

Author: Andrew Kelley <andrew@ziglang.org>
Date:   2025-02-04 06:03:27
Parent: a4d4e08

    std: update to new Allocator API
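This commit replaces the untyped `log2_*_align: u8` parameter with `std.mem.Alignment` in every `Allocator.VTable` function and adds a new `remap` entry alongside `alloc`, `resize`, and `free`. For orientation, the vtable shape implied by the implementations below is approximately the following (a sketch reconstructed from the diffed signatures; upstream field order, parameter names, and doc comments may differ):

```zig
pub const VTable = struct {
    alloc: *const fn (ctx: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8,
    resize: *const fn (ctx: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool,
    // New in this commit: like resize, but the allocation is allowed to
    // move; returns the (possibly relocated) base pointer, or null on failure.
    remap: *const fn (ctx: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8,
    free: *const fn (ctx: *anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void,
};
```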
Changed files (3)
lib/std/heap/log_to_writer_allocator.zig
@@ -23,6 +23,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
                 .vtable = &.{
                     .alloc = alloc,
                     .resize = resize,
+                    .remap = remap,
                     .free = free,
                 },
             };
@@ -31,12 +32,12 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
         fn alloc(
             ctx: *anyopaque,
             len: usize,
-            log2_ptr_align: u8,
+            alignment: std.mem.Alignment,
             ra: usize,
         ) ?[*]u8 {
             const self: *Self = @ptrCast(@alignCast(ctx));
             self.writer.print("alloc : {}", .{len}) catch {};
-            const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
+            const result = self.parent_allocator.rawAlloc(len, alignment, ra);
             if (result != null) {
                 self.writer.print(" success!\n", .{}) catch {};
             } else {
@@ -48,7 +49,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
         fn resize(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: std.mem.Alignment,
             new_len: usize,
             ra: usize,
         ) bool {
@@ -59,7 +60,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
                 self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
             }
 
-            if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
+            if (self.parent_allocator.rawResize(buf, alignment, new_len, ra)) {
                 if (new_len > buf.len) {
                     self.writer.print(" success!\n", .{}) catch {};
                 }
@@ -71,15 +72,41 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             return false;
         }
 
+        fn remap(
+            ctx: *anyopaque,
+            buf: []u8,
+            alignment: std.mem.Alignment,
+            new_len: usize,
+            ra: usize,
+        ) ?[*]u8 {
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            if (new_len <= buf.len) {
+                self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
+            } else {
+                self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
+            }
+
+            if (self.parent_allocator.rawRemap(buf, alignment, new_len, ra)) |new_memory| {
+                if (new_len > buf.len) {
+                    self.writer.print(" success!\n", .{}) catch {};
+                }
+                return new_memory;
+            }
+
+            std.debug.assert(new_len > buf.len);
+            self.writer.print(" failure!\n", .{}) catch {};
+            return null;
+        }
+
         fn free(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: std.mem.Alignment,
             ra: usize,
         ) void {
             const self: *Self = @ptrCast(@alignCast(ctx));
             self.writer.print("free  : {}\n", .{buf.len}) catch {};
-            self.parent_allocator.rawFree(buf, log2_buf_align, ra);
+            self.parent_allocator.rawFree(buf, alignment, ra);
         }
     };
 }
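The pattern in this file repeats across the other two: each `u8` holding a log2 alignment becomes a `std.mem.Alignment`, which is backed by the same log2 value, so the representation is unchanged while the type now documents itself. A minimal sketch of the helpers this commit leans on (`fromByteUnits`, `toByteUnits`, and `check` all appear in the diffs):

```zig
const std = @import("std");

test "Alignment round-trips the old log2 encoding" {
    // 16-byte alignment is stored as log2(16) = 4 under the hood.
    const a: std.mem.Alignment = .fromByteUnits(16);
    try std.testing.expectEqual(@as(usize, 16), a.toByteUnits());

    // check() replaces manual isAlignedLog2 asserts on returned pointers.
    try std.testing.expect(a.check(0x1000)); // 0x1000 is 16-byte aligned
    try std.testing.expect(!a.check(0x1008)); // 0x1008 is not
}
```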
lib/std/heap.zig
@@ -147,12 +147,12 @@ const CAllocator = struct {
         return @as(*[*]u8, @ptrFromInt(@intFromPtr(ptr) - @sizeOf(usize)));
     }
 
-    fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
-        const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
+    fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 {
+        const alignment_bytes = alignment.toByteUnits();
         if (supports_posix_memalign) {
             // The posix_memalign only accepts alignment values that are a
             // multiple of the pointer size
-            const eff_alignment = @max(alignment, @sizeOf(usize));
+            const eff_alignment = @max(alignment_bytes, @sizeOf(usize));
 
             var aligned_ptr: ?*anyopaque = undefined;
             if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
@@ -164,9 +164,9 @@ const CAllocator = struct {
         // Thin wrapper around regular malloc, overallocate to account for
         // alignment padding and store the original malloc()'ed pointer before
         // the aligned address.
-        const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null));
+        const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment_bytes - 1 + @sizeOf(usize)) orelse return null));
         const unaligned_addr = @intFromPtr(unaligned_ptr);
-        const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment);
+        const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment_bytes);
         const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
         getHeader(aligned_ptr).* = unaligned_ptr;
 
@@ -195,22 +195,22 @@ const CAllocator = struct {
     fn alloc(
         _: *anyopaque,
         len: usize,
-        log2_align: u8,
+        alignment: mem.Alignment,
         return_address: usize,
     ) ?[*]u8 {
         _ = return_address;
         assert(len > 0);
-        return alignedAlloc(len, log2_align);
+        return alignedAlloc(len, alignment);
     }
 
     fn resize(
         _: *anyopaque,
         buf: []u8,
-        log2_buf_align: u8,
+        alignment: mem.Alignment,
         new_len: usize,
         return_address: usize,
     ) bool {
-        _ = log2_buf_align;
+        _ = alignment;
         _ = return_address;
         if (new_len <= buf.len) {
             return true;
@@ -227,10 +227,10 @@ const CAllocator = struct {
     fn free(
         _: *anyopaque,
         buf: []u8,
-        log2_buf_align: u8,
+        alignment: mem.Alignment,
         return_address: usize,
     ) void {
-        _ = log2_buf_align;
+        _ = alignment;
         _ = return_address;
         alignedFree(buf.ptr);
     }
@@ -267,28 +267,28 @@ const raw_c_allocator_vtable = Allocator.VTable{
 fn rawCAlloc(
     _: *anyopaque,
     len: usize,
-    log2_ptr_align: u8,
+    alignment: mem.Alignment,
     ret_addr: usize,
 ) ?[*]u8 {
     _ = ret_addr;
-    assert(log2_ptr_align <= comptime std.math.log2_int(usize, @alignOf(std.c.max_align_t)));
+    assert(alignment.order(.le, comptime .fromByteUnits(@alignOf(std.c.max_align_t))));
     // Note that this pointer cannot be aligncasted to max_align_t because if
     // len is < max_align_t then the alignment can be smaller. For example, if
     // max_align_t is 16, but the user requests 8 bytes, there is no built-in
     // type in C that is size 8 and has 16 byte alignment, so the alignment may
     // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc
     // is allowed to return a 1-byte aligned pointer.
-    return @as(?[*]u8, @ptrCast(c.malloc(len)));
+    return @ptrCast(c.malloc(len));
 }
 
 fn rawCResize(
     _: *anyopaque,
     buf: []u8,
-    log2_old_align: u8,
+    alignment: mem.Alignment,
     new_len: usize,
     ret_addr: usize,
 ) bool {
-    _ = log2_old_align;
+    _ = alignment;
     _ = ret_addr;
 
     if (new_len <= buf.len)
@@ -305,10 +305,10 @@ fn rawCResize(
 fn rawCFree(
     _: *anyopaque,
     buf: []u8,
-    log2_old_align: u8,
+    alignment: mem.Alignment,
     ret_addr: usize,
 ) void {
-    _ = log2_old_align;
+    _ = alignment;
     _ = ret_addr;
     c.free(buf.ptr);
 }
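`raw_c_allocator` continues to ignore the alignment argument in `resize` and `free`; because it forwards straight to `malloc`/`free`, it can only serve alignments up to `@alignOf(std.c.max_align_t)`, which the assert in `rawCAlloc` now expresses with an `Alignment` comparison instead of log2 arithmetic. A usage sketch (requires linking libc):

```zig
const std = @import("std");

test "raw_c_allocator: a thin malloc/free wrapper" {
    const gpa = std.heap.raw_c_allocator;
    // u64 needs 8-byte alignment, well within max_align_t on common targets.
    const buf = try gpa.alloc(u64, 8);
    defer gpa.free(buf);
    @memset(buf, 0);
}
```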
@@ -380,13 +380,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         fn alloc(
             ctx: *anyopaque,
             n: usize,
-            log2_ptr_align: u8,
+            alignment: mem.Alignment,
             return_address: usize,
         ) ?[*]u8 {
             _ = return_address;
             const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
 
-            const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
+            const ptr_align = alignment.toByteUnits();
             const amt = n + ptr_align - 1 + @sizeOf(usize);
             const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .seq_cst);
             const heap_handle = optional_heap_handle orelse blk: {
@@ -407,11 +407,11 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         fn resize(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: mem.Alignment,
             new_size: usize,
             return_address: usize,
         ) bool {
-            _ = log2_buf_align;
+            _ = alignment;
             _ = return_address;
             const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
 
@@ -432,10 +432,10 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         fn free(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: mem.Alignment,
             return_address: usize,
         ) void {
-            _ = log2_buf_align;
+            _ = alignment;
             _ = return_address;
             const self: *HeapAllocator = @ptrCast(@alignCast(ctx));
             windows.HeapFree(self.heap_handle.?, 0, @as(*anyopaque, @ptrFromInt(getRecordPtr(buf).*)));
@@ -482,6 +482,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
                 .vtable = &.{
                     .alloc = alloc,
                     .resize = resize,
+                    .remap = remap,
                     .free = free,
                 },
             };
@@ -496,40 +497,55 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
         fn alloc(
             ctx: *anyopaque,
             len: usize,
-            log2_ptr_align: u8,
+            alignment: mem.Alignment,
             ra: usize,
         ) ?[*]u8 {
             const self: *Self = @ptrCast(@alignCast(ctx));
-            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse
-                return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra);
+            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse
+                return self.fallback_allocator.rawAlloc(len, alignment, ra);
         }
 
         fn resize(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: mem.Alignment,
             new_len: usize,
             ra: usize,
         ) bool {
             const self: *Self = @ptrCast(@alignCast(ctx));
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
-                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra);
+                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra);
             } else {
-                return self.fallback_allocator.rawResize(buf, log2_buf_align, new_len, ra);
+                return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
+            }
+        }
+
+        fn remap(
+            context: *anyopaque,
+            memory: []u8,
+            alignment: mem.Alignment,
+            new_len: usize,
+            return_address: usize,
+        ) ?[*]u8 {
+            const self: *Self = @ptrCast(@alignCast(context));
+            if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) {
+                return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address);
+            } else {
+                return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
             }
         }
 
         fn free(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: mem.Alignment,
             ra: usize,
         ) void {
             const self: *Self = @ptrCast(@alignCast(ctx));
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
-                return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra);
+                return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra);
             } else {
-                return self.fallback_allocator.rawFree(buf, log2_buf_align, ra);
+                return self.fallback_allocator.rawFree(buf, alignment, ra);
             }
         }
     };
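The new `remap` here follows the same ownership dispatch as `resize` and `free`: requests for memory inside the inline buffer go to the `FixedBufferAllocator`, everything else to the fallback. At the call site, the distinction the two hooks draw is that `resize` must succeed in place while `remap` may relocate the allocation. A minimal sketch, assuming the high-level `Allocator.resize`/`Allocator.remap` wrappers that accompany this vtable:

```zig
const std = @import("std");

test "stackFallback: resize stays put, remap may move" {
    var sfa = std.heap.stackFallback(64, std.testing.allocator);
    const gpa = sfa.get();

    var buf = try gpa.alloc(u8, 16); // fits: served from the stack buffer
    defer gpa.free(buf);

    // resize: in-place only; on success the caller keeps the same pointer.
    if (gpa.resize(buf, 8)) buf.len = 8;

    // remap: the allocator may return a relocated slice; null means the
    // request could not be honored and the old allocation is untouched.
    if (gpa.remap(buf, 32)) |moved| buf = moved;
}
```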
lib/std/mem.zig
@@ -92,6 +92,7 @@ pub fn ValidationAllocator(comptime T: type) type {
                 .vtable = &.{
                     .alloc = alloc,
                     .resize = resize,
+                    .remap = remap,
                     .free = free,
                 },
             };
@@ -105,41 +106,54 @@ pub fn ValidationAllocator(comptime T: type) type {
         pub fn alloc(
             ctx: *anyopaque,
             n: usize,
-            log2_ptr_align: u8,
+            alignment: mem.Alignment,
             ret_addr: usize,
         ) ?[*]u8 {
             assert(n > 0);
             const self: *Self = @ptrCast(@alignCast(ctx));
             const underlying = self.getUnderlyingAllocatorPtr();
-            const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse
+            const result = underlying.rawAlloc(n, alignment, ret_addr) orelse
                 return null;
-            assert(mem.isAlignedLog2(@intFromPtr(result), log2_ptr_align));
+            assert(alignment.check(@intFromPtr(result)));
             return result;
         }
 
         pub fn resize(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: Alignment,
             new_len: usize,
             ret_addr: usize,
         ) bool {
             const self: *Self = @ptrCast(@alignCast(ctx));
             assert(buf.len > 0);
             const underlying = self.getUnderlyingAllocatorPtr();
-            return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr);
+            return underlying.rawResize(buf, alignment, new_len, ret_addr);
+        }
+
+        pub fn remap(
+            ctx: *anyopaque,
+            buf: []u8,
+            alignment: Alignment,
+            new_len: usize,
+            ret_addr: usize,
+        ) ?[*]u8 {
+            const self: *Self = @ptrCast(@alignCast(ctx));
+            assert(buf.len > 0);
+            const underlying = self.getUnderlyingAllocatorPtr();
+            return underlying.rawRemap(buf, alignment, new_len, ret_addr);
         }
 
         pub fn free(
             ctx: *anyopaque,
             buf: []u8,
-            log2_buf_align: u8,
+            alignment: Alignment,
             ret_addr: usize,
         ) void {
             const self: *Self = @ptrCast(@alignCast(ctx));
             assert(buf.len > 0);
             const underlying = self.getUnderlyingAllocatorPtr();
-            underlying.rawFree(buf, log2_buf_align, ret_addr);
+            underlying.rawFree(buf, alignment, ret_addr);
         }
 
         pub fn reset(self: *Self) void {
@@ -167,27 +181,9 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
     return adjusted;
 }
 
-const fail_allocator = Allocator{
-    .ptr = undefined,
-    .vtable = &failAllocator_vtable,
-};
-
-const failAllocator_vtable = Allocator.VTable{
-    .alloc = failAllocatorAlloc,
-    .resize = Allocator.noResize,
-    .free = Allocator.noFree,
-};
-
-fn failAllocatorAlloc(_: *anyopaque, n: usize, log2_alignment: u8, ra: usize) ?[*]u8 {
-    _ = n;
-    _ = log2_alignment;
-    _ = ra;
-    return null;
-}
-
 test "Allocator basics" {
-    try testing.expectError(error.OutOfMemory, fail_allocator.alloc(u8, 1));
-    try testing.expectError(error.OutOfMemory, fail_allocator.allocSentinel(u8, 1, 0));
+    try testing.expectError(error.OutOfMemory, testing.failing_allocator.alloc(u8, 1));
+    try testing.expectError(error.OutOfMemory, testing.failing_allocator.allocSentinel(u8, 1, 0));
 }
 
 test "Allocator.resize" {