Commit 02e5e0ba1f

Lee Cannon <leecannon@leecannon.xyz>
2021-10-31 22:45:27
allocgate: apply missed changes
1 parent 9377f32
Changed files (3)
doc/langref.html.in
@@ -10200,7 +10200,7 @@ test "string literal to constant slice" {
       {#header_open|Implementing an Allocator#}
       <p>Zig programmers can implement their own allocators by fulfilling the Allocator interface.
       In order to do this one must read carefully the documentation comments in std/mem.zig and
-      then supply a {#syntax#}reallocFn{#endsyntax#} and a {#syntax#}shrinkFn{#endsyntax#}.
+      then supply an {#syntax#}allocFn{#endsyntax#} and a {#syntax#}resizeFn{#endsyntax#}.
       </p>
       <p>
       There are many example allocators to look at for inspiration. Look at std/heap.zig and
lib/std/heap/general_purpose_allocator.zig
@@ -555,7 +555,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 
             // Do memory limit accounting with requested sizes rather than what backing_allocator returns
             // because if we want to return error.OutOfMemory, we have to leave allocation untouched, and
-            // that is impossible to guarantee after calling backing_allocator.resizeFn.
+            // that is impossible to guarantee after calling backing_allocator.vtable.resize.
             const prev_req_bytes = self.total_requested_bytes;
             if (config.enable_memory_limit) {
                 const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
@@ -571,7 +571,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const result_len = if (config.never_unmap and new_size == 0)
                 0
             else
-                try self.backing_allocator.resizeFn(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
+                try self.backing_allocator.vtable.resize(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
 
             if (config.enable_memory_limit) {
                 entry.value_ptr.requested_size = new_size;
@@ -764,7 +764,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const new_aligned_size = math.max(len, ptr_align);
             if (new_aligned_size > largest_bucket_object_size) {
                 try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
-                const slice = try self.backing_allocator.allocFn(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
+                const slice = try self.backing_allocator.vtable.alloc(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
 
                 const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
                 if (config.retain_metadata and !config.never_unmap) {
@@ -1191,10 +1191,12 @@ test "double frees" {
 test "bug 9995 fix, large allocs count requested size not backing size" {
     // with AtLeast, buffer likely to be larger than requested, especially when shrinking
     var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
-    var buf = try gpa.allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
+    const allocator = gpa.allocator();
+
+    var buf = try allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
     try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
-    buf = try gpa.allocator.reallocAtLeast(buf, 1);
+    buf = try allocator.reallocAtLeast(buf, 1);
     try std.testing.expect(gpa.total_requested_bytes == 1);
-    buf = try gpa.allocator.reallocAtLeast(buf, 2);
+    buf = try allocator.reallocAtLeast(buf, 2);
     try std.testing.expect(gpa.total_requested_bytes == 2);
 }
lib/std/mem/Allocator.zig
@@ -108,7 +108,7 @@ pub fn NoResize(comptime AllocatorType: type) type {
 /// When the size/alignment is less than or equal to the previous allocation,
 /// this function returns `error.OutOfMemory` when the allocator decides the client
 /// would be better off keeping the extra alignment/size. Clients will call
-/// `resizeFn` when they require the allocator to track a new alignment/size,
+/// `vtable.resize` when they require the allocator to track a new alignment/size,
 /// and so this function should only return success when the allocator considers
 /// the reallocation desirable from the allocator's perspective.
 /// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
@@ -124,7 +124,7 @@ pub fn NoResize(comptime AllocatorType: type) type {
 fn reallocBytes(
     self: Allocator,
     /// Guaranteed to be the same as what was returned from most recent call to
-    /// `allocFn` or `resizeFn`.
+    /// `vtable.alloc` or `vtable.resize`.
     /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
     /// is guaranteed to be >= 1.
     old_mem: []u8,
@@ -507,7 +507,7 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
     return new_buf[0..m.len :0];
 }
 
-/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
+/// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning
 /// error.OutOfMemory should be impossible.
 /// This function allows a runtime `buf_align` value. Callers should generally prefer
 /// to call `shrink` directly.