Commit 29de809a92
Changed files (1)
lib
std
lib/std/heap/general_purpose_allocator.zig
@@ -561,6 +561,19 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return error.OutOfMemory;
}
+ // Returns true if an allocation of `size` bytes fits under the configured
+ // memory limit, charging it to total_requested_bytes; always true when enable_memory_limit is false
+ fn isAllocationAllowed(self: *Self, size: usize) bool {
+ if (config.enable_memory_limit) {
+ const new_req_bytes = self.total_requested_bytes + size;
+ if (new_req_bytes > self.requested_memory_limit)
+ return false;
+ self.total_requested_bytes = new_req_bytes;
+ }
+
+ return true;
+ }
+
fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
@@ -568,39 +581,38 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
defer held.release();
const new_aligned_size = math.max(len, ptr_align);
- const mem_slice = blk: {
- if (new_aligned_size > largest_bucket_object_size) {
- try self.large_allocations.ensureCapacity(
- self.backing_allocator,
- self.large_allocations.entries.items.len + 1,
- );
-
- const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
+ if (new_aligned_size > largest_bucket_object_size) {
+ try self.large_allocations.ensureCapacity(
+ self.backing_allocator,
+ self.large_allocations.entries.items.len + 1,
+ );
- const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
- assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
- gop.entry.value.bytes = slice;
- collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
-
- break :blk slice;
- } else {
- const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
- const ptr = try self.allocSlot(new_size_class, ret_addr);
- break :blk ptr[0..len];
- }
- };
+ const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
- if (config.enable_memory_limit) {
// The backing allocator may return a memory block bigger than
// `len`, use the effective size for bookkeeping purposes
- const new_req_bytes = self.total_requested_bytes + mem_slice.len;
- if (new_req_bytes > self.requested_memory_limit) {
+ if (!self.isAllocationAllowed(slice.len)) {
+ // Over the memory limit: release the block we just obtained so it is not leaked
+ const new_len = try self.backing_allocator.resizeFn(self.backing_allocator, slice, ptr_align, 0, 0, ret_addr);
+ assert(new_len == 0);
return error.OutOfMemory;
}
- self.total_requested_bytes = new_req_bytes;
+
+ const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
+ assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
+ gop.entry.value.bytes = slice;
+ collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
+
+ return slice;
+ }
+
+ if (!self.isAllocationAllowed(len)) {
+ return error.OutOfMemory;
}
- return mem_slice;
+ const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
+ const ptr = try self.allocSlot(new_size_class, ret_addr);
+ return ptr[0..len];
}
fn createBucket(self: *Self, size_class: usize, bucket_index: usize) Error!*BucketHeader {