const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;

/// This allocator wraps an existing allocator and provides an interface where you can
/// allocate repeatedly and then free everything together. Freeing an individual item
/// only reclaims its memory if it was the most recent allocation; otherwise the free is
/// a no-op. See the usage sketch test below.
pub const ArenaAllocator = struct {
    child_allocator: Allocator,
    state: State,

    /// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
    /// as a memory-saving optimization.
    pub const State = struct {
        buffer_list: std.SinglyLinkedList = .{},
        end_index: usize = 0,

        pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator {
            return .{
                .child_allocator = child_allocator,
                .state = self,
            };
        }
    };

    pub fn allocator(self: *ArenaAllocator) Allocator {
        return .{
            .ptr = self,
            .vtable = &.{
                .alloc = alloc,
                .resize = resize,
                .remap = remap,
                .free = free,
            },
        };
    }

    const BufNode = struct {
        data: usize,
        node: std.SinglyLinkedList.Node = .{},
    };
    const BufNode_alignment: Alignment = .of(BufNode);

    pub fn init(child_allocator: Allocator) ArenaAllocator {
        return (State{}).promote(child_allocator);
    }

    pub fn deinit(self: ArenaAllocator) void {
        // NOTE: When changing this, make sure `reset()` is adjusted accordingly!

        var it = self.state.buffer_list.first;
        while (it) |node| {
            // this has to occur before the free because the free frees node
            const next_it = node.next;
            const buf_node: *BufNode = @fieldParentPtr("node", node);
            const alloc_buf = @as([*]u8, @ptrCast(buf_node))[0..buf_node.data];
            self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
            it = next_it;
        }
    }

    pub const ResetMode = union(enum) {
        /// Releases all allocated memory in the arena.
        free_all,
        /// This will pre-heat the arena for future allocations by allocating a
        /// buffer large enough to hold all previously made allocations.
        /// Preheating speeds up allocation by invoking the backing allocator
        /// less often than before. If `reset()` is used in a loop, this means that after the
        /// biggest operation, no further backing allocations are performed.
        retain_capacity,
        /// This is the same as `retain_capacity`, but the retained memory will be shrunk
        /// to the given limit if it exceeds it.
        retain_with_limit: usize,
    };
    /// Queries the current memory use of this arena.
    /// This will **not** include the storage required for internal bookkeeping.
    pub fn queryCapacity(self: ArenaAllocator) usize {
        var size: usize = 0;
        var it = self.state.buffer_list.first;
        while (it) |node| : (it = node.next) {
            // Compute the actually allocated size excluding the
            // linked list node.
            const buf_node: *BufNode = @fieldParentPtr("node", node);
            size += buf_node.data - @sizeOf(BufNode);
        }
        return size;
    }
    /// Resets the arena allocator and frees all allocated memory.
    ///
    /// `mode` defines how the currently allocated memory is handled.
    /// See the variant documentation for `ResetMode` for the effects of each mode.
    ///
    /// The function returns whether the reset operation was successful or not.
    /// If the reallocation failed, `false` is returned. The arena will still be fully
    /// functional in that case; all memory is released. Future allocations just might
    /// be slower.
    ///
    /// NOTE: If `mode` is `free_all`, the function will always return `true`.
    pub fn reset(self: *ArenaAllocator, mode: ResetMode) bool {
        // Some words on the implementation:
        // The reset function can be implemented with two basic approaches:
        // - Counting how many bytes were allocated since the last reset, and storing that
        //   information in State. This will make reset fast and alloc only a teeny tiny bit
        //   slower.
        // - Counting how many bytes were allocated by iterating the chunk linked list. This
        //   will make reset slower, but alloc() keeps the same speed whether or not reset()
        //   is used.
        //
        // The second variant was chosen for implementation, as with more and more calls to reset(),
        // the function will get faster and faster. At some point, the complexity of the function
        // will drop to amortized O(1), as we only ever have a single chunk that will not be
        // reallocated, and we no longer touch the backing allocator at all.
        //
        // Thus, only the first handful of calls to reset() actually need to iterate the linked
        // list; all future calls just take the first node and only reset the `end_index`
        // value.
        const requested_capacity = switch (mode) {
            .retain_capacity => self.queryCapacity(),
            .retain_with_limit => |limit| @min(limit, self.queryCapacity()),
            .free_all => 0,
        };
        if (requested_capacity == 0) {
            // just reset when we don't have anything to reallocate
            self.deinit();
            self.state = State{};
            return true;
        }
        const total_size = requested_capacity + @sizeOf(BufNode);
        // Free all nodes except for the last one
        var it = self.state.buffer_list.first;
        const maybe_first_node = while (it) |node| {
            // this has to occur before the free because the free frees node
            const next_it = node.next;
            if (next_it == null)
                break node;
            const buf_node: *BufNode = @fieldParentPtr("node", node);
            const alloc_buf = @as([*]u8, @ptrCast(buf_node))[0..buf_node.data];
            self.child_allocator.rawFree(alloc_buf, BufNode_alignment, @returnAddress());
            it = next_it;
        } else null;
        std.debug.assert(maybe_first_node == null or maybe_first_node.?.next == null);
        // reset the state before we try resizing the buffers, so we definitely have reset the arena to 0.
        self.state.end_index = 0;
        if (maybe_first_node) |first_node| {
            self.state.buffer_list.first = first_node;
            // perfect, no need to invoke the child_allocator
            const first_buf_node: *BufNode = @fieldParentPtr("node", first_node);
            if (first_buf_node.data == total_size)
                return true;
            const first_alloc_buf = @as([*]u8, @ptrCast(first_buf_node))[0..first_buf_node.data];
            if (self.child_allocator.rawResize(first_alloc_buf, BufNode_alignment, total_size, @returnAddress())) {
                // successful resize
                first_buf_node.data = total_size;
            } else {
                // manual realloc
                const new_ptr = self.child_allocator.rawAlloc(total_size, BufNode_alignment, @returnAddress()) orelse {
                    // we failed to preheat the arena properly, signal this to the user.
                    return false;
                };
                self.child_allocator.rawFree(first_alloc_buf, BufNode_alignment, @returnAddress());
                const buf_node: *BufNode = @ptrCast(@alignCast(new_ptr));
                buf_node.* = .{ .data = total_size };
                self.state.buffer_list.first = &buf_node.node;
            }
        }
        return true;
    }

    fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) ?*BufNode {
        const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
        const big_enough_len = prev_len + actual_min_size;
        const len = big_enough_len + big_enough_len / 2;
        const ptr = self.child_allocator.rawAlloc(len, BufNode_alignment, @returnAddress()) orelse
            return null;
        const buf_node: *BufNode = @ptrCast(@alignCast(ptr));
        buf_node.* = .{ .data = len };
        self.state.buffer_list.prepend(&buf_node.node);
        self.state.end_index = 0;
        return buf_node;
    }

    fn alloc(ctx: *anyopaque, n: usize, alignment: Alignment, ra: usize) ?[*]u8 {
        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
        _ = ra;

        const ptr_align = alignment.toByteUnits();
        var cur_node: *BufNode = if (self.state.buffer_list.first) |first_node|
            @fieldParentPtr("node", first_node)
        else
            (self.createNode(0, n + ptr_align) orelse return null);
        while (true) {
            const cur_alloc_buf = @as([*]u8, @ptrCast(cur_node))[0..cur_node.data];
            const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..];
            const addr = @intFromPtr(cur_buf.ptr) + self.state.end_index;
            const adjusted_addr = mem.alignForward(usize, addr, ptr_align);
            const adjusted_index = self.state.end_index + (adjusted_addr - addr);
            const new_end_index = adjusted_index + n;

            if (new_end_index <= cur_buf.len) {
                const result = cur_buf[adjusted_index..new_end_index];
                self.state.end_index = new_end_index;
                return result.ptr;
            }

            const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
            if (self.child_allocator.rawResize(cur_alloc_buf, BufNode_alignment, bigger_buf_size, @returnAddress())) {
                cur_node.data = bigger_buf_size;
            } else {
                // The current node cannot be resized; allocate a new, larger one instead.
                cur_node = self.createNode(cur_buf.len, n + ptr_align) orelse return null;
            }
        }
    }

    fn resize(ctx: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));
        _ = alignment;
        _ = ret_addr;

        const cur_node = self.state.buffer_list.first orelse return false;
        const cur_buf_node: *BufNode = @fieldParentPtr("node", cur_node);
        const cur_buf = @as([*]u8, @ptrCast(cur_buf_node))[@sizeOf(BufNode)..cur_buf_node.data];
        if (@intFromPtr(cur_buf.ptr) + self.state.end_index != @intFromPtr(buf.ptr) + buf.len) {
            // It's not the most recent allocation, so it cannot be expanded,
            // but it's fine if they want to make it smaller.
            return new_len <= buf.len;
        }

        if (buf.len >= new_len) {
            self.state.end_index -= buf.len - new_len;
            return true;
        } else if (cur_buf.len - self.state.end_index >= new_len - buf.len) {
            self.state.end_index += new_len - buf.len;
            return true;
        } else {
            return false;
        }
    }

    fn remap(
        context: *anyopaque,
        memory: []u8,
        alignment: Alignment,
        new_len: usize,
        return_address: usize,
    ) ?[*]u8 {
        return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
    }

    fn free(ctx: *anyopaque, buf: []u8, alignment: Alignment, ret_addr: usize) void {
        _ = alignment;
        _ = ret_addr;

        const self: *ArenaAllocator = @ptrCast(@alignCast(ctx));

        const cur_node = self.state.buffer_list.first orelse return;
        const cur_buf_node: *BufNode = @fieldParentPtr("node", cur_node);
        const cur_buf = @as([*]u8, @ptrCast(cur_buf_node))[@sizeOf(BufNode)..cur_buf_node.data];

        if (@intFromPtr(cur_buf.ptr) + self.state.end_index == @intFromPtr(buf.ptr) + buf.len) {
            self.state.end_index -= buf.len;
        }
    }
};

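// A minimal usage sketch of the behavior described in the top-level doc comment:
// individual frees only reclaim the most recent allocation, and everything is
// released together by `deinit`.
test "usage sketch: allocate, then free all together" {
    var arena = ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    const a = arena.allocator();

    const first = try a.alloc(u8, 8);
    const second = try a.alloc(u8, 8);

    // `first` is not the most recent allocation, so this free is a no-op.
    a.free(first);
    // `second` is the most recent allocation, so its bytes are reclaimed.
    a.free(second);

    // The arena stays usable; all remaining memory is released by `deinit`.
    _ = try a.alloc(u8, 16);
}
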
test "reset with preheating" {
    var arena_allocator = ArenaAllocator.init(std.testing.allocator);
    defer arena_allocator.deinit();
    // provides some variance in the allocated data
    var rng_src = std.Random.DefaultPrng.init(std.testing.random_seed);
    const random = rng_src.random();
    var rounds: usize = 25;
    while (rounds > 0) {
        rounds -= 1;
        _ = arena_allocator.reset(.retain_capacity);
        var alloced_bytes: usize = 0;
        const total_size: usize = random.intRangeAtMost(usize, 256, 16384);
        while (alloced_bytes < total_size) {
            const size = random.intRangeAtMost(usize, 16, 256);
            const alignment: Alignment = .@"32";
            const slice = try arena_allocator.allocator().alignedAlloc(u8, alignment, size);
            try std.testing.expect(alignment.check(@intFromPtr(slice.ptr)));
            try std.testing.expectEqual(size, slice.len);
            alloced_bytes += slice.len;
        }
    }
}
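
// A minimal sketch of `queryCapacity`: it excludes the per-chunk bookkeeping node, so it
// reports zero before the first allocation and at least the requested number of bytes
// afterwards (the exact value depends on the chunk growth policy in `createNode`).
test "queryCapacity sketch" {
    var arena = ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();

    try std.testing.expectEqual(@as(usize, 0), arena.queryCapacity());

    _ = try arena.allocator().alloc(u8, 1000);
    try std.testing.expect(arena.queryCapacity() >= 1000);
}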

test "reset while retaining a buffer" {
    var arena_allocator = ArenaAllocator.init(std.testing.allocator);
    defer arena_allocator.deinit();
    const a = arena_allocator.allocator();

    // Create two internal buffers
    _ = try a.alloc(u8, 1);
    _ = try a.alloc(u8, 1000);

    // Check that we have at least two buffers
    try std.testing.expect(arena_allocator.state.buffer_list.first.?.next != null);

    // This retains the first allocated buffer
    try std.testing.expect(arena_allocator.reset(.{ .retain_with_limit = 1 }));
}
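
// A minimal sketch of the memory-saving pattern from the `State` doc comment: only the
// inner `State` is stored, and it is promoted back to a full `ArenaAllocator` whenever
// allocations are needed.
test "state promote sketch" {
    var state: ArenaAllocator.State = .{};
    defer state.promote(std.testing.allocator).deinit();

    var arena = state.promote(std.testing.allocator);
    _ = try arena.allocator().alloc(u8, 64);
    // Store the updated state back; it is all that needs to be kept between uses.
    state = arena.state;
}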