const std = @import("../std.zig");
const builtin = @import("builtin");
const math = std.math;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const heap = std.heap;
const assert = std.debug.assert;

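/// A simple general-purpose allocator backed by a single `sbrk`-style grow
/// function. Small requests are rounded up to power-of-two size classes and
/// served from per-class free lists; larger requests are rounded up to a
/// power-of-two number of 64 KiB "bigpages". Freed memory is recycled through
/// the free lists and is never returned to the backing `sbrk`.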
pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
    return struct {
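        /// The allocator is stateless: all bookkeeping lives in container-level
        /// variables, so an `Allocator` can be formed from an undefined `ptr`
        /// and a pointer to this vtable.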
        pub const vtable: Allocator.VTable = .{
            .alloc = alloc,
            .resize = resize,
            .remap = remap,
            .free = free,
        };

        pub const Error = Allocator.Error;

        const max_usize = math.maxInt(usize);
        const ushift = math.Log2Int(usize);
        const bigpage_size = 64 * 1024;
        const pages_per_bigpage = bigpage_size / heap.pageSize();
        const bigpage_count = max_usize / bigpage_size;

        /// Because of storing free list pointers, the minimum size class is 3.
        const min_class = math.log2(math.ceilPowerOfTwoAssert(usize, 1 + @sizeOf(usize)));
        const size_class_count = math.log2(bigpage_size) - min_class;
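        // Example, on a 32-bit target: 1 + @sizeOf(usize) == 5 rounds up to 8,
        // so min_class == 3 and small slot sizes run 8, 16, ..., 32768 bytes
        // (13 size classes); anything larger is served from bigpages.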
        /// 0 - 1 bigpage
        /// 1 - 2 bigpages
        /// 2 - 4 bigpages
        /// etc.
        const big_size_class_count = math.log2(bigpage_count);

        var next_addrs = [1]usize{0} ** size_class_count;
        /// For each size class, the head of its free list: the address of the
        /// most recently freed slot, or 0 when the list is empty.
        var frees = [1]usize{0} ** size_class_count;
        /// Like `frees`, but for the big size classes.
        var big_frees = [1]usize{0} ** big_size_class_count;
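        // `next_addrs[class]` is the bump-allocation cursor within the current
        // bigpage for that size class. The free lists are intrusive: a freed
        // slot stores the address of the next free slot in its last
        // @sizeOf(usize) bytes, and 0 marks the end of the list.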

        // TODO don't do the naive locking strategy
        var lock: std.Thread.Mutex = .{};
        fn alloc(ctx: *anyopaque, len: usize, alignment: mem.Alignment, return_address: usize) ?[*]u8 {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // Make room for the freelist next pointer.
            const actual_len = @max(len +| @sizeOf(usize), alignment.toByteUnits());
            const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
            const class = math.log2(slot_size) - min_class;
            if (class < size_class_count) {
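                // Small request: reuse a slot from this class's free list if
                // one is available, otherwise bump-allocate a new one.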
                const addr = a: {
                    const top_free_ptr = frees[class];
                    if (top_free_ptr != 0) {
                        const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
                        frees[class] = node.*;
                        break :a top_free_ptr;
                    }

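                    // Free list is empty: bump-allocate. A cursor sitting on a
                    // page boundary (including the initial 0) means the current
                    // region for this class is exhausted, so a fresh bigpage is
                    // requested first.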
                    const next_addr = next_addrs[class];
                    if (next_addr % heap.pageSize() == 0) {
                        const addr = allocBigPages(1);
                        if (addr == 0) return null;
                        //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
                        //    slot_size, class, addr,
                        //});
                        next_addrs[class] = addr + slot_size;
                        break :a addr;
                    } else {
                        next_addrs[class] = next_addr + slot_size;
                        break :a next_addr;
                    }
                };
                return @as([*]u8, @ptrFromInt(addr));
            }
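            // Too large for any small size class: fall through to whole
            // bigpages.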
            const bigpages_needed = bigPagesNeeded(actual_len);
            const addr = allocBigPages(bigpages_needed);
            return @as([*]u8, @ptrFromInt(addr));
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) bool {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // We don't want to move anything from one size class to another, but we
            // can recover bytes in between powers of two.
            const buf_align = alignment.toByteUnits();
            const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
            const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
            const old_small_class = math.log2(old_small_slot_size) - min_class;
            if (old_small_class < size_class_count) {
                const new_small_slot_size = math.ceilPowerOfTwo(usize, new_actual_len) catch return false;
                return old_small_slot_size == new_small_slot_size;
            } else {
                const old_bigpages_needed = bigPagesNeeded(old_actual_len);
                const old_big_slot_pages = math.ceilPowerOfTwoAssert(usize, old_bigpages_needed);
                const new_bigpages_needed = bigPagesNeeded(new_actual_len);
                const new_big_slot_pages = math.ceilPowerOfTwo(usize, new_bigpages_needed) catch return false;
                return old_big_slot_pages == new_big_slot_pages;
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
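            // This allocator never relocates memory; remap succeeds exactly
            // when an in-place resize does.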
            return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            alignment: mem.Alignment,
            return_address: usize,
        ) void {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            const buf_align = alignment.toByteUnits();
            const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
            const class = math.log2(slot_size) - min_class;
            const addr = @intFromPtr(buf.ptr);
            if (class < size_class_count) {
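                // Small slot: push it onto its size class's free list; the
                // next pointer is stored in the slot's last word.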
                const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
                node.* = frees[class];
                frees[class] = addr;
            } else {
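                // Big allocation: recompute its power-of-two bigpage class and
                // push it onto that class's free list.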
                const bigpages_needed = bigPagesNeeded(actual_len);
                const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
                const big_slot_size_bytes = pow2_pages * bigpage_size;
                const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
                const big_class = math.log2(pow2_pages);
                node.* = big_frees[big_class];
                big_frees[big_class] = addr;
            }
        }

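        /// Number of bigpages needed for `byte_count` bytes plus the trailing
        /// free-list pointer; equivalent to
        /// `ceil((byte_count + @sizeOf(usize)) / bigpage_size)`.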
        inline fn bigPagesNeeded(byte_count: usize) usize {
            return (byte_count + (bigpage_size + (@sizeOf(usize) - 1))) / bigpage_size;
        }

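        /// Returns the address of a run of `n` bigpages (rounded up to a power
        /// of two), reusing a freed run of that class when possible and
        /// otherwise growing the heap via `sbrk`. Returns 0 on failure.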
        fn allocBigPages(n: usize) usize {
            const pow2_pages = math.ceilPowerOfTwoAssert(usize, n);
            const slot_size_bytes = pow2_pages * bigpage_size;
            const class = math.log2(pow2_pages);

            const top_free_ptr = big_frees[class];
            if (top_free_ptr != 0) {
                const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
                big_frees[class] = node.*;
                return top_free_ptr;
            }
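            // Nothing recycled for this class: grow the heap by exactly
            // `pow2_pages` bigpages.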
            return sbrk(pow2_pages * pages_per_bigpage * heap.pageSize());
        }
    };
}

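// Instantiation test: analyze the generic with a stub `sbrk` that always
// reports failure by returning 0.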
test SbrkAllocator {
    _ = SbrkAllocator(struct {
        fn sbrk(_: usize) usize {
            return 0;
        }
    }.sbrk);
}