const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const c = std.c;
const Allocator = std.mem.Allocator;
const windows = std.os.windows;
const Alignment = std.mem.Alignment;

pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
pub const SmpAllocator = @import("heap/SmpAllocator.zig");
pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig");
pub const PageAllocator = @import("heap/PageAllocator.zig");
pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator;
pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
pub const WasmAllocator = @import("heap/WasmAllocator.zig");

pub const DebugAllocatorConfig = @import("heap/debug_allocator.zig").Config;
pub const DebugAllocator = @import("heap/debug_allocator.zig").DebugAllocator;
pub const Check = enum { ok, leak };
/// Deprecated; to be removed after 0.14.0 is tagged.
pub const GeneralPurposeAllocatorConfig = DebugAllocatorConfig;
/// Deprecated; to be removed after 0.14.0 is tagged.
pub const GeneralPurposeAllocator = DebugAllocator;

/// A memory pool that can allocate objects of a single type very quickly.
/// Use this when you need to allocate a lot of objects of the same type,
/// because it outperforms general purpose allocators.
/// Functions that potentially allocate memory accept an `Allocator` parameter.
pub fn MemoryPool(comptime Item: type) type {
    return memory_pool.Extra(Item, .{ .alignment = null });
}
pub const memory_pool = @import("heap/memory_pool.zig");
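
// A usage sketch for `MemoryPool` (illustrative only: per the doc comment
// above, allocating functions take an `Allocator` parameter, but the exact
// method names here (`create`, `destroy`, `deinit`) are assumptions; see
// heap/memory_pool.zig for the authoritative interface):
//
//     var pool: MemoryPool(u32) = ...; // initialize per memory_pool.zig
//     defer pool.deinit(allocator);
//     const item = try pool.create(allocator);
//     item.* = 42;
//     pool.destroy(item);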

/// Deprecated; use `memory_pool.Aligned`.
pub const MemoryPoolAligned = memory_pool.Aligned;
/// Deprecated; use `memory_pool.Extra`.
pub const MemoryPoolExtra = memory_pool.Extra;
/// Deprecated; use `memory_pool.Options`.
pub const MemoryPoolOptions = memory_pool.Options;

/// TODO Utilize this on Windows.
pub var next_mmap_addr_hint: ?[*]align(page_size_min) u8 = null;

/// comptime-known minimum page size of the target.
///
/// All pointers from `mmap` or `NtAllocateVirtualMemory` are aligned to at least
/// `page_size_min`, but their actual alignment may be bigger.
///
/// This value can be overridden via `std.options.page_size_min`.
///
/// On many systems, the actual page size can only be determined at runtime
/// with `pageSize`.
pub const page_size_min: usize = std.options.page_size_min orelse (page_size_min_default orelse
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_min; populate std.options.page_size_min"));

/// comptime-known maximum page size of the target.
///
/// Targeting a system with a larger page size may require overriding
/// `std.options.page_size_max`, as well as providing a corresponding linker
/// option.
///
/// The actual page size can only be determined at runtime with `pageSize`.
pub const page_size_max: usize = std.options.page_size_max orelse (page_size_max_default orelse if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
    @compileError("freestanding/other page_size_max must be provided with std.options.page_size_max")
else
    @compileError(@tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " has unknown page_size_max; populate std.options.page_size_max"));
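
// A sketch of overriding these bounds from the root source file, for targets
// where the page size is known to differ from the defaults below (the field
// names follow the `std.options` references above; 16 KiB is an arbitrary
// example value):
//
//     pub const std_options: std.Options = .{
//         .page_size_min = 16 << 10,
//         .page_size_max = 16 << 10,
//     };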

/// If the page size is comptime-known, return value is comptime.
/// Otherwise, calls `std.options.queryPageSize` which by default queries the
/// host operating system at runtime.
pub inline fn pageSize() usize {
    if (page_size_min == page_size_max) return page_size_min;
    return std.options.queryPageSize();
}

test pageSize {
    assert(std.math.isPowerOfTwo(pageSize()));
}

/// The default implementation of `std.options.queryPageSize`.
/// Asserts that the page size is between `page_size_min` and `page_size_max`, inclusive.
pub fn defaultQueryPageSize() usize {
    const global = struct {
        var cached_result: std.atomic.Value(usize) = .init(0);
    };
    var size = global.cached_result.load(.unordered);
    if (size > 0) return size;
    size = size: switch (builtin.os.tag) {
        .linux => if (builtin.link_libc)
            @max(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)), 0)
        else
            std.os.linux.getauxval(std.elf.AT_PAGESZ),
        .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => {
            const task_port = std.c.mach_task_self();
            // mach_task_self may fail "if there are any resource failures or other errors".
            if (task_port == std.c.TASK.NULL) break :size 0;
            var info_count = std.c.TASK.VM.INFO_COUNT;
            var vm_info: std.c.task_vm_info_data_t = undefined;
            vm_info.page_size = 0;
            _ = std.c.task_info(
                task_port,
                std.c.TASK.VM.INFO,
                @as(std.c.task_info_t, @ptrCast(&vm_info)),
                &info_count,
            );
            break :size @intCast(vm_info.page_size);
        },
        .windows => {
            var sbi: windows.SYSTEM_BASIC_INFORMATION = undefined;
            switch (windows.ntdll.NtQuerySystemInformation(
                .SystemBasicInformation,
                &sbi,
                @sizeOf(windows.SYSTEM_BASIC_INFORMATION),
                null,
            )) {
                .SUCCESS => break :size sbi.PageSize,
                else => break :size 0,
            }
        },
        else => if (builtin.link_libc)
            @max(std.c.sysconf(@intFromEnum(std.c._SC.PAGESIZE)), 0)
        else if (builtin.os.tag == .freestanding or builtin.os.tag == .other)
            @compileError("unsupported target: freestanding/other")
        else
            @compileError("pageSize on " ++ @tagName(builtin.cpu.arch) ++ "-" ++ @tagName(builtin.os.tag) ++ " is not supported without linking libc when using the default implementation"),
    };
    if (size == 0) size = page_size_max;

    assert(size >= page_size_min);
    assert(size <= page_size_max);
    global.cached_result.store(size, .unordered);

    return size;
}

test defaultQueryPageSize {
    if (builtin.cpu.arch.isWasm()) return error.SkipZigTest;
    assert(std.math.isPowerOfTwo(defaultQueryPageSize()));
}

const CAllocator = struct {
    comptime {
        if (!builtin.link_libc) {
            @compileError("C allocator is only available when linking against libc");
        }
    }

    const vtable: Allocator.VTable = .{
        .alloc = alloc,
        .resize = resize,
        .remap = remap,
        .free = free,
    };

    pub const supports_malloc_size = @TypeOf(malloc_size) != void;
    pub const malloc_size = if (@TypeOf(c.malloc_size) != void)
        c.malloc_size
    else if (@TypeOf(c.malloc_usable_size) != void)
        c.malloc_usable_size
    else if (@TypeOf(c._msize) != void)
        c._msize
    else {};

    pub const supports_posix_memalign = switch (builtin.os.tag) {
        .dragonfly, .netbsd, .freebsd, .illumos, .openbsd, .linux, .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos, .serenity => true,
        else => false,
    };

    fn getHeader(ptr: [*]u8) *[*]u8 {
        return @ptrCast(@alignCast(ptr - @sizeOf(usize)));
    }

    fn alignedAlloc(len: usize, alignment: Alignment) ?[*]u8 {
        const alignment_bytes = alignment.toByteUnits();
        if (supports_posix_memalign) {
            // posix_memalign only accepts alignment values that are a
            // multiple of the pointer size.
            const effective_alignment = @max(alignment_bytes, @sizeOf(usize));

            var aligned_ptr: ?*anyopaque = undefined;
            if (c.posix_memalign(&aligned_ptr, effective_alignment, len) != 0)
                return null;

            return @ptrCast(aligned_ptr);
        }

        // Thin wrapper around regular malloc, overallocate to account for
        // alignment padding and store the original malloc()'ed pointer before
        // the aligned address.
        const unaligned_ptr = @as([*]u8, @ptrCast(c.malloc(len + alignment_bytes - 1 + @sizeOf(usize)) orelse return null));
        const unaligned_addr = @intFromPtr(unaligned_ptr);
        const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment_bytes);
        const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
        getHeader(aligned_ptr).* = unaligned_ptr;

        return aligned_ptr;
    }

    fn alignedFree(ptr: [*]u8) void {
        if (supports_posix_memalign) {
            return c.free(ptr);
        }

        const unaligned_ptr = getHeader(ptr).*;
        c.free(unaligned_ptr);
    }

    fn alignedAllocSize(ptr: [*]u8) usize {
        if (supports_posix_memalign) {
            return CAllocator.malloc_size(ptr);
        }

        const unaligned_ptr = getHeader(ptr).*;
        const delta = @intFromPtr(ptr) - @intFromPtr(unaligned_ptr);
        return CAllocator.malloc_size(unaligned_ptr) - delta;
    }

    fn alloc(
        _: *anyopaque,
        len: usize,
        alignment: Alignment,
        return_address: usize,
    ) ?[*]u8 {
        _ = return_address;
        assert(len > 0);
        return alignedAlloc(len, alignment);
    }

    fn resize(
        _: *anyopaque,
        buf: []u8,
        alignment: Alignment,
        new_len: usize,
        return_address: usize,
    ) bool {
        _ = alignment;
        _ = return_address;
        if (new_len <= buf.len) {
            return true;
        }
        if (CAllocator.supports_malloc_size) {
            const full_len = alignedAllocSize(buf.ptr);
            if (new_len <= full_len) {
                return true;
            }
        }
        return false;
    }

    fn remap(
        context: *anyopaque,
        memory: []u8,
        alignment: Alignment,
        new_len: usize,
        return_address: usize,
    ) ?[*]u8 {
        // realloc would potentially return a new allocation that does not
        // respect the original alignment.
        return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
    }

    fn free(
        _: *anyopaque,
        buf: []u8,
        alignment: Alignment,
        return_address: usize,
    ) void {
        _ = alignment;
        _ = return_address;
        alignedFree(buf.ptr);
    }
};

/// Supports the full Allocator interface, including alignment, and exploits
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
pub const c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &CAllocator.vtable,
};
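
// A minimal usage sketch (valid only when linking libc; the size is
// arbitrary):
//
//     const buf = try std.heap.c_allocator.alloc(u8, 256);
//     defer std.heap.c_allocator.free(buf);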

/// Asserts that allocation alignments do not exceed `@alignOf(std.c.max_align_t)`
/// and directly calls `malloc`/`free`. Does not attempt to utilize
/// `malloc_usable_size`. This allocator is safe to use as the backing allocator
/// with `ArenaAllocator`, for example, and is more efficient in such a case than
/// `c_allocator`.
pub const raw_c_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &raw_c_allocator_vtable,
};
const raw_c_allocator_vtable: Allocator.VTable = .{
    .alloc = rawCAlloc,
    .resize = rawCResize,
    .remap = rawCRemap,
    .free = rawCFree,
};
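
// A minimal sketch of the `ArenaAllocator` pairing suggested above (gated on
// libc; the allocation size is arbitrary):
test "raw_c_allocator backing an ArenaAllocator" {
    if (!builtin.link_libc) return error.SkipZigTest;
    var arena = ArenaAllocator.init(raw_c_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    const slice = try allocator.alloc(u8, 128);
    slice[0] = 0x12; // arena memory is released all at once by deinit
}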

fn rawCAlloc(
    context: *anyopaque,
    len: usize,
    alignment: Alignment,
    return_address: usize,
) ?[*]u8 {
    _ = context;
    _ = return_address;
    assert(alignment.compare(.lte, .of(std.c.max_align_t)));
    // Note that this pointer cannot be aligncasted to max_align_t because if
    // len is < max_align_t then the alignment can be smaller. For example, if
    // max_align_t is 16, but the user requests 8 bytes, there is no built-in
    // type in C that is size 8 and has 16 byte alignment, so the alignment may
    // be 8 bytes rather than 16. Similarly if only 1 byte is requested, malloc
    // is allowed to return a 1-byte aligned pointer.
    return @ptrCast(c.malloc(len));
}

fn rawCResize(
    context: *anyopaque,
    memory: []u8,
    alignment: Alignment,
    new_len: usize,
    return_address: usize,
) bool {
    _ = context;
    _ = memory;
    _ = alignment;
    _ = new_len;
    _ = return_address;
    return false;
}

fn rawCRemap(
    context: *anyopaque,
    memory: []u8,
    alignment: Alignment,
    new_len: usize,
    return_address: usize,
) ?[*]u8 {
    _ = context;
    _ = alignment;
    _ = return_address;
    return @ptrCast(c.realloc(memory.ptr, new_len));
}

fn rawCFree(
    context: *anyopaque,
    memory: []u8,
    alignment: Alignment,
    return_address: usize,
) void {
    _ = context;
    _ = alignment;
    _ = return_address;
    c.free(memory.ptr);
}

/// On operating systems that support memory mapping, this allocator makes a
/// syscall directly for every allocation and free.
///
/// Otherwise, it falls back to the preferred singleton for the target.
///
/// Thread-safe.
pub const page_allocator: Allocator = if (@hasDecl(root, "os") and
    @hasDecl(root.os, "heap") and
    @hasDecl(root.os.heap, "page_allocator"))
    root.os.heap.page_allocator
else if (builtin.target.cpu.arch.isWasm()) .{
    .ptr = undefined,
    .vtable = &WasmAllocator.vtable,
} else if (builtin.target.os.tag == .plan9) .{
    .ptr = undefined,
    .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
} else .{
    .ptr = undefined,
    .vtable = &PageAllocator.vtable,
};

pub const smp_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &SmpAllocator.vtable,
};

/// This allocator is fast, small, and specific to WebAssembly. In the future,
/// this will be the implementation automatically selected by
/// `GeneralPurposeAllocator` when compiling in `ReleaseSmall` mode for wasm32
/// and wasm64 architectures.
/// Until then, it is available here to play with.
pub const wasm_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &WasmAllocator.vtable,
};
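
// A hedged smoke test for `wasm_allocator`, mirroring the other allocator
// tests in this file (only meaningful on wasm targets):
test wasm_allocator {
    if (!builtin.target.cpu.arch.isWasm()) return error.SkipZigTest;
    try testAllocator(wasm_allocator);
}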

/// Returns a `StackFallbackAllocator` allocating using a
/// `FixedBufferAllocator` over an array of size `size`, falling back to
/// `fallback_allocator` if that fails.
pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
    };
}
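
// A minimal usage sketch: `get` initializes the internal buffer allocator,
// and allocations that do not fit the stack buffer are served by the
// fallback (the sizes here are arbitrary):
test stackFallback {
    var sfa = stackFallback(64, std.testing.allocator);
    const allocator = sfa.get();
    // Fits within the 64-byte stack buffer.
    const small = try allocator.alloc(u8, 16);
    // Exceeds the stack buffer, so it falls back to the testing allocator.
    const big = try allocator.alloc(u8, 1024);
    allocator.free(big);
    allocator.free(small);
}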

/// An allocator that attempts to allocate using a
/// `FixedBufferAllocator` backed by an array of size `size`. If the
/// allocation fails, it falls back to using
/// `fallback_allocator`. Easily created with `stackFallback`.
pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        fallback_allocator: Allocator,
        fixed_buffer_allocator: FixedBufferAllocator,
        get_called: if (std.debug.runtime_safety) bool else void =
            if (std.debug.runtime_safety) false else {},

        /// This function both fetches an `Allocator` interface to this
        /// allocator *and* resets the internal buffer allocator.
        pub fn get(self: *Self) Allocator {
            if (std.debug.runtime_safety) {
                assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();`
                self.get_called = true;
            }
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        /// Unlike most std allocators, `StackFallbackAllocator` modifies
        /// its internal state before returning an implementation of
        /// the `Allocator` interface, and therefore it also doesn't use
        /// the usual `.allocator()` method.
        pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead");

        fn alloc(
            ctx: *anyopaque,
            len: usize,
            alignment: Alignment,
            ra: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(ctx));
            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse
                return self.fallback_allocator.rawAlloc(len, alignment, ra);
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            alignment: Alignment,
            new_len: usize,
            ra: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra);
            } else {
                return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) {
                return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address);
            } else {
                return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
            }
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            alignment: Alignment,
            ra: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra);
            } else {
                return self.fallback_allocator.rawFree(buf, alignment, ra);
            }
        }
    };
}

test c_allocator {
    if (builtin.link_libc) {
        try testAllocator(c_allocator);
        try testAllocatorAligned(c_allocator);
        try testAllocatorLargeAlignment(c_allocator);
        try testAllocatorAlignedShrink(c_allocator);
    }
}

test raw_c_allocator {
    if (builtin.link_libc) {
        try testAllocator(raw_c_allocator);
    }
}

test smp_allocator {
    if (builtin.single_threaded) return;
    try testAllocator(smp_allocator);
    try testAllocatorAligned(smp_allocator);
    try testAllocatorLargeAlignment(smp_allocator);
    try testAllocatorAlignedShrink(smp_allocator);
}

test PageAllocator {
    const allocator = page_allocator;
    try testAllocator(allocator);
    try testAllocatorAligned(allocator);
    if (!builtin.target.cpu.arch.isWasm()) {
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }

    if (builtin.os.tag == .windows) {
        const slice = try allocator.alignedAlloc(u8, .fromByteUnits(page_size_min), 128);
        slice[0] = 0x12;
        slice[127] = 0x34;
        allocator.free(slice);
    }
    {
        var buf = try allocator.alloc(u8, pageSize() + 1);
        defer allocator.free(buf);
        buf = try allocator.realloc(buf, 1); // shrink past the page boundary
    }
}

test ArenaAllocator {
    var arena_allocator = ArenaAllocator.init(page_allocator);
    defer arena_allocator.deinit();
    const allocator = arena_allocator.allocator();

    try testAllocator(allocator);
    try testAllocatorAligned(allocator);
    try testAllocatorLargeAlignment(allocator);
    try testAllocatorAlignedShrink(allocator);
}

test "StackFallbackAllocator" {
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocator(stack_allocator.get());
    }
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocatorAligned(stack_allocator.get());
    }
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocatorLargeAlignment(stack_allocator.get());
    }
    {
        var stack_allocator = stackFallback(4096, std.testing.allocator);
        try testAllocatorAlignedShrink(stack_allocator.get());
    }
}

/// This one should not try alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var slice = try allocator.alloc(*i32, 100);
    try testing.expect(slice.len == 100);
    for (slice, 0..) |*item, i| {
        item.* = try allocator.create(i32);
        item.*.* = @as(i32, @intCast(i));
    }

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(slice.len == 20000);

    for (slice[0..100], 0..) |item, i| {
        try testing.expect(item.* == @as(i32, @intCast(i)));
        allocator.destroy(item);
    }

    if (allocator.resize(slice, 50)) {
        slice = slice[0..50];
        if (allocator.resize(slice, 25)) {
            slice = slice[0..25];
            try testing.expect(allocator.resize(slice, 0));
            slice = slice[0..0];
            slice = try allocator.realloc(slice, 10);
            try testing.expect(slice.len == 10);
        }
    }
    allocator.free(slice);

    // Zero-length allocation
    const empty = try allocator.alloc(u8, 0);
    allocator.free(empty);
    // Allocation with zero-sized types
    const zero_bit_ptr = try allocator.create(u0);
    zero_bit_ptr.* = 0;
    allocator.destroy(zero_bit_ptr);
    const zero_len_array = try allocator.create([0]u64);
    allocator.destroy(zero_len_array);

    const oversize = try allocator.alignedAlloc(u32, null, 5);
    try testing.expect(oversize.len >= 5);
    for (oversize) |*item| {
        item.* = 0xDEADBEEF;
    }
    allocator.free(oversize);
}

pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    // Test a few alignment values, smaller and bigger than the type's alignment.
    inline for ([_]Alignment{ .@"1", .@"2", .@"4", .@"8", .@"16", .@"32", .@"64" }) |alignment| {
        // initial
        var slice = try allocator.alignedAlloc(u8, alignment, 10);
        try testing.expect(slice.len == 10);
        // grow
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        if (allocator.resize(slice, 10)) {
            slice = slice[0..10];
        }
        try testing.expect(allocator.resize(slice, 0));
        slice = slice[0..0];
        // realloc from zero
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        if (allocator.resize(slice, 10)) {
            slice = slice[0..10];
        }
        try testing.expect(allocator.resize(slice, 0));
    }
}

pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    const large_align: usize = page_size_min / 2;

    var align_mask: usize = undefined;
    align_mask = @shlWithOverflow(~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)))[0];

    var slice = try allocator.alignedAlloc(u8, .fromByteUnits(large_align), 500);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    if (allocator.resize(slice, 100)) {
        slice = slice[0..100];
    }

    slice = try allocator.realloc(slice, 5000);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    if (allocator.resize(slice, 10)) {
        slice = slice[0..10];
    }

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(@intFromPtr(slice.ptr) & align_mask == @intFromPtr(slice.ptr));

    allocator.free(slice);
}

pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var debug_buffer: [1000]u8 = undefined;
    var fib = FixedBufferAllocator.init(&debug_buffer);
    const debug_allocator = fib.allocator();

    const alloc_size = pageSize() * 2 + 50;
    var slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
    defer allocator.free(slice);

    var stuff_to_free = std.array_list.Managed([]align(16) u8).init(debug_allocator);
    // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
    // which is 16 pages, hence the 32. Because of this high over-alignment
    // we want, this test may require increasing the size of the allocations
    // feeding the `allocator` parameter if they fail.
    while (@intFromPtr(slice.ptr) == mem.alignForward(usize, @intFromPtr(slice.ptr), pageSize() * 32)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
    }
    while (stuff_to_free.pop()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[60] = 0x34;

    slice = try allocator.reallocAdvanced(slice, alloc_size / 2, 0);
    try testing.expect(slice[0] == 0x12);
    try testing.expect(slice[60] == 0x34);
}

const page_size_min_default: ?usize = switch (builtin.os.tag) {
    .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
        .x86_64 => 4 << 10,
        .aarch64 => 16 << 10,
        else => null,
    },
    .windows => switch (builtin.cpu.arch) {
        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
        .x86, .x86_64 => 4 << 10,
        // SuperH => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        // DEC Alpha => 8 << 10,
        // Itanium => 8 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        else => null,
    },
    .wasi => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        else => null,
    },
    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
    .uefi => 4 << 10,
    .freebsd => switch (builtin.cpu.arch) {
        // FreeBSD/sys/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv32, .riscv64 => 4 << 10,
        else => null,
    },
    .netbsd => switch (builtin.cpu.arch) {
        // NetBSD/sys/arch/*
        .alpha => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .hppa => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .sh, .sheb => 4 << 10,
        .sparc => 4 << 10,
        .sparc64 => 8 << 10,
        .riscv32, .riscv64 => 4 << 10,
        // Sun-2
        .m68k => 2 << 10,
        else => null,
    },
    .dragonfly => switch (builtin.cpu.arch) {
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .openbsd => switch (builtin.cpu.arch) {
        // OpenBSD/sys/arch/*
        .alpha => 8 << 10,
        .hppa => 4 << 10,
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sh, .sheb => 4 << 10,
        .sparc64 => 8 << 10,
        else => null,
    },
    .illumos => switch (builtin.cpu.arch) {
        // src/uts/*/sys/machparam.h
        .x86, .x86_64 => 4 << 10,
        .sparc, .sparc64 => 8 << 10,
        else => null,
    },
    .fuchsia => switch (builtin.cpu.arch) {
        // fuchsia/kernel/arch/*/include/arch/defines.h
        .x86_64 => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .riscv64 => 4 << 10,
        else => null,
    },
    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
    .serenity => 4 << 10,
    .haiku => switch (builtin.cpu.arch) {
        // haiku/headers/posix/arch/*/limits.h
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .m68k => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .hurd => switch (builtin.cpu.arch) {
        // gnumach/*/include/mach/*/vm_param.h
        .x86, .x86_64 => 4 << 10,
        .aarch64 => null,
        else => null,
    },
    .plan9 => switch (builtin.cpu.arch) {
        // 9front/sys/src/9/*/mem.h
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        .sparc => 4 << 10,
        else => null,
    },
    .ps3 => switch (builtin.cpu.arch) {
        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
        .powerpc64 => 1 << 20, // 1 MiB
        else => null,
    },
    .ps4 => switch (builtin.cpu.arch) {
        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .ps5 => switch (builtin.cpu.arch) {
        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
        .x86, .x86_64 => 16 << 10,
        else => null,
    },
    // system/lib/libc/musl/arch/emscripten/bits/limits.h
    .emscripten => 64 << 10,
    .linux => switch (builtin.cpu.arch) {
        // Linux/arch/*/Kconfig
        .alpha => 8 << 10,
        .arc, .arceb => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .csky => 4 << 10,
        .hexagon => 4 << 10,
        .hppa => 4 << 10,
        .loongarch32, .loongarch64 => 4 << 10,
        .m68k => 4 << 10,
        .microblaze, .microblazeel => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .or1k => 8 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv32, .riscv64 => 4 << 10,
        .s390x => 4 << 10,
        .sh, .sheb => 4 << 10,
        .sparc => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        .xtensa, .xtensaeb => 4 << 10,
        else => null,
    },
    .freestanding, .other => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        .x86, .x86_64 => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        else => null,
    },
    else => null,
};

const page_size_max_default: ?usize = switch (builtin.os.tag) {
    .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => switch (builtin.cpu.arch) {
        .x86_64 => 4 << 10,
        .aarch64 => 16 << 10,
        else => null,
    },
    .windows => switch (builtin.cpu.arch) {
        // -- <https://devblogs.microsoft.com/oldnewthing/20210510-00/?p=105200>
        .x86, .x86_64 => 4 << 10,
        // SuperH => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        // DEC Alpha => 8 << 10,
        // Itanium => 8 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        else => null,
    },
    .wasi => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        else => null,
    },
    // https://github.com/tianocore/edk2/blob/b158dad150bf02879668f72ce306445250838201/MdePkg/Include/Uefi/UefiBaseType.h#L180-L187
    .uefi => 4 << 10,
    .freebsd => switch (builtin.cpu.arch) {
        // FreeBSD/sys/*
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv32, .riscv64 => 4 << 10,
        else => null,
    },
    .netbsd => switch (builtin.cpu.arch) {
        // NetBSD/sys/arch/*
        .alpha => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 64 << 10,
        .hppa => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 16 << 10,
        .sh, .sheb => 4 << 10,
        .sparc => 8 << 10,
        .sparc64 => 8 << 10,
        .riscv32, .riscv64 => 4 << 10,
        .m68k => 8 << 10,
        else => null,
    },
    .dragonfly => switch (builtin.cpu.arch) {
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .openbsd => switch (builtin.cpu.arch) {
        // OpenBSD/sys/arch/*
        .alpha => 8 << 10,
        .hppa => 4 << 10,
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb, .aarch64, .aarch64_be => 4 << 10,
        .mips64, .mips64el => 16 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sh, .sheb => 4 << 10,
        .sparc64 => 8 << 10,
        else => null,
    },
    .illumos => switch (builtin.cpu.arch) {
        // src/uts/*/sys/machparam.h
        .x86, .x86_64 => 4 << 10,
        .sparc, .sparc64 => 8 << 10,
        else => null,
    },
    .fuchsia => switch (builtin.cpu.arch) {
        // fuchsia/kernel/arch/*/include/arch/defines.h
        .x86_64 => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .riscv64 => 4 << 10,
        else => null,
    },
    // https://github.com/SerenityOS/serenity/blob/62b938b798dc009605b5df8a71145942fc53808b/Kernel/API/POSIX/sys/limits.h#L11-L13
    .serenity => 4 << 10,
    .haiku => switch (builtin.cpu.arch) {
        // haiku/headers/posix/arch/*/limits.h
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 4 << 10,
        .m68k => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 4 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 4 << 10,
        .riscv64 => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .hurd => switch (builtin.cpu.arch) {
        // gnumach/*/include/mach/*/vm_param.h
        .x86, .x86_64 => 4 << 10,
        .aarch64 => null,
        else => null,
    },
    .plan9 => switch (builtin.cpu.arch) {
        // 9front/sys/src/9/*/mem.h
        .x86, .x86_64 => 4 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 64 << 10,
        .mips, .mipsel, .mips64, .mips64el => 16 << 10,
        .powerpc, .powerpcle, .powerpc64, .powerpc64le => 4 << 10,
        .sparc => 4 << 10,
        else => null,
    },
    .ps3 => switch (builtin.cpu.arch) {
        // cell/SDK_doc/en/html/C_and_C++_standard_libraries/stdlib.html
        .powerpc64 => 1 << 20, // 1 MiB
        else => null,
    },
    .ps4 => switch (builtin.cpu.arch) {
        // https://github.com/ps4dev/ps4sdk/blob/4df9d001b66ae4ec07d9a51b62d1e4c5e270eecc/include/machine/param.h#L95
        .x86, .x86_64 => 4 << 10,
        else => null,
    },
    .ps5 => switch (builtin.cpu.arch) {
        // https://github.com/PS5Dev/PS5SDK/blob/a2e03a2a0231a3a3397fa6cd087a01ca6d04f273/include/machine/param.h#L95
        .x86, .x86_64 => 16 << 10,
        else => null,
    },
    // system/lib/libc/musl/arch/emscripten/bits/limits.h
    .emscripten => 64 << 10,
    .linux => switch (builtin.cpu.arch) {
        // Linux/arch/*/Kconfig
        .alpha => 8 << 10,
        .arc, .arceb => 16 << 10,
        .thumb, .thumbeb, .arm, .armeb => 4 << 10,
        .aarch64, .aarch64_be => 64 << 10,
        .csky => 4 << 10,
        .hexagon => 256 << 10,
        .hppa => 64 << 10,
        .loongarch32, .loongarch64 => 64 << 10,
        .m68k => 8 << 10,
        .microblaze, .microblazeel => 4 << 10,
        .mips, .mipsel, .mips64, .mips64el => 64 << 10,
        .or1k => 8 << 10,
        .powerpc, .powerpc64, .powerpc64le, .powerpcle => 256 << 10,
        .riscv32, .riscv64 => 4 << 10,
        .s390x => 4 << 10,
        .sh, .sheb => 64 << 10,
        .sparc => 4 << 10,
        .sparc64 => 8 << 10,
        .x86, .x86_64 => 4 << 10,
        .xtensa, .xtensaeb => 4 << 10,
        else => null,
    },
    .freestanding => switch (builtin.cpu.arch) {
        .wasm32, .wasm64 => 64 << 10,
        else => null,
    },
    else => null,
};

test {
    _ = @import("heap/memory_pool.zig");
    _ = ArenaAllocator;
    _ = GeneralPurposeAllocator;
    _ = FixedBufferAllocator;
    _ = ThreadSafeAllocator;
    _ = SbrkAllocator;
    if (builtin.target.cpu.arch.isWasm()) {
        _ = WasmAllocator;
    }
    if (!builtin.single_threaded) _ = smp_allocator;
}