//! An allocator that is intended to be used in Debug mode.
//!
//! ## Features
//!
//! * Captures stack traces on allocation, free, and optionally resize.
//! * Double free detection, which prints all three traces (first alloc, first
//!   free, second free).
//! * Leak detection, with stack traces.
//! * Never reuses memory addresses, making it easier for Zig to detect branch
//!   on undefined values in case of dangling pointers. This relies on
//!   the backing allocator to also not reuse addresses.
//! * Uses a minimum backing allocation size to avoid operating system errors
//!   from having too many active memory mappings.
//! * Releases pages from resident memory as soon as they are no longer
//!   needed, so that dangling accesses cause page faults.
//! * Cross platform. Operates based on a backing allocator which makes it work
//!   everywhere, even freestanding.
//! * Compile-time configuration.
//!
//! These features require the allocator to be quite slow and wasteful. For
//! example, when allocating a single byte, the efficiency is less than 1%;
//! it requires more than 100 bytes of overhead to manage the allocation for
//! one byte. The efficiency gets better with larger allocations.
//!
//! ## Basic Design
//!
//! Allocations are divided into two categories, small and large.
//!
//! Small allocations are divided into buckets based on `page_size`:
//!
//! ```
//! index obj_size
//! 0     1
//! 1     2
//! 2     4
//! 3     8
//! 4     16
//! 5     32
//! 6     64
//! 7     128
//! 8     256
//! 9     512
//! 10    1024
//! 11    2048
//! ...
//! ```
//!
//! This goes on for `small_bucket_count` indexes.
//!
//! Allocations are grouped into an object size based on max(len, alignment),
//! rounded up to the next power of two.
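//!
//! In code, the size class index (the log2 of that object size) is computed
//! with the same expression the implementation below uses, where
//! `@intFromEnum(alignment)` is the log2 of the requested alignment:
//!
//! ```
//! const size_class_index = @max(@bitSizeOf(usize) - @clz(len - 1), @intFromEnum(alignment));
//! ```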
//!
//! The main allocator state has an array of all the "current" buckets for each
//! size class. Each slot in the array can be null, meaning the bucket for that
//! size class is not allocated. When the first object is allocated for a given
//! size class, it makes one `page_size` allocation from the backing allocator.
//! This allocation is divided into "slots" - one per allocated object, leaving
//! room for the allocation metadata (starting with `BucketHeader`), which is
//! located at the very end of the "page".
//!
//! The allocation metadata includes "used bits" - 1 bit per slot representing
//! whether the slot is used. Allocations always take the next available slot
//! from the current bucket, setting the corresponding used bit, as well as
//! incrementing `allocated_count`.
//!
//! Frees recover the allocation metadata based on the address, length, and
//! alignment, relying on the backing allocation's large alignment, combined
//! with the fact that allocations are never moved from small to large, or vice
//! versa.
//!
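//! For example, the slot for a freed small allocation is recovered with the
//! same arithmetic that `free` performs below:
//!
//! ```
//! page_addr  = freed_addr & ~(page_size - 1)
//! slot_index = (freed_addr - page_addr) / size_class
//! ```
//!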
//! When a bucket is full, a new one is allocated, containing a pointer to the
//! previous one. This doubly-linked list is iterated during leak detection.
//!
//! Resizing and remapping work the same on small allocations: if the size
//! class would not change, then the operation succeeds, and the address is
//! unchanged. Otherwise, the request is rejected.
//!
//! Large objects are allocated directly using the backing allocator. Metadata
//! is stored separately in a `std.HashMap` using the backing allocator.
//!
//! Resizing and remapping are forwarded directly to the backing allocator,
//! except where such operations would change the category from large to small.
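//!
//! ## Usage
//!
//! A minimal sketch of typical use (mirroring the tests at the bottom of this
//! file):
//!
//! ```
//! var gpa: DebugAllocator(.{}) = .init;
//! defer if (gpa.deinit() == .leak) @panic("memory leak detected");
//! const allocator = gpa.allocator();
//!
//! const bytes = try allocator.alloc(u8, 100);
//! defer allocator.free(bytes);
//! ```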
const builtin = @import("builtin");
const StackTrace = std.builtin.StackTrace;

const std = @import("std");
const log = std.log.scoped(.gpa);
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;

const default_page_size: usize = switch (builtin.os.tag) {
    // Makes `std.heap.PageAllocator` take the happy path.
    .windows => 64 * 1024,
    else => switch (builtin.cpu.arch) {
        // Max alignment supported by `std.heap.WasmAllocator`.
        .wasm32, .wasm64 => 64 * 1024,
        // Avoids too many active mappings when `page_size_max` is low.
        else => @max(std.heap.page_size_max, 128 * 1024),
    },
};

const Log2USize = std.math.Log2Int(usize);

const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) 6 else 0;
const default_stack_trace_frames: usize = switch (builtin.mode) {
    .Debug => default_sys_stack_trace_frames,
    else => 0,
};

pub const Config = struct {
    /// Number of stack frames to capture.
    stack_trace_frames: usize = default_stack_trace_frames,

    /// If true, the allocator will have two fields:
    ///  * `total_requested_bytes`, which tracks the total number of bytes requested.
    ///  * `requested_memory_limit`, which causes allocations to return `error.OutOfMemory`
    ///    when `total_requested_bytes` would exceed this limit.
    /// If false, these fields will be `void`.
    /// (A sketch test exercising this option follows this struct.)
    enable_memory_limit: bool = false,

    /// Whether to enable safety checks.
    safety: bool = std.debug.runtime_safety,

    /// Whether the allocator may be used simultaneously from multiple threads.
    thread_safe: bool = !builtin.single_threaded,

    /// What type of mutex you'd like to use, for thread safety.
    /// When specified, the mutex type must have the same shape as `std.Thread.Mutex` and
    /// `DummyMutex`, and have no required fields. Specifying this field causes
    /// the `thread_safe` field to be ignored.
    ///
    /// When null (default):
    /// * the mutex type defaults to `std.Thread.Mutex` when `thread_safe` is enabled.
    /// * the mutex type defaults to `DummyMutex` otherwise.
    MutexType: ?type = null,

    /// This is a temporary debugging trick you can use to turn segfaults into more helpful
    /// logged error messages with stack trace details. The downside is that every allocation
    /// will be leaked, unless used with retain_metadata!
    never_unmap: bool = false,

    /// This is a temporary debugging aid that retains metadata about allocations indefinitely.
    /// This allows a greater range of double frees to be reported. All metadata is freed when
    /// deinit is called. When used with never_unmap, deliberately leaked memory is also freed
    /// during deinit. Currently should be used with never_unmap to avoid segfaults.
    /// TODO https://github.com/ziglang/zig/issues/4298 will allow use without never_unmap
    retain_metadata: bool = false,

    /// Enables emitting info messages with the size and address of every allocation.
    verbose_log: bool = false,

    /// Whether the backing allocator returns already-zeroed memory.
    backing_allocator_zeroes: bool = true,

    /// When resizing an allocation, refresh the stack trace with the resize
    /// callsite. Comes with a performance penalty.
    resize_stack_traces: bool = false,

    /// Magic value that distinguishes allocations owned by this allocator from
    /// other regions of memory.
    canary: usize = @truncate(0x9232a6ff85dff10f),

    /// The size of allocations requested from the backing allocator for
    /// subdividing into slots for small allocations.
    ///
    /// Must be a power of two.
    page_size: usize = default_page_size,
};
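
// A sketch test exercising the `enable_memory_limit` option documented above.
// The limit and allocation sizes are arbitrary illustration values.
test "Config.enable_memory_limit sketch" {
    var limited: DebugAllocator(.{ .enable_memory_limit = true }) = .init;
    defer std.testing.expect(limited.deinit() == .ok) catch @panic("leak");
    const allocator = limited.allocator();

    limited.requested_memory_limit = 64;

    const small = try allocator.alloc(u8, 16);
    defer allocator.free(small);

    // 16 already-requested bytes plus 128 more would exceed the 64-byte limit,
    // so this allocation is refused.
    try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, 128));
}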

/// Default initialization of this struct is deprecated; use `.init` instead.
pub fn DebugAllocator(comptime config: Config) type {
    return struct {
        backing_allocator: Allocator = std.heap.page_allocator,
        /// Tracks the active bucket, which is the one that has free slots in it.
        buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
        large_allocations: LargeAllocTable = .empty,
        total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
        requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
        mutex: @TypeOf(mutex_init) = mutex_init,

        const Self = @This();

        pub const init: Self = .{};

        /// These can be derived from size_class_index but the calculation is nontrivial.
        const slot_counts: [small_bucket_count]SlotIndex = init: {
            @setEvalBranchQuota(10000);
            var result: [small_bucket_count]SlotIndex = undefined;
            for (&result, 0..) |*elem, i| elem.* = calculateSlotCount(i);
            break :init result;
        };

        comptime {
            assert(math.isPowerOfTwo(page_size));
        }

        const page_size = config.page_size;
        const page_align: mem.Alignment = .fromByteUnits(page_size);
        /// Integer type for pointing to slots in a small allocation
        const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);

        const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
        const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};

        const mutex_init = if (config.MutexType) |T|
            T{}
        else if (config.thread_safe)
            std.Thread.Mutex{}
        else
            DummyMutex{};

        const DummyMutex = struct {
            inline fn lock(_: DummyMutex) void {}
            inline fn unlock(_: DummyMutex) void {}
        };

        const stack_n = config.stack_trace_frames;
        const one_trace_size = @sizeOf(usize) * stack_n;
        const traces_per_slot = 2;

        pub const Error = mem.Allocator.Error;

        /// Avoids creating buckets that would only be able to store a small
        /// number of slots. Value of 1 means 2 is the minimum slot count.
        const minimum_slots_per_bucket_log2 = 1;
        const small_bucket_count = math.log2(page_size) - minimum_slots_per_bucket_log2;
        const largest_bucket_object_size = 1 << (small_bucket_count - 1);
        const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);

        const bucketCompare = struct {
            fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
                return std.math.order(@intFromPtr(a.page), @intFromPtr(b.page));
            }
        }.compare;

        const LargeAlloc = struct {
            bytes: []u8,
            requested_size: if (config.enable_memory_limit) usize else void,
            stack_addresses: [trace_n][stack_n]usize,
            freed: if (config.retain_metadata) bool else void,
            alignment: if (config.never_unmap and config.retain_metadata) mem.Alignment else void,

            const trace_n = if (config.retain_metadata) traces_per_slot else 1;

            fn dumpStackTrace(self: *LargeAlloc, trace_kind: TraceKind) void {
                std.debug.dumpStackTrace(self.getStackTrace(trace_kind));
            }

            fn getStackTrace(self: *LargeAlloc, trace_kind: TraceKind) std.builtin.StackTrace {
                assert(@intFromEnum(trace_kind) < trace_n);
                const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
                var len: usize = 0;
                while (len < stack_n and stack_addresses[len] != 0) {
                    len += 1;
                }
                return .{
                    .instruction_addresses = stack_addresses,
                    .index = len,
                };
            }

            fn captureStackTrace(self: *LargeAlloc, ret_addr: usize, trace_kind: TraceKind) void {
                assert(@intFromEnum(trace_kind) < trace_n);
                const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
                collectStackTrace(ret_addr, stack_addresses);
            }
        };
        const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);

        /// Bucket: In memory, in order:
        /// * BucketHeader
        /// * bucket_used_bits: [N]usize, // 1 bit for every slot
        /// -- below only exists when config.safety is true --
        /// * requested_sizes: [N]LargestSizeClassInt // 1 int for every slot
        /// * log2_ptr_aligns: [N]u8 // 1 byte for every slot
        /// -- above only exists when config.safety is true --
        /// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
        const BucketHeader = struct {
            allocated_count: SlotIndex,
            freed_count: SlotIndex,
            prev: ?*BucketHeader,
            next: ?*BucketHeader,
            canary: usize = config.canary,

            fn fromPage(page_addr: usize, slot_count: usize) *BucketHeader {
                const unaligned = page_addr + page_size - bucketSize(slot_count);
                return @ptrFromInt(unaligned & ~(@as(usize, @alignOf(BucketHeader)) - 1));
            }

            fn usedBits(bucket: *BucketHeader, index: usize) *usize {
                const ptr: [*]u8 = @ptrCast(bucket);
                const bits: [*]usize = @ptrCast(@alignCast(ptr + @sizeOf(BucketHeader)));
                return &bits[index];
            }

            fn requestedSizes(bucket: *BucketHeader, slot_count: usize) []LargestSizeClassInt {
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(slot_count);
                const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
                return sizes[0..slot_count];
            }

            fn log2PtrAligns(bucket: *BucketHeader, slot_count: usize) []mem.Alignment {
                if (!config.safety) @compileError("pointer alignment is only stored when safety is enabled");
                const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(slot_count);
                return @ptrCast(aligns_ptr[0..slot_count]);
            }

            fn stackTracePtr(
                bucket: *BucketHeader,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) *[stack_n]usize {
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(slot_count);
                const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
                    @intFromEnum(trace_kind) * @as(usize, one_trace_size);
                return @ptrCast(@alignCast(addr));
            }

            fn captureStackTrace(
                bucket: *BucketHeader,
                ret_addr: usize,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) void {
                // Initialize them to 0. When determining the count we must look
                // for non zero addresses.
                const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
                collectStackTrace(ret_addr, stack_addresses);
            }
        };

        pub fn allocator(self: *Self) Allocator {
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        fn bucketStackTrace(
            bucket: *BucketHeader,
            slot_count: usize,
            slot_index: SlotIndex,
            trace_kind: TraceKind,
        ) StackTrace {
            const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
            var len: usize = 0;
            while (len < stack_n and stack_addresses[len] != 0) {
                len += 1;
            }
            return .{
                .instruction_addresses = stack_addresses,
                .index = len,
            };
        }

        fn bucketRequestedSizesStart(slot_count: usize) usize {
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
            return mem.alignForward(
                usize,
                @sizeOf(BucketHeader) + usedBitsSize(slot_count),
                @alignOf(LargestSizeClassInt),
            );
        }

        fn bucketAlignsStart(slot_count: usize) usize {
            if (!config.safety) @compileError("pointer alignments are not stored unless safety is enabled");
            return bucketRequestedSizesStart(slot_count) + (@sizeOf(LargestSizeClassInt) * slot_count);
        }

        fn bucketStackFramesStart(slot_count: usize) usize {
            const unaligned_start = if (config.safety)
                bucketAlignsStart(slot_count) + slot_count
            else
                @sizeOf(BucketHeader) + usedBitsSize(slot_count);
            return mem.alignForward(usize, unaligned_start, @alignOf(usize));
        }

        fn bucketSize(slot_count: usize) usize {
            return bucketStackFramesStart(slot_count) + one_trace_size * traces_per_slot * slot_count;
        }

        /// This is executed only at compile-time to prepopulate a lookup table.
        fn calculateSlotCount(size_class_index: usize) SlotIndex {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            var lower: usize = 1 << minimum_slots_per_bucket_log2;
            var upper: usize = (page_size - bucketSize(lower)) / size_class;
            while (upper > lower) {
                const proposed: usize = lower + (upper - lower) / 2;
                if (proposed == lower) return lower;
                const slots_end = proposed * size_class;
                const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
                const end = header_begin + bucketSize(proposed);
                if (end > page_size) {
                    upper = proposed - 1;
                } else {
                    lower = proposed;
                }
            }
            const slots_end = lower * size_class;
            const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
            const end = header_begin + bucketSize(lower);
            assert(end <= page_size);
            return lower;
        }

        fn usedBitsCount(slot_count: usize) usize {
            return (slot_count + (@bitSizeOf(usize) - 1)) / @bitSizeOf(usize);
        }

        fn usedBitsSize(slot_count: usize) usize {
            return usedBitsCount(slot_count) * @sizeOf(usize);
        }

        fn detectLeaksInBucket(
            bucket: *BucketHeader,
            size_class_index: usize,
            used_bits_count: usize,
            tty_config: std.Io.tty.Config,
        ) usize {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_count = slot_counts[size_class_index];
            var leaks: usize = 0;
            for (0..used_bits_count) |used_bits_byte| {
                const used_int = bucket.usedBits(used_bits_byte).*;
                if (used_int != 0) {
                    for (0..@bitSizeOf(usize)) |bit_index_usize| {
                        const bit_index: Log2USize = @intCast(bit_index_usize);
                        const is_used = @as(u1, @truncate(used_int >> bit_index)) != 0;
                        if (is_used) {
                            const slot_index: SlotIndex = @intCast(used_bits_byte * @bitSizeOf(usize) + bit_index);
                            const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
                            const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                            const addr = page_addr + slot_index * size_class;
                            log.err("memory address 0x{x} leaked: {f}", .{
                                addr,
                                std.debug.FormatStackTrace{
                                    .stack_trace = stack_trace,
                                    .tty_config = tty_config,
                                },
                            });
                            leaks += 1;
                        }
                    }
                }
            }
            return leaks;
        }

        /// Emits log messages for leaks and then returns the number of detected leaks (0 if no leaks were detected).
        pub fn detectLeaks(self: *Self) usize {
            var leaks: usize = 0;

            const tty_config = std.Io.tty.detectConfig(.stderr());

            for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
                var optional_bucket = init_optional_bucket;
                const slot_count = slot_counts[size_class_index];
                const used_bits_count = usedBitsCount(slot_count);
                while (optional_bucket) |bucket| {
                    leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count, tty_config);
                    optional_bucket = bucket.prev;
                }
            }

            var it = self.large_allocations.valueIterator();
            while (it.next()) |large_alloc| {
                if (config.retain_metadata and large_alloc.freed) continue;
                const stack_trace = large_alloc.getStackTrace(.alloc);
                log.err("memory address 0x{x} leaked: {f}", .{
                    @intFromPtr(large_alloc.bytes.ptr),
                    std.debug.FormatStackTrace{
                        .stack_trace = stack_trace,
                        .tty_config = tty_config,
                    },
                });
                leaks += 1;
            }
            return leaks;
        }

        fn freeRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            if (config.never_unmap) {
                // free large allocations that were intentionally leaked by never_unmap
                var it = self.large_allocations.iterator();
                while (it.next()) |large| {
                    if (large.value_ptr.freed) {
                        self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.alignment, @returnAddress());
                    }
                }
            }
        }

        pub fn flushRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            self.freeRetainedMetadata();
            // also remove entries from large_allocations
            var it = self.large_allocations.iterator();
            while (it.next()) |large| {
                if (large.value_ptr.freed) {
                    _ = self.large_allocations.remove(@intFromPtr(large.value_ptr.bytes.ptr));
                }
            }
        }

        /// Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
        pub fn deinit(self: *Self) std.heap.Check {
            const leaks: usize = if (config.safety) self.detectLeaks() else 0;
            self.deinitWithoutLeakChecks();
            return if (leaks == 0) .ok else .leak;
        }

        /// Like `deinit`, but does not check for memory leaks. This is useful if leaks have already
        /// been detected manually with `detectLeaks` to avoid reporting them for a second time.
        pub fn deinitWithoutLeakChecks(self: *Self) void {
            if (config.retain_metadata) self.freeRetainedMetadata();
            self.large_allocations.deinit(self.backing_allocator);
            self.* = undefined;
        }

        fn collectStackTrace(first_trace_addr: usize, addr_buf: *[stack_n]usize) void {
            const st = std.debug.captureCurrentStackTrace(.{ .first_address = first_trace_addr }, addr_buf);
            @memset(addr_buf[@min(st.index, addr_buf.len)..], 0);
        }

        fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
            var addr_buf: [stack_n]usize = undefined;
            const second_free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
            const tty_config = std.Io.tty.detectConfig(.stderr());
            log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{
                std.debug.FormatStackTrace{
                    .stack_trace = alloc_stack_trace,
                    .tty_config = tty_config,
                },
                std.debug.FormatStackTrace{
                    .stack_trace = free_stack_trace,
                    .tty_config = tty_config,
                },
                std.debug.FormatStackTrace{
                    .stack_trace = second_free_stack_trace,
                    .tty_config = tty_config,
                },
            });
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn resizeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            new_size: usize,
            ret_addr: usize,
            may_move: bool,
        ) ?[*]u8 {
            if (config.retain_metadata and may_move) {
                // Before looking up the entry (since this could invalidate
                // it), we must reserve space for the new entry in case the
                // allocation is relocated.
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
            }

            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    @panic("Unrecoverable double free");
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addr_buf: [stack_n]usize = undefined;
                const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
                const tty_config = std.Io.tty.detectConfig(.stderr());
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    std.debug.FormatStackTrace{
                        .stack_trace = entry.value_ptr.getStackTrace(.alloc),
                        .tty_config = tty_config,
                    },
                    std.debug.FormatStackTrace{
                        .stack_trace = free_stack_trace,
                        .tty_config = tty_config,
                    },
                });
            }

            // If this would move the allocation into a small size class,
            // refuse the request, because it would require creating small
            // allocation metadata.
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_size - 1), @intFromEnum(alignment));
            if (new_size_class_index < self.buckets.len) return null;

            // Do memory limit accounting with requested sizes rather than what
            // backing_allocator returns, because if we want to return
            // error.OutOfMemory, we have to leave the allocation untouched, and
            // that is impossible to guarantee after calling
            // backing_allocator.rawResize.
            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return null;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            const opt_resized_ptr = if (may_move)
                self.backing_allocator.rawRemap(old_mem, alignment, new_size, ret_addr)
            else if (self.backing_allocator.rawResize(old_mem, alignment, new_size, ret_addr))
                old_mem.ptr
            else
                null;

            const resized_ptr = opt_resized_ptr orelse {
                if (config.enable_memory_limit) {
                    self.total_requested_bytes = prev_req_bytes;
                }
                return null;
            };

            if (config.enable_memory_limit) {
                entry.value_ptr.requested_size = new_size;
            }

            if (config.verbose_log) {
                log.info("large resize {d} bytes at {*} to {d} at {*}", .{
                    old_mem.len, old_mem.ptr, new_size, resized_ptr,
                });
            }
            entry.value_ptr.bytes = resized_ptr[0..new_size];
            if (config.resize_stack_traces)
                entry.value_ptr.captureStackTrace(ret_addr, .alloc);

            // Update the key of the hash map if the memory was relocated.
            if (resized_ptr != old_mem.ptr) {
                const large_alloc = entry.value_ptr.*;
                if (config.retain_metadata) {
                    entry.value_ptr.freed = true;
                    entry.value_ptr.captureStackTrace(ret_addr, .free);
                } else {
                    self.large_allocations.removeByPtr(entry.key_ptr);
                }

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(resized_ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.* = large_alloc;
            }

            return resized_ptr;
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn freeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            ret_addr: usize,
        ) void {
            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    return;
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addr_buf: [stack_n]usize = undefined;
                const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
                const tty_config = std.Io.tty.detectConfig(.stderr());
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    std.debug.FormatStackTrace{
                        .stack_trace = entry.value_ptr.getStackTrace(.alloc),
                        .tty_config = tty_config,
                    },
                    std.debug.FormatStackTrace{
                        .stack_trace = free_stack_trace,
                        .tty_config = tty_config,
                    },
                });
            }

            if (!config.never_unmap) {
                self.backing_allocator.rawFree(old_mem, alignment, ret_addr);
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= entry.value_ptr.requested_size;
            }

            if (config.verbose_log) {
                log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
            }

            if (!config.retain_metadata) {
                assert(self.large_allocations.remove(@intFromPtr(old_mem.ptr)));
            } else {
                entry.value_ptr.freed = true;
                entry.value_ptr.captureStackTrace(ret_addr, .free);
            }
        }

        fn alloc(context: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            if (config.enable_memory_limit) {
                const new_req_bytes = self.total_requested_bytes + len;
                if (new_req_bytes > self.requested_memory_limit) return null;
                self.total_requested_bytes = new_req_bytes;
            }

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
                const ptr = self.backing_allocator.rawAlloc(len, alignment, ret_addr) orelse return null;
                const slice = ptr[0..len];

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(slice.ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.bytes = slice;
                if (config.enable_memory_limit)
                    gop.value_ptr.requested_size = len;
                gop.value_ptr.captureStackTrace(ret_addr, .alloc);
                if (config.retain_metadata) {
                    gop.value_ptr.freed = false;
                    if (config.never_unmap) {
                        gop.value_ptr.alignment = alignment;
                    }
                }

                if (config.verbose_log) {
                    log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
                }
                return slice.ptr;
            }

            const slot_count = slot_counts[size_class_index];

            if (self.buckets[size_class_index]) |bucket| {
                @branchHint(.likely);
                const slot_index = bucket.allocated_count;
                if (slot_index < slot_count) {
                    @branchHint(.likely);
                    bucket.allocated_count = slot_index + 1;
                    const used_bits_byte = bucket.usedBits(slot_index / @bitSizeOf(usize));
                    const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
                    used_bits_byte.* |= (@as(usize, 1) << used_bit_index);
                    const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
                    if (config.stack_trace_frames > 0) {
                        bucket.captureStackTrace(ret_addr, slot_count, slot_index, .alloc);
                    }
                    if (config.safety) {
                        bucket.requestedSizes(slot_count)[slot_index] = @intCast(len);
                        bucket.log2PtrAligns(slot_count)[slot_index] = alignment;
                    }
                    const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                    const addr = page_addr + slot_index * size_class;
                    if (config.verbose_log) {
                        log.info("small alloc {d} bytes at 0x{x}", .{ len, addr });
                    }
                    return @ptrFromInt(addr);
                }
            }

            const page = self.backing_allocator.rawAlloc(page_size, page_align, @returnAddress()) orelse
                return null;
            const bucket: *BucketHeader = .fromPage(@intFromPtr(page), slot_count);
            bucket.* = .{
                .allocated_count = 1,
                .freed_count = 0,
                .prev = self.buckets[size_class_index],
                .next = null,
            };
            if (self.buckets[size_class_index]) |old_head| {
                old_head.next = bucket;
            }
            self.buckets[size_class_index] = bucket;

            if (!config.backing_allocator_zeroes) {
                @memset(@as([*]usize, @as(*[1]usize, bucket.usedBits(0)))[0..usedBitsCount(slot_count)], 0);
                if (config.safety) @memset(bucket.requestedSizes(slot_count), 0);
            }

            bucket.usedBits(0).* = 0b1;

            if (config.stack_trace_frames > 0) {
                bucket.captureStackTrace(ret_addr, slot_count, 0, .alloc);
            }

            if (config.safety) {
                bucket.requestedSizes(slot_count)[0] = @intCast(len);
                bucket.log2PtrAligns(slot_count)[0] = alignment;
            }

            if (config.verbose_log) {
                log.info("small alloc {d} bytes at 0x{x}", .{ len, @intFromPtr(page) });
            }

            return page;
        }

        fn resize(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, false) != null;
            } else {
                return resizeSmall(self, memory, alignment, new_len, return_address, size_class_index);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, true);
            } else {
                return if (resizeSmall(self, memory, alignment, new_len, return_address, size_class_index)) memory.ptr else null;
            }
        }

        fn free(
            context: *anyopaque,
            old_memory: []u8,
            alignment: mem.Alignment,
            return_address: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(old_memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.freeLarge(old_memory, alignment, return_address);
                return;
            }

            const slot_count = slot_counts[size_class_index];
            const freed_addr = @intFromPtr(old_memory.ptr);
            const page_addr = freed_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = freed_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(
                        return_address,
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        bucketStackTrace(bucket, slot_count, slot_index, .free),
                    );
                    // Recoverable since this is a free.
                    return;
                } else {
                    unreachable;
                }
            }

            // Definitely an in-use small alloc now.
            if (config.safety) {
                const requested_size = bucket.requestedSizes(slot_count)[slot_index];
                if (requested_size == 0) @panic("Invalid free");
                const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
                if (old_memory.len != requested_size or alignment != slot_alignment) {
                    var addr_buf: [stack_n]usize = undefined;
                    const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
                    if (old_memory.len != requested_size) {
                        const tty_config = std.Io.tty.detectConfig(.stderr());
                        log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                            requested_size,
                            old_memory.len,
                            std.debug.FormatStackTrace{
                                .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                                .tty_config = tty_config,
                            },
                            std.debug.FormatStackTrace{
                                .stack_trace = free_stack_trace,
                                .tty_config = tty_config,
                            },
                        });
                    }
                    if (alignment != slot_alignment) {
                        const tty_config = std.Io.tty.detectConfig(.stderr());
                        log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
                            slot_alignment.toByteUnits(),
                            alignment.toByteUnits(),
                            std.debug.FormatStackTrace{
                                .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                                .tty_config = tty_config,
                            },
                            std.debug.FormatStackTrace{
                                .stack_trace = free_stack_trace,
                                .tty_config = tty_config,
                            },
                        });
                    }
                }
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= old_memory.len;
            }

            if (config.stack_trace_frames > 0) {
                // Capture stack trace to be the "first free", in case a double free happens.
                bucket.captureStackTrace(return_address, slot_count, slot_index, .free);
            }

            used_byte.* &= ~(@as(usize, 1) << used_bit_index);
            if (config.safety) {
                bucket.requestedSizes(slot_count)[slot_index] = 0;
            }
            bucket.freed_count += 1;
            if (bucket.freed_count == bucket.allocated_count) {
                if (bucket.prev) |prev| {
                    prev.next = bucket.next;
                }

                if (bucket.next) |next| {
                    assert(self.buckets[size_class_index] != bucket);
                    next.prev = bucket.prev;
                } else {
                    assert(self.buckets[size_class_index] == bucket);
                    self.buckets[size_class_index] = bucket.prev;
                }

                if (!config.never_unmap) {
                    const page: [*]align(page_size) u8 = @ptrFromInt(page_addr);
                    self.backing_allocator.rawFree(page[0..page_size], page_align, @returnAddress());
                }
            }
            if (config.verbose_log) {
                log.info("small free {d} bytes at {*}", .{ old_memory.len, old_memory.ptr });
            }
        }

        fn resizeSmall(
            self: *Self,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
            size_class_index: usize,
        ) bool {
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_len - 1), @intFromEnum(alignment));
            if (!config.safety) return new_size_class_index == size_class_index;
            const slot_count = slot_counts[size_class_index];
            const memory_addr = @intFromPtr(memory.ptr);
            const page_addr = memory_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = memory_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                reportDoubleFree(
                    return_address,
                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                    bucketStackTrace(bucket, slot_count, slot_index, .free),
                );
                // Recoverable since this is a free.
                return false;
            }

            // Definitely an in-use small alloc now.
            const requested_size = bucket.requestedSizes(slot_count)[slot_index];
            if (requested_size == 0) @panic("Invalid free");
            const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
            if (memory.len != requested_size or alignment != slot_alignment) {
                var addr_buf: [stack_n]usize = undefined;
                const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
                if (memory.len != requested_size) {
                    const tty_config = std.Io.tty.detectConfig(.stderr());
                    log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                        requested_size,
                        memory.len,
                        std.debug.FormatStackTrace{
                            .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            .tty_config = tty_config,
                        },
                        std.debug.FormatStackTrace{
                            .stack_trace = free_stack_trace,
                            .tty_config = tty_config,
                        },
                    });
                }
                if (alignment != slot_alignment) {
                    const tty_config = std.Io.tty.detectConfig(.stderr());
                    log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
                        slot_alignment.toByteUnits(),
                        alignment.toByteUnits(),
                        std.debug.FormatStackTrace{
                            .stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            .tty_config = tty_config,
                        },
                        std.debug.FormatStackTrace{
                            .stack_trace = free_stack_trace,
                            .tty_config = tty_config,
                        },
                    });
                }
            }

            if (new_size_class_index != size_class_index) return false;

            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes - memory.len + new_len;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return false;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            if (memory.len > new_len) @memset(memory[new_len..], undefined);
            if (config.verbose_log)
                log.info("small resize {d} bytes at {*} to {d}", .{ memory.len, memory.ptr, new_len });

            if (config.safety)
                bucket.requestedSizes(slot_count)[slot_index] = @intCast(new_len);

            if (config.resize_stack_traces)
                bucket.captureStackTrace(return_address, slot_count, slot_index, .alloc);

            return true;
        }
    };
}

const TraceKind = enum {
    alloc,
    free,
};

const test_config: Config = .{};

test "small allocations - free in same order" {
    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var list = std.array_list.Managed(*u64).init(std.testing.allocator);
    defer list.deinit();

    var i: usize = 0;
    while (i < 513) : (i += 1) {
        const ptr = try allocator.create(u64);
        try list.append(ptr);
    }

    for (list.items) |ptr| {
        allocator.destroy(ptr);
    }
}

test "small allocations - free in reverse order" {
    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var list = std.array_list.Managed(*u64).init(std.testing.allocator);
    defer list.deinit();

    var i: usize = 0;
    while (i < 513) : (i += 1) {
        const ptr = try allocator.create(u64);
        try list.append(ptr);
    }

    while (list.pop()) |ptr| {
        allocator.destroy(ptr);
    }
}

test "large allocations" {
    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    const ptr1 = try allocator.alloc(u64, 42768);
    const ptr2 = try allocator.alloc(u64, 52768);
    allocator.free(ptr1);
    const ptr3 = try allocator.alloc(u64, 62768);
    allocator.free(ptr3);
    allocator.free(ptr2);
}

test "very large allocation" {
    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, math.maxInt(usize)));
}
1169
1170test "realloc" {
1171    var gpa = DebugAllocator(test_config){};
1172    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1173    const allocator = gpa.allocator();
1174
1175    var slice = try allocator.alignedAlloc(u8, .of(u32), 1);
1176    defer allocator.free(slice);
1177    slice[0] = 0x12;
1178
1179    // This reallocation should keep its pointer address.
1180    const old_slice = slice;
1181    slice = try allocator.realloc(slice, 2);
1182    try std.testing.expect(old_slice.ptr == slice.ptr);
1183    try std.testing.expect(slice[0] == 0x12);
1184    slice[1] = 0x34;
1185
1186    // This requires upgrading to a larger size class
1187    slice = try allocator.realloc(slice, 17);
1188    try std.testing.expect(slice[0] == 0x12);
1189    try std.testing.expect(slice[1] == 0x34);
1190}
1191
1192test "shrink" {
1193    var gpa: DebugAllocator(test_config) = .{};
1194    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1195    const allocator = gpa.allocator();
1196
1197    var slice = try allocator.alloc(u8, 20);
1198    defer allocator.free(slice);
1199
1200    @memset(slice, 0x11);
1201
1202    try std.testing.expect(allocator.resize(slice, 17));
1203    slice = slice[0..17];
1204
1205    for (slice) |b| {
1206        try std.testing.expect(b == 0x11);
1207    }
1208
1209    // Does not cross size class boundaries when shrinking.
1210    try std.testing.expect(!allocator.resize(slice, 16));
1211}
1212
1213test "large object - grow" {
1214    if (builtin.target.cpu.arch.isWasm()) {
1215        // Not expected to pass on targets that do not have memory mapping.
1216        return error.SkipZigTest;
1217    }
1218    var gpa: DebugAllocator(test_config) = .{};
1219    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
1220    const allocator = gpa.allocator();
1221
1222    var slice1 = try allocator.alloc(u8, default_page_size * 2 - 20);
1223    defer allocator.free(slice1);
1224
1225    const old = slice1;
1226    slice1 = try allocator.realloc(slice1, default_page_size * 2 - 10);
1227    try std.testing.expect(slice1.ptr == old.ptr);
1228
1229    slice1 = try allocator.realloc(slice1, default_page_size * 2);
1230    try std.testing.expect(slice1.ptr == old.ptr);
1231
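    // Growing beyond two pages needs a larger backing allocation, so the
    // address is allowed to change here.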
    slice1 = try allocator.realloc(slice1, default_page_size * 2 + 1);
}

test "realloc small object to large object" {
    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var slice = try allocator.alloc(u8, 70);
    defer allocator.free(slice);
    slice[0] = 0x12;
    slice[60] = 0x34;

    // This requires upgrading to a large object
    const large_object_size = default_page_size * 2 + 50;
    slice = try allocator.realloc(slice, large_object_size);
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[60] == 0x34);
}

test "shrink large object to large object" {
    var gpa: DebugAllocator(test_config) = .{};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var slice = try allocator.alloc(u8, default_page_size * 2 + 50);
    defer allocator.free(slice);
    slice[0] = 0x12;
    slice[60] = 0x34;

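    // The backing allocator may refuse to shrink a large allocation in place;
    // if it does, there is nothing further to check in this test.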
    if (!allocator.resize(slice, default_page_size * 2 + 1)) return;
    slice = slice.ptr[0 .. default_page_size * 2 + 1];
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[60] == 0x34);

    try std.testing.expect(allocator.resize(slice, default_page_size * 2 + 1));
    slice = slice[0 .. default_page_size * 2 + 1];
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[60] == 0x34);

    slice = try allocator.realloc(slice, default_page_size * 2);
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[60] == 0x34);
}

test "shrink large object to large object with larger alignment" {
    if (!builtin.link_libc and builtin.os.tag == .wasi) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22731

    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var debug_buffer: [1000]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
    const debug_allocator = fba.allocator();

    const alloc_size = default_page_size * 2 + 50;
    var slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
    defer allocator.free(slice);

    const big_alignment: usize = default_page_size * 2;
    // This loop allocates until we find a page that is not aligned to the big
    // alignment. After the loop we shrink the allocation while raising the
    // alignment to the bigger one, which we know will force it to realloc.
    var stuff_to_free = std.array_list.Managed([]align(16) u8).init(debug_allocator);
    while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
    }
    while (stuff_to_free.pop()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[60] = 0x34;

    slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2);
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[60] == 0x34);
}

test "realloc large object to small object" {
    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var slice = try allocator.alloc(u8, default_page_size * 2 + 50);
    defer allocator.free(slice);
    slice[0] = 0x12;
    slice[16] = 0x34;

    slice = try allocator.realloc(slice, 19);
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[16] == 0x34);
}

test "overridable mutexes" {
    var gpa = DebugAllocator(.{ .MutexType = std.Thread.Mutex }){
        .backing_allocator = std.testing.allocator,
        .mutex = std.Thread.Mutex{},
    };
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    const ptr = try allocator.create(i32);
    defer allocator.destroy(ptr);
}

test "non-page-allocator backing allocator" {
    var gpa: DebugAllocator(.{
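        // std.testing.allocator does not hand out zeroed memory, so the
        // allocator must not be configured to assume zeroed backing pages.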
        .backing_allocator_zeroes = false,
    }) = .{
        .backing_allocator = std.testing.allocator,
    };
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    const ptr = try allocator.create(i32);
    defer allocator.destroy(ptr);
}

test "realloc large object to larger alignment" {
    if (!builtin.link_libc and builtin.os.tag == .wasi) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22731

    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var debug_buffer: [1000]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
    const debug_allocator = fba.allocator();

    var slice = try allocator.alignedAlloc(u8, .@"16", default_page_size * 2 + 50);
    defer allocator.free(slice);

    const big_alignment: usize = default_page_size * 2;
    // This loop allocates until we find a page that is not aligned to the big alignment.
    var stuff_to_free = std.array_list.Managed([]align(16) u8).init(debug_allocator);
    while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, .@"16", default_page_size * 2 + 50);
    }
    while (stuff_to_free.pop()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[16] = 0x34;

    slice = try allocator.reallocAdvanced(slice, 32, default_page_size * 2 + 100);
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[16] == 0x34);

    slice = try allocator.reallocAdvanced(slice, 32, default_page_size * 2 + 25);
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[16] == 0x34);

    slice = try allocator.reallocAdvanced(slice, big_alignment, default_page_size * 2 + 100);
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[16] == 0x34);
}

test "large object rejects shrinking to small" {
    if (builtin.target.cpu.arch.isWasm()) {
        // Not expected to pass on targets that do not have memory mapping.
        return error.SkipZigTest;
    }

    var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, .{ .fail_index = 3 });
    var gpa: DebugAllocator(.{}) = .{
        .backing_allocator = failing_allocator.allocator(),
    };
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    var slice = try allocator.alloc(u8, default_page_size * 2 + 50);
    defer allocator.free(slice);
    slice[0] = 0x12;
    slice[3] = 0x34;

    try std.testing.expect(!allocator.resize(slice, 4));
    try std.testing.expect(slice[0] == 0x12);
    try std.testing.expect(slice[3] == 0x34);
}

test "objects of size 1024 and 2048" {
    var gpa = DebugAllocator(test_config){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    const slice = try allocator.alloc(u8, 1025);
    const slice2 = try allocator.alloc(u8, 3000);

    allocator.free(slice);
    allocator.free(slice2);
}

test "setting a memory cap" {
    var gpa = DebugAllocator(.{ .enable_memory_limit = true }){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    gpa.requested_memory_limit = 1010;

    const small = try allocator.create(i32);
    try std.testing.expect(gpa.total_requested_bytes == 4);

    const big = try allocator.alloc(u8, 1000);
    try std.testing.expect(gpa.total_requested_bytes == 1004);

    try std.testing.expectError(error.OutOfMemory, allocator.create(u64));

    allocator.destroy(small);
    try std.testing.expect(gpa.total_requested_bytes == 1000);

    allocator.free(big);
    try std.testing.expect(gpa.total_requested_bytes == 0);

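    // Allocating exactly up to the limit is allowed; only requests that would
    // exceed it are rejected.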
    const exact = try allocator.alloc(u8, 1010);
    try std.testing.expect(gpa.total_requested_bytes == 1010);
    allocator.free(exact);
}

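// The following test is an illustrative sketch rather than part of the
// original suite: it exercises the memory-limit bookkeeping in the small
// resize path above, relying on the fact that 40, 50, and 62 bytes all round
// up to the same 64-byte size class, so these resizes stay in place.
test "memory cap also applies to in-place resizes" {
    var gpa = DebugAllocator(.{ .enable_memory_limit = true }){};
    defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
    const allocator = gpa.allocator();

    gpa.requested_memory_limit = 60;

    var slice = try allocator.alloc(u8, 40);
    defer allocator.free(slice);
    try std.testing.expect(gpa.total_requested_bytes == 40);

    // Growing within the limit succeeds and updates the running total.
    try std.testing.expect(allocator.resize(slice, 50));
    slice = slice.ptr[0..50];
    try std.testing.expect(gpa.total_requested_bytes == 50);

    // Growing past the limit is rejected, even though the 64-byte size class
    // would have room for it, and the accounting is left unchanged.
    try std.testing.expect(!allocator.resize(slice, 62));
    try std.testing.expect(gpa.total_requested_bytes == 50);
}
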
1453test "large allocations count requested size not backing size" {
1454    var gpa: DebugAllocator(.{ .enable_memory_limit = true }) = .{};
1455    const allocator = gpa.allocator();
1456
1457    var buf = try allocator.alignedAlloc(u8, .@"1", default_page_size + 1);
1458    try std.testing.expectEqual(default_page_size + 1, gpa.total_requested_bytes);
1459    buf = try allocator.realloc(buf, 1);
1460    try std.testing.expectEqual(1, gpa.total_requested_bytes);
1461    buf = try allocator.realloc(buf, 2);
1462    try std.testing.expectEqual(2, gpa.total_requested_bytes);
1463}

test "retain metadata and never unmap" {
    var gpa = std.heap.DebugAllocator(.{
        .safety = true,
        .never_unmap = true,
        .retain_metadata = true,
    }){};
    defer std.debug.assert(gpa.deinit() == .ok);
    const allocator = gpa.allocator();

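    // retain_metadata keeps the metadata of freed allocations around (so that,
    // for example, a later double free can be reported with stack traces), and
    // never_unmap keeps the freed memory mapped. Allocating and freeing twice
    // exercises that retained state.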
    const alloc = try allocator.alloc(u8, 8);
    allocator.free(alloc);

    const alloc2 = try allocator.alloc(u8, 8);
    allocator.free(alloc2);
}