const std = @import("../std.zig");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const mem = std.mem;
const maxInt = std.math.maxInt;
const assert = std.debug.assert;
const native_os = builtin.os.tag;
const windows = std.os.windows;
const ntdll = windows.ntdll;
const posix = std.posix;
const page_size_min = std.heap.page_size_min;

const SUCCESS = @import("../os/windows/ntstatus.zig").NTSTATUS.SUCCESS;
const MEM_RESERVE_PLACEHOLDER = windows.MEM_RESERVE_PLACEHOLDER;
const MEM_PRESERVE_PLACEHOLDER = windows.MEM_PRESERVE_PLACEHOLDER;

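/// An `Allocator.VTable` that maps pages directly from the operating system;
/// all of the functions ignore the context pointer. A minimal usage sketch,
/// assuming this file is `std.heap.PageAllocator` (as its imports suggest):
///
///     const page_allocator: Allocator = .{ .ptr = undefined, .vtable = &vtable };
///     const buf = try page_allocator.alloc(u8, 4096);
///     defer page_allocator.free(buf);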
pub const vtable: Allocator.VTable = .{
    .alloc = alloc,
    .resize = resize,
    .remap = remap,
    .free = free,
};

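/// Maps at least `n` bytes of read/write memory from the OS such that the base
/// address satisfies `alignment`. The mapped length is `n` rounded up to the
/// page size. Returns `null` on failure; release the memory with `unmap`.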
pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 {
    const page_size = std.heap.pageSize();
    if (n >= maxInt(usize) - page_size) return null;
    const alignment_bytes = alignment.toByteUnits();

    if (native_os == .windows) {
        var base_addr: ?*anyopaque = null;
        var size: windows.SIZE_T = n;

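        // First attempt: a plain reserve-and-commit. If the kernel happens to
        // return a base address that already satisfies the requested
        // alignment, use it as-is.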
        var status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_COMMIT | windows.MEM_RESERVE, windows.PAGE_READWRITE);

        if (status == SUCCESS and mem.isAligned(@intFromPtr(base_addr), alignment_bytes)) {
            return @ptrCast(base_addr);
        }

        if (status == SUCCESS) {
            var region_size: windows.SIZE_T = 0;
            _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &region_size, windows.MEM_RELEASE);
        }

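        // Fallback: reserve an oversized placeholder region, release the
        // unaligned prefix and the unused suffix while preserving the
        // placeholder, then commit the aligned sub-range that remains.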
        const overalloc_len = n + alignment_bytes - page_size;
        const aligned_len = mem.alignForward(usize, n, page_size);

        base_addr = null;
        size = overalloc_len;

        status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, windows.PAGE_NOACCESS);

        if (status != SUCCESS) return null;

        const placeholder_addr = @intFromPtr(base_addr);
        const aligned_addr = mem.alignForward(usize, placeholder_addr, alignment_bytes);
        const prefix_size = aligned_addr - placeholder_addr;

        if (prefix_size > 0) {
            var prefix_base = base_addr;
            var prefix_size_param: windows.SIZE_T = prefix_size;
            _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&prefix_base), &prefix_size_param, windows.MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
        }

        const suffix_start = aligned_addr + aligned_len;
        const suffix_size = (placeholder_addr + overalloc_len) - suffix_start;
        if (suffix_size > 0) {
            var suffix_base = @as(?*anyopaque, @ptrFromInt(suffix_start));
            var suffix_size_param: windows.SIZE_T = suffix_size;
            _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&suffix_base), &suffix_size_param, windows.MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
        }

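        // Commit the aligned sub-range inside the surviving placeholder.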
        base_addr = @ptrFromInt(aligned_addr);
        size = aligned_len;

        status = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), 0, &size, windows.MEM_COMMIT | MEM_PRESERVE_PLACEHOLDER, windows.PAGE_READWRITE);

        if (status == SUCCESS) {
            return @ptrCast(base_addr);
        }

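        // Committing failed; give the placeholder back to the OS and report
        // failure to the caller.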
        base_addr = @as(?*anyopaque, @ptrFromInt(aligned_addr));
        size = aligned_len;
        _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &size, windows.MEM_RELEASE);

        return null;
    }

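    // POSIX path: over-allocate so that an `aligned_len`-sized region with the
    // requested alignment is guaranteed to exist somewhere in the mapping; the
    // excess pages are unmapped below.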
    const aligned_len = mem.alignForward(usize, n, page_size);
    const max_drop_len = alignment_bytes - @min(alignment_bytes, page_size);
    const overalloc_len = if (max_drop_len <= aligned_len - n)
        aligned_len
    else
        mem.alignForward(usize, aligned_len + max_drop_len, page_size);
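    // The hint nudges mmap to place consecutive mappings next to each other,
    // keeping the address space compact; it is updated with a best-effort CAS
    // after a successful mapping.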
    const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
    const slice = posix.mmap(
        hint,
        overalloc_len,
        posix.PROT.READ | posix.PROT.WRITE,
        .{ .TYPE = .PRIVATE, .ANONYMOUS = true },
        -1,
        0,
    ) catch return null;
    const result_ptr = mem.alignPointer(slice.ptr, alignment_bytes) orelse return null;
    // Unmap the extra bytes that were only requested in order to guarantee
    // that the range of memory we were provided had a proper alignment in it
    // somewhere. The extra bytes could be at the beginning, or end, or both.
    const drop_len = result_ptr - slice.ptr;
    if (drop_len != 0) posix.munmap(slice[0..drop_len]);
    const remaining_len = overalloc_len - drop_len;
    if (remaining_len > aligned_len) posix.munmap(@alignCast(result_ptr[aligned_len..remaining_len]));
    const new_hint: [*]align(page_size_min) u8 = @alignCast(result_ptr + aligned_len);
    _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
    return result_ptr;
}

fn alloc(context: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
    _ = context;
    _ = ra;
    assert(n > 0);
    return map(n, alignment);
}

fn resize(context: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, return_address: usize) bool {
    _ = context;
    _ = alignment;
    _ = return_address;
    return realloc(memory, new_len, false) != null;
}

fn remap(context: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, return_address: usize) ?[*]u8 {
    _ = context;
    _ = alignment;
    _ = return_address;
    return realloc(memory, new_len, true);
}

fn free(context: *anyopaque, memory: []u8, alignment: mem.Alignment, return_address: usize) void {
    _ = context;
    _ = alignment;
    _ = return_address;
    return unmap(@alignCast(memory));
}

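/// Releases a mapping previously obtained from `map`. On Windows the whole
/// region is released; on POSIX the length is rounded up to the page size
/// before calling `munmap`.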
pub fn unmap(memory: []align(page_size_min) u8) void {
    if (native_os == .windows) {
        var base_addr: ?*anyopaque = memory.ptr;
        var region_size: windows.SIZE_T = 0;
        _ = ntdll.NtFreeVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&base_addr), &region_size, windows.MEM_RELEASE);
    } else {
        const page_aligned_len = mem.alignForward(usize, memory.len, std.heap.pageSize());
        posix.munmap(memory.ptr[0..page_aligned_len]);
    }
}

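/// Attempts to resize a mapping in place; if `may_move` is set, the mapping is
/// also allowed to be relocated (e.g. via `mremap`). Returns the new base
/// pointer, or `null` if the request cannot be satisfied.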
pub fn realloc(uncasted_memory: []u8, new_len: usize, may_move: bool) ?[*]u8 {
    const memory: []align(page_size_min) u8 = @alignCast(uncasted_memory);
    const page_size = std.heap.pageSize();
    const new_size_aligned = mem.alignForward(usize, new_len, page_size);

    if (native_os == .windows) {
        if (new_len <= memory.len) {
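            // In-place shrink: MEM_RESET tells the OS that the contents of the
            // tail pages are no longer needed, while the address range itself
            // stays reserved and committed.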
            const base_addr = @intFromPtr(memory.ptr);
            const old_addr_end = base_addr + memory.len;
            const new_addr_end = mem.alignForward(usize, base_addr + new_len, page_size);
            if (old_addr_end > new_addr_end) {
                var decommit_addr: ?*anyopaque = @ptrFromInt(new_addr_end);
                var decommit_size: windows.SIZE_T = old_addr_end - new_addr_end;

                _ = ntdll.NtAllocateVirtualMemory(windows.GetCurrentProcess(), @ptrCast(&decommit_addr), 0, &decommit_size, windows.MEM_RESET, windows.PAGE_NOACCESS);
            }
            return memory.ptr;
        }
        const old_size_aligned = mem.alignForward(usize, memory.len, page_size);
        if (new_size_aligned <= old_size_aligned) {
            return memory.ptr;
        }
        return null;
    }

    const page_aligned_len = mem.alignForward(usize, memory.len, page_size);
    if (new_size_aligned == page_aligned_len)
        return memory.ptr;

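    // Prefer mremap where the platform provides it: the kernel can grow or
    // shrink the mapping, and relocate it when `may_move` allows.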
    if (posix.MREMAP != void) {
        // TODO: if the next_mmap_addr_hint is within the remapped range, update it
        const new_memory = posix.mremap(memory.ptr, page_aligned_len, new_size_aligned, .{ .MAYMOVE = may_move }, null) catch return null;
        return new_memory.ptr;
    }

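    // Without mremap, shrinking can still be done in place by unmapping the
    // trailing pages; growing is not possible here.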
    if (new_size_aligned < page_aligned_len) {
        const ptr = memory.ptr + new_size_aligned;
        // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
        posix.munmap(@alignCast(ptr[0 .. page_aligned_len - new_size_aligned]));
        return memory.ptr;
    }

    return null;
}