Commit 13cccdd92c
lib/std/c.zig
@@ -332,3 +332,13 @@ pub extern "c" fn prctl(option: c_int, ...) c_int;
pub extern "c" fn getrlimit(resource: rlimit_resource, rlim: *rlimit) c_int;
pub extern "c" fn setrlimit(resource: rlimit_resource, rlim: *const rlimit) c_int;
+
+pub const max_align_t = if (std.Target.current.abi == .msvc)
+ f64
+else if (std.Target.current.isDarwin())
+ c_longdouble
+else
+ extern struct {
+ a: c_longlong,
+ b: c_longdouble,
+ };
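
This mirrors C11's `max_align_t`: memory returned by `malloc` is suitably
aligned for every fundamental type, and `@alignOf(std.c.max_align_t)` models
that guarantee in Zig. A minimal sketch of how a caller can rely on it
(hypothetical test, assuming the std APIs of this commit's era):

    const std = @import("std");

    test "max_align_t covers the fundamental C types" {
        // On every ABI branch of the definition above, the alignment of
        // max_align_t is at least that of the widest fundamental C types.
        comptime {
            std.debug.assert(@alignOf(std.c.max_align_t) >= @alignOf(c_longlong));
            std.debug.assert(@alignOf(std.c.max_align_t) >= @alignOf(c_longdouble));
        }
    }
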
lib/std/heap.zig
@@ -148,12 +148,56 @@ const CAllocator = struct {
}
};
+/// Supports the full Allocator interface, including alignment, and exploits
+/// `malloc_usable_size` if available. For an allocator that directly calls
+/// `malloc`/`free`, see `raw_c_allocator`.
pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
.allocFn = CAllocator.alloc,
.resizeFn = CAllocator.resize,
};
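
Unlike the raw variant below, `c_allocator` implements full alignment support,
over-allocating and re-aligning when a request exceeds what `malloc` itself
guarantees. A hedged sketch of such an over-aligned request (hypothetical
test, era APIs assumed, requires linking libc):

    const std = @import("std");

    test "c_allocator honors over-aligned requests" {
        // 4096 exceeds @alignOf(std.c.max_align_t) on typical targets, so
        // plain malloc cannot serve this; it exercises CAllocator's
        // alignment support instead.
        const buf = try std.heap.c_allocator.alignedAlloc(u8, 4096, 100);
        defer std.heap.c_allocator.free(buf);
        std.debug.assert(@ptrToInt(buf.ptr) % 4096 == 0);
    }
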
+/// Asserts that allocation alignments are within `@alignOf(std.c.max_align_t)`
+/// and directly calls `malloc`/`free`. Does not attempt to utilize
+/// `malloc_usable_size`. This allocator is safe to use as the backing allocator
+/// with `ArenaAllocator` and `GeneralPurposeAllocator`, and in these cases is
+/// more optimal than `c_allocator`.
+pub const raw_c_allocator = &raw_c_allocator_state;
+var raw_c_allocator_state = Allocator{
+ .allocFn = rawCAlloc,
+ .resizeFn = rawCResize,
+};
+
+fn rawCAlloc(
+ self: *Allocator,
+ len: usize,
+ ptr_align: u29,
+ len_align: u29,
+ ret_addr: usize,
+) Allocator.Error![]u8 {
+ assert(ptr_align <= @alignOf(std.c.max_align_t));
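+    // malloc returns memory aligned to at least max_align_t, so the
+    // assert above is the only alignment handling required here.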
+ const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
+ return ptr[0..len];
+}
+
+fn rawCResize(
+ self: *Allocator,
+ buf: []u8,
+ old_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ret_addr: usize,
+) Allocator.Error!usize {
+ if (new_len == 0) {
+ c.free(buf.ptr);
+ return 0;
+ }
+ if (new_len <= buf.len) {
+ return mem.alignAllocLen(buf.len, new_len, len_align);
+ }
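+    // Growing is not supported: a resize may not relocate the buffer, and
+    // plain malloc gives no way to extend a block in place.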
+ return error.OutOfMemory;
+}
+
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (std.Target.current.isWasm())
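
The intended pattern for `raw_c_allocator` is as a backing allocator: the
wrapper performs its own alignment and size bookkeeping, so plain
`malloc`/`free` is all the backing layer needs. A minimal sketch (hypothetical
program, era APIs assumed, requires linking libc):

    const std = @import("std");

    pub fn main() !void {
        // Every chunk request the arena makes stays within the
        // @alignOf(std.c.max_align_t) assertion in rawCAlloc.
        var arena = std.heap.ArenaAllocator.init(std.heap.raw_c_allocator);
        defer arena.deinit();
        const allocator = &arena.allocator;

        const nums = try allocator.alloc(u32, 16);
        for (nums) |*n, i| n.* = @intCast(u32, i);
    }
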
@@ -804,6 +848,12 @@ test "c_allocator" {
}
}
+test "raw_c_allocator" {
+ if (builtin.link_libc) {
+ try testAllocator(raw_c_allocator);
+ }
+}
+
test "WasmPageAllocator internals" {
if (comptime std.Target.current.isWasm()) {
const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
@@ -958,6 +1008,7 @@ test "ThreadSafeFixedBufferAllocator" {
try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
}
+/// This function is shared by several allocator tests, so it must not try
+/// alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: *mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = &validationAllocator.allocator;
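
Concretely, the cap means staying at or below the alignment `malloc` is
guaranteed to provide. Something along these lines (hypothetical helper, era
APIs assumed):

    const std = @import("std");

    fn allocAtMallocGuarantee(allocator: *std.mem.Allocator) !void {
        // Stay at or below the malloc guarantee so raw_c_allocator's
        // assert(ptr_align <= @alignOf(std.c.max_align_t)) holds.
        const slice = try allocator.alignedAlloc(u8, @alignOf(std.c.max_align_t), 100);
        allocator.free(slice);
    }
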
src/main.zig
@@ -103,7 +103,7 @@ pub fn log(
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
pub fn main() anyerror!void {
- const gpa = if (std.builtin.link_libc) std.heap.c_allocator else &general_purpose_allocator.allocator;
+ const gpa = if (std.builtin.link_libc) std.heap.raw_c_allocator else &general_purpose_allocator.allocator;
defer if (!std.builtin.link_libc) {
_ = general_purpose_allocator.deinit();
};