Commit 85de022c56
Changed files (148)
ci
doc
lib
std
heap
json
math
net
os
testing
zig
src
arch
aarch64
riscv64
x86_64
link
translate_c
test
behavior
ci/srht/update-download-page.zig
@@ -18,7 +18,7 @@ pub fn main() !void {
}
fn render(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
in_file: []const u8,
out_file: []const u8,
fmt: enum {
doc/docgen.zig
@@ -342,7 +342,7 @@ const Action = enum {
Close,
};
-fn genToc(allocator: *Allocator, tokenizer: *Tokenizer) !Toc {
+fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
var urls = std.StringHashMap(Token).init(allocator);
errdefer urls.deinit();
@@ -708,7 +708,7 @@ fn genToc(allocator: *Allocator, tokenizer: *Tokenizer) !Toc {
};
}
-fn urlize(allocator: *Allocator, input: []const u8) ![]u8 {
+fn urlize(allocator: Allocator, input: []const u8) ![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
@@ -727,7 +727,7 @@ fn urlize(allocator: *Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}
-fn escapeHtml(allocator: *Allocator, input: []const u8) ![]u8 {
+fn escapeHtml(allocator: Allocator, input: []const u8) ![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
@@ -773,7 +773,7 @@ test "term color" {
try testing.expectEqualSlices(u8, "A<span class=\"t32_1\">green</span>B", result);
}
-fn termColor(allocator: *Allocator, input: []const u8) ![]u8 {
+fn termColor(allocator: Allocator, input: []const u8) ![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
@@ -883,7 +883,7 @@ fn writeEscapedLines(out: anytype, text: []const u8) !void {
}
fn tokenizeAndPrintRaw(
- allocator: *Allocator,
+ allocator: Allocator,
docgen_tokenizer: *Tokenizer,
out: anytype,
source_token: Token,
@@ -1137,7 +1137,7 @@ fn tokenizeAndPrintRaw(
}
fn tokenizeAndPrint(
- allocator: *Allocator,
+ allocator: Allocator,
docgen_tokenizer: *Tokenizer,
out: anytype,
source_token: Token,
@@ -1146,7 +1146,7 @@ fn tokenizeAndPrint(
return tokenizeAndPrintRaw(allocator, docgen_tokenizer, out, source_token, raw_src);
}
-fn printSourceBlock(allocator: *Allocator, docgen_tokenizer: *Tokenizer, out: anytype, syntax_block: SyntaxBlock) !void {
+fn printSourceBlock(allocator: Allocator, docgen_tokenizer: *Tokenizer, out: anytype, syntax_block: SyntaxBlock) !void {
const source_type = @tagName(syntax_block.source_type);
try out.print("<figure><figcaption class=\"{s}-cap\"><cite class=\"file\">{s}</cite></figcaption><pre>", .{ source_type, syntax_block.name });
@@ -1188,7 +1188,7 @@ fn printShell(out: anytype, shell_content: []const u8) !void {
}
fn genHtml(
- allocator: *Allocator,
+ allocator: Allocator,
tokenizer: *Tokenizer,
toc: *Toc,
out: anytype,
@@ -1687,7 +1687,7 @@ fn genHtml(
}
}
-fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
+fn exec(allocator: Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
const result = try ChildProcess.exec(.{
.allocator = allocator,
.argv = args,
@@ -1711,7 +1711,7 @@ fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !
return result;
}
-fn getBuiltinCode(allocator: *Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
+fn getBuiltinCode(allocator: Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "build-obj", "--show-builtin" });
return result.stdout;
}
lib/std/atomic/queue.zig
@@ -156,7 +156,7 @@ pub fn Queue(comptime T: type) type {
}
const Context = struct {
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
@@ -176,8 +176,8 @@ test "std.atomic.Queue" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
- var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+ var a = fixed_buffer_allocator.getThreadSafeAllocator();
var queue = Queue(i32).init();
var context = Context{
lib/std/atomic/stack.zig
@@ -69,7 +69,7 @@ pub fn Stack(comptime T: type) type {
}
const Context = struct {
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
stack: *Stack(i32),
put_sum: isize,
get_sum: isize,
@@ -88,8 +88,8 @@ test "std.atomic.stack" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
- var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+ var a = fixed_buffer_allocator.getThreadSafeAllocator();
var stack = Stack(i32).init();
var context = Context{
lib/std/build/InstallRawStep.zig
@@ -40,7 +40,7 @@ const BinaryElfOutput = struct {
self.segments.deinit();
}
- pub fn parse(allocator: *Allocator, elf_file: File) !Self {
+ pub fn parse(allocator: Allocator, elf_file: File) !Self {
var self: Self = .{
.segments = ArrayList(*BinaryElfSegment).init(allocator),
.sections = ArrayList(*BinaryElfSection).init(allocator),
@@ -298,7 +298,7 @@ fn containsValidAddressRange(segments: []*BinaryElfSegment) bool {
return true;
}
-fn emitRaw(allocator: *Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
+fn emitRaw(allocator: Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
var elf_file = try fs.cwd().openFile(elf_path, .{});
defer elf_file.close();
lib/std/build/OptionsStep.zig
@@ -274,7 +274,7 @@ test "OptionsStep" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"test",
"test",
"test",
lib/std/compress/gzip.zig
@@ -24,7 +24,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
error{ CorruptedData, WrongChecksum };
pub const Reader = io.Reader(*Self, Error, read);
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
inflater: deflate.InflateStream(ReaderType),
in_reader: ReaderType,
hasher: std.hash.Crc32,
@@ -37,7 +37,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
modification_time: u32,
},
- fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+ fn init(allocator: mem.Allocator, source: ReaderType) !Self {
// gzip header format is specified in RFC1952
const header = try source.readBytesNoEof(10);
@@ -152,7 +152,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
};
}
-pub fn gzipStream(allocator: *mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
+pub fn gzipStream(allocator: mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
return GzipStream(@TypeOf(reader)).init(allocator, reader);
}
lib/std/compress/zlib.zig
@@ -17,13 +17,13 @@ pub fn ZlibStream(comptime ReaderType: type) type {
error{ WrongChecksum, Unsupported };
pub const Reader = io.Reader(*Self, Error, read);
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
inflater: deflate.InflateStream(ReaderType),
in_reader: ReaderType,
hasher: std.hash.Adler32,
window_slice: []u8,
- fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+ fn init(allocator: mem.Allocator, source: ReaderType) !Self {
// Zlib header format is specified in RFC1950
const header = try source.readBytesNoEof(2);
@@ -88,7 +88,7 @@ pub fn ZlibStream(comptime ReaderType: type) type {
};
}
-pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
+pub fn zlibStream(allocator: mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
return ZlibStream(@TypeOf(reader)).init(allocator, reader);
}
lib/std/crypto/argon2.zig
@@ -201,7 +201,7 @@ fn initBlocks(
}
fn processBlocks(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,
@@ -240,7 +240,7 @@ fn processBlocksSt(
}
fn processBlocksMt(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,
@@ -480,7 +480,7 @@ fn indexAlpha(
///
/// Salt has to be at least 8 bytes length.
pub fn kdf(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
derived_key: []u8,
password: []const u8,
salt: []const u8,
@@ -524,7 +524,7 @@ const PhcFormatHasher = struct {
};
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
password: []const u8,
params: Params,
mode: Mode,
@@ -550,7 +550,7 @@ const PhcFormatHasher = struct {
}
pub fn verify(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {
@@ -579,7 +579,7 @@ const PhcFormatHasher = struct {
///
/// Only phc encoding is supported.
pub const HashOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
params: Params,
mode: Mode = .argon2id,
encoding: pwhash.Encoding = .phc,
@@ -609,7 +609,7 @@ pub fn strHash(
///
/// Allocator is required for argon2.
pub const VerifyOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
};
/// Verify that a previously computed hash is valid for a given password.
lib/std/crypto/bcrypt.zig
@@ -368,7 +368,7 @@ const CryptFormatHasher = struct {
/// Options for hashing a password.
pub const HashOptions = struct {
- allocator: ?*mem.Allocator = null,
+ allocator: ?mem.Allocator = null,
params: Params,
encoding: pwhash.Encoding,
};
@@ -394,7 +394,7 @@ pub fn strHash(
/// Options for hash verification.
pub const VerifyOptions = struct {
- allocator: ?*mem.Allocator = null,
+ allocator: ?mem.Allocator = null,
};
/// Verify that a previously computed hash is valid for a given password.
lib/std/crypto/benchmark.zig
@@ -363,7 +363,7 @@ pub fn main() !void {
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
- const args = try std.process.argsAlloc(&fixed.allocator);
+ const args = try std.process.argsAlloc(fixed.getAllocator());
var filter: ?[]u8 = "";
lib/std/crypto/scrypt.zig
@@ -161,7 +161,7 @@ pub const Params = struct {
///
/// scrypt is defined in RFC 7914.
///
-/// allocator: *mem.Allocator.
+/// allocator: mem.Allocator.
///
/// derived_key: Slice of appropriate size for generated key. Generally 16 or 32 bytes in length.
/// May be uninitialized. All bytes will be overwritten.
@@ -173,7 +173,7 @@ pub const Params = struct {
///
/// params: Params.
pub fn kdf(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
derived_key: []u8,
password: []const u8,
salt: []const u8,
@@ -406,7 +406,7 @@ const PhcFormatHasher = struct {
/// Return a non-deterministic hash of the password encoded as a PHC-format string
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
password: []const u8,
params: Params,
buf: []u8,
@@ -429,7 +429,7 @@ const PhcFormatHasher = struct {
/// Verify a password against a PHC-format encoded string
pub fn verify(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {
@@ -455,7 +455,7 @@ const CryptFormatHasher = struct {
/// Return a non-deterministic hash of the password encoded into the modular crypt format
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
password: []const u8,
params: Params,
buf: []u8,
@@ -478,7 +478,7 @@ const CryptFormatHasher = struct {
/// Verify a password against a string in modular crypt format
pub fn verify(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {
@@ -497,7 +497,7 @@ const CryptFormatHasher = struct {
///
/// Allocator is required for scrypt.
pub const HashOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
params: Params,
encoding: pwhash.Encoding,
};
@@ -520,7 +520,7 @@ pub fn strHash(
///
/// Allocator is required for scrypt.
pub const VerifyOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
};
/// Verify that a previously computed hash is valid for a given password.
lib/std/event/group.zig
@@ -15,7 +15,7 @@ pub fn Group(comptime ReturnType: type) type {
frame_stack: Stack,
alloc_stack: AllocStack,
lock: Lock,
- allocator: *Allocator,
+ allocator: Allocator,
const Self = @This();
@@ -31,7 +31,7 @@ pub fn Group(comptime ReturnType: type) type {
handle: anyframe->ReturnType,
};
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.frame_stack = Stack.init(),
.alloc_stack = AllocStack.init(),
@@ -127,7 +127,7 @@ test "std.event.Group" {
_ = async testGroup(std.heap.page_allocator);
}
-fn testGroup(allocator: *Allocator) callconv(.Async) void {
+fn testGroup(allocator: Allocator) callconv(.Async) void {
var count: usize = 0;
var group = Group(void).init(allocator);
var sleep_a_little_frame = async sleepALittle(&count);
lib/std/event/loop.zig
@@ -727,7 +727,7 @@ pub const Loop = struct {
/// with `allocator` and freed when the function returns.
/// `func` must return void and it can be an async function.
/// Yields to the event loop, running the function on the next tick.
- pub fn runDetached(self: *Loop, alloc: *mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
+ pub fn runDetached(self: *Loop, alloc: mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
if (!std.io.is_async) @compileError("Can't use runDetached in non-async mode!");
if (@TypeOf(@call(.{}, func, args)) != void) {
@compileError("`func` must not have a return value");
@@ -735,7 +735,7 @@ pub const Loop = struct {
const Wrapper = struct {
const Args = @TypeOf(args);
- fn run(func_args: Args, loop: *Loop, allocator: *mem.Allocator) void {
+ fn run(func_args: Args, loop: *Loop, allocator: mem.Allocator) void {
loop.beginOneEvent();
loop.yield();
@call(.{}, func, func_args); // compile error when called with non-void ret type
lib/std/event/rwlock.zig
@@ -226,7 +226,7 @@ test "std.event.RwLock" {
const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
try testing.expectEqualSlices(i32, expected_result, shared_test_data);
}
-fn testLock(allocator: *Allocator, lock: *RwLock) callconv(.Async) void {
+fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
var read_nodes: [100]Loop.NextTickNode = undefined;
for (read_nodes) |*read_node| {
const frame = allocator.create(@Frame(readRunner)) catch @panic("memory");
lib/std/fs/file.zig
@@ -420,7 +420,7 @@ pub const File = struct {
/// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
- pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 {
+ pub fn readToEndAlloc(self: File, allocator: mem.Allocator, max_bytes: usize) ![]u8 {
return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
}
@@ -432,7 +432,7 @@ pub const File = struct {
/// Allows specifying alignment and a sentinel value.
pub fn readToEndAllocOptions(
self: File,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
max_bytes: usize,
size_hint: ?usize,
comptime alignment: u29,
lib/std/fs/get_app_data_dir.zig
@@ -12,7 +12,7 @@ pub const GetAppDataDirError = error{
/// Caller owns returned memory.
/// TODO determine if we can remove the allocator requirement
-pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
+pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
switch (builtin.os.tag) {
.windows => {
var dir_path_ptr: [*:0]u16 = undefined;
lib/std/fs/path.zig
@@ -35,7 +35,7 @@ pub fn isSep(byte: u8) bool {
/// This is different from mem.join in that the separator will not be repeated if
/// it is found at the end or beginning of a pair of consecutive paths.
-fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
+fn joinSepMaybeZ(allocator: Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
if (paths.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
// Find first non-empty path index.
@@ -99,13 +99,13 @@ fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) boo
/// Naively combines a series of paths with the native path separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn join(allocator: Allocator, paths: []const []const u8) ![]u8 {
return joinSepMaybeZ(allocator, sep, isSep, paths, false);
}
/// Naively combines a series of paths with the native path separator and null terminator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn joinZ(allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
+pub fn joinZ(allocator: Allocator, paths: []const []const u8) ![:0]u8 {
const out = try joinSepMaybeZ(allocator, sep, isSep, paths, true);
return out[0 .. out.len - 1 :0];
}
@@ -445,7 +445,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
}
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
-pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (native_os == .windows) {
return resolveWindows(allocator, paths);
} else {
@@ -461,7 +461,7 @@ pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd
return process.getCwdAlloc(allocator);
@@ -647,7 +647,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
/// If all paths are relative it uses the current working directory as a starting point.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(native_os != .windows); // resolvePosix called on windows can't use getCwd
return process.getCwdAlloc(allocator);
@@ -1058,7 +1058,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) !void {
/// resolve to the same path (after calling `resolve` on each), a zero-length
/// string is returned.
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
-pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relative(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
if (native_os == .windows) {
return relativeWindows(allocator, from, to);
} else {
@@ -1066,7 +1066,7 @@ pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
}
}
-pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolveWindows(allocator, &[_][]const u8{from});
defer allocator.free(resolved_from);
@@ -1139,7 +1139,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
return [_]u8{};
}
-pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolvePosix(allocator, &[_][]const u8{from});
defer allocator.free(resolved_from);
lib/std/fs/test.zig
@@ -52,9 +52,11 @@ test "accessAbsolute" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
+
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
try fs.accessAbsolute(base_path, .{});
@@ -69,9 +71,11 @@ test "openDirAbsolute" {
try tmp.dir.makeDir("subdir");
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
+
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
{
@@ -80,8 +84,8 @@ test "openDirAbsolute" {
}
for ([_][]const u8{ ".", ".." }) |sub_path| {
- const dir_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, sub_path });
- defer arena.allocator.free(dir_path);
+ const dir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, sub_path });
+ defer allocator.free(dir_path);
var dir = try fs.openDirAbsolute(dir_path, .{});
defer dir.close();
}
@@ -107,12 +111,12 @@ test "readLinkAbsolute" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
- const allocator = &arena.allocator;
{
const target_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "file.txt" });
@@ -158,15 +162,16 @@ test "Dir.Iterator" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
- var entries = std.ArrayList(Dir.Entry).init(&arena.allocator);
+ var entries = std.ArrayList(Dir.Entry).init(allocator);
// Create iterator.
var iter = tmp_dir.dir.iterate();
while (try iter.next()) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
- const name = try arena.allocator.dupe(u8, entry.name);
+ const name = try allocator.dupe(u8, entry.name);
try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
}
@@ -202,25 +207,26 @@ test "Dir.realpath smoke test" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
// First, test non-alloc version
{
var buf1: [fs.MAX_PATH_BYTES]u8 = undefined;
const file_path = try tmp_dir.dir.realpath("test_file", buf1[0..]);
- const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+ const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
try testing.expect(mem.eql(u8, file_path, expected_path));
}
// Next, test alloc version
{
- const file_path = try tmp_dir.dir.realpathAlloc(&arena.allocator, "test_file");
- const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+ const file_path = try tmp_dir.dir.realpathAlloc(allocator, "test_file");
+ const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
try testing.expect(mem.eql(u8, file_path, expected_path));
}
@@ -476,11 +482,11 @@ test "renameAbsolute" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
try testing.expectError(error.FileNotFound, fs.renameAbsolute(
@@ -987,11 +993,11 @@ test ". and .. in absolute functions" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
const subdir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "./subdir" });
lib/std/fs/wasi.zig
@@ -80,7 +80,7 @@ pub const PreopenList = struct {
pub const Error = error{ OutOfMemory, Overflow } || os.UnexpectedError;
/// Deinitialize with `deinit`.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{ .buffer = InnerList.init(allocator) };
}
lib/std/fs/watch.zig
@@ -30,7 +30,7 @@ pub fn Watch(comptime V: type) type {
return struct {
channel: event.Channel(Event.Error!Event),
os_data: OsData,
- allocator: *Allocator,
+ allocator: Allocator,
const OsData = switch (builtin.os.tag) {
// TODO https://github.com/ziglang/zig/issues/3778
@@ -96,7 +96,7 @@ pub fn Watch(comptime V: type) type {
pub const Error = WatchEventError;
};
- pub fn init(allocator: *Allocator, event_buf_count: usize) !*Self {
+ pub fn init(allocator: Allocator, event_buf_count: usize) !*Self {
const self = try allocator.create(Self);
errdefer allocator.destroy(self);
@@ -648,7 +648,7 @@ test "write a file, watch it, write it again, delete it" {
return testWriteWatchWriteDelete(std.testing.allocator);
}
-fn testWriteWatchWriteDelete(allocator: *Allocator) !void {
+fn testWriteWatchWriteDelete(allocator: Allocator) !void {
const file_path = try std.fs.path.join(allocator, &[_][]const u8{ test_tmp_dir, "file.txt" });
defer allocator.free(file_path);
lib/std/hash/auto_hash.zig
@@ -309,7 +309,7 @@ test "hash struct deep" {
const Self = @This();
- pub fn init(allocator: *mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
+ pub fn init(allocator: mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
const ptr = try allocator.create(bool);
ptr.* = c_;
return Self{ .a = a_, .b = b_, .c = ptr };
lib/std/hash/benchmark.zig
@@ -165,7 +165,7 @@ pub fn main() !void {
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
- const args = try std.process.argsAlloc(&fixed.allocator);
+ const args = try std.process.argsAlloc(fixed.getAllocator());
var filter: ?[]u8 = "";
var count: usize = mode(128 * MiB);
lib/std/heap/arena_allocator.zig
@@ -6,9 +6,7 @@ const Allocator = std.mem.Allocator;
/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
- allocator: Allocator,
-
- child_allocator: *Allocator,
+ child_allocator: Allocator,
state: State,
/// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
@@ -17,21 +15,21 @@ pub const ArenaAllocator = struct {
buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}),
end_index: usize = 0,
- pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
+ pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator {
return .{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.child_allocator = child_allocator,
.state = self,
};
}
};
+ pub fn getAllocator(self: *ArenaAllocator) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
const BufNode = std.SinglyLinkedList([]u8).Node;
- pub fn init(child_allocator: *Allocator) ArenaAllocator {
+ pub fn init(child_allocator: Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);
}
@@ -49,7 +47,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
- const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress());
+ const buf = try self.child_allocator.allocFn(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
@@ -60,10 +58,9 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+ fn alloc(self: *ArenaAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
- const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
while (true) {
@@ -91,11 +88,10 @@ pub const ArenaAllocator = struct {
}
}
- fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
+ fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
_ = buf_align;
_ = len_align;
_ = ret_addr;
- const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
lib/std/heap/general_purpose_allocator.zig
@@ -172,11 +172,7 @@ pub const Config = struct {
pub fn GeneralPurposeAllocator(comptime config: Config) type {
return struct {
- allocator: Allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
- backing_allocator: *Allocator = std.heap.page_allocator,
+ backing_allocator: Allocator = std.heap.page_allocator,
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
large_allocations: LargeAllocTable = .{},
empty_buckets: if (config.retain_metadata) ?*BucketHeader else void =
@@ -284,6 +280,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
};
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
fn bucketStackTrace(
bucket: *BucketHeader,
size_class: usize,
@@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var it = self.large_allocations.iterator();
while (it.next()) |large| {
if (large.value_ptr.freed) {
- _ = self.backing_allocator.resizeFn(self.backing_allocator, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
+ _ = self.backing_allocator.resizeFn(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
}
}
}
@@ -571,7 +571,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const result_len = if (config.never_unmap and new_size == 0)
0
else
- try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
+ try self.backing_allocator.resizeFn(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
@@ -606,15 +606,13 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
old_mem: []u8,
old_align: u29,
new_size: usize,
len_align: u29,
ret_addr: usize,
) Error!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-
self.mutex.lock();
defer self.mutex.unlock();
@@ -755,9 +753,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return true;
}
- fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-
+ fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
self.mutex.lock();
defer self.mutex.unlock();
@@ -768,7 +764,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
- const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
+ const slice = try self.backing_allocator.allocFn(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
if (config.retain_metadata and !config.never_unmap) {
@@ -834,7 +830,7 @@ const test_config = Config{};
test "small allocations - free in same order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -853,7 +849,7 @@ test "small allocations - free in same order" {
test "small allocations - free in reverse order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -872,7 +868,7 @@ test "small allocations - free in reverse order" {
test "large allocations" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const ptr1 = try allocator.alloc(u64, 42768);
const ptr2 = try allocator.alloc(u64, 52768);
@@ -885,7 +881,7 @@ test "large allocations" {
test "realloc" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
defer allocator.free(slice);
@@ -907,7 +903,7 @@ test "realloc" {
test "shrink" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
@@ -930,7 +926,7 @@ test "shrink" {
test "large object - grow" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
@@ -948,7 +944,7 @@ test "large object - grow" {
test "realloc small object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, 70);
defer allocator.free(slice);
@@ -965,7 +961,7 @@ test "realloc small object to large object" {
test "shrink large object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -988,10 +984,10 @@ test "shrink large object to large object" {
test "shrink large object to large object with larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+ var debug_fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+ const debug_allocator = debug_fba.getAllocator();
const alloc_size = page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
@@ -1023,7 +1019,7 @@ test "shrink large object to large object with larger alignment" {
test "realloc large object to small object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1041,7 +1037,7 @@ test "overrideable mutexes" {
.mutex = std.Thread.Mutex{},
};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@@ -1050,7 +1046,7 @@ test "overrideable mutexes" {
test "non-page-allocator backing allocator" {
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@@ -1059,10 +1055,10 @@ test "non-page-allocator backing allocator" {
test "realloc large object to larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+ var debug_fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+ const debug_allocator = debug_fba.getAllocator();
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1098,9 +1094,9 @@ test "realloc large object to larger alignment" {
test "large object shrinks to small but allocation fails during shrink" {
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
- var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
+ var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.getAllocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1117,7 +1113,7 @@ test "large object shrinks to small but allocation fails during shrink" {
test "objects of size 1024 and 2048" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const slice = try allocator.alloc(u8, 1025);
const slice2 = try allocator.alloc(u8, 3000);
@@ -1129,7 +1125,7 @@ test "objects of size 1024 and 2048" {
test "setting a memory cap" {
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
gpa.setRequestedMemoryLimit(1010);
@@ -1158,9 +1154,9 @@ test "double frees" {
defer std.testing.expect(!backing_gpa.deinit()) catch @panic("leak");
const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
- var gpa = GPA{ .backing_allocator = &backing_gpa.allocator };
+ var gpa = GPA{ .backing_allocator = backing_gpa.getAllocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
// detect a small allocation double free, even though bucket is emptied
const index: usize = 6;
lib/std/heap/log_to_writer_allocator.zig
@@ -5,33 +5,31 @@ const Allocator = std.mem.Allocator;
/// on every call to the allocator. Writer errors are ignored.
pub fn LogToWriterAllocator(comptime Writer: type) type {
return struct {
- allocator: Allocator,
- parent_allocator: *Allocator,
+ parent_allocator: Allocator,
writer: Writer,
const Self = @This();
- pub fn init(parent_allocator: *Allocator, writer: Writer) Self {
+ pub fn init(parent_allocator: Allocator, writer: Writer) Self {
return Self{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.parent_allocator = parent_allocator,
.writer = writer,
};
}
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
fn alloc(
- allocator: *Allocator,
+ self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
ra: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
self.writer.print("alloc : {}", .{len}) catch {};
- const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
+ const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
if (result) |_| {
self.writer.print(" success!\n", .{}) catch {};
} else |_| {
@@ -41,14 +39,13 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
if (new_len == 0) {
self.writer.print("free : {}\n", .{buf.len}) catch {};
} else if (new_len <= buf.len) {
@@ -56,7 +53,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
} else {
self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
- if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+ if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len > buf.len) {
self.writer.print(" success!\n", .{}) catch {};
}
@@ -73,7 +70,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
/// This allocator is used in front of another allocator and logs to the provided writer
/// on every call to the allocator. Writer errors are ignored.
pub fn logToWriterAllocator(
- parent_allocator: *Allocator,
+ parent_allocator: Allocator,
writer: anytype,
) LogToWriterAllocator(@TypeOf(writer)) {
return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer);
@@ -85,7 +82,7 @@ test "LogToWriterAllocator" {
var allocator_buf: [10]u8 = undefined;
var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
- const allocator = &logToWriterAllocator(&fixedBufferAllocator.allocator, fbs.writer()).allocator;
+ var log_to_writer_state = logToWriterAllocator(fixedBufferAllocator.getAllocator(), fbs.writer());
+ const allocator = log_to_writer_state.getAllocator();
var a = try allocator.alloc(u8, 10);
a = allocator.shrink(a, 5);
lib/std/heap/logging_allocator.zig
@@ -22,21 +22,20 @@ pub fn ScopedLoggingAllocator(
const log = std.log.scoped(scope);
return struct {
- allocator: Allocator,
- parent_allocator: *Allocator,
+ parent_allocator: Allocator,
const Self = @This();
- pub fn init(parent_allocator: *Allocator) Self {
+ pub fn init(parent_allocator: Allocator) Self {
return .{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.parent_allocator = parent_allocator,
};
}
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
// This function is required as the `std.log.log` function is not public
inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void {
switch (log_level) {
@@ -48,13 +47,12 @@ pub fn ScopedLoggingAllocator(
}
fn alloc(
- allocator: *Allocator,
+ self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
ra: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
- const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
+ const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
if (result) |_| {
logHelper(
@@ -73,15 +71,13 @@ pub fn ScopedLoggingAllocator(
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-
- if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+ if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len == 0) {
logHelper(success_log_level, "free - success - len: {}", .{buf.len});
@@ -116,6 +112,6 @@ pub fn ScopedLoggingAllocator(
/// This allocator is used in front of another allocator and logs to `std.log`
/// on every call to the allocator.
/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
-pub fn loggingAllocator(parent_allocator: *Allocator) LoggingAllocator(.debug, .err) {
+pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) {
return LoggingAllocator(.debug, .err).init(parent_allocator);
}
lib/std/io/buffered_atomic_file.zig
@@ -7,7 +7,7 @@ pub const BufferedAtomicFile = struct {
atomic_file: fs.AtomicFile,
file_writer: File.Writer,
buffered_writer: BufferedWriter,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
pub const buffer_size = 4096;
pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer);
@@ -16,7 +16,7 @@ pub const BufferedAtomicFile = struct {
/// TODO when https://github.com/ziglang/zig/issues/2761 is solved
/// this API will not need an allocator
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
dir: fs.Dir,
dest_path: []const u8,
atomic_file_options: fs.Dir.AtomicFileOptions,
lib/std/io/peek_stream.zig
@@ -38,7 +38,7 @@ pub fn PeekStream(
}
},
.Dynamic => struct {
- pub fn init(base: ReaderType, allocator: *mem.Allocator) Self {
+ pub fn init(base: ReaderType, allocator: mem.Allocator) Self {
return .{
.unbuffered_reader = base,
.fifo = FifoType.init(allocator),
lib/std/io/reader.zig
@@ -88,7 +88,7 @@ pub fn Reader(
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readAllAlloc(self: Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
+ pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) ![]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
try self.readAllArrayList(&array_list, max_size);
@@ -127,7 +127,7 @@ pub fn Reader(
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(
self: Self,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) ![]u8 {
@@ -163,7 +163,7 @@ pub fn Reader(
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterOrEofAlloc(
self: Self,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) !?[]u8 {
lib/std/json/write_stream.zig
@@ -243,7 +243,7 @@ test "json write stream" {
try w.beginObject();
try w.objectField("object");
- try w.emitJson(try getJsonObject(&arena_allocator.allocator));
+ try w.emitJson(try getJsonObject(arena_allocator.getAllocator()));
try w.objectField("string");
try w.emitString("This is a string");
@@ -286,7 +286,7 @@ test "json write stream" {
try std.testing.expect(std.mem.eql(u8, expected, result));
}
-fn getJsonObject(allocator: *std.mem.Allocator) !std.json.Value {
+fn getJsonObject(allocator: std.mem.Allocator) !std.json.Value {
var value = std.json.Value{ .Object = std.json.ObjectMap.init(allocator) };
try value.Object.put("one", std.json.Value{ .Integer = @intCast(i64, 1) });
try value.Object.put("two", std.json.Value{ .Float = 2.0 });
lib/std/math/big/int.zig
@@ -142,7 +142,7 @@ pub const Mutable = struct {
/// Asserts that the allocator owns the limbs memory. If this is not the case,
/// use `toConst().toManaged()`.
- pub fn toManaged(self: Mutable, allocator: *Allocator) Managed {
+ pub fn toManaged(self: Mutable, allocator: Allocator) Managed {
return .{
.allocator = allocator,
.limbs = self.limbs,
@@ -283,7 +283,7 @@ pub const Mutable = struct {
base: u8,
value: []const u8,
limbs_buffer: []Limb,
- allocator: ?*Allocator,
+ allocator: ?Allocator,
) error{InvalidCharacter}!void {
assert(base >= 2 and base <= 16);
@@ -608,7 +608,7 @@ pub const Mutable = struct {
/// rma is given by `a.limbs.len + b.limbs.len`.
///
/// `limbs_buffer` is used for temporary storage. The amount required is given by `calcMulLimbsBufferLen`.
- pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void {
+ pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?Allocator) void {
var buf_index: usize = 0;
const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: {
@@ -638,7 +638,7 @@ pub const Mutable = struct {
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
- pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?*Allocator) void {
+ pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?Allocator) void {
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
@@ -674,7 +674,7 @@ pub const Mutable = struct {
signedness: Signedness,
bit_count: usize,
limbs_buffer: []Limb,
- allocator: ?*Allocator,
+ allocator: ?Allocator,
) void {
var buf_index: usize = 0;
const req_limbs = calcTwosCompLimbCount(bit_count);
@@ -714,7 +714,7 @@ pub const Mutable = struct {
b: Const,
signedness: Signedness,
bit_count: usize,
- allocator: ?*Allocator,
+ allocator: ?Allocator,
) void {
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
@@ -763,7 +763,7 @@ pub const Mutable = struct {
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
- pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
+ pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?Allocator) void {
_ = opt_allocator;
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
@@ -1660,7 +1660,7 @@ pub const Const = struct {
positive: bool,
/// The result is an independent resource which is managed by the caller.
- pub fn toManaged(self: Const, allocator: *Allocator) Allocator.Error!Managed {
+ pub fn toManaged(self: Const, allocator: Allocator) Allocator.Error!Managed {
const limbs = try allocator.alloc(Limb, math.max(Managed.default_capacity, self.limbs.len));
mem.copy(Limb, limbs, self.limbs);
return Managed{
@@ -1873,7 +1873,7 @@ pub const Const = struct {
/// Caller owns returned memory.
/// Asserts that `base` is in the range [2, 16].
/// See also `toString`, a lower level function than this.
- pub fn toStringAlloc(self: Const, allocator: *Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
+ pub fn toStringAlloc(self: Const, allocator: Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
assert(base >= 2);
assert(base <= 16);
@@ -2092,7 +2092,7 @@ pub const Managed = struct {
pub const default_capacity = 4;
/// Allocator used by the Managed when requesting memory.
- allocator: *Allocator,
+ allocator: Allocator,
/// Raw digits. These are:
///
@@ -2109,7 +2109,7 @@ pub const Managed = struct {
/// Creates a new `Managed`. `default_capacity` limbs will be allocated immediately.
/// The integer value after initializing is `0`.
- pub fn init(allocator: *Allocator) !Managed {
+ pub fn init(allocator: Allocator) !Managed {
return initCapacity(allocator, default_capacity);
}
@@ -2131,7 +2131,7 @@ pub const Managed = struct {
/// Creates a new `Managed` with value `value`.
///
/// This is identical to an `init`, followed by a `set`.
- pub fn initSet(allocator: *Allocator, value: anytype) !Managed {
+ pub fn initSet(allocator: Allocator, value: anytype) !Managed {
var s = try Managed.init(allocator);
try s.set(value);
return s;
@@ -2140,7 +2140,7 @@ pub const Managed = struct {
/// Creates a new Managed with a specific capacity. If capacity < default_capacity then the
/// default capacity will be used instead.
/// The integer value after initializing is `0`.
- pub fn initCapacity(allocator: *Allocator, capacity: usize) !Managed {
+ pub fn initCapacity(allocator: Allocator, capacity: usize) !Managed {
return Managed{
.allocator = allocator,
.metadata = 1,
@@ -2206,7 +2206,7 @@ pub const Managed = struct {
return other.cloneWithDifferentAllocator(other.allocator);
}
- pub fn cloneWithDifferentAllocator(other: Managed, allocator: *Allocator) !Managed {
+ pub fn cloneWithDifferentAllocator(other: Managed, allocator: Allocator) !Managed {
return Managed{
.allocator = allocator,
.metadata = other.metadata,
@@ -2347,7 +2347,7 @@ pub const Managed = struct {
/// Converts self to a string in the requested base. Memory is allocated from the provided
/// allocator and not the one present in self.
- pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 {
+ pub fn toString(self: Managed, allocator: Allocator, base: u8, case: std.fmt.Case) ![]u8 {
- _ = allocator;
if (base < 2 or base > 16) return error.InvalidBase;
- return self.toConst().toStringAlloc(self.allocator, base, case);
+ return self.toConst().toStringAlloc(allocator, base, case);
@@ -2784,7 +2784,7 @@ const AccOp = enum {
/// r MUST NOT alias any of a or b.
///
/// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
-fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
+fn llmulacc(comptime op: AccOp, opt_allocator: ?Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
@setRuntimeSafety(debug_safety);
assert(r.len >= a.len);
assert(r.len >= b.len);
@@ -2819,7 +2819,7 @@ fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []cons
/// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
fn llmulaccKaratsuba(
comptime op: AccOp,
- allocator: *Allocator,
+ allocator: Allocator,
r: []Limb,
a: []const Limb,
b: []const Limb,
lib/std/math/big/rational.zig
@@ -29,7 +29,7 @@ pub const Rational = struct {
/// Create a new Rational. A small amount of memory will be allocated on initialization.
/// This will be 2 * Int.default_capacity.
- pub fn init(a: *Allocator) !Rational {
+ pub fn init(a: Allocator) !Rational {
return Rational{
.p = try Int.init(a),
.q = try Int.initSet(a, 1),
lib/std/mem/Allocator.zig
@@ -8,6 +8,9 @@ const Allocator = @This();
pub const Error = error{OutOfMemory};
+// The type erased pointer to the allocator implementation
+ptr: *c_void,
+
/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
@@ -17,7 +20,7 @@ pub const Error = error{OutOfMemory};
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
-allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
+allocFn: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
@@ -39,24 +42,56 @@ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_a
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
-resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+resizeFn: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+
+pub fn init(
+ pointer: anytype,
+ comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
+ comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+) Allocator {
+ const Ptr = @TypeOf(pointer);
+ assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
+ assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
+ const gen = struct {
+ fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
+ const alignment = @typeInfo(Ptr).Pointer.alignment;
+ const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+ return allocFn(self, len, ptr_align, len_align, ret_addr);
+ }
+ fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+ const alignment = @typeInfo(Ptr).Pointer.alignment;
+ const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+ return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr);
+ }
+ };
-/// Set to resizeFn if in-place resize is not supported.
-pub fn noResize(
- self: *Allocator,
- buf: []u8,
- buf_align: u29,
- new_len: usize,
- len_align: u29,
- ret_addr: usize,
-) Error!usize {
- _ = self;
- _ = buf_align;
- _ = len_align;
- _ = ret_addr;
- if (new_len > buf.len)
- return error.OutOfMemory;
- return new_len;
+ return .{
+ .ptr = pointer,
+ .allocFn = gen.alloc,
+ .resizeFn = gen.resize,
+ };
+}
+
+/// Set resizeFn to `NoResize(AllocatorType).noResize` if in-place resize is not supported.
+pub fn NoResize(comptime AllocatorType: type) type {
+ return struct {
+ pub fn noResize(
+ self: *AllocatorType,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ret_addr: usize,
+ ) Error!usize {
+ _ = self;
+ _ = buf_align;
+ _ = len_align;
+ _ = ret_addr;
+ if (new_len > buf.len)
+ return error.OutOfMemory;
+ return new_len;
+ }
+ };
}
/// Realloc is used to modify the size or alignment of an existing allocation,
@@ -80,8 +115,8 @@ pub fn noResize(
/// as `old_mem` was when `reallocFn` is called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
-pub fn reallocBytes(
- self: *Allocator,
+fn reallocBytes(
+ self: Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
/// `allocFn` or `resizeFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
@@ -106,7 +141,7 @@ pub fn reallocBytes(
return_address: usize,
) Error![]u8 {
if (old_mem.len == 0) {
- const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address);
+ const new_mem = try self.allocFn(self.ptr, new_byte_count, new_alignment, len_align, return_address);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(new_mem.ptr, undefined, new_byte_count);
return new_mem;
@@ -117,7 +152,7 @@ pub fn reallocBytes(
const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
return old_mem.ptr[0..shrunk_len];
}
- if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
+ if (self.resizeFn(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
assert(resized_len >= new_byte_count);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
@@ -133,7 +168,7 @@ pub fn reallocBytes(
/// Move the given memory to a new location in the given allocator to accomodate a new
/// size and alignment.
fn moveBytes(
- self: *Allocator,
+ self: Allocator,
old_mem: []u8,
old_align: u29,
new_len: usize,
@@ -143,7 +178,7 @@ fn moveBytes(
) Error![]u8 {
assert(old_mem.len > 0);
assert(new_len > 0);
- const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address);
+ const new_mem = try self.allocFn(self.ptr, new_len, new_alignment, len_align, return_address);
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
// TODO https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr, undefined, old_mem.len);
@@ -153,7 +188,7 @@ fn moveBytes(
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
-pub fn create(self: *Allocator, comptime T: type) Error!*T {
+pub fn create(self: Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return @as(*T, undefined);
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
return &slice[0];
@@ -161,7 +196,7 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
-pub fn destroy(self: *Allocator, ptr: anytype) void {
+pub fn destroy(self: Allocator, ptr: anytype) void {
const info = @typeInfo(@TypeOf(ptr)).Pointer;
const T = info.child;
if (@sizeOf(T) == 0) return;
@@ -177,12 +212,12 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
/// call `free` when done.
///
/// For allocating a single item, see `create`.
-pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
+pub fn alloc(self: Allocator, comptime T: type, n: usize) Error![]T {
return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
}
pub fn allocWithOptions(
- self: *Allocator,
+ self: Allocator,
comptime Elem: type,
n: usize,
/// null means naturally aligned
@@ -193,7 +228,7 @@ pub fn allocWithOptions(
}
pub fn allocWithOptionsRetAddr(
- self: *Allocator,
+ self: Allocator,
comptime Elem: type,
n: usize,
/// null means naturally aligned
@@ -227,7 +262,7 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti
///
/// For allocating a single item, see `create`.
pub fn allocSentinel(
- self: *Allocator,
+ self: Allocator,
comptime Elem: type,
n: usize,
comptime sentinel: Elem,
@@ -236,7 +271,7 @@ pub fn allocSentinel(
}
pub fn alignedAlloc(
- self: *Allocator,
+ self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
@@ -246,7 +281,7 @@ pub fn alignedAlloc(
}
pub fn allocAdvanced(
- self: *Allocator,
+ self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
@@ -259,7 +294,7 @@ pub fn allocAdvanced(
pub const Exact = enum { exact, at_least };
pub fn allocAdvancedWithRetAddr(
- self: *Allocator,
+ self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
@@ -285,7 +320,7 @@ pub fn allocAdvancedWithRetAddr(
.exact => 0,
.at_least => size_of_T,
};
- const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address);
+ const byte_slice = try self.allocFn(self.ptr, byte_count, a, len_align, return_address);
switch (exact) {
.exact => assert(byte_slice.len == byte_count),
.at_least => assert(byte_slice.len >= byte_count),
@@ -301,7 +336,7 @@ pub fn allocAdvancedWithRetAddr(
}
/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
-pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
+pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == 0) {
@@ -310,7 +345,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
- const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
+ const rc = try self.resizeFn(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
assert(rc == new_byte_count);
const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
@@ -326,7 +361,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
/// in `std.ArrayList.shrink`.
/// If you need guaranteed success, call `shrink`.
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
-pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
@@ -334,7 +369,7 @@ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
}
-pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn reallocAtLeast(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
@@ -346,7 +381,7 @@ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
pub fn reallocAdvanced(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -356,7 +391,7 @@ pub fn reallocAdvanced(
}
pub fn reallocAdvancedWithRetAddr(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -389,7 +424,7 @@ pub fn reallocAdvancedWithRetAddr(
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
/// Returned slice has same alignment as old_mem.
/// Shrinking to 0 is the same as calling `free`.
-pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn shrink(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t []align(Slice.alignment) Slice.child;
} {
@@ -401,7 +436,7 @@ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
/// a new alignment, which must be smaller or the same as the old
/// allocation.
pub fn alignedShrink(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -413,7 +448,7 @@ pub fn alignedShrink(
/// the return address of the first stack frame, which may be relevant for
/// allocators which collect stack traces.
pub fn alignedShrinkWithRetAddr(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -440,7 +475,7 @@ pub fn alignedShrinkWithRetAddr(
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
-pub fn free(self: *Allocator, memory: anytype) void {
+pub fn free(self: Allocator, memory: anytype) void {
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
const bytes = mem.sliceAsBytes(memory);
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
@@ -452,14 +487,14 @@ pub fn free(self: *Allocator, memory: anytype) void {
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
-pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
+pub fn dupe(allocator: Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
mem.copy(T, new_buf, m);
return new_buf;
}
/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
-pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
+pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
const new_buf = try allocator.alloc(T, m.len + 1);
mem.copy(T, new_buf, m);
new_buf[m.len] = 0;
@@ -471,7 +506,7 @@ pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
/// This function allows a runtime `buf_align` value. Callers should generally prefer
/// to call `shrink` directly.
pub fn shrinkBytes(
- self: *Allocator,
+ self: Allocator,
buf: []u8,
buf_align: u29,
new_len: usize,
@@ -479,5 +514,5 @@ pub fn shrinkBytes(
return_address: usize,
) usize {
assert(new_len <= buf.len);
- return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable;
+ return self.resizeFn(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
}
lib/std/net/test.zig
@@ -230,7 +230,7 @@ test "listen on ipv4 try connect on ipv6 then ipv4" {
try await client_frame;
}
-fn testClientToHost(allocator: *mem.Allocator, name: []const u8, port: u16) anyerror!void {
+fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const connection = try net.tcpConnectToHost(allocator, name, port);
lib/std/os/test.zig
@@ -58,10 +58,11 @@ test "open smoke test" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
var file_path: []u8 = undefined;
@@ -69,34 +70,34 @@ test "open smoke test" {
const mode: os.mode_t = if (native_os == .windows) 0 else 0o666;
// Create some file using `open`.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
fd = try os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode);
os.close(fd);
// Try this again with the same flags. This op should fail with error.PathAlreadyExists.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
try expectError(error.PathAlreadyExists, os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode));
// Try opening without `O.EXCL` flag.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
fd = try os.open(file_path, os.O.RDWR | os.O.CREAT, mode);
os.close(fd);
// Try opening as a directory which should fail.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
try expectError(error.NotDir, os.open(file_path, os.O.RDWR | os.O.DIRECTORY, mode));
// Create some directory
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
try os.mkdir(file_path, mode);
// Open dir using `open`
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
fd = try os.open(file_path, os.O.RDONLY | os.O.DIRECTORY, mode);
os.close(fd);
// Try opening as file which should fail.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
try expectError(error.IsDir, os.open(file_path, os.O.RDWR, mode));
}
lib/std/special/build_runner.zig
@@ -16,7 +16,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
var args = try process.argsAlloc(allocator);
defer process.argsFree(allocator, args);
lib/std/special/test_runner.zig
@@ -10,7 +10,7 @@ var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined;
var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer);
fn processArgs() void {
- const args = std.process.argsAlloc(&args_allocator.allocator) catch {
+ const args = std.process.argsAlloc(args_allocator.getAllocator()) catch {
@panic("Too many bytes passed over the CLI to the test runner");
};
if (args.len != 2) {
lib/std/testing/failing_allocator.zig
@@ -12,10 +12,9 @@ const mem = std.mem;
/// Then use `failing_allocator` anywhere you would have used a
/// different allocator.
pub const FailingAllocator = struct {
- allocator: mem.Allocator,
index: usize,
fail_index: usize,
- internal_allocator: *mem.Allocator,
+ internal_allocator: mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
allocations: usize,
@@ -29,7 +28,7 @@ pub const FailingAllocator = struct {
/// var a = try failing_alloc.create(i32);
/// var b = try failing_alloc.create(i32);
/// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
- pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
+ pub fn init(allocator: mem.Allocator, fail_index: usize) FailingAllocator {
return FailingAllocator{
.internal_allocator = allocator,
.fail_index = fail_index,
@@ -38,25 +37,24 @@ pub const FailingAllocator = struct {
.freed_bytes = 0,
.allocations = 0,
.deallocations = 0,
- .allocator = mem.Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
};
}
+ pub fn getAllocator(self: *FailingAllocator) mem.Allocator {
+ return mem.Allocator.init(self, alloc, resize);
+ }
+
fn alloc(
- allocator: *std.mem.Allocator,
+ self: *FailingAllocator,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
- const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address);
+ const result = try self.internal_allocator.allocFn(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
@@ -64,15 +62,14 @@ pub const FailingAllocator = struct {
}
fn resize(
- allocator: *std.mem.Allocator,
+ self: *FailingAllocator,
old_mem: []u8,
old_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
- const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| {
+ const r = self.internal_allocator.resizeFn(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
lib/std/zig/system/darwin.zig
@@ -11,7 +11,7 @@ pub const macos = @import("darwin/macos.zig");
/// Therefore, we resort to the same tool used by Homebrew, namely, invoking `xcode-select --print-path`
/// and checking if the status is nonzero or the returned string in nonempty.
/// https://github.com/Homebrew/brew/blob/e119bdc571dcb000305411bc1e26678b132afb98/Library/Homebrew/brew.sh#L630
-pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
+pub fn isDarwinSDKInstalled(allocator: Allocator) bool {
const argv = &[_][]const u8{ "/usr/bin/xcode-select", "--print-path" };
const result = std.ChildProcess.exec(.{ .allocator = allocator, .argv = argv }) catch return false;
defer {
@@ -29,7 +29,7 @@ pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
/// Calls `xcrun --sdk <target_sdk> --show-sdk-path` which fetches the path to the SDK sysroot (if any).
/// Subsequently calls `xcrun --sdk <target_sdk> --show-sdk-version` which fetches version of the SDK.
/// The caller needs to deinit the resulting struct.
-pub fn getDarwinSDK(allocator: *Allocator, target: Target) ?DarwinSDK {
+pub fn getDarwinSDK(allocator: Allocator, target: Target) ?DarwinSDK {
const is_simulator_abi = target.abi == .simulator;
const sdk = switch (target.os.tag) {
.macos => "macosx",
@@ -82,7 +82,7 @@ pub const DarwinSDK = struct {
path: []const u8,
version: Version,
- pub fn deinit(self: DarwinSDK, allocator: *Allocator) void {
+ pub fn deinit(self: DarwinSDK, allocator: Allocator) void {
allocator.free(self.path);
}
};
lib/std/zig/Ast.zig
@@ -34,7 +34,7 @@ pub const Location = struct {
line_end: usize,
};
-pub fn deinit(tree: *Tree, gpa: *mem.Allocator) void {
+pub fn deinit(tree: *Tree, gpa: mem.Allocator) void {
tree.tokens.deinit(gpa);
tree.nodes.deinit(gpa);
gpa.free(tree.extra_data);
@@ -52,7 +52,7 @@ pub const RenderError = error{
/// for allocating extra stack memory if needed, because this function utilizes recursion.
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
-pub fn render(tree: Tree, gpa: *mem.Allocator) RenderError![]u8 {
+pub fn render(tree: Tree, gpa: mem.Allocator) RenderError![]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
lib/std/zig/CrossTarget.zig
@@ -520,7 +520,7 @@ pub fn isNative(self: CrossTarget) bool {
return self.isNativeCpu() and self.isNativeOs() and self.isNativeAbi();
}
-pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory}![]u8 {
+pub fn zigTriple(self: CrossTarget, allocator: mem.Allocator) error{OutOfMemory}![]u8 {
if (self.isNative()) {
return allocator.dupe(u8, "native");
}
@@ -559,13 +559,13 @@ pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory
return result.toOwnedSlice();
}
-pub fn allocDescription(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
+pub fn allocDescription(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
// TODO is there anything else worthy of the description that is not
// already captured in the triple?
return self.zigTriple(allocator);
}
-pub fn linuxTriple(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
+pub fn linuxTriple(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
return Target.linuxTripleSimple(allocator, self.getCpuArch(), self.getOsTag(), self.getAbi());
}
@@ -576,7 +576,7 @@ pub fn wantSharedLibSymLinks(self: CrossTarget) bool {
pub const VcpkgLinkage = std.builtin.LinkMode;
/// Returned slice must be freed by the caller.
-pub fn vcpkgTriplet(self: CrossTarget, allocator: *mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
+pub fn vcpkgTriplet(self: CrossTarget, allocator: mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
const arch = switch (self.getCpuArch()) {
.i386 => "x86",
.x86_64 => "x64",
lib/std/zig/parse.zig
@@ -11,7 +11,7 @@ pub const Error = error{ParseError} || Allocator.Error;
/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
-pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
+pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
var tokens = Ast.TokenList{};
defer tokens.deinit(gpa);
@@ -81,7 +81,7 @@ const null_node: Node.Index = 0;
/// Represents in-progress parsing, will be converted to an Ast after completion.
const Parser = struct {
- gpa: *Allocator,
+ gpa: Allocator,
source: []const u8,
token_tags: []const Token.Tag,
token_starts: []const Ast.ByteOffset,
lib/std/zig/parser_test.zig
@@ -1220,7 +1220,7 @@ test "zig fmt: doc comments on param decl" {
try testCanonical(
\\pub const Allocator = struct {
\\ shrinkFn: fn (
- \\ self: *Allocator,
+ \\ self: Allocator,
\\ /// Guaranteed to be the same as what was returned from most recent call to
\\ /// `allocFn`, `reallocFn`, or `shrinkFn`.
\\ old_mem: []u8,
@@ -4250,7 +4250,7 @@ test "zig fmt: Only indent multiline string literals in function calls" {
test "zig fmt: Don't add extra newline after if" {
try testCanonical(
- \\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+ \\pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
\\ if (cwd().symLink(existing_path, new_path, .{})) {
\\ return;
\\ }
@@ -5319,7 +5319,7 @@ const maxInt = std.math.maxInt;
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn testParse(source: [:0]const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
+fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
const stderr = io.getStdErr().writer();
var tree = try std.zig.parse(allocator, source);
@@ -5351,9 +5351,10 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
const needed_alloc_count = x: {
// Try it once with unlimited memory, make sure it works
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
+ var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), maxInt(usize));
+ const allocator = failing_allocator.getAllocator();
var anything_changed: bool = undefined;
- const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
+ const result_source = try testParse(source, allocator, &anything_changed);
try std.testing.expectEqualStrings(expected_source, result_source);
const changes_expected = source.ptr != expected_source.ptr;
if (anything_changed != changes_expected) {
@@ -5361,16 +5362,16 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
return error.TestFailed;
}
try std.testing.expect(anything_changed == changes_expected);
- failing_allocator.allocator.free(result_source);
+ allocator.free(result_source);
break :x failing_allocator.index;
};
var fail_index: usize = 0;
while (fail_index < needed_alloc_count) : (fail_index += 1) {
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
+ var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), fail_index);
var anything_changed: bool = undefined;
- if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
+ if (testParse(source, failing_allocator.getAllocator(), &anything_changed)) |_| {
return error.NondeterministicMemoryUsage;
} else |err| switch (err) {
error.OutOfMemory => {
lib/std/zig/perf_test.zig
@@ -33,7 +33,7 @@ pub fn main() !void {
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var allocator = &fixed_buf_alloc.allocator;
+ var allocator = fixed_buf_alloc.getAllocator();
_ = std.zig.parse(allocator, source) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
lib/std/zig/render.zig
@@ -37,7 +37,7 @@ pub fn renderTree(buffer: *std.ArrayList(u8), tree: Ast) Error!void {
}
/// Render all members in the given slice, keeping empty lines where appropriate
-fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
+fn renderMembers(gpa: Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
if (members.len == 0) return;
try renderMember(gpa, ais, tree, members[0], .newline);
for (members[1..]) |member| {
@@ -46,7 +46,7 @@ fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Nod
}
}
-fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
+fn renderMember(gpa: Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
const datas = tree.nodes.items(.data);
@@ -168,7 +168,7 @@ fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, spa
}
/// Render all expressions in the slice, keeping empty lines where appropriate
-fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
+fn renderExpressions(gpa: Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
if (expressions.len == 0) return;
try renderExpression(gpa, ais, tree, expressions[0], space);
for (expressions[1..]) |expression| {
@@ -177,7 +177,7 @@ fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const
}
}
-fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpression(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
const node_tags = tree.nodes.items(.tag);
@@ -710,7 +710,7 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
}
fn renderArrayType(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
array_type: Ast.full.ArrayType,
@@ -732,7 +732,7 @@ fn renderArrayType(
}
fn renderPtrType(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
ptr_type: Ast.full.PtrType,
@@ -825,7 +825,7 @@ fn renderPtrType(
}
fn renderSlice(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
slice_node: Ast.Node.Index,
@@ -861,7 +861,7 @@ fn renderSlice(
}
fn renderAsmOutput(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_output: Ast.Node.Index,
@@ -891,7 +891,7 @@ fn renderAsmOutput(
}
fn renderAsmInput(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_input: Ast.Node.Index,
@@ -912,7 +912,7 @@ fn renderAsmInput(
return renderToken(ais, tree, datas[asm_input].rhs, space); // rparen
}
-fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
+fn renderVarDecl(gpa: Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
if (var_decl.visib_token) |visib_token| {
try renderToken(ais, tree, visib_token, Space.space); // pub
}
@@ -1019,7 +1019,7 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
return renderToken(ais, tree, var_decl.ast.mut_token + 2, .newline); // ;
}
-fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
+fn renderIf(gpa: Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
return renderWhile(gpa, ais, tree, .{
.ast = .{
.while_token = if_node.ast.if_token,
@@ -1038,7 +1038,7 @@ fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space:
/// Note that this function is additionally used to render if and for expressions, with
/// respective values set to null.
-fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
+fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
const node_tags = tree.nodes.items(.tag);
const token_tags = tree.tokens.items(.tag);
@@ -1141,7 +1141,7 @@ fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While
}
fn renderContainerField(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
field: Ast.full.ContainerField,
@@ -1215,7 +1215,7 @@ fn renderContainerField(
}
fn renderBuiltinCall(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
builtin_token: Ast.TokenIndex,
@@ -1272,7 +1272,7 @@ fn renderBuiltinCall(
}
}
-fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
+fn renderFnProto(gpa: Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const token_starts = tree.tokens.items(.start);
@@ -1488,7 +1488,7 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
}
fn renderSwitchCase(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
switch_case: Ast.full.SwitchCase,
@@ -1541,7 +1541,7 @@ fn renderSwitchCase(
}
fn renderBlock(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
block_node: Ast.Node.Index,
@@ -1581,7 +1581,7 @@ fn renderBlock(
}
fn renderStructInit(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
struct_node: Ast.Node.Index,
@@ -1640,7 +1640,7 @@ fn renderStructInit(
}
fn renderArrayInit(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
array_init: Ast.full.ArrayInit,
@@ -1859,7 +1859,7 @@ fn renderArrayInit(
}
fn renderContainerDecl(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
container_decl_node: Ast.Node.Index,
@@ -1956,7 +1956,7 @@ fn renderContainerDecl(
}
fn renderAsm(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_node: Ast.full.Asm,
@@ -2105,7 +2105,7 @@ fn renderAsm(
}
fn renderCall(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
call: Ast.full.Call,
@@ -2180,7 +2180,7 @@ fn renderCall(
/// Renders the given expression indented, popping the indent before rendering
/// any following line comments
-fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionIndented(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_starts = tree.tokens.items(.start);
const token_tags = tree.tokens.items(.tag);
@@ -2238,7 +2238,7 @@ fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Nod
/// Render an expression, and the comma that follows it, if it is present in the source.
/// If a comma is present, and `space` is `Space.comma`, render only a single comma.
-fn renderExpressionComma(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionComma(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const maybe_comma = tree.lastToken(node) + 1;
if (token_tags[maybe_comma] == .comma and space != .comma) {
lib/std/zig/string_literal.zig
@@ -131,7 +131,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
/// Higher level API. Does not return extra info about parse errors.
/// Caller owns returned memory.
-pub fn parseAlloc(allocator: *std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
+pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
@@ -147,7 +147,7 @@ test "parse" {
var fixed_buf_mem: [32]u8 = undefined;
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
- var alloc = &fixed_buf_alloc.allocator;
+ var alloc = fixed_buf_alloc.getAllocator();
try expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\"")));
try expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\"")));
lib/std/zig/system.zig
@@ -21,7 +21,7 @@ pub const NativePaths = struct {
rpaths: ArrayList([:0]u8),
warnings: ArrayList([:0]u8),
- pub fn detect(allocator: *Allocator, native_info: NativeTargetInfo) !NativePaths {
+ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths {
const native_target = native_info.target;
var self: NativePaths = .{
@@ -237,7 +237,7 @@ pub const NativeTargetInfo = struct {
/// Any resources this function allocates are released before returning, and so there is no
/// deinitialization method.
/// TODO Remove the Allocator requirement from this function.
- pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
+ pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
var os = cross_target.getOsTag().defaultVersionRange(cross_target.getCpuArch());
if (cross_target.os_tag == null) {
switch (builtin.target.os.tag) {
@@ -441,7 +441,7 @@ pub const NativeTargetInfo = struct {
/// we fall back to the defaults.
/// TODO Remove the Allocator requirement from this function.
fn detectAbiAndDynamicLinker(
- allocator: *Allocator,
+ allocator: Allocator,
cpu: Target.Cpu,
os: Target.Os,
cross_target: CrossTarget,
lib/std/array_hash_map.zig
@@ -79,7 +79,7 @@ pub fn ArrayHashMap(
comptime std.hash_map.verifyContext(Context, K, K, u32);
return struct {
unmanaged: Unmanaged,
- allocator: *Allocator,
+ allocator: Allocator,
ctx: Context,
/// The ArrayHashMapUnmanaged type using the same settings as this managed map.
@@ -118,12 +118,12 @@ pub fn ArrayHashMap(
const Self = @This();
/// Create an ArrayHashMap instance which will use a specified allocator.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead.");
return initContext(allocator, undefined);
}
- pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+ pub fn initContext(allocator: Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.allocator = allocator,
@@ -383,7 +383,7 @@ pub fn ArrayHashMap(
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context as this instance, but the specified
/// allocator.
- pub fn cloneWithAllocator(self: Self, allocator: *Allocator) !Self {
+ pub fn cloneWithAllocator(self: Self, allocator: Allocator) !Self {
var other = try self.unmanaged.cloneContext(allocator, self.ctx);
return other.promoteContext(allocator, self.ctx);
}
@@ -396,7 +396,7 @@ pub fn ArrayHashMap(
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the specified allocator and context.
- pub fn cloneWithAllocatorAndContext(self: Self, allocator: *Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
+ pub fn cloneWithAllocatorAndContext(self: Self, allocator: Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
var other = try self.unmanaged.cloneContext(allocator, ctx);
return other.promoteContext(allocator, ctx);
}
@@ -533,12 +533,12 @@ pub fn ArrayHashMapUnmanaged(
/// Convert from an unmanaged map to a managed map. After calling this,
/// the promoted map should no longer be used.
- pub fn promote(self: Self, allocator: *Allocator) Managed {
+ pub fn promote(self: Self, allocator: Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
return self.promoteContext(allocator, undefined);
}
- pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+ pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = allocator,
@@ -549,7 +549,7 @@ pub fn ArrayHashMapUnmanaged(
/// Frees the backing allocation and leaves the map in an undefined state.
/// Note that this does not free keys or values. You must take care of that
/// before calling this function, if it is needed.
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
self.entries.deinit(allocator);
if (self.index_header) |header| {
header.free(allocator);
@@ -570,7 +570,7 @@ pub fn ArrayHashMapUnmanaged(
}
/// Clears the map and releases the backing allocation
- pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ pub fn clearAndFree(self: *Self, allocator: Allocator) void {
self.entries.shrinkAndFree(allocator, 0);
if (self.index_header) |header| {
header.free(allocator);
@@ -633,24 +633,24 @@ pub fn ArrayHashMapUnmanaged(
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointer points to it. Caller should then initialize
/// the value (but not the key).
- pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+ pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
return self.getOrPutContext(allocator, key, undefined);
}
- pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
- pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+ pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
- pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
self.ensureTotalCapacityContext(allocator, self.entries.len + 1, ctx) catch |err| {
// "If key exists this function cannot fail."
const index = self.getIndexAdapted(key, key_ctx) orelse return err;
@@ -731,12 +731,12 @@ pub fn ArrayHashMapUnmanaged(
}
}
- pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult {
+ pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
return self.getOrPutValueContext(allocator, key, value, undefined);
}
- pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!res.found_existing) {
res.key_ptr.* = key;
@@ -749,12 +749,12 @@ pub fn ArrayHashMapUnmanaged(
/// Increases capacity, guaranteeing that insertions up until the
/// `expected_count` will not cause an allocation, and therefore cannot fail.
- pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
return self.ensureTotalCapacityContext(allocator, new_capacity, undefined);
}
- pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
+ pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_capacity: usize, ctx: Context) !void {
if (new_capacity <= linear_scan_max) {
try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
@@ -781,7 +781,7 @@ pub fn ArrayHashMapUnmanaged(
/// therefore cannot fail.
pub fn ensureUnusedCapacity(
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
additional_capacity: usize,
) !void {
if (@sizeOf(ByIndexContext) != 0)
@@ -790,7 +790,7 @@ pub fn ArrayHashMapUnmanaged(
}
pub fn ensureUnusedCapacityContext(
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
additional_capacity: usize,
ctx: Context,
) !void {
@@ -808,24 +808,24 @@ pub fn ArrayHashMapUnmanaged(
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPut`.
- pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
return self.putContext(allocator, key, value, undefined);
}
- pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
result.value_ptr.* = value;
}
/// Inserts a key-value pair into the hash map, asserting that no previous
/// entry with the same key is already present
- pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
return self.putNoClobberContext(allocator, key, value, undefined);
}
- pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
assert(!result.found_existing);
result.value_ptr.* = value;
@@ -859,12 +859,12 @@ pub fn ArrayHashMapUnmanaged(
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
- pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+ pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
return self.fetchPutContext(allocator, key, value, undefined);
}
- pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+ pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
const gop = try self.getOrPutContext(allocator, key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
@@ -1132,12 +1132,12 @@ pub fn ArrayHashMapUnmanaged(
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context and allocator as this instance.
- pub fn clone(self: Self, allocator: *Allocator) !Self {
+ pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
return self.cloneContext(allocator, undefined);
}
- pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self {
+ pub fn cloneContext(self: Self, allocator: Allocator, ctx: Context) !Self {
var other: Self = .{};
other.entries = try self.entries.clone(allocator);
errdefer other.entries.deinit(allocator);
@@ -1152,12 +1152,12 @@ pub fn ArrayHashMapUnmanaged(
/// Rebuilds the key indexes. If the underlying entries has been modified directly, users
/// can call `reIndex` to update the indexes to account for these new entries.
- pub fn reIndex(self: *Self, allocator: *Allocator) !void {
+ pub fn reIndex(self: *Self, allocator: Allocator) !void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead.");
return self.reIndexContext(allocator, undefined);
}
- pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void {
+ pub fn reIndexContext(self: *Self, allocator: Allocator, ctx: Context) !void {
if (self.entries.capacity <= linear_scan_max) return;
// We're going to rebuild the index header and replace the existing one (if any). The
// indexes should sized such that they will be at most 60% full.
@@ -1189,12 +1189,12 @@ pub fn ArrayHashMapUnmanaged(
/// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
/// index entries. Reduces allocated capacity.
- pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+ pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead.");
return self.shrinkAndFreeContext(allocator, new_len, undefined);
}
- pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void {
+ pub fn shrinkAndFreeContext(self: *Self, allocator: Allocator, new_len: usize, ctx: Context) void {
// Remove index entries from the new length onwards.
// Explicitly choose to ONLY remove index entries and not the underlying array list
// entries as we're going to remove them in the subsequent shrink call.
@@ -1844,7 +1844,7 @@ const IndexHeader = struct {
/// Allocates an index header, and fills the entryIndexes array with empty.
/// The distance array contents are undefined.
- fn alloc(allocator: *Allocator, new_bit_index: u8) !*IndexHeader {
+ fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader {
const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
const index_size = hash_map.capacityIndexSize(new_bit_index);
const nbytes = @sizeOf(IndexHeader) + index_size * len;
@@ -1858,7 +1858,7 @@ const IndexHeader = struct {
}
/// Releases the memory for a header and its associated arrays.
- fn free(header: *IndexHeader, allocator: *Allocator) void {
+ fn free(header: *IndexHeader, allocator: Allocator) void {
const index_size = hash_map.capacityIndexSize(header.bit_index);
const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];
lib/std/array_list.zig
@@ -42,12 +42,12 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize,
- allocator: *Allocator,
+ allocator: Allocator,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.capacity = 0,
@@ -58,7 +58,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Initialize with capacity to hold at least `num` elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+ pub fn initCapacity(allocator: Allocator, num: usize) !Self {
var self = Self.init(allocator);
try self.ensureTotalCapacityPrecise(num);
return self;
@@ -74,7 +74,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self {
return Self{
.items = slice,
.capacity = slice.len,
@@ -457,33 +457,33 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Initialize with capacity to hold at least num elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+ pub fn initCapacity(allocator: Allocator, num: usize) !Self {
var self = Self{};
try self.ensureTotalCapacityPrecise(allocator, num);
return self;
}
/// Release all allocated memory.
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.* = undefined;
}
/// Convert this list into an analogous memory-managed one.
/// The returned list has ownership of the underlying memory.
- pub fn toManaged(self: *Self, allocator: *Allocator) ArrayListAligned(T, alignment) {
+ pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
}
/// The caller owns the returned memory. ArrayList becomes empty.
- pub fn toOwnedSlice(self: *Self, allocator: *Allocator) Slice {
+ pub fn toOwnedSlice(self: *Self, allocator: Allocator) Slice {
const result = allocator.shrink(self.allocatedSlice(), self.items.len);
self.* = Self{};
return result;
}
/// The caller owns the returned memory. ArrayList becomes empty.
- pub fn toOwnedSliceSentinel(self: *Self, allocator: *Allocator, comptime sentinel: T) ![:sentinel]T {
+ pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) ![:sentinel]T {
try self.append(allocator, sentinel);
const result = self.toOwnedSlice(allocator);
return result[0 .. result.len - 1 :sentinel];
@@ -492,7 +492,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Insert `item` at index `n`. Moves `list[n .. list.len]`
/// to higher indices to make room.
/// This operation is O(N).
- pub fn insert(self: *Self, allocator: *Allocator, n: usize, item: T) !void {
+ pub fn insert(self: *Self, allocator: Allocator, n: usize, item: T) !void {
try self.ensureUnusedCapacity(allocator, 1);
self.items.len += 1;
@@ -503,7 +503,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to
/// higher indicices make room.
/// This operation is O(N).
- pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: []const T) !void {
+ pub fn insertSlice(self: *Self, allocator: Allocator, i: usize, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.items.len += items.len;
@@ -515,14 +515,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Grows list if `len < new_items.len`.
/// Shrinks list if `len > new_items.len`
/// Invalidates pointers if this ArrayList is resized.
- pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: []const T) !void {
+ pub fn replaceRange(self: *Self, allocator: Allocator, start: usize, len: usize, new_items: []const T) !void {
var managed = self.toManaged(allocator);
try managed.replaceRange(start, len, new_items);
self.* = managed.moveToUnmanaged();
}
/// Extend the list by 1 element. Allocates more memory as necessary.
- pub fn append(self: *Self, allocator: *Allocator, item: T) !void {
+ pub fn append(self: *Self, allocator: Allocator, item: T) !void {
const new_item_ptr = try self.addOne(allocator);
new_item_ptr.* = item;
}
@@ -563,7 +563,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
- pub fn appendSlice(self: *Self, allocator: *Allocator, items: []const T) !void {
+ pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendSliceAssumeCapacity(items);
}
@@ -580,7 +580,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
pub const WriterContext = struct {
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
};
pub const Writer = if (T != u8)
@@ -590,7 +590,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
std.io.Writer(WriterContext, error{OutOfMemory}, appendWrite);
/// Initializes a Writer which will append to the list.
- pub fn writer(self: *Self, allocator: *Allocator) Writer {
+ pub fn writer(self: *Self, allocator: Allocator) Writer {
return .{ .context = .{ .self = self, .allocator = allocator } };
}
@@ -603,7 +603,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
- pub fn appendNTimes(self: *Self, allocator: *Allocator, value: T, n: usize) !void {
+ pub fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) !void {
const old_len = self.items.len;
try self.resize(allocator, self.items.len + n);
mem.set(T, self.items[old_len..self.items.len], value);
@@ -621,13 +621,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
- pub fn resize(self: *Self, allocator: *Allocator, new_len: usize) !void {
+ pub fn resize(self: *Self, allocator: Allocator, new_len: usize) !void {
try self.ensureTotalCapacity(allocator, new_len);
self.items.len = new_len;
}
/// Reduce allocated capacity to `new_len`.
- pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+ pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
assert(new_len <= self.items.len);
self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {
@@ -653,7 +653,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}
/// Invalidates all element pointers.
- pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ pub fn clearAndFree(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.items.len = 0;
self.capacity = 0;
@@ -663,7 +663,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Modify the array so that it can hold at least `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
@@ -679,7 +679,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Like `ensureTotalCapacity`, but the resulting capacity is much more likely
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureTotalCapacityPrecise(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) !void {
if (self.capacity >= new_capacity) return;
const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);
@@ -691,7 +691,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Invalidates pointers if additional memory is needed.
pub fn ensureUnusedCapacity(
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
additional_count: usize,
) !void {
return self.ensureTotalCapacity(allocator, self.items.len + additional_count);
@@ -706,7 +706,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list resized.
- pub fn addOne(self: *Self, allocator: *Allocator) !*T {
+ pub fn addOne(self: *Self, allocator: Allocator) !*T {
const newlen = self.items.len + 1;
try self.ensureTotalCapacity(allocator, newlen);
return self.addOneAssumeCapacity();
@@ -726,7 +726,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
- pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T {
+ pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) !*[n]T {
const prev_len = self.items.len;
try self.resize(allocator, self.items.len + n);
return self.items[prev_len..][0..n];
@@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const a = &arena.allocator;
+ const a = arena.getAllocator();
const init = [_]i32{ 1, 2, 3, 4, 5 };
const new = [_]i32{ 0, 0, 0 };
@@ -1263,7 +1263,7 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe
// use an arena allocator to make sure realloc returns error.OutOfMemory
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const a = &arena.allocator;
+ const a = arena.getAllocator();
{
var list = ArrayList(i32).init(a);
@@ -1361,7 +1361,7 @@ test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" {
test "std.ArrayList(u0)" {
// An ArrayList on zero-sized types should not need to allocate
- const a = &testing.FailingAllocator.init(testing.allocator, 0).allocator;
+ const a = testing.FailingAllocator.init(testing.allocator, 0).getAllocator();
var list = ArrayList(u0).init(a);
defer list.deinit();
lib/std/ascii.zig
@@ -301,7 +301,7 @@ test "lowerString" {
/// Allocates a lower case copy of `ascii_string`.
/// Caller owns returned string and must free with `allocator`.
-pub fn allocLowerString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocLowerString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
const result = try allocator.alloc(u8, ascii_string.len);
return lowerString(result, ascii_string);
}
@@ -330,7 +330,7 @@ test "upperString" {
/// Allocates an upper case copy of `ascii_string`.
/// Caller owns returned string and must free with `allocator`.
-pub fn allocUpperString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocUpperString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
const result = try allocator.alloc(u8, ascii_string.len);
return upperString(result, ascii_string);
}
lib/std/bit_set.zig
@@ -476,7 +476,7 @@ pub const DynamicBitSetUnmanaged = struct {
/// Creates a bit set with no elements present.
/// If bit_length is not zero, deinit must eventually be called.
- pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
var self = Self{};
try self.resize(bit_length, false, allocator);
return self;
@@ -484,7 +484,7 @@ pub const DynamicBitSetUnmanaged = struct {
/// Creates a bit set with all elements present.
/// If bit_length is not zero, deinit must eventually be called.
- pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
var self = Self{};
try self.resize(bit_length, true, allocator);
return self;
@@ -493,7 +493,7 @@ pub const DynamicBitSetUnmanaged = struct {
/// Resizes to a new bit_length. If the new length is larger
/// than the old length, fills any added bits with `fill`.
/// If new_len is not zero, deinit must eventually be called.
- pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void {
+ pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: Allocator) !void {
const old_len = self.bit_length;
const old_masks = numMasks(old_len);
@@ -556,12 +556,12 @@ pub const DynamicBitSetUnmanaged = struct {
/// deinitializes the array and releases its memory.
/// The passed allocator must be the same one used for
/// init* or resize in the past.
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
self.resize(0, false, allocator) catch unreachable;
}
/// Creates a duplicate of this bit set, using the new allocator.
- pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
const num_masks = numMasks(self.bit_length);
var copy = Self{};
try copy.resize(self.bit_length, false, new_allocator);
@@ -742,13 +742,13 @@ pub const DynamicBitSet = struct {
pub const ShiftInt = std.math.Log2Int(MaskInt);
/// The allocator used by this bit set
- allocator: *Allocator,
+ allocator: Allocator,
/// The number of valid items in this bit set
unmanaged: DynamicBitSetUnmanaged = .{},
/// Creates a bit set with no elements present.
- pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
return Self{
.unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator),
.allocator = allocator,
@@ -756,7 +756,7 @@ pub const DynamicBitSet = struct {
}
/// Creates a bit set with all elements present.
- pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
return Self{
.unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator),
.allocator = allocator,
@@ -777,7 +777,7 @@ pub const DynamicBitSet = struct {
}
/// Creates a duplicate of this bit set, using the new allocator.
- pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
return Self{
.unmanaged = try self.unmanaged.clone(new_allocator),
.allocator = new_allocator,
lib/std/buf_map.zig
@@ -14,7 +14,7 @@ pub const BufMap = struct {
/// Create a BufMap backed by a specific allocator.
/// That allocator will be used for both backing allocations
/// and string deduplication.
- pub fn init(allocator: *Allocator) BufMap {
+ pub fn init(allocator: Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
lib/std/buf_set.zig
@@ -16,7 +16,7 @@ pub const BufSet = struct {
/// Create a BufSet using an allocator. The allocator will
/// be used internally for both backing allocations and
/// string duplication.
- pub fn init(a: *Allocator) BufSet {
+ pub fn init(a: Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
@@ -67,7 +67,7 @@ pub const BufSet = struct {
}
/// Get the allocator used by this set
- pub fn allocator(self: *const BufSet) *Allocator {
+ pub fn allocator(self: *const BufSet) Allocator {
return self.hash_map.allocator;
}
lib/std/build.zig
@@ -28,7 +28,7 @@ pub const OptionsStep = @import("build/OptionsStep.zig");
pub const Builder = struct {
install_tls: TopLevelStep,
uninstall_tls: TopLevelStep,
- allocator: *Allocator,
+ allocator: Allocator,
user_input_options: UserInputOptionsMap,
available_options_map: AvailableOptionsMap,
available_options_list: ArrayList(AvailableOption),
@@ -134,7 +134,7 @@ pub const Builder = struct {
};
pub fn create(
- allocator: *Allocator,
+ allocator: Allocator,
zig_exe: []const u8,
build_root: []const u8,
cache_root: []const u8,
@@ -1285,7 +1285,7 @@ test "builder.findProgram compiles" {
defer arena.deinit();
const builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"zig",
"zig-cache",
"zig-cache",
@@ -3077,7 +3077,7 @@ pub const Step = struct {
custom,
};
- pub fn init(id: Id, name: []const u8, allocator: *Allocator, makeFn: fn (*Step) anyerror!void) Step {
+ pub fn init(id: Id, name: []const u8, allocator: Allocator, makeFn: fn (*Step) anyerror!void) Step {
return Step{
.id = id,
.name = allocator.dupe(u8, name) catch unreachable,
@@ -3087,7 +3087,7 @@ pub const Step = struct {
.done_flag = false,
};
}
- pub fn initNoOp(id: Id, name: []const u8, allocator: *Allocator) Step {
+ pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
return init(id, name, allocator, makeNoOp);
}
@@ -3114,7 +3114,7 @@ pub const Step = struct {
}
};
-fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+fn doAtomicSymLinks(allocator: Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
const out_dir = fs.path.dirname(output_path) orelse ".";
const out_basename = fs.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
@@ -3138,7 +3138,7 @@ fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_maj
}
/// Returned slice must be freed by the caller.
-fn findVcpkgRoot(allocator: *Allocator) !?[]const u8 {
+fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
const appdata_path = try fs.getAppDataDir(allocator, "vcpkg");
defer allocator.free(appdata_path);
@@ -3207,7 +3207,7 @@ test "Builder.dupePkg()" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"test",
"test",
"test",
@@ -3252,7 +3252,7 @@ test "LibExeObjStep.addPackage" {
defer arena.deinit();
var builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"test",
"test",
"test",
lib/std/builtin.zig
@@ -75,7 +75,7 @@ pub const StackTrace = struct {
};
const tty_config = std.debug.detectTTYConfig();
try writer.writeAll("\n");
- std.debug.writeStackTrace(self, writer, &arena.allocator, debug_info, tty_config) catch |err| {
+ std.debug.writeStackTrace(self, writer, arena.getAllocator(), debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
};
try writer.writeAll("\n");
lib/std/child_process.zig
@@ -23,7 +23,7 @@ pub const ChildProcess = struct {
handle: if (builtin.os.tag == .windows) windows.HANDLE else void,
thread_handle: if (builtin.os.tag == .windows) windows.HANDLE else void,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
stdin: ?File,
stdout: ?File,
@@ -90,7 +90,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
- pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
+ pub fn init(argv: []const []const u8, allocator: mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
child.* = ChildProcess{
.allocator = allocator,
@@ -329,7 +329,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
pub fn exec(args: struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
argv: []const []const u8,
cwd: ?[]const u8 = null,
cwd_dir: ?fs.Dir = null,
@@ -541,7 +541,7 @@ pub const ChildProcess = struct {
var arena_allocator = std.heap.ArenaAllocator.init(self.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
// The POSIX standard does not allow malloc() between fork() and execve(),
// and `self.allocator` may be a libc allocator.
@@ -931,7 +931,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1
}
/// Caller must dealloc.
-fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![:0]u8 {
+fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8) ![:0]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
@@ -1081,7 +1081,7 @@ fn readIntFd(fd: i32) !ErrInt {
}
/// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 {
+pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const BufMap) ![]u16 {
// count bytes needed
const max_chars_needed = x: {
var max_chars_needed: usize = 4; // 4 for the final 4 null bytes
@@ -1117,7 +1117,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
return allocator.shrink(result, i);
}
-pub fn createNullDelimitedEnvMap(arena: *mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
+pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
const envp_count = env_map.count();
const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null);
{
@@ -1149,7 +1149,7 @@ test "createNullDelimitedEnvMap" {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
- const environ = try createNullDelimitedEnvMap(&arena.allocator, &envmap);
+ const environ = try createNullDelimitedEnvMap(arena.getAllocator(), &envmap);
try testing.expectEqual(@as(usize, 5), environ.len);
lib/std/coff.zig
@@ -98,7 +98,7 @@ pub const CoffError = error{
// Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
pub const Coff = struct {
in_file: File,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
coff_header: CoffHeader,
pe_header: OptionalHeader,
@@ -107,7 +107,7 @@ pub const Coff = struct {
guid: [16]u8,
age: u32,
- pub fn init(allocator: *mem.Allocator, in_file: File) Coff {
+ pub fn init(allocator: mem.Allocator, in_file: File) Coff {
return Coff{
.in_file = in_file,
.allocator = allocator,
@@ -324,7 +324,7 @@ pub const Coff = struct {
}
// Return an owned slice full of the section data
- pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: *mem.Allocator) ![]u8 {
+ pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 {
const sec = for (self.sections.items) |*sec| {
if (mem.eql(u8, sec.header.name[0..name.len], name)) {
break sec;
lib/std/cstr.zig
@@ -33,7 +33,7 @@ fn testCStrFnsImpl() !void {
/// Returns a mutable, null-terminated slice with the same length as `slice`.
/// Caller owns the returned memory.
-pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![:0]u8 {
+pub fn addNullByte(allocator: mem.Allocator, slice: []const u8) ![:0]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;
@@ -48,13 +48,13 @@ test "addNullByte" {
}
pub const NullTerminated2DArray = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
byte_count: usize,
ptr: ?[*:null]?[*:0]u8,
/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
- pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
+ pub fn fromSlices(allocator: mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
lib/std/debug.zig
@@ -29,7 +29,7 @@ pub const LineInfo = struct {
line: u64,
column: u64,
file_name: []const u8,
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
pub fn deinit(self: LineInfo) void {
const allocator = self.allocator orelse return;
@@ -339,7 +339,7 @@ const RESET = "\x1b[0m";
pub fn writeStackTrace(
stack_trace: std.builtin.StackTrace,
out_stream: anytype,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
debug_info: *DebugInfo,
tty_config: TTY.Config,
) !void {
@@ -662,7 +662,7 @@ pub const OpenSelfDebugInfoError = error{
};
/// TODO resources https://github.com/ziglang/zig/issues/4353
-pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
+pub fn openSelfDebugInfo(allocator: mem.Allocator) anyerror!DebugInfo {
nosuspend {
if (builtin.strip_debug_info)
return error.MissingDebugInfo;
@@ -688,7 +688,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
/// it themselves, even on error.
/// TODO resources https://github.com/ziglang/zig/issues/4353
/// TODO it's weird to take ownership even on error, rework this code.
-fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInfo {
+fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo {
nosuspend {
errdefer coff_file.close();
@@ -755,7 +755,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 {
/// it themselves, even on error.
/// TODO resources https://github.com/ziglang/zig/issues/4353
/// TODO it's weird to take ownership even on error, rework this code.
-pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugInfo {
+pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo {
nosuspend {
const mapped_mem = try mapWholeFile(elf_file);
const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
@@ -827,7 +827,7 @@ pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugI
/// This takes ownership of macho_file: users of this function should not close
/// it themselves, even on error.
/// TODO it's weird to take ownership even on error, rework this code.
-fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugInfo {
+fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo {
const mapped_mem = try mapWholeFile(macho_file);
const hdr = @ptrCast(
@@ -1025,10 +1025,10 @@ fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
}
pub const DebugInfo = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
address_map: std.AutoHashMap(usize, *ModuleDebugInfo),
- pub fn init(allocator: *mem.Allocator) DebugInfo {
+ pub fn init(allocator: mem.Allocator) DebugInfo {
return DebugInfo{
.allocator = allocator,
.address_map = std.AutoHashMap(usize, *ModuleDebugInfo).init(allocator),
@@ -1278,7 +1278,7 @@ pub const ModuleDebugInfo = switch (native_os) {
addr_table: std.StringHashMap(u64),
};
- pub fn allocator(self: @This()) *mem.Allocator {
+ pub fn allocator(self: @This()) mem.Allocator {
return self.ofiles.allocator;
}
@@ -1470,7 +1470,7 @@ pub const ModuleDebugInfo = switch (native_os) {
debug_data: PdbOrDwarf,
coff: *coff.Coff,
- pub fn allocator(self: @This()) *mem.Allocator {
+ pub fn allocator(self: @This()) mem.Allocator {
return self.coff.allocator;
}
@@ -1560,14 +1560,15 @@ fn getSymbolFromDwarf(address: u64, di: *DW.DwarfInfo) !SymbolInfo {
}
/// TODO multithreaded awareness
-var debug_info_allocator: ?*mem.Allocator = null;
+var debug_info_allocator: ?mem.Allocator = null;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
-fn getDebugInfoAllocator() *mem.Allocator {
+fn getDebugInfoAllocator() mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- debug_info_allocator = &debug_info_arena_allocator.allocator;
- return &debug_info_arena_allocator.allocator;
+ const allocator = debug_info_arena_allocator.getAllocator();
+ debug_info_allocator = allocator;
+ return allocator;
}
/// Whether or not the current target can print useful debug information when a segfault occurs.
lib/std/dwarf.zig
@@ -466,7 +466,7 @@ fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool)
}
// TODO the nosuspends here are workarounds
-fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
+fn readAllocBytes(allocator: mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile;
@@ -481,18 +481,18 @@ fn readAddress(in_stream: anytype, endian: std.builtin.Endian, is_64: bool) !u64
@as(u64, try in_stream.readInt(u32, endian));
}
-fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: mem.Allocator, in_stream: anytype, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}
// TODO the nosuspends here are workarounds
-fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
const block_len = try nosuspend in_stream.readVarInt(usize, endian, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
-fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
+fn parseFormValueConstant(allocator: mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
_ = allocator;
// TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
// `nosuspend` should be removed from all the function calls once it is fixed.
@@ -520,7 +520,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:
}
// TODO the nosuspends here are workarounds
-fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
+fn parseFormValueRef(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
_ = allocator;
return FormValue{
.Ref = switch (size) {
@@ -535,7 +535,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.
}
// TODO the nosuspends here are workarounds
-fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
+fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
return switch (form_id) {
FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
@@ -604,7 +604,7 @@ pub const DwarfInfo = struct {
compile_unit_list: ArrayList(CompileUnit) = undefined,
func_list: ArrayList(Func) = undefined,
- pub fn allocator(self: DwarfInfo) *mem.Allocator {
+ pub fn allocator(self: DwarfInfo) mem.Allocator {
return self.abbrev_table_list.allocator;
}
@@ -1092,7 +1092,7 @@ pub const DwarfInfo = struct {
/// the DwarfInfo fields before calling. These fields can be left undefined:
/// * abbrev_table_list
/// * compile_unit_list
-pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void {
+pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
di.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator);
di.compile_unit_list = ArrayList(CompileUnit).init(allocator);
di.func_list = ArrayList(Func).init(allocator);
lib/std/fifo.zig
@@ -33,7 +33,7 @@ pub fn LinearFifo(
};
return struct {
- allocator: if (buffer_type == .Dynamic) *Allocator else void,
+ allocator: if (buffer_type == .Dynamic) Allocator else void,
buf: if (buffer_type == .Static) [buffer_type.Static]T else []T,
head: usize,
count: usize,
@@ -69,7 +69,7 @@ pub fn LinearFifo(
}
},
.Dynamic => struct {
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return .{
.allocator = allocator,
.buf = &[_]T{},
lib/std/fmt.zig
@@ -1803,7 +1803,7 @@ pub fn count(comptime fmt: []const u8, args: anytype) u64 {
pub const AllocPrintError = error{OutOfMemory};
-pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
+pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) {
// Output too long. Can't possibly allocate enough memory to display it.
error.Overflow => return error.OutOfMemory,
@@ -1816,7 +1816,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: any
pub const allocPrint0 = @compileError("deprecated; use allocPrintZ");
-pub fn allocPrintZ(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
+pub fn allocPrintZ(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
const result = try allocPrint(allocator, fmt ++ "\x00", args);
return result[0 .. result.len - 1 :0];
}
lib/std/fs.zig
@@ -64,7 +64,7 @@ pub const need_async_thread = std.io.is_async and switch (builtin.os.tag) {
};
/// TODO remove the allocator requirement from this API
-pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (cwd().symLink(existing_path, new_path, .{})) {
return;
} else |err| switch (err) {
@@ -875,7 +875,7 @@ pub const Dir = struct {
/// Must call `Walker.deinit` when done.
/// The order of returned file system entries is undefined.
/// `self` will not be closed after walking it.
- pub fn walk(self: Dir, allocator: *Allocator) !Walker {
+ pub fn walk(self: Dir, allocator: Allocator) !Walker {
var name_buffer = std.ArrayList(u8).init(allocator);
errdefer name_buffer.deinit();
@@ -1393,7 +1393,7 @@ pub const Dir = struct {
/// Same as `Dir.realpath` except caller must free the returned memory.
/// See also `Dir.realpath`.
- pub fn realpathAlloc(self: Dir, allocator: *Allocator, pathname: []const u8) ![]u8 {
+ pub fn realpathAlloc(self: Dir, allocator: Allocator, pathname: []const u8) ![]u8 {
// Use of MAX_PATH_BYTES here is valid as the realpath function does not
// have a variant that takes an arbitrary-size buffer.
// TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
@@ -1804,7 +1804,7 @@ pub const Dir = struct {
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
- pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
+ pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
}
@@ -1815,7 +1815,7 @@ pub const Dir = struct {
/// Allows specifying alignment and a sentinel value.
pub fn readFileAllocOptions(
self: Dir,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
file_path: []const u8,
max_bytes: usize,
size_hint: ?usize,
@@ -2464,7 +2464,7 @@ pub const SelfExePathError = os.ReadLinkError || os.SysCtlError || os.RealPathEr
/// `selfExePath` except allocates the result on the heap.
/// Caller owns returned memory.
-pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 {
+pub fn selfExePathAlloc(allocator: Allocator) ![]u8 {
// Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
// system, readlink will completely fail to return a result larger than
// PATH_MAX even if given a sufficiently large buffer. This makes it
@@ -2573,7 +2573,7 @@ pub fn selfExePathW() [:0]const u16 {
/// `selfExeDirPath` except allocates the result on the heap.
/// Caller owns returned memory.
-pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
+pub fn selfExeDirPathAlloc(allocator: Allocator) ![]u8 {
// Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
// system, readlink will completely fail to return a result larger than
// PATH_MAX even if given a sufficiently large buffer. This makes it
@@ -2596,7 +2596,7 @@ pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 {
/// `realpath`, except caller must free the returned memory.
/// See also `Dir.realpath`.
-pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
+pub fn realpathAlloc(allocator: Allocator, pathname: []const u8) ![]u8 {
// Use of MAX_PATH_BYTES here is valid as the realpath function does not
// have a variant that takes an arbitrary-size buffer.
// TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
lib/std/hash_map.zig
@@ -363,7 +363,7 @@ pub fn HashMap(
comptime verifyContext(Context, K, K, u64);
return struct {
unmanaged: Unmanaged,
- allocator: *Allocator,
+ allocator: Allocator,
ctx: Context,
/// The type of the unmanaged hash map underlying this wrapper
@@ -390,7 +390,7 @@ pub fn HashMap(
/// Create a managed hash map with an empty context.
/// If the context is not zero-sized, you must use
/// initContext(allocator, ctx) instead.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
if (@sizeOf(Context) != 0) {
@compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
}
@@ -402,7 +402,7 @@ pub fn HashMap(
}
/// Create a managed hash map with a context
- pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+ pub fn initContext(allocator: Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.allocator = allocator,
@@ -636,7 +636,7 @@ pub fn HashMap(
}
/// Creates a copy of this map, using a specified allocator
- pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self {
+ pub fn cloneWithAllocator(self: Self, new_allocator: Allocator) !Self {
var other = try self.unmanaged.cloneContext(new_allocator, self.ctx);
return other.promoteContext(new_allocator, self.ctx);
}
@@ -650,7 +650,7 @@ pub fn HashMap(
/// Creates a copy of this map, using a specified allocator and context.
pub fn cloneWithAllocatorAndContext(
self: Self,
- new_allocator: *Allocator,
+ new_allocator: Allocator,
new_ctx: anytype,
) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = try self.unmanaged.cloneContext(new_allocator, new_ctx);
@@ -841,13 +841,13 @@ pub fn HashMapUnmanaged(
pub const Managed = HashMap(K, V, Context, max_load_percentage);
- pub fn promote(self: Self, allocator: *Allocator) Managed {
+ pub fn promote(self: Self, allocator: Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
return promoteContext(self, allocator, undefined);
}
- pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+ pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = allocator,
@@ -859,7 +859,7 @@ pub fn HashMapUnmanaged(
return size * 100 < max_load_percentage * cap;
}
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
self.deallocate(allocator);
self.* = undefined;
}
@@ -872,20 +872,20 @@ pub fn HashMapUnmanaged(
pub const ensureCapacity = @compileError("deprecated; call `ensureUnusedCapacity` or `ensureTotalCapacity`");
- pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void {
+ pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_size: Size) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
return ensureTotalCapacityContext(self, allocator, new_size, undefined);
}
- pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void {
+ pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_size: Size, ctx: Context) !void {
if (new_size > self.size)
try self.growIfNeeded(allocator, new_size - self.size, ctx);
}
- pub fn ensureUnusedCapacity(self: *Self, allocator: *Allocator, additional_size: Size) !void {
+ pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_size: Size) !void {
return ensureUnusedCapacityContext(self, allocator, additional_size, undefined);
}
- pub fn ensureUnusedCapacityContext(self: *Self, allocator: *Allocator, additional_size: Size, ctx: Context) !void {
+ pub fn ensureUnusedCapacityContext(self: *Self, allocator: Allocator, additional_size: Size, ctx: Context) !void {
return ensureTotalCapacityContext(self, allocator, self.count() + additional_size, ctx);
}
@@ -897,7 +897,7 @@ pub fn HashMapUnmanaged(
}
}
- pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ pub fn clearAndFree(self: *Self, allocator: Allocator) void {
self.deallocate(allocator);
self.size = 0;
self.available = 0;
@@ -962,12 +962,12 @@ pub fn HashMapUnmanaged(
}
/// Insert an entry in the map. Assumes it is not already present.
- pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
return self.putNoClobberContext(allocator, key, value, undefined);
}
- pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
assert(!self.containsContext(key, ctx));
try self.growIfNeeded(allocator, 1, ctx);
@@ -1021,12 +1021,12 @@ pub fn HashMapUnmanaged(
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
- pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+ pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
return self.fetchPutContext(allocator, key, value, undefined);
}
- pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+ pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
const gop = try self.getOrPutContext(allocator, key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
@@ -1157,12 +1157,12 @@ pub fn HashMapUnmanaged(
}
/// Insert an entry if the associated key is not already present, otherwise update preexisting value.
- pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
return self.putContext(allocator, key, value, undefined);
}
- pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
result.value_ptr.* = value;
}
@@ -1231,24 +1231,24 @@ pub fn HashMapUnmanaged(
return null;
}
- pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+ pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
return self.getOrPutContext(allocator, key, undefined);
}
- pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
- pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+ pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
- pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
self.growIfNeeded(allocator, 1, ctx) catch |err| {
// If allocation fails, try to do the lookup anyway.
// If we find an existing item, we can return it.
@@ -1341,12 +1341,12 @@ pub fn HashMapUnmanaged(
};
}
- pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry {
+ pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !Entry {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
return self.getOrPutValueContext(allocator, key, value, undefined);
}
- pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry {
+ pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !Entry {
const res = try self.getOrPutAdapted(allocator, key, ctx);
if (!res.found_existing) {
res.key_ptr.* = key;
@@ -1403,18 +1403,18 @@ pub fn HashMapUnmanaged(
return @truncate(Size, max_load - self.available);
}
- fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void {
+ fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) !void {
if (new_count > self.available) {
try self.grow(allocator, capacityForSize(self.load() + new_count), ctx);
}
}
- pub fn clone(self: Self, allocator: *Allocator) !Self {
+ pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
return self.cloneContext(allocator, @as(Context, undefined));
}
- pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
+ pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){};
if (self.size == 0)
return other;
@@ -1439,7 +1439,7 @@ pub fn HashMapUnmanaged(
return other;
}
- fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void {
+ fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) !void {
@setCold(true);
const new_cap = std.math.max(new_capacity, minimal_capacity);
assert(new_cap > self.capacity());
@@ -1470,7 +1470,7 @@ pub fn HashMapUnmanaged(
std.mem.swap(Self, self, &map);
}
- fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
+ fn allocate(self: *Self, allocator: Allocator, new_capacity: Size) !void {
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
@@ -1503,7 +1503,7 @@ pub fn HashMapUnmanaged(
self.metadata = @intToPtr([*]Metadata, metadata);
}
- fn deallocate(self: *Self, allocator: *Allocator) void {
+ fn deallocate(self: *Self, allocator: Allocator) void {
if (self.metadata == null) return;
const header_align = @alignOf(Header);
lib/std/heap.zig
@@ -97,13 +97,12 @@ const CAllocator = struct {
}
fn alloc(
- allocator: *Allocator,
+ _: *u1,
len: usize,
alignment: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
- _ = allocator;
_ = return_address;
assert(len > 0);
assert(std.math.isPowerOfTwo(alignment));
@@ -124,14 +123,13 @@ const CAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ _: *u1,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
- _ = allocator;
_ = buf_align;
_ = return_address;
if (new_len == 0) {
@@ -154,10 +152,11 @@ const CAllocator = struct {
/// Supports the full Allocator interface, including alignment, and exploiting
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
-pub const c_allocator = &c_allocator_state;
-var c_allocator_state = Allocator{
- .allocFn = CAllocator.alloc,
- .resizeFn = CAllocator.resize,
+pub const c_allocator = blk: {
+ // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
+ // allowing the use of `*void` but it would still be ugly
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, CAllocator.alloc, CAllocator.resize);
};
/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
@@ -165,20 +164,20 @@ var c_allocator_state = Allocator{
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator` for example and is more optimal in such a case
/// than `c_allocator`.
-pub const raw_c_allocator = &raw_c_allocator_state;
-var raw_c_allocator_state = Allocator{
- .allocFn = rawCAlloc,
- .resizeFn = rawCResize,
+pub const raw_c_allocator = blk: {
+ // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
+ // allowing the use of `*void` but it would still be ugly
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, rawCAlloc, rawCResize);
};
fn rawCAlloc(
- self: *Allocator,
+ _: *u1,
len: usize,
ptr_align: u29,
len_align: u29,
ret_addr: usize,
) Allocator.Error![]u8 {
- _ = self;
_ = len_align;
_ = ret_addr;
assert(ptr_align <= @alignOf(std.c.max_align_t));
@@ -187,14 +186,13 @@ fn rawCAlloc(
}
fn rawCResize(
- self: *Allocator,
+ _: *u1,
buf: []u8,
old_align: u29,
new_len: usize,
len_align: u29,
ret_addr: usize,
) Allocator.Error!usize {
- _ = self;
_ = old_align;
_ = ret_addr;
if (new_len == 0) {
@@ -210,19 +208,18 @@ fn rawCResize(
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (builtin.target.isWasm())
- &wasm_page_allocator_state
-else if (builtin.target.os.tag == .freestanding)
+blk: {
+ // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
+ // allowing the use of `*void` but it would still be ugly
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, WasmPageAllocator.alloc, WasmPageAllocator.resize);
+} else if (builtin.target.os.tag == .freestanding)
root.os.heap.page_allocator
-else
- &page_allocator_state;
-
-var page_allocator_state = Allocator{
- .allocFn = PageAllocator.alloc,
- .resizeFn = PageAllocator.resize,
-};
-var wasm_page_allocator_state = Allocator{
- .allocFn = WasmPageAllocator.alloc,
- .resizeFn = WasmPageAllocator.resize,
+else blk: {
+ // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
+ // allowing the use of `*void` but it would still be ugly
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, PageAllocator.alloc, PageAllocator.resize);
};
/// Verifies that the adjusted length will still map to the full length
@@ -236,8 +233,7 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
const PageAllocator = struct {
- fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
- _ = allocator;
+ fn alloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = ra;
assert(n > 0);
const aligned_len = mem.alignForward(n, mem.page_size);
@@ -335,14 +331,13 @@ const PageAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ _: *u1,
buf_unaligned: []u8,
buf_align: u29,
new_size: usize,
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
- _ = allocator;
_ = buf_align;
_ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
@@ -492,8 +487,7 @@ const WasmPageAllocator = struct {
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
}
- fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
- _ = allocator;
+ fn alloc(_: *u1, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = ra;
const page_count = nPages(len);
const page_idx = try allocPages(page_count, alignment);
@@ -548,14 +542,13 @@ const WasmPageAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ _: *u1,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) error{OutOfMemory}!usize {
- _ = allocator;
_ = buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
@@ -572,21 +565,20 @@ const WasmPageAllocator = struct {
pub const HeapAllocator = switch (builtin.os.tag) {
.windows => struct {
- allocator: Allocator,
heap_handle: ?HeapHandle,
const HeapHandle = os.windows.HANDLE;
pub fn init() HeapAllocator {
return HeapAllocator{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.heap_handle = null,
};
}
+ pub fn getAllocator(self: *HeapAllocator) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
pub fn deinit(self: *HeapAllocator) void {
if (self.heap_handle) |heap_handle| {
os.windows.HeapDestroy(heap_handle);
@@ -598,14 +590,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
fn alloc(
- allocator: *Allocator,
+ self: *HeapAllocator,
n: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
_ = return_address;
- const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
@@ -632,7 +623,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
fn resize(
- allocator: *Allocator,
+ self: *HeapAllocator,
buf: []u8,
buf_align: u29,
new_size: usize,
@@ -641,7 +632,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
) error{OutOfMemory}!usize {
_ = buf_align;
_ = return_address;
- const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
if (new_size == 0) {
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
return 0;
@@ -682,21 +672,27 @@ fn sliceContainsSlice(container: []u8, slice: []u8) bool {
}
pub const FixedBufferAllocator = struct {
- allocator: Allocator,
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.buffer = buffer,
.end_index = 0,
};
}
+ /// *WARNING* using this at the same time as the interface returned by `getThreadSafeAllocator` is not thread safe
+ pub fn getAllocator(self: *FixedBufferAllocator) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
+ /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
+ /// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe
+ pub fn getThreadSafeAllocator(self: *FixedBufferAllocator) Allocator {
+ return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize);
+ }
+
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
return sliceContainsPtr(self.buffer, ptr);
}
@@ -712,10 +708,9 @@ pub const FixedBufferAllocator = struct {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+ fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
- const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
return error.OutOfMemory;
const adjusted_index = self.end_index + adjust_off;
@@ -730,7 +725,7 @@ pub const FixedBufferAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ self: *FixedBufferAllocator,
buf: []u8,
buf_align: u29,
new_size: usize,
@@ -739,7 +734,6 @@ pub const FixedBufferAllocator = struct {
) Allocator.Error!usize {
_ = buf_align;
_ = return_address;
- const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(self.ownsSlice(buf)); // sanity check
if (!self.isLastAllocation(buf)) {
@@ -762,65 +756,34 @@ pub const FixedBufferAllocator = struct {
return new_size;
}
+ fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+ _ = len_align;
+ _ = ra;
+ var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
+ while (true) {
+ const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
+ return error.OutOfMemory;
+ const adjusted_index = end_index + adjust_off;
+ const new_end_index = adjusted_index + n;
+ if (new_end_index > self.buffer.len) {
+ return error.OutOfMemory;
+ }
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
+ }
+ }
+
pub fn reset(self: *FixedBufferAllocator) void {
self.end_index = 0;
}
};
-pub const ThreadSafeFixedBufferAllocator = blk: {
- if (builtin.single_threaded) {
- break :blk FixedBufferAllocator;
- } else {
- // lock free
- break :blk struct {
- allocator: Allocator,
- end_index: usize,
- buffer: []u8,
-
- pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
- return ThreadSafeFixedBufferAllocator{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = Allocator.noResize,
- },
- .buffer = buffer,
- .end_index = 0,
- };
- }
-
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
- _ = len_align;
- _ = ra;
- const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
- var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
- while (true) {
- const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
- return error.OutOfMemory;
- const adjusted_index = end_index + adjust_off;
- const new_end_index = adjusted_index + n;
- if (new_end_index > self.buffer.len) {
- return error.OutOfMemory;
- }
- end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
- }
- }
+pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `getThreadSafeAllocator` on FixedBufferAllocator");
- pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
- self.end_index = 0;
- }
- };
- }
-};
-
-pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
+pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
return StackFallbackAllocator(size){
.buffer = undefined,
.fallback_allocator = fallback_allocator,
.fixed_buffer_allocator = undefined,
- .allocator = Allocator{
- .allocFn = StackFallbackAllocator(size).alloc,
- .resizeFn = StackFallbackAllocator(size).resize,
- },
};
}
@@ -829,40 +792,38 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
const Self = @This();
buffer: [size]u8,
- allocator: Allocator,
- fallback_allocator: *Allocator,
+ fallback_allocator: Allocator,
fixed_buffer_allocator: FixedBufferAllocator,
- pub fn get(self: *Self) *Allocator {
+ /// WARNING: This functions both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator
+ pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
- return &self.allocator;
+ return Allocator.init(self, alloc, resize);
}
fn alloc(
- allocator: *Allocator,
+ self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
- return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, len, ptr_align, len_align, return_address) catch
- return self.fallback_allocator.allocFn(self.fallback_allocator, len, ptr_align, len_align, return_address);
+ return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
+ return self.fallback_allocator.allocFn(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
- return FixedBufferAllocator.resize(&self.fixed_buffer_allocator.allocator, buf, buf_align, new_len, len_align, return_address);
+ return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
} else {
- return self.fallback_allocator.resizeFn(self.fallback_allocator, buf, buf_align, new_len, len_align, return_address);
+ return self.fallback_allocator.resizeFn(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
}
}
};
@@ -950,8 +911,8 @@ test "HeapAllocator" {
if (builtin.os.tag == .windows) {
var heap_allocator = HeapAllocator.init();
defer heap_allocator.deinit();
+ const allocator = heap_allocator.getAllocator();
- const allocator = &heap_allocator.allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
@@ -962,36 +923,39 @@ test "HeapAllocator" {
test "ArenaAllocator" {
var arena_allocator = ArenaAllocator.init(page_allocator);
defer arena_allocator.deinit();
+ const allocator = arena_allocator.getAllocator();
- try testAllocator(&arena_allocator.allocator);
- try testAllocatorAligned(&arena_allocator.allocator);
- try testAllocatorLargeAlignment(&arena_allocator.allocator);
- try testAllocatorAlignedShrink(&arena_allocator.allocator);
+ try testAllocator(allocator);
+ try testAllocatorAligned(allocator);
+ try testAllocatorLargeAlignment(allocator);
+ try testAllocatorAlignedShrink(allocator);
}
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
+ const allocator = fixed_buffer_allocator.getAllocator();
- try testAllocator(&fixed_buffer_allocator.allocator);
- try testAllocatorAligned(&fixed_buffer_allocator.allocator);
- try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
- try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+ try testAllocator(allocator);
+ try testAllocatorAligned(allocator);
+ try testAllocatorLargeAlignment(allocator);
+ try testAllocatorAlignedShrink(allocator);
}
test "FixedBufferAllocator.reset" {
var buf: [8]u8 align(@alignOf(u64)) = undefined;
var fba = FixedBufferAllocator.init(buf[0..]);
+ const allocator = fba.getAllocator();
const X = 0xeeeeeeeeeeeeeeee;
const Y = 0xffffffffffffffff;
- var x = try fba.allocator.create(u64);
+ var x = try allocator.create(u64);
x.* = X;
- try testing.expectError(error.OutOfMemory, fba.allocator.create(u64));
+ try testing.expectError(error.OutOfMemory, allocator.create(u64));
fba.reset();
- var y = try fba.allocator.create(u64);
+ var y = try allocator.create(u64);
y.* = Y;
// we expect Y to have overwritten X.
@@ -1014,23 +978,25 @@ test "FixedBufferAllocator Reuse memory on realloc" {
// check if we re-use the memory
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+ const allocator = fixed_buffer_allocator.getAllocator();
- var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
+ var slice0 = try allocator.alloc(u8, 5);
try testing.expect(slice0.len == 5);
- var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
+ var slice1 = try allocator.realloc(slice0, 10);
try testing.expect(slice1.ptr == slice0.ptr);
try testing.expect(slice1.len == 10);
- try testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
+ try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
}
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+ const allocator = fixed_buffer_allocator.getAllocator();
- var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+ var slice0 = try allocator.alloc(u8, 2);
slice0[0] = 1;
slice0[1] = 2;
- var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
- var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
+ var slice1 = try allocator.alloc(u8, 2);
+ var slice2 = try allocator.realloc(slice0, 4);
try testing.expect(slice0.ptr != slice2.ptr);
try testing.expect(slice1.ptr != slice2.ptr);
try testing.expect(slice2[0] == 1);
@@ -1038,19 +1004,19 @@ test "FixedBufferAllocator Reuse memory on realloc" {
}
}
-test "ThreadSafeFixedBufferAllocator" {
- var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+test "Thread safe FixedBufferAllocator" {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
- try testAllocator(&fixed_buffer_allocator.allocator);
- try testAllocatorAligned(&fixed_buffer_allocator.allocator);
- try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
- try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+ try testAllocator(fixed_buffer_allocator.getThreadSafeAllocator());
+ try testAllocatorAligned(fixed_buffer_allocator.getThreadSafeAllocator());
+ try testAllocatorLargeAlignment(fixed_buffer_allocator.getThreadSafeAllocator());
+ try testAllocatorAlignedShrink(fixed_buffer_allocator.getThreadSafeAllocator());
}
/// This one should not try alignments that exceed what C malloc can handle.
-pub fn testAllocator(base_allocator: *mem.Allocator) !void {
+pub fn testAllocator(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
@@ -1094,9 +1060,9 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
allocator.free(oversize);
}
-pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
// Test a few alignment values, smaller and bigger than the type's one
inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
@@ -1124,9 +1090,9 @@ pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
}
}
-pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
//Maybe a platform's page_size is actually the same as or
// very near usize?
@@ -1156,12 +1122,13 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
allocator.free(slice);
}
-pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
+ var fib = FixedBufferAllocator.init(&debug_buffer);
+ const debug_allocator = fib.getAllocator();
const alloc_size = mem.page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
lib/std/json.zig
@@ -1476,7 +1476,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
}
pub const ParseOptions = struct {
- allocator: ?*Allocator = null,
+ allocator: ?Allocator = null,
/// Behaviour when a duplicate field is encountered.
duplicate_field_behavior: enum {
@@ -2033,7 +2033,7 @@ test "parse into tagged union" {
{ // failing allocations should be bubbled up instantly without trying next member
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
- const options = ParseOptions{ .allocator = &fail_alloc.allocator };
+ const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
const T = union(enum) {
// both fields here match the input
string: []const u8,
@@ -2081,7 +2081,7 @@ test "parse union bubbles up AllocatorRequired" {
test "parseFree descends into tagged union" {
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
- const options = ParseOptions{ .allocator = &fail_alloc.allocator };
+ const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
const T = union(enum) {
int: i32,
float: f64,
@@ -2328,7 +2328,7 @@ test "parse into double recursive union definition" {
/// A non-stream JSON parser which constructs a tree of Value's.
pub const Parser = struct {
- allocator: *Allocator,
+ allocator: Allocator,
state: State,
copy_strings: bool,
// Stores parent nodes and un-combined Values.
@@ -2341,7 +2341,7 @@ pub const Parser = struct {
Simple,
};
- pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
+ pub fn init(allocator: Allocator, copy_strings: bool) Parser {
return Parser{
.allocator = allocator,
.state = .Simple,
@@ -2364,9 +2364,10 @@ pub const Parser = struct {
var arena = ArenaAllocator.init(p.allocator);
errdefer arena.deinit();
+ const allocator = arena.getAllocator();
while (try s.next()) |token| {
- try p.transition(&arena.allocator, input, s.i - 1, token);
+ try p.transition(allocator, input, s.i - 1, token);
}
debug.assert(p.stack.items.len == 1);
@@ -2379,7 +2380,7 @@ pub const Parser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
- fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void {
+ fn transition(p: *Parser, allocator: Allocator, input: []const u8, i: usize, token: Token) !void {
switch (p.state) {
.ObjectKey => switch (token) {
.ObjectEnd => {
@@ -2536,7 +2537,7 @@ pub const Parser = struct {
}
}
- fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
+ fn parseString(p: *Parser, allocator: Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
const slice = s.slice(input, i);
switch (s.escapes) {
.None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
@@ -2737,7 +2738,7 @@ test "write json then parse it" {
try testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello"));
}
-fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
+fn testParse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value {
var p = Parser.init(arena_allocator, false);
return (try p.parse(json_str)).root;
}
@@ -2745,13 +2746,13 @@ fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
test "parsing empty string gives appropriate error" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
- try testing.expectError(error.UnexpectedEndOfJson, testParse(&arena_allocator.allocator, ""));
+ try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.getAllocator(), ""));
}
test "integer after float has proper type" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
- const json = try testParse(&arena_allocator.allocator,
+ const json = try testParse(arena_allocator.getAllocator(),
\\{
\\ "float": 3.14,
\\ "ints": [1, 2, 3]
@@ -2786,7 +2787,7 @@ test "escaped characters" {
\\}
;
- const obj = (try testParse(&arena_allocator.allocator, input)).Object;
+ const obj = (try testParse(arena_allocator.getAllocator(), input)).Object;
try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
@@ -2812,11 +2813,12 @@ test "string copy option" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
+ const allocator = arena_allocator.getAllocator();
- const tree_nocopy = try Parser.init(&arena_allocator.allocator, false).parse(input);
+ const tree_nocopy = try Parser.init(allocator, false).parse(input);
const obj_nocopy = tree_nocopy.root.Object;
- const tree_copy = try Parser.init(&arena_allocator.allocator, true).parse(input);
+ const tree_copy = try Parser.init(allocator, true).parse(input);
const obj_copy = tree_copy.root.Object;
for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| {
lib/std/mem.zig
@@ -37,24 +37,26 @@ pub const Allocator = @import("mem/Allocator.zig");
pub fn ValidationAllocator(comptime T: type) type {
return struct {
const Self = @This();
- allocator: Allocator,
+
underlying_allocator: T,
+
pub fn init(allocator: T) @This() {
return .{
- .allocator = .{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.underlying_allocator = allocator,
};
}
- fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
- if (T == *Allocator) return self.underlying_allocator;
- if (*T == *Allocator) return &self.underlying_allocator;
- return &self.underlying_allocator.allocator;
+
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
}
+
+ fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
+ if (T == Allocator) return self.underlying_allocator;
+ return self.underlying_allocator.getAllocator();
+ }
+
pub fn alloc(
- allocator: *Allocator,
+ self: *Self,
n: usize,
ptr_align: u29,
len_align: u29,
@@ -67,9 +69,8 @@ pub fn ValidationAllocator(comptime T: type) type {
assert(n >= len_align);
}
- const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr);
+ const result = try underlying.allocFn(underlying.ptr, n, ptr_align, len_align, ret_addr);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
@@ -79,8 +80,9 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
+
pub fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
@@ -92,9 +94,8 @@ pub fn ValidationAllocator(comptime T: type) type {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
- const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr);
+ const result = try underlying.resizeFn(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
if (len_align == 0) {
assert(result == new_len);
} else {
@@ -103,7 +104,7 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
- pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct {
+ pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct {
pub fn reset(self: *Self) void {
self.underlying_allocator.reset();
}
@@ -130,12 +131,14 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
return adjusted;
}
-var failAllocator = Allocator{
- .allocFn = failAllocatorAlloc,
- .resizeFn = Allocator.noResize,
+const failAllocator = blk: {
+ // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
+ // allowing the use of `*void` but it would still be ugly
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, failAllocatorAlloc, Allocator.NoResize(u1).noResize);
};
-fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
- _ = self;
+
+fn failAllocatorAlloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
_ = n;
_ = alignment;
_ = len_align;
@@ -1786,18 +1789,18 @@ pub fn SplitIterator(comptime T: type) type {
/// Naively combines a series of slices with a separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
+pub fn join(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
return joinMaybeZ(allocator, separator, slices, false);
}
/// Naively combines a series of slices with a separator and null terminator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn joinZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
+pub fn joinZ(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
const out = try joinMaybeZ(allocator, separator, slices, true);
return out[0 .. out.len - 1 :0];
}
-fn joinMaybeZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
+fn joinMaybeZ(allocator: Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
if (slices.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
const total_len = blk: {
@@ -1876,7 +1879,7 @@ test "mem.joinZ" {
}
/// Copies each T from slices into a new slice that exactly holds all the elements.
-pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T {
+pub fn concat(allocator: Allocator, comptime T: type, slices: []const []const T) ![]T {
if (slices.len == 0) return &[0]T{};
const total_len = blk: {
@@ -2318,7 +2321,7 @@ test "replacementSize" {
}
/// Perform a replacement on an allocated buffer of pre-determined size. Caller must free returned memory.
-pub fn replaceOwned(comptime T: type, allocator: *Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
+pub fn replaceOwned(comptime T: type, allocator: Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
var output = try allocator.alloc(T, replacementSize(T, input, needle, replacement));
_ = replace(T, input, needle, replacement, output);
return output;
lib/std/multi_array_list.zig
@@ -59,7 +59,7 @@ pub fn MultiArrayList(comptime S: type) type {
};
}
- pub fn deinit(self: *Slice, gpa: *Allocator) void {
+ pub fn deinit(self: *Slice, gpa: Allocator) void {
var other = self.toMultiArrayList();
other.deinit(gpa);
self.* = undefined;
@@ -106,7 +106,7 @@ pub fn MultiArrayList(comptime S: type) type {
};
/// Release all allocated memory.
- pub fn deinit(self: *Self, gpa: *Allocator) void {
+ pub fn deinit(self: *Self, gpa: Allocator) void {
gpa.free(self.allocatedBytes());
self.* = undefined;
}
@@ -161,7 +161,7 @@ pub fn MultiArrayList(comptime S: type) type {
}
/// Extend the list by 1 element. Allocates more memory as necessary.
- pub fn append(self: *Self, gpa: *Allocator, elem: S) !void {
+ pub fn append(self: *Self, gpa: Allocator, elem: S) !void {
try self.ensureUnusedCapacity(gpa, 1);
self.appendAssumeCapacity(elem);
}
@@ -188,7 +188,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// after and including the specified index back by one and
/// sets the given index to the specified element. May reallocate
/// and invalidate iterators.
- pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void {
+ pub fn insert(self: *Self, gpa: Allocator, index: usize, elem: S) !void {
try self.ensureUnusedCapacity(gpa, 1);
self.insertAssumeCapacity(index, elem);
}
@@ -242,7 +242,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
- pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void {
+ pub fn resize(self: *Self, gpa: Allocator, new_len: usize) !void {
try self.ensureTotalCapacity(gpa, new_len);
self.len = new_len;
}
@@ -250,7 +250,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Attempt to reduce allocated capacity to `new_len`.
/// If `new_len` is greater than zero, this may fail to reduce the capacity,
/// but the data remains intact and the length is updated to new_len.
- pub fn shrinkAndFree(self: *Self, gpa: *Allocator, new_len: usize) void {
+ pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void {
if (new_len == 0) {
gpa.free(self.allocatedBytes());
self.* = .{};
@@ -314,7 +314,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Modify the array so that it can hold at least `new_capacity` items.
/// Implements super-linear growth to achieve amortized O(1) append operations.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureTotalCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
@@ -328,14 +328,14 @@ pub fn MultiArrayList(comptime S: type) type {
/// Modify the array so that it can hold at least `additional_count` **more** items.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureUnusedCapacity(self: *Self, gpa: *Allocator, additional_count: usize) !void {
+ pub fn ensureUnusedCapacity(self: *Self, gpa: Allocator, additional_count: usize) !void {
return self.ensureTotalCapacity(gpa, self.len + additional_count);
}
/// Modify the array so that it can hold exactly `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
/// `new_capacity` must be greater or equal to `len`.
- pub fn setCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
+ pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
assert(new_capacity >= self.len);
const new_bytes = try gpa.allocAdvanced(
u8,
@@ -372,7 +372,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Create a copy of this list with a new backing store,
/// using the specified allocator.
- pub fn clone(self: Self, gpa: *Allocator) !Self {
+ pub fn clone(self: Self, gpa: Allocator) !Self {
var result = Self{};
errdefer result.deinit(gpa);
try result.ensureTotalCapacity(gpa, self.len);
lib/std/net.zig
@@ -664,7 +664,7 @@ pub const AddressList = struct {
};
/// All memory allocated with `allocator` will be freed before this function returns.
-pub fn tcpConnectToHost(allocator: *mem.Allocator, name: []const u8, port: u16) !Stream {
+pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) !Stream {
const list = try getAddressList(allocator, name, port);
defer list.deinit();
@@ -699,12 +699,12 @@ pub fn tcpConnectToAddress(address: Address) !Stream {
}
/// Call `AddressList.deinit` on the result.
-pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*AddressList {
+pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*AddressList {
const result = blk: {
var arena = std.heap.ArenaAllocator.init(allocator);
errdefer arena.deinit();
- const result = try arena.allocator.create(AddressList);
+ const result = try arena.getAllocator().create(AddressList);
result.* = AddressList{
.arena = arena,
.addrs = undefined,
@@ -712,7 +712,7 @@ pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*
};
break :blk result;
};
- const arena = &result.arena.allocator;
+ const arena = result.arena.getAllocator();
errdefer result.arena.deinit();
if (builtin.target.os.tag == .windows or builtin.link_libc) {
@@ -1303,7 +1303,7 @@ const ResolvConf = struct {
/// Ignores lines longer than 512 bytes.
/// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
-fn getResolvConf(allocator: *mem.Allocator, rc: *ResolvConf) !void {
+fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
rc.* = ResolvConf{
.ns = std.ArrayList(LookupAddr).init(allocator),
.search = std.ArrayList(u8).init(allocator),
lib/std/pdb.zig
@@ -460,7 +460,7 @@ pub const PDBStringTableHeader = packed struct {
ByteSize: u32,
};
-fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
+fn readSparseBitVector(stream: anytype, allocator: mem.Allocator) ![]u32 {
const num_words = try stream.readIntLittle(u32);
var list = ArrayList(u32).init(allocator);
errdefer list.deinit();
@@ -481,7 +481,7 @@ fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
pub const Pdb = struct {
in_file: File,
msf: Msf,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
string_table: ?*MsfStream,
dbi: ?*MsfStream,
modules: []Module,
@@ -500,7 +500,7 @@ pub const Pdb = struct {
checksum_offset: ?usize,
};
- pub fn init(allocator: *mem.Allocator, path: []const u8) !Pdb {
+ pub fn init(allocator: mem.Allocator, path: []const u8) !Pdb {
const file = try fs.cwd().openFile(path, .{ .intended_io_mode = .blocking });
errdefer file.close();
@@ -858,7 +858,7 @@ const Msf = struct {
directory: MsfStream,
streams: []MsfStream,
- fn init(allocator: *mem.Allocator, file: File) !Msf {
+ fn init(allocator: mem.Allocator, file: File) !Msf {
const in = file.reader();
const superblock = try in.readStruct(SuperBlock);
lib/std/priority_dequeue.zig
@@ -21,10 +21,10 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
items: []T,
len: usize,
- allocator: *Allocator,
+ allocator: Allocator,
/// Initialize and return a new priority dequeue.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.len = 0,
@@ -336,7 +336,7 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
/// Dequeue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// De-initialize with `deinit`.
- pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
@@ -945,7 +945,7 @@ fn fuzzTestMinMax(rng: std.rand.Random, queue_size: usize) !void {
}
}
-fn generateRandomSlice(allocator: *std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
+fn generateRandomSlice(allocator: std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
var array = std.ArrayList(u32).init(allocator);
try array.ensureTotalCapacity(size);
lib/std/priority_queue.zig
@@ -20,10 +20,10 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
items: []T,
len: usize,
- allocator: *Allocator,
+ allocator: Allocator,
/// Initialize and return a priority queue.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.len = 0,
@@ -153,7 +153,7 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
/// PriorityQueue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit`.
- pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
lib/std/process.zig
@@ -21,7 +21,7 @@ pub fn getCwd(out_buffer: []u8) ![]u8 {
}
/// Caller must free the returned memory.
-pub fn getCwdAlloc(allocator: *Allocator) ![]u8 {
+pub fn getCwdAlloc(allocator: Allocator) ![]u8 {
// The use of MAX_PATH_BYTES here is just a heuristic: most paths will fit
// in stack_buf, avoiding an extra allocation in the common case.
var stack_buf: [fs.MAX_PATH_BYTES]u8 = undefined;
@@ -54,7 +54,7 @@ test "getCwdAlloc" {
}
/// Caller owns resulting `BufMap`.
-pub fn getEnvMap(allocator: *Allocator) !BufMap {
+pub fn getEnvMap(allocator: Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
@@ -154,7 +154,7 @@ pub const GetEnvVarOwnedError = error{
};
/// Caller must free returned memory.
-pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
+pub fn getEnvVarOwned(allocator: mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
if (builtin.os.tag == .windows) {
const result_w = blk: {
const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key);
@@ -183,10 +183,10 @@ pub fn hasEnvVarConstant(comptime key: []const u8) bool {
}
}
-pub fn hasEnvVar(allocator: *Allocator, key: []const u8) error{OutOfMemory}!bool {
+pub fn hasEnvVar(allocator: Allocator, key: []const u8) error{OutOfMemory}!bool {
if (builtin.os.tag == .windows) {
var stack_alloc = std.heap.stackFallback(256 * @sizeOf(u16), allocator);
- const key_w = try std.unicode.utf8ToUtf16LeWithNull(&stack_alloc.allocator, key);
+ const key_w = try std.unicode.utf8ToUtf16LeWithNull(stack_alloc.get(), key);
defer stack_alloc.allocator.free(key_w);
return std.os.getenvW(key_w) != null;
} else {
@@ -227,7 +227,7 @@ pub const ArgIteratorPosix = struct {
};
pub const ArgIteratorWasi = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
index: usize,
args: [][:0]u8,
@@ -235,7 +235,7 @@ pub const ArgIteratorWasi = struct {
/// You must call deinit to free the internal buffer of the
/// iterator after you are done.
- pub fn init(allocator: *mem.Allocator) InitError!ArgIteratorWasi {
+ pub fn init(allocator: mem.Allocator) InitError!ArgIteratorWasi {
const fetched_args = try ArgIteratorWasi.internalInit(allocator);
return ArgIteratorWasi{
.allocator = allocator,
@@ -244,7 +244,7 @@ pub const ArgIteratorWasi = struct {
};
}
- fn internalInit(allocator: *mem.Allocator) InitError![][:0]u8 {
+ fn internalInit(allocator: mem.Allocator) InitError![][:0]u8 {
const w = os.wasi;
var count: usize = undefined;
var buf_size: usize = undefined;
@@ -325,7 +325,7 @@ pub const ArgIteratorWindows = struct {
}
/// You must free the returned memory when done.
- pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![:0]u8) {
+ pub fn next(self: *ArgIteratorWindows, allocator: Allocator) ?(NextError![:0]u8) {
// march forward over whitespace
while (true) : (self.index += 1) {
const character = self.getPointAtIndex();
@@ -379,7 +379,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![:0]u8 {
+ fn internalNext(self: *ArgIteratorWindows, allocator: Allocator) NextError![:0]u8 {
var buf = std.ArrayList(u16).init(allocator);
defer buf.deinit();
@@ -423,7 +423,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn convertFromWindowsCmdLineToUTF8(allocator: *Allocator, buf: []u16) NextError![:0]u8 {
+ fn convertFromWindowsCmdLineToUTF8(allocator: Allocator, buf: []u16) NextError![:0]u8 {
return std.unicode.utf16leToUtf8AllocZ(allocator, buf) catch |err| switch (err) {
error.ExpectedSecondSurrogateHalf,
error.DanglingSurrogateHalf,
@@ -463,7 +463,7 @@ pub const ArgIterator = struct {
pub const InitError = ArgIteratorWasi.InitError;
/// You must deinitialize iterator's internal buffers by calling `deinit` when done.
- pub fn initWithAllocator(allocator: *mem.Allocator) InitError!ArgIterator {
+ pub fn initWithAllocator(allocator: mem.Allocator) InitError!ArgIterator {
if (builtin.os.tag == .wasi and !builtin.link_libc) {
return ArgIterator{ .inner = try InnerType.init(allocator) };
}
@@ -474,7 +474,7 @@ pub const ArgIterator = struct {
pub const NextError = ArgIteratorWindows.NextError;
/// You must free the returned memory when done.
- pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![:0]u8) {
+ pub fn next(self: *ArgIterator, allocator: Allocator) ?(NextError![:0]u8) {
if (builtin.os.tag == .windows) {
return self.inner.next(allocator);
} else {
@@ -513,7 +513,7 @@ pub fn args() ArgIterator {
}
/// You must deinitialize iterator's internal buffers by calling `deinit` when done.
-pub fn argsWithAllocator(allocator: *mem.Allocator) ArgIterator.InitError!ArgIterator {
+pub fn argsWithAllocator(allocator: mem.Allocator) ArgIterator.InitError!ArgIterator {
return ArgIterator.initWithAllocator(allocator);
}
@@ -539,7 +539,7 @@ test "args iterator" {
}
/// Caller must call argsFree on result.
-pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
+pub fn argsAlloc(allocator: mem.Allocator) ![][:0]u8 {
// TODO refactor to only make 1 allocation.
var it = if (builtin.os.tag == .wasi) try argsWithAllocator(allocator) else args();
defer it.deinit();
@@ -579,7 +579,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
return result_slice_list;
}
-pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const [:0]u8) void {
+pub fn argsFree(allocator: mem.Allocator, args_alloc: []const [:0]u8) void {
var total_bytes: usize = 0;
for (args_alloc) |arg| {
total_bytes += @sizeOf([]u8) + arg.len + 1;
@@ -741,7 +741,7 @@ pub fn getBaseAddress() usize {
/// requirement from `std.zig.system.NativeTargetInfo.detect`. Most likely this will require
/// introducing a new, lower-level function which takes a callback function, and then this
/// function which takes an allocator can exist on top of it.
-pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 {
+pub fn getSelfExeSharedLibPaths(allocator: Allocator) error{OutOfMemory}![][:0]u8 {
switch (builtin.link_mode) {
.Static => return &[_][:0]u8{},
.Dynamic => {},
@@ -833,7 +833,7 @@ pub const ExecvError = std.os.ExecveError || error{OutOfMemory};
/// This function also uses the PATH environment variable to get the full path to the executable.
/// Due to the heap-allocation, it is illegal to call this function in a fork() child.
/// For that use case, use the `std.os` functions directly.
-pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
+pub fn execv(allocator: mem.Allocator, argv: []const []const u8) ExecvError {
return execve(allocator, argv, null);
}
@@ -846,7 +846,7 @@ pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
/// Due to the heap-allocation, it is illegal to call this function in a fork() child.
/// For that use case, use the `std.os` functions directly.
pub fn execve(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
argv: []const []const u8,
env_map: ?*const std.BufMap,
) ExecvError {
@@ -854,7 +854,7 @@ pub fn execve(
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
lib/std/target.zig
@@ -1323,15 +1323,15 @@ pub const Target = struct {
pub const stack_align = 16;
- pub fn zigTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
+ pub fn zigTriple(self: Target, allocator: mem.Allocator) ![]u8 {
return std.zig.CrossTarget.fromTarget(self).zigTriple(allocator);
}
- pub fn linuxTripleSimple(allocator: *mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
+ pub fn linuxTripleSimple(allocator: mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(cpu_arch), @tagName(os_tag), @tagName(abi) });
}
- pub fn linuxTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
+ pub fn linuxTriple(self: Target, allocator: mem.Allocator) ![]u8 {
return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi);
}
lib/std/testing.zig
@@ -7,11 +7,11 @@ const print = std.debug.print;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
/// This should only be used in temporary test programs.
-pub const allocator = &allocator_instance.allocator;
+pub const allocator = allocator_instance.getAllocator();
pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
-pub const failing_allocator = &failing_allocator_instance.allocator;
-pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
+pub const failing_allocator = failing_allocator_instance.getAllocator();
+pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.getAllocator(), 0);
pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
lib/std/Thread.zig
@@ -460,7 +460,7 @@ const WindowsThreadImpl = struct {
errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0);
const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes];
- const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator.create(Instance) catch unreachable;
+ const instance = std.heap.FixedBufferAllocator.init(instance_bytes).getAllocator().create(Instance) catch unreachable;
instance.* = .{
.fn_args = args,
.thread = .{
lib/std/unicode.zig
@@ -550,7 +550,7 @@ fn testDecode(bytes: []const u8) !u21 {
}
/// Caller must free returned memory.
-pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
+pub fn utf16leToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) ![]u8 {
// optimistically guess that it will all be ascii.
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
errdefer result.deinit();
@@ -567,7 +567,7 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
}
/// Caller must free returned memory.
-pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 {
+pub fn utf16leToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) ![:0]u8 {
// optimistically guess that it will all be ascii.
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
errdefer result.deinit();
@@ -661,7 +661,7 @@ test "utf16leToUtf8" {
}
}
-pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 {
+pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u16 {
// optimistically guess that it will not require surrogate pairs
var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1);
errdefer result.deinit();
lib/std/wasm.zig
@@ -361,7 +361,7 @@ pub const Type = struct {
std.mem.eql(Valtype, self.returns, other.returns);
}
- pub fn deinit(self: *Type, gpa: *std.mem.Allocator) void {
+ pub fn deinit(self: *Type, gpa: std.mem.Allocator) void {
gpa.free(self.params);
gpa.free(self.returns);
self.* = undefined;
lib/std/zig.zig
@@ -100,7 +100,7 @@ pub const BinNameOptions = struct {
};
/// Returns the standard file system basename of a binary generated by the Zig compiler.
-pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
+pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
const root_name = options.root_name;
const target = options.target;
const ofmt = options.object_format orelse target.getObjectFormat();
src/arch/aarch64/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
CodegenFail,
};
-gpa: *Allocator,
+gpa: Allocator,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -164,7 +164,7 @@ const MCValue = union(enum) {
const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
- fn deinit(self: *Branch, gpa: *Allocator) void {
+ fn deinit(self: *Branch, gpa: Allocator) void {
self.inst_table.deinit(gpa);
self.* = undefined;
}
src/arch/aarch64/Mir.zig
@@ -229,7 +229,7 @@ pub const Inst = struct {
// }
};
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
src/arch/arm/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
CodegenFail,
};
-gpa: *Allocator,
+gpa: Allocator,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -164,7 +164,7 @@ const MCValue = union(enum) {
const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
- fn deinit(self: *Branch, gpa: *Allocator) void {
+ fn deinit(self: *Branch, gpa: Allocator) void {
self.inst_table.deinit(gpa);
self.* = undefined;
}
src/arch/arm/Mir.zig
@@ -193,7 +193,7 @@ pub const Inst = struct {
// }
};
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
src/arch/riscv64/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
CodegenFail,
};
-gpa: *Allocator,
+gpa: Allocator,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -158,7 +158,7 @@ const MCValue = union(enum) {
const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
- fn deinit(self: *Branch, gpa: *Allocator) void {
+ fn deinit(self: *Branch, gpa: Allocator) void {
self.inst_table.deinit(gpa);
self.* = undefined;
}
src/arch/riscv64/Mir.zig
@@ -101,7 +101,7 @@ pub const Inst = struct {
// }
};
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
src/arch/wasm/CodeGen.zig
@@ -508,7 +508,7 @@ const Self = @This();
decl: *Decl,
air: Air,
liveness: Liveness,
-gpa: *mem.Allocator,
+gpa: mem.Allocator,
/// Table to save `WValue`'s generated by an `Air.Inst`
values: ValueTable,
/// Mapping from Air.Inst.Index to block ids
@@ -983,7 +983,7 @@ const CallWValues = struct {
args: []WValue,
return_value: WValue,
- fn deinit(self: *CallWValues, gpa: *Allocator) void {
+ fn deinit(self: *CallWValues, gpa: Allocator) void {
gpa.free(self.args);
self.* = undefined;
}
src/arch/wasm/Mir.zig
@@ -411,7 +411,7 @@ pub const Inst = struct {
};
};
-pub fn deinit(self: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(self: *Mir, gpa: std.mem.Allocator) void {
self.instructions.deinit(gpa);
gpa.free(self.extra);
self.* = undefined;
src/arch/x86_64/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
CodegenFail,
};
-gpa: *Allocator,
+gpa: Allocator,
air: Air,
liveness: Liveness,
bin_file: *link.File,
@@ -174,7 +174,7 @@ pub const MCValue = union(enum) {
const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
- fn deinit(self: *Branch, gpa: *Allocator) void {
+ fn deinit(self: *Branch, gpa: Allocator) void {
self.inst_table.deinit(gpa);
self.* = undefined;
}
src/arch/x86_64/Mir.zig
@@ -347,7 +347,7 @@ pub const ArgDbgInfo = struct {
arg_index: u32,
};
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
src/codegen/c.zig
@@ -163,14 +163,14 @@ pub const Object = struct {
/// This data is available both when outputting .c code and when outputting an .h file.
pub const DeclGen = struct {
- gpa: *std.mem.Allocator,
+ gpa: std.mem.Allocator,
module: *Module,
decl: *Decl,
fwd_decl: std.ArrayList(u8),
error_msg: ?*Module.ErrorMsg,
/// The key of this map is Type which has references to typedefs_arena.
typedefs: TypedefMap,
- typedefs_arena: *std.mem.Allocator,
+ typedefs_arena: std.mem.Allocator,
fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
@setCold(true);
src/codegen/llvm.zig
@@ -23,7 +23,7 @@ const LazySrcLoc = Module.LazySrcLoc;
const Error = error{ OutOfMemory, CodegenFail };
-pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
+pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
@@ -190,14 +190,14 @@ pub const Object = struct {
std.hash_map.default_max_load_percentage,
);
- pub fn create(gpa: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
+ pub fn create(gpa: Allocator, sub_path: []const u8, options: link.Options) !*Object {
const obj = try gpa.create(Object);
errdefer gpa.destroy(obj);
obj.* = try Object.init(gpa, sub_path, options);
return obj;
}
- pub fn init(gpa: *Allocator, sub_path: []const u8, options: link.Options) !Object {
+ pub fn init(gpa: Allocator, sub_path: []const u8, options: link.Options) !Object {
const context = llvm.Context.create();
errdefer context.dispose();
@@ -287,7 +287,7 @@ pub const Object = struct {
};
}
- pub fn deinit(self: *Object, gpa: *Allocator) void {
+ pub fn deinit(self: *Object, gpa: Allocator) void {
self.target_machine.dispose();
self.llvm_module.dispose();
self.context.dispose();
@@ -297,13 +297,13 @@ pub const Object = struct {
self.* = undefined;
}
- pub fn destroy(self: *Object, gpa: *Allocator) void {
+ pub fn destroy(self: *Object, gpa: Allocator) void {
self.deinit(gpa);
gpa.destroy(self);
}
fn locPath(
- arena: *Allocator,
+ arena: Allocator,
opt_loc: ?Compilation.EmitLoc,
cache_directory: Compilation.Directory,
) !?[*:0]u8 {
@@ -554,7 +554,7 @@ pub const DeclGen = struct {
object: *Object,
module: *Module,
decl: *Module.Decl,
- gpa: *Allocator,
+ gpa: Allocator,
err_msg: ?*Module.ErrorMsg,
fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@@ -1621,7 +1621,7 @@ pub const DeclGen = struct {
};
pub const FuncGen = struct {
- gpa: *Allocator,
+ gpa: Allocator,
dg: *DeclGen,
air: Air,
liveness: Liveness,
src/codegen/spirv.zig
@@ -70,7 +70,7 @@ pub fn writeInstructionWithString(code: *std.ArrayList(Word), opcode: Opcode, ar
/// of data which needs to be persistent over different calls to Decl code generation.
pub const SPIRVModule = struct {
/// A general-purpose allocator which may be used to allocate temporary resources required for compilation.
- gpa: *Allocator,
+ gpa: Allocator,
/// The parent module.
module: *Module,
@@ -103,7 +103,7 @@ pub const SPIRVModule = struct {
/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
file_names: std.StringHashMap(ResultId),
- pub fn init(gpa: *Allocator, module: *Module) SPIRVModule {
+ pub fn init(gpa: Allocator, module: *Module) SPIRVModule {
return .{
.gpa = gpa,
.module = module,
src/link/MachO/Archive.zig
@@ -92,7 +92,7 @@ const ar_hdr = extern struct {
}
};
-pub fn deinit(self: *Archive, allocator: *Allocator) void {
+pub fn deinit(self: *Archive, allocator: Allocator) void {
for (self.toc.keys()) |*key| {
allocator.free(key.*);
}
@@ -103,7 +103,7 @@ pub fn deinit(self: *Archive, allocator: *Allocator) void {
allocator.free(self.name);
}
-pub fn parse(self: *Archive, allocator: *Allocator, target: std.Target) !void {
+pub fn parse(self: *Archive, allocator: Allocator, target: std.Target) !void {
const reader = self.file.reader();
self.library_offset = try fat.getLibraryOffset(reader, target);
try self.file.seekTo(self.library_offset);
@@ -128,7 +128,7 @@ pub fn parse(self: *Archive, allocator: *Allocator, target: std.Target) !void {
try reader.context.seekTo(0);
}
-fn parseName(allocator: *Allocator, header: ar_hdr, reader: anytype) ![]u8 {
+fn parseName(allocator: Allocator, header: ar_hdr, reader: anytype) ![]u8 {
const name_or_length = try header.nameOrLength();
var name: []u8 = undefined;
switch (name_or_length) {
@@ -146,7 +146,7 @@ fn parseName(allocator: *Allocator, header: ar_hdr, reader: anytype) ![]u8 {
return name;
}
-fn parseTableOfContents(self: *Archive, allocator: *Allocator, reader: anytype) !void {
+fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !void {
const symtab_size = try reader.readIntLittle(u32);
var symtab = try allocator.alloc(u8, symtab_size);
defer allocator.free(symtab);
@@ -188,7 +188,7 @@ fn parseTableOfContents(self: *Archive, allocator: *Allocator, reader: anytype)
}
}
-pub fn parseObject(self: Archive, allocator: *Allocator, target: std.Target, offset: u32) !Object {
+pub fn parseObject(self: Archive, allocator: Allocator, target: std.Target, offset: u32) !Object {
const reader = self.file.reader();
try reader.context.seekTo(offset + self.library_offset);
src/link/MachO/Atom.zig
@@ -195,7 +195,7 @@ pub const empty = Atom{
.dbg_info_len = undefined,
};
-pub fn deinit(self: *Atom, allocator: *Allocator) void {
+pub fn deinit(self: *Atom, allocator: Allocator) void {
self.dices.deinit(allocator);
self.lazy_bindings.deinit(allocator);
self.bindings.deinit(allocator);
@@ -246,7 +246,7 @@ pub fn freeListEligible(self: Atom, macho_file: MachO) bool {
const RelocContext = struct {
base_addr: u64 = 0,
- allocator: *Allocator,
+ allocator: Allocator,
object: *Object,
macho_file: *MachO,
};
src/link/MachO/CodeSignature.zig
@@ -58,7 +58,7 @@ cdir: ?CodeDirectory = null,
pub fn calcAdhocSignature(
self: *CodeSignature,
- allocator: *Allocator,
+ allocator: Allocator,
file: fs.File,
id: []const u8,
text_segment: macho.segment_command_64,
@@ -145,7 +145,7 @@ pub fn write(self: CodeSignature, writer: anytype) !void {
try self.cdir.?.write(writer);
}
-pub fn deinit(self: *CodeSignature, allocator: *Allocator) void {
+pub fn deinit(self: *CodeSignature, allocator: Allocator) void {
if (self.cdir) |*cdir| {
cdir.data.deinit(allocator);
}
src/link/MachO/commands.zig
@@ -50,7 +50,7 @@ pub const LoadCommand = union(enum) {
Rpath: GenericCommandWithData(macho.rpath_command),
Unknown: GenericCommandWithData(macho.load_command),
- pub fn read(allocator: *Allocator, reader: anytype) !LoadCommand {
+ pub fn read(allocator: Allocator, reader: anytype) !LoadCommand {
const header = try reader.readStruct(macho.load_command);
var buffer = try allocator.alloc(u8, header.cmdsize);
defer allocator.free(buffer);
@@ -177,7 +177,7 @@ pub const LoadCommand = union(enum) {
};
}
- pub fn deinit(self: *LoadCommand, allocator: *Allocator) void {
+ pub fn deinit(self: *LoadCommand, allocator: Allocator) void {
return switch (self.*) {
.Segment => |*x| x.deinit(allocator),
.Dylinker => |*x| x.deinit(allocator),
@@ -218,7 +218,7 @@ pub const SegmentCommand = struct {
inner: macho.segment_command_64,
sections: std.ArrayListUnmanaged(macho.section_64) = .{},
- pub fn read(alloc: *Allocator, reader: anytype) !SegmentCommand {
+ pub fn read(alloc: Allocator, reader: anytype) !SegmentCommand {
const inner = try reader.readStruct(macho.segment_command_64);
var segment = SegmentCommand{
.inner = inner,
@@ -241,7 +241,7 @@ pub const SegmentCommand = struct {
}
}
- pub fn deinit(self: *SegmentCommand, alloc: *Allocator) void {
+ pub fn deinit(self: *SegmentCommand, alloc: Allocator) void {
self.sections.deinit(alloc);
}
@@ -299,7 +299,7 @@ pub fn GenericCommandWithData(comptime Cmd: type) type {
const Self = @This();
- pub fn read(allocator: *Allocator, reader: anytype) !Self {
+ pub fn read(allocator: Allocator, reader: anytype) !Self {
const inner = try reader.readStruct(Cmd);
var data = try allocator.alloc(u8, inner.cmdsize - @sizeOf(Cmd));
errdefer allocator.free(data);
@@ -315,7 +315,7 @@ pub fn GenericCommandWithData(comptime Cmd: type) type {
try writer.writeAll(self.data);
}
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
allocator.free(self.data);
}
@@ -327,7 +327,7 @@ pub fn GenericCommandWithData(comptime Cmd: type) type {
}
pub fn createLoadDylibCommand(
- allocator: *Allocator,
+ allocator: Allocator,
name: []const u8,
timestamp: u32,
current_version: u32,
@@ -395,7 +395,7 @@ pub fn sectionIsDontDeadStripIfReferencesLive(sect: macho.section_64) bool {
return sectionAttrs(sect) & macho.S_ATTR_LIVE_SUPPORT != 0;
}
-fn testRead(allocator: *Allocator, buffer: []const u8, expected: anytype) !void {
+fn testRead(allocator: Allocator, buffer: []const u8, expected: anytype) !void {
var stream = io.fixedBufferStream(buffer);
var given = try LoadCommand.read(allocator, stream.reader());
defer given.deinit(allocator);
src/link/MachO/DebugSymbols.zig
@@ -104,7 +104,7 @@ const min_nop_size = 2;
/// You must call this function *after* `MachO.populateMissingMetadata()`
/// has been called to get a viable debug symbols output.
-pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void {
+pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void {
if (self.uuid_cmd_index == null) {
const base_cmd = self.base.load_commands.items[self.base.uuid_cmd_index.?];
self.uuid_cmd_index = @intCast(u16, self.load_commands.items.len);
@@ -268,7 +268,7 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
return index;
}
-pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Options) !void {
+pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Options) !void {
// TODO This linker code currently assumes there is only 1 compilation unit and it corresponds to the
// Zig source code.
const module = options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
@@ -577,7 +577,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
assert(!self.debug_string_table_dirty);
}
-pub fn deinit(self: *DebugSymbols, allocator: *Allocator) void {
+pub fn deinit(self: *DebugSymbols, allocator: Allocator) void {
self.dbg_info_decl_free_list.deinit(allocator);
self.dbg_line_fn_free_list.deinit(allocator);
self.debug_string_table.deinit(allocator);
@@ -588,7 +588,7 @@ pub fn deinit(self: *DebugSymbols, allocator: *Allocator) void {
self.file.close();
}
-fn copySegmentCommand(self: *DebugSymbols, allocator: *Allocator, base_cmd: SegmentCommand) !SegmentCommand {
+fn copySegmentCommand(self: *DebugSymbols, allocator: Allocator, base_cmd: SegmentCommand) !SegmentCommand {
var cmd = SegmentCommand{
.inner = .{
.segname = undefined,
@@ -648,7 +648,7 @@ fn updateDwarfSegment(self: *DebugSymbols) void {
}
/// Writes all load commands and section headers.
-fn writeLoadCommands(self: *DebugSymbols, allocator: *Allocator) !void {
+fn writeLoadCommands(self: *DebugSymbols, allocator: Allocator) !void {
if (!self.load_commands_dirty) return;
var sizeofcmds: u32 = 0;
@@ -834,7 +834,7 @@ pub const DeclDebugBuffers = struct {
/// Caller owns the returned memory.
pub fn initDeclDebugBuffers(
self: *DebugSymbols,
- allocator: *Allocator,
+ allocator: Allocator,
module: *Module,
decl: *Module.Decl,
) !DeclDebugBuffers {
@@ -930,7 +930,7 @@ pub fn initDeclDebugBuffers(
pub fn commitDeclDebugInfo(
self: *DebugSymbols,
- allocator: *Allocator,
+ allocator: Allocator,
module: *Module,
decl: *Module.Decl,
debug_buffers: *DeclDebugBuffers,
@@ -1141,7 +1141,7 @@ fn addDbgInfoType(
fn updateDeclDebugInfoAllocation(
self: *DebugSymbols,
- allocator: *Allocator,
+ allocator: Allocator,
text_block: *TextBlock,
len: u32,
) !void {
@@ -1256,7 +1256,7 @@ fn getDebugLineProgramEnd(self: DebugSymbols) u32 {
}
/// TODO Improve this to use a table.
-fn makeDebugString(self: *DebugSymbols, allocator: *Allocator, bytes: []const u8) !u32 {
+fn makeDebugString(self: *DebugSymbols, allocator: Allocator, bytes: []const u8) !u32 {
try self.debug_string_table.ensureUnusedCapacity(allocator, bytes.len + 1);
const result = self.debug_string_table.items.len;
self.debug_string_table.appendSliceAssumeCapacity(bytes);
src/link/MachO/Dylib.zig
@@ -44,7 +44,7 @@ pub const Id = struct {
current_version: u32,
compatibility_version: u32,
- pub fn default(allocator: *Allocator, name: []const u8) !Id {
+ pub fn default(allocator: Allocator, name: []const u8) !Id {
return Id{
.name = try allocator.dupe(u8, name),
.timestamp = 2,
@@ -53,7 +53,7 @@ pub const Id = struct {
};
}
- pub fn fromLoadCommand(allocator: *Allocator, lc: commands.GenericCommandWithData(macho.dylib_command)) !Id {
+ pub fn fromLoadCommand(allocator: Allocator, lc: commands.GenericCommandWithData(macho.dylib_command)) !Id {
const dylib = lc.inner.dylib;
const dylib_name = @ptrCast([*:0]const u8, lc.data[dylib.name - @sizeOf(macho.dylib_command) ..]);
const name = try allocator.dupe(u8, mem.sliceTo(dylib_name, 0));
@@ -66,7 +66,7 @@ pub const Id = struct {
};
}
- pub fn deinit(id: *Id, allocator: *Allocator) void {
+ pub fn deinit(id: *Id, allocator: Allocator) void {
allocator.free(id.name);
}
@@ -125,7 +125,7 @@ pub const Id = struct {
}
};
-pub fn deinit(self: *Dylib, allocator: *Allocator) void {
+pub fn deinit(self: *Dylib, allocator: Allocator) void {
for (self.load_commands.items) |*lc| {
lc.deinit(allocator);
}
@@ -143,7 +143,7 @@ pub fn deinit(self: *Dylib, allocator: *Allocator) void {
}
}
-pub fn parse(self: *Dylib, allocator: *Allocator, target: std.Target, dependent_libs: anytype) !void {
+pub fn parse(self: *Dylib, allocator: Allocator, target: std.Target, dependent_libs: anytype) !void {
log.debug("parsing shared library '{s}'", .{self.name});
self.library_offset = try fat.getLibraryOffset(self.file.reader(), target);
@@ -170,7 +170,7 @@ pub fn parse(self: *Dylib, allocator: *Allocator, target: std.Target, dependent_
try self.parseSymbols(allocator);
}
-fn readLoadCommands(self: *Dylib, allocator: *Allocator, reader: anytype, dependent_libs: anytype) !void {
+fn readLoadCommands(self: *Dylib, allocator: Allocator, reader: anytype, dependent_libs: anytype) !void {
const should_lookup_reexports = self.header.?.flags & macho.MH_NO_REEXPORTED_DYLIBS == 0;
try self.load_commands.ensureUnusedCapacity(allocator, self.header.?.ncmds);
@@ -203,7 +203,7 @@ fn readLoadCommands(self: *Dylib, allocator: *Allocator, reader: anytype, depend
}
}
-fn parseId(self: *Dylib, allocator: *Allocator) !void {
+fn parseId(self: *Dylib, allocator: Allocator) !void {
const index = self.id_cmd_index orelse {
log.debug("no LC_ID_DYLIB load command found; using hard-coded defaults...", .{});
self.id = try Id.default(allocator, self.name);
@@ -212,7 +212,7 @@ fn parseId(self: *Dylib, allocator: *Allocator) !void {
self.id = try Id.fromLoadCommand(allocator, self.load_commands.items[index].Dylib);
}
-fn parseSymbols(self: *Dylib, allocator: *Allocator) !void {
+fn parseSymbols(self: *Dylib, allocator: Allocator) !void {
const index = self.symtab_cmd_index orelse return;
const symtab_cmd = self.load_commands.items[index].Symtab;
@@ -236,7 +236,7 @@ fn parseSymbols(self: *Dylib, allocator: *Allocator) !void {
}
}
-fn addObjCClassSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addObjCClassSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
const expanded = &[_][]const u8{
try std.fmt.allocPrint(allocator, "_OBJC_CLASS_$_{s}", .{sym_name}),
try std.fmt.allocPrint(allocator, "_OBJC_METACLASS_$_{s}", .{sym_name}),
@@ -248,29 +248,29 @@ fn addObjCClassSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8)
}
}
-fn addObjCIVarSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addObjCIVarSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
const expanded = try std.fmt.allocPrint(allocator, "_OBJC_IVAR_$_{s}", .{sym_name});
if (self.symbols.contains(expanded)) return;
try self.symbols.putNoClobber(allocator, expanded, .{});
}
-fn addObjCEhTypeSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addObjCEhTypeSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
const expanded = try std.fmt.allocPrint(allocator, "_OBJC_EHTYPE_$_{s}", .{sym_name});
if (self.symbols.contains(expanded)) return;
try self.symbols.putNoClobber(allocator, expanded, .{});
}
-fn addSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
if (self.symbols.contains(sym_name)) return;
try self.symbols.putNoClobber(allocator, try allocator.dupe(u8, sym_name), {});
}
const TargetMatcher = struct {
- allocator: *Allocator,
+ allocator: Allocator,
target: std.Target,
target_strings: std.ArrayListUnmanaged([]const u8) = .{},
- fn init(allocator: *Allocator, target: std.Target) !TargetMatcher {
+ fn init(allocator: Allocator, target: std.Target) !TargetMatcher {
var self = TargetMatcher{
.allocator = allocator,
.target = target,
@@ -297,7 +297,7 @@ const TargetMatcher = struct {
self.target_strings.deinit(self.allocator);
}
- fn targetToAppleString(allocator: *Allocator, target: std.Target) ![]const u8 {
+ fn targetToAppleString(allocator: Allocator, target: std.Target) ![]const u8 {
const arch = switch (target.cpu.arch) {
.aarch64 => "arm64",
.x86_64 => "x86_64",
@@ -336,7 +336,7 @@ const TargetMatcher = struct {
pub fn parseFromStub(
self: *Dylib,
- allocator: *Allocator,
+ allocator: Allocator,
target: std.Target,
lib_stub: LibStub,
dependent_libs: anytype,
src/link/MachO/Object.zig
@@ -74,7 +74,7 @@ const DebugInfo = struct {
debug_line: []u8,
debug_ranges: []u8,
- pub fn parseFromObject(allocator: *Allocator, object: *const Object) !?DebugInfo {
+ pub fn parseFromObject(allocator: Allocator, object: *const Object) !?DebugInfo {
var debug_info = blk: {
const index = object.dwarf_debug_info_index orelse return null;
break :blk try object.readSection(allocator, index);
@@ -118,7 +118,7 @@ const DebugInfo = struct {
};
}
- pub fn deinit(self: *DebugInfo, allocator: *Allocator) void {
+ pub fn deinit(self: *DebugInfo, allocator: Allocator) void {
allocator.free(self.debug_info);
allocator.free(self.debug_abbrev);
allocator.free(self.debug_str);
@@ -130,7 +130,7 @@ const DebugInfo = struct {
}
};
-pub fn deinit(self: *Object, allocator: *Allocator) void {
+pub fn deinit(self: *Object, allocator: Allocator) void {
for (self.load_commands.items) |*lc| {
lc.deinit(allocator);
}
@@ -160,7 +160,7 @@ pub fn deinit(self: *Object, allocator: *Allocator) void {
}
}
-pub fn free(self: *Object, allocator: *Allocator, macho_file: *MachO) void {
+pub fn free(self: *Object, allocator: Allocator, macho_file: *MachO) void {
log.debug("freeObject {*}", .{self});
var it = self.end_atoms.iterator();
@@ -227,7 +227,7 @@ fn freeAtoms(self: *Object, macho_file: *MachO) void {
}
}
-pub fn parse(self: *Object, allocator: *Allocator, target: std.Target) !void {
+pub fn parse(self: *Object, allocator: Allocator, target: std.Target) !void {
const reader = self.file.reader();
if (self.file_offset) |offset| {
try reader.context.seekTo(offset);
@@ -263,7 +263,7 @@ pub fn parse(self: *Object, allocator: *Allocator, target: std.Target) !void {
try self.parseDebugInfo(allocator);
}
-pub fn readLoadCommands(self: *Object, allocator: *Allocator, reader: anytype) !void {
+pub fn readLoadCommands(self: *Object, allocator: Allocator, reader: anytype) !void {
const header = self.header orelse unreachable; // Unreachable here signifies a fatal unexplored condition.
const offset = self.file_offset orelse 0;
@@ -381,7 +381,7 @@ fn filterDice(dices: []macho.data_in_code_entry, start_addr: u64, end_addr: u64)
return dices[start..end];
}
-pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO) !void {
+pub fn parseIntoAtoms(self: *Object, allocator: Allocator, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -555,7 +555,7 @@ pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO)
}
}
-fn parseSymtab(self: *Object, allocator: *Allocator) !void {
+fn parseSymtab(self: *Object, allocator: Allocator) !void {
const index = self.symtab_cmd_index orelse return;
const symtab_cmd = self.load_commands.items[index].Symtab;
@@ -571,7 +571,7 @@ fn parseSymtab(self: *Object, allocator: *Allocator) !void {
try self.strtab.appendSlice(allocator, strtab);
}
-pub fn parseDebugInfo(self: *Object, allocator: *Allocator) !void {
+pub fn parseDebugInfo(self: *Object, allocator: Allocator) !void {
log.debug("parsing debug info in '{s}'", .{self.name});
var debug_info = blk: {
@@ -603,7 +603,7 @@ pub fn parseDebugInfo(self: *Object, allocator: *Allocator) !void {
}
}
-pub fn parseDataInCode(self: *Object, allocator: *Allocator) !void {
+pub fn parseDataInCode(self: *Object, allocator: Allocator) !void {
const index = self.data_in_code_cmd_index orelse return;
const data_in_code = self.load_commands.items[index].LinkeditData;
@@ -623,7 +623,7 @@ pub fn parseDataInCode(self: *Object, allocator: *Allocator) !void {
}
}
-fn readSection(self: Object, allocator: *Allocator, index: u16) ![]u8 {
+fn readSection(self: Object, allocator: Allocator, index: u16) ![]u8 {
const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
const sect = seg.sections.items[index];
var buffer = try allocator.alloc(u8, @intCast(usize, sect.size));
src/link/MachO/Trie.zig
@@ -65,7 +65,7 @@ pub const Node = struct {
to: *Node,
label: []u8,
- fn deinit(self: *Edge, allocator: *Allocator) void {
+ fn deinit(self: *Edge, allocator: Allocator) void {
self.to.deinit(allocator);
allocator.destroy(self.to);
allocator.free(self.label);
@@ -75,7 +75,7 @@ pub const Node = struct {
}
};
- fn deinit(self: *Node, allocator: *Allocator) void {
+ fn deinit(self: *Node, allocator: Allocator) void {
for (self.edges.items) |*edge| {
edge.deinit(allocator);
}
@@ -83,7 +83,7 @@ pub const Node = struct {
}
/// Inserts a new node starting from `self`.
- fn put(self: *Node, allocator: *Allocator, label: []const u8) !*Node {
+ fn put(self: *Node, allocator: Allocator, label: []const u8) !*Node {
// Check for match with edges from this node.
for (self.edges.items) |*edge| {
const match = mem.indexOfDiff(u8, edge.label, label) orelse return edge.to;
@@ -126,7 +126,7 @@ pub const Node = struct {
}
/// Recursively parses the node from the input byte stream.
- fn read(self: *Node, allocator: *Allocator, reader: anytype) Trie.ReadError!usize {
+ fn read(self: *Node, allocator: Allocator, reader: anytype) Trie.ReadError!usize {
self.node_dirty = true;
const trie_offset = try reader.context.getPos();
self.trie_offset = trie_offset;
@@ -308,7 +308,7 @@ pub const ExportSymbol = struct {
/// Insert a symbol into the trie, updating the prefixes in the process.
/// This operation may change the layout of the trie by splicing edges in
/// certain circumstances.
-pub fn put(self: *Trie, allocator: *Allocator, symbol: ExportSymbol) !void {
+pub fn put(self: *Trie, allocator: Allocator, symbol: ExportSymbol) !void {
try self.createRoot(allocator);
const node = try self.root.?.put(allocator, symbol.name);
node.terminal_info = .{
@@ -322,7 +322,7 @@ pub fn put(self: *Trie, allocator: *Allocator, symbol: ExportSymbol) !void {
/// This step performs multiple passes through the trie ensuring
/// there are no gaps after every `Node` is ULEB128 encoded.
/// Call this method before trying to `write` the trie to a byte stream.
-pub fn finalize(self: *Trie, allocator: *Allocator) !void {
+pub fn finalize(self: *Trie, allocator: Allocator) !void {
if (!self.trie_dirty) return;
self.ordered_nodes.shrinkRetainingCapacity(0);
@@ -361,7 +361,7 @@ const ReadError = error{
};
/// Parse the trie from a byte stream.
-pub fn read(self: *Trie, allocator: *Allocator, reader: anytype) ReadError!usize {
+pub fn read(self: *Trie, allocator: Allocator, reader: anytype) ReadError!usize {
try self.createRoot(allocator);
return self.root.?.read(allocator, reader);
}
@@ -377,7 +377,7 @@ pub fn write(self: Trie, writer: anytype) !u64 {
return counting_writer.bytes_written;
}
-pub fn deinit(self: *Trie, allocator: *Allocator) void {
+pub fn deinit(self: *Trie, allocator: Allocator) void {
if (self.root) |root| {
root.deinit(allocator);
allocator.destroy(root);
@@ -385,7 +385,7 @@ pub fn deinit(self: *Trie, allocator: *Allocator) void {
self.ordered_nodes.deinit(allocator);
}
-fn createRoot(self: *Trie, allocator: *Allocator) !void {
+fn createRoot(self: *Trie, allocator: Allocator) !void {
if (self.root == null) {
const root = try allocator.create(Node);
root.* = .{ .base = self };
src/link/tapi/parse.zig
@@ -37,7 +37,7 @@ pub const Node = struct {
return @fieldParentPtr(T, "base", self);
}
- pub fn deinit(self: *Node, allocator: *Allocator) void {
+ pub fn deinit(self: *Node, allocator: Allocator) void {
switch (self.tag) {
.doc => @fieldParentPtr(Node.Doc, "base", self).deinit(allocator),
.map => @fieldParentPtr(Node.Map, "base", self).deinit(allocator),
@@ -69,7 +69,7 @@ pub const Node = struct {
pub const base_tag: Node.Tag = .doc;
- pub fn deinit(self: *Doc, allocator: *Allocator) void {
+ pub fn deinit(self: *Doc, allocator: Allocator) void {
if (self.value) |node| {
node.deinit(allocator);
allocator.destroy(node);
@@ -113,7 +113,7 @@ pub const Node = struct {
value: *Node,
};
- pub fn deinit(self: *Map, allocator: *Allocator) void {
+ pub fn deinit(self: *Map, allocator: Allocator) void {
for (self.values.items) |entry| {
entry.value.deinit(allocator);
allocator.destroy(entry.value);
@@ -149,7 +149,7 @@ pub const Node = struct {
pub const base_tag: Node.Tag = .list;
- pub fn deinit(self: *List, allocator: *Allocator) void {
+ pub fn deinit(self: *List, allocator: Allocator) void {
for (self.values.items) |node| {
node.deinit(allocator);
allocator.destroy(node);
@@ -198,12 +198,12 @@ pub const Node = struct {
};
pub const Tree = struct {
- allocator: *Allocator,
+ allocator: Allocator,
source: []const u8,
tokens: []Token,
docs: std.ArrayListUnmanaged(*Node) = .{},
- pub fn init(allocator: *Allocator) Tree {
+ pub fn init(allocator: Allocator) Tree {
return .{
.allocator = allocator,
.source = undefined,
@@ -266,7 +266,7 @@ pub const Tree = struct {
};
const Parser = struct {
- allocator: *Allocator,
+ allocator: Allocator,
tree: *Tree,
token_it: *TokenIterator,
scopes: std.ArrayListUnmanaged(Scope) = .{},
src/link/tapi/yaml.zig
@@ -149,7 +149,7 @@ pub const Value = union(ValueType) {
};
}
- fn fromNode(arena: *Allocator, tree: *const Tree, node: *const Node, type_hint: ?ValueType) YamlError!Value {
+ fn fromNode(arena: Allocator, tree: *const Tree, node: *const Node, type_hint: ?ValueType) YamlError!Value {
if (node.cast(Node.Doc)) |doc| {
const inner = doc.value orelse {
// empty doc
@@ -246,7 +246,7 @@ pub const Yaml = struct {
}
}
- pub fn load(allocator: *Allocator, source: []const u8) !Yaml {
+ pub fn load(allocator: Allocator, source: []const u8) !Yaml {
var arena = ArenaAllocator.init(allocator);
var tree = Tree.init(&arena.allocator);
src/link/Wasm/Atom.zig
@@ -42,7 +42,7 @@ pub const empty: Atom = .{
};
/// Frees all resources owned by this `Atom`.
-pub fn deinit(self: *Atom, gpa: *Allocator) void {
+pub fn deinit(self: *Atom, gpa: Allocator) void {
self.relocs.deinit(gpa);
self.code.deinit(gpa);
}
src/link/C.zig
@@ -36,7 +36,7 @@ const DeclBlock = struct {
/// Any arena memory the Type points to lives in the `arena` field of `C`.
typedefs: codegen.TypedefMap.Unmanaged = .{},
- fn deinit(db: *DeclBlock, gpa: *Allocator) void {
+ fn deinit(db: *DeclBlock, gpa: Allocator) void {
db.code.deinit(gpa);
db.fwd_decl.deinit(gpa);
for (db.typedefs.values()) |typedef| {
@@ -47,7 +47,7 @@ const DeclBlock = struct {
}
};
-pub fn openPath(gpa: *Allocator, sub_path: []const u8, options: link.Options) !*C {
+pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C {
assert(options.object_format == .c);
if (options.use_llvm) return error.LLVMHasNoCBackend;
@@ -336,7 +336,7 @@ const Flush = struct {
std.hash_map.default_max_load_percentage,
);
- fn deinit(f: *Flush, gpa: *Allocator) void {
+ fn deinit(f: *Flush, gpa: Allocator) void {
f.all_buffers.deinit(gpa);
f.err_typedef_buf.deinit(gpa);
f.typedefs.deinit(gpa);
src/link/Coff.zig
@@ -125,7 +125,7 @@ pub const TextBlock = struct {
pub const SrcFn = void;
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Coff {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Coff {
assert(options.object_format == .coff);
if (build_options.have_llvm and options.use_llvm) {
@@ -396,7 +396,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
return self;
}
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Coff {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff {
const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
0...32 => .p32,
33...64 => .p64,
@@ -1394,7 +1394,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
}
}
-fn findLib(self: *Coff, arena: *Allocator, name: []const u8) !?[]const u8 {
+fn findLib(self: *Coff, arena: Allocator, name: []const u8) !?[]const u8 {
for (self.base.options.lib_dirs) |lib_dir| {
const full_path = try fs.path.join(arena, &.{ lib_dir, name });
fs.cwd().access(full_path, .{}) catch |err| switch (err) {
src/link/Elf.zig
@@ -228,7 +228,7 @@ pub const SrcFn = struct {
};
};
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Elf {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Elf {
assert(options.object_format == .elf);
if (build_options.have_llvm and options.use_llvm) {
@@ -281,7 +281,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
return self;
}
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Elf {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
0...32 => .p32,
33...64 => .p64,
@@ -2205,7 +2205,7 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
}
}
-fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void {
+fn deinitRelocs(gpa: Allocator, table: *File.DbgInfoTypeRelocsTable) void {
var it = table.valueIterator();
while (it.next()) |value| {
value.relocs.deinit(gpa);
@@ -3360,7 +3360,7 @@ const CsuObjects = struct {
crtend: ?[]const u8 = null,
crtn: ?[]const u8 = null,
- fn init(arena: *mem.Allocator, link_options: link.Options, comp: *const Compilation) !CsuObjects {
+ fn init(arena: mem.Allocator, link_options: link.Options, comp: *const Compilation) !CsuObjects {
// crt objects are only required for libc.
if (!link_options.link_libc) return CsuObjects{};
src/link/MachO.zig
@@ -280,7 +280,7 @@ pub const SrcFn = struct {
};
};
-pub fn openPath(allocator: *Allocator, options: link.Options) !*MachO {
+pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
assert(options.object_format == .macho);
const use_stage1 = build_options.is_stage1 and options.use_stage1;
@@ -366,7 +366,7 @@ pub fn openPath(allocator: *Allocator, options: link.Options) !*MachO {
return self;
}
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*MachO {
const self = try gpa.create(MachO);
const cpu_arch = options.target.cpu.arch;
const os_tag = options.target.os.tag;
@@ -1032,7 +1032,7 @@ pub fn flushObject(self: *MachO, comp: *Compilation) !void {
}
fn resolveSearchDir(
- arena: *Allocator,
+ arena: Allocator,
dir: []const u8,
syslibroot: ?[]const u8,
) !?[]const u8 {
@@ -1074,7 +1074,7 @@ fn resolveSearchDir(
}
fn resolveLib(
- arena: *Allocator,
+ arena: Allocator,
search_dirs: []const []const u8,
name: []const u8,
ext: []const u8,
@@ -1098,7 +1098,7 @@ fn resolveLib(
}
fn resolveFramework(
- arena: *Allocator,
+ arena: Allocator,
search_dirs: []const []const u8,
name: []const u8,
ext: []const u8,
src/link/Plan9.zig
@@ -132,7 +132,7 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
pub const PtrWidth = enum { p32, p64 };
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
const sixtyfour_bit: bool = switch (options.target.cpu.arch.ptrBitWidth()) {
@@ -621,7 +621,7 @@ pub fn deinit(self: *Plan9) void {
pub const Export = ?usize;
pub const base_tag = .plan9;
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
assert(options.object_format == .plan9);
src/link/SpirV.zig
@@ -58,7 +58,7 @@ const DeclGenContext = struct {
liveness: Liveness,
};
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*SpirV {
const spirv = try gpa.create(SpirV);
spirv.* = .{
.base = .{
@@ -87,7 +87,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
return spirv;
}
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*SpirV {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*SpirV {
assert(options.object_format == .spirv);
if (options.use_llvm) return error.LLVM_BackendIsTODO_ForSpirV; // TODO: LLVM Doesn't support SpirV at all.
src/link/tapi.zig
@@ -106,7 +106,7 @@ pub const LibStub = struct {
/// Typed contents of the tbd file.
inner: []Tbd,
- pub fn loadFromFile(allocator: *Allocator, file: fs.File) !LibStub {
+ pub fn loadFromFile(allocator: Allocator, file: fs.File) !LibStub {
const source = try file.readToEndAlloc(allocator, std.math.maxInt(u32));
defer allocator.free(source);
src/link/Wasm.zig
@@ -97,7 +97,7 @@ pub const FnData = struct {
};
};
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
assert(options.object_format == .wasm);
if (build_options.have_llvm and options.use_llvm) {
@@ -138,7 +138,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
return wasm_bin;
}
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Wasm {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
const wasm_bin = try gpa.create(Wasm);
wasm_bin.* = .{
.base = .{
src/translate_c/ast.zig
@@ -378,7 +378,7 @@ pub const Node = extern union {
return .{ .tag_if_small_enough = @enumToInt(t) };
}
- pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!Node {
+ pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!Node {
const ptr = try ally.create(t.Type());
ptr.* = .{
.base = .{ .tag = t },
@@ -717,7 +717,7 @@ pub const Payload = struct {
/// Converts the nodes into a Zig Ast.
/// Caller must free the source slice.
-pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.Ast {
+pub fn render(gpa: Allocator, nodes: []const Node) !std.zig.Ast {
var ctx = Context{
.gpa = gpa,
.buf = std.ArrayList(u8).init(gpa),
@@ -783,7 +783,7 @@ const TokenIndex = std.zig.Ast.TokenIndex;
const TokenTag = std.zig.Token.Tag;
const Context = struct {
- gpa: *Allocator,
+ gpa: Allocator,
buf: std.ArrayList(u8) = .{},
nodes: std.zig.Ast.NodeList = .{},
extra_data: std.ArrayListUnmanaged(std.zig.Ast.Node.Index) = .{},
src/Air.zig
@@ -841,7 +841,7 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
};
}
-pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void {
+pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
air.instructions.deinit(gpa);
gpa.free(air.extra);
gpa.free(air.values);
src/AstGen.zig
@@ -16,7 +16,7 @@ const indexToRef = Zir.indexToRef;
const trace = @import("tracy.zig").trace;
const BuiltinFn = @import("BuiltinFn.zig");
-gpa: *Allocator,
+gpa: Allocator,
tree: *const Ast,
instructions: std.MultiArrayList(Zir.Inst) = .{},
extra: ArrayListUnmanaged(u32) = .{},
@@ -33,7 +33,7 @@ source_line: u32 = 0,
source_column: u32 = 0,
/// Used for temporary allocations; freed after AstGen is complete.
/// The resulting ZIR code has no references to anything in this arena.
-arena: *Allocator,
+arena: Allocator,
string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
/// The topmost block of the current function.
@@ -92,7 +92,7 @@ fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void {
astgen.extra.appendSliceAssumeCapacity(coerced);
}
-pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
+pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
@@ -196,7 +196,7 @@ pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
};
}
-pub fn deinit(astgen: *AstGen, gpa: *Allocator) void {
+pub fn deinit(astgen: *AstGen, gpa: Allocator) void {
astgen.instructions.deinit(gpa);
astgen.extra.deinit(gpa);
astgen.string_table.deinit(gpa);
@@ -2460,7 +2460,7 @@ fn makeDeferScope(
astgen: *AstGen,
scope: *Scope,
node: Ast.Node.Index,
- block_arena: *Allocator,
+ block_arena: Allocator,
scope_tag: Scope.Tag,
) InnerError!*Scope {
const tree = astgen.tree;
@@ -2486,7 +2486,7 @@ fn varDecl(
gz: *GenZir,
scope: *Scope,
node: Ast.Node.Index,
- block_arena: *Allocator,
+ block_arena: Allocator,
var_decl: Ast.full.VarDecl,
) InnerError!*Scope {
try emitDbgNode(gz, node);
@@ -3030,7 +3030,7 @@ const WipMembers = struct {
/// (4 for src_hash + line + name + value + align + link_section + address_space)
const max_decl_size = 10;
- pub fn init(gpa: *Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
+ pub fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
const payload_top = @intCast(u32, payload.items.len);
const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32;
const field_bits_start = decls_start + decl_count * max_decl_size;
@@ -6178,7 +6178,7 @@ fn tunnelThroughClosure(
ns: ?*Scope.Namespace,
value: Zir.Inst.Ref,
token: Ast.TokenIndex,
- gpa: *Allocator,
+ gpa: Allocator,
) !Zir.Inst.Ref {
// For trivial values, we don't need a tunnel.
// Just return the ref.
@@ -8806,7 +8806,7 @@ const Scope = struct {
/// ref of the capture for decls in this namespace
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
- pub fn deinit(self: *Namespace, gpa: *Allocator) void {
+ pub fn deinit(self: *Namespace, gpa: Allocator) void {
self.decls.deinit(gpa);
self.captures.deinit(gpa);
self.* = undefined;
src/Cache.zig
@@ -1,4 +1,4 @@
-gpa: *Allocator,
+gpa: Allocator,
manifest_dir: fs.Dir,
hash: HashHelper = .{},
@@ -48,7 +48,7 @@ pub const File = struct {
bin_digest: BinDigest,
contents: ?[]const u8,
- pub fn deinit(self: *File, allocator: *Allocator) void {
+ pub fn deinit(self: *File, allocator: Allocator) void {
if (self.path) |owned_slice| {
allocator.free(owned_slice);
self.path = null;
src/Compilation.zig
@@ -36,7 +36,7 @@ const libtsan = @import("libtsan.zig");
const Zir = @import("Zir.zig");
/// General-purpose allocator. Used for both temporary and long-term storage.
-gpa: *Allocator,
+gpa: Allocator,
/// Arena-allocated memory used during initialization. Should be untouched until deinit.
arena_state: std.heap.ArenaAllocator.State,
bin_file: *link.File,
@@ -164,7 +164,7 @@ pub const CRTFile = struct {
lock: Cache.Lock,
full_object_path: []const u8,
- fn deinit(self: *CRTFile, gpa: *Allocator) void {
+ fn deinit(self: *CRTFile, gpa: Allocator) void {
self.lock.release();
gpa.free(self.full_object_path);
self.* = undefined;
@@ -253,14 +253,14 @@ pub const CObject = struct {
line: u32,
column: u32,
- pub fn destroy(em: *ErrorMsg, gpa: *Allocator) void {
+ pub fn destroy(em: *ErrorMsg, gpa: Allocator) void {
gpa.free(em.msg);
gpa.destroy(em);
}
};
/// Returns if there was failure.
- pub fn clearStatus(self: *CObject, gpa: *Allocator) bool {
+ pub fn clearStatus(self: *CObject, gpa: Allocator) bool {
switch (self.status) {
.new => return false,
.failure, .failure_retryable => {
@@ -276,7 +276,7 @@ pub const CObject = struct {
}
}
- pub fn destroy(self: *CObject, gpa: *Allocator) void {
+ pub fn destroy(self: *CObject, gpa: Allocator) void {
_ = self.clearStatus(gpa);
gpa.destroy(self);
}
@@ -305,7 +305,7 @@ pub const MiscError = struct {
msg: []u8,
children: ?AllErrors = null,
- pub fn deinit(misc_err: *MiscError, gpa: *Allocator) void {
+ pub fn deinit(misc_err: *MiscError, gpa: Allocator) void {
gpa.free(misc_err.msg);
if (misc_err.children) |*children| {
children.deinit(gpa);
@@ -402,7 +402,7 @@ pub const AllErrors = struct {
}
};
- pub fn deinit(self: *AllErrors, gpa: *Allocator) void {
+ pub fn deinit(self: *AllErrors, gpa: Allocator) void {
self.arena.promote(gpa).deinit();
}
@@ -456,7 +456,7 @@ pub const AllErrors = struct {
}
pub fn addZir(
- arena: *Allocator,
+ arena: Allocator,
errors: *std.ArrayList(Message),
file: *Module.File,
) !void {
@@ -559,7 +559,7 @@ pub const AllErrors = struct {
}
}
- fn dupeList(list: []const Message, arena: *Allocator) Allocator.Error![]Message {
+ fn dupeList(list: []const Message, arena: Allocator) Allocator.Error![]Message {
const duped_list = try arena.alloc(Message, list.len);
for (list) |item, i| {
duped_list[i] = switch (item) {
@@ -589,7 +589,7 @@ pub const Directory = struct {
path: ?[]const u8,
handle: std.fs.Dir,
- pub fn join(self: Directory, allocator: *Allocator, paths: []const []const u8) ![]u8 {
+ pub fn join(self: Directory, allocator: Allocator, paths: []const []const u8) ![]u8 {
if (self.path) |p| {
// TODO clean way to do this with only 1 allocation
const part2 = try std.fs.path.join(allocator, paths);
@@ -600,7 +600,7 @@ pub const Directory = struct {
}
}
- pub fn joinZ(self: Directory, allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
+ pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) ![:0]u8 {
if (self.path) |p| {
// TODO clean way to do this with only 1 allocation
const part2 = try std.fs.path.join(allocator, paths);
@@ -829,7 +829,7 @@ fn addPackageTableToCacheHash(
}
}
-pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
+pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const is_dyn_lib = switch (options.output_mode) {
.Obj, .Exe => false,
.Lib => (options.link_mode orelse .Static) == .Dynamic,
@@ -3263,7 +3263,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
};
}
-pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
+pub fn tmpFilePath(comp: *Compilation, arena: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
const s = std.fs.path.sep_str;
const rand_int = std.crypto.random.int(u64);
if (comp.local_cache_directory.path) |p| {
@@ -3275,7 +3275,7 @@ pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) er
pub fn addTranslateCCArgs(
comp: *Compilation,
- arena: *Allocator,
+ arena: Allocator,
argv: *std.ArrayList([]const u8),
ext: FileExt,
out_dep_path: ?[]const u8,
@@ -3289,7 +3289,7 @@ pub fn addTranslateCCArgs(
/// Add common C compiler args between translate-c and C object compilation.
pub fn addCCArgs(
comp: *const Compilation,
- arena: *Allocator,
+ arena: Allocator,
argv: *std.ArrayList([]const u8),
ext: FileExt,
out_dep_path: ?[]const u8,
@@ -3776,7 +3776,7 @@ const LibCDirs = struct {
libc_installation: ?*const LibCInstallation,
};
-fn getZigShippedLibCIncludeDirsDarwin(arena: *Allocator, zig_lib_dir: []const u8, target: Target) !LibCDirs {
+fn getZigShippedLibCIncludeDirsDarwin(arena: Allocator, zig_lib_dir: []const u8, target: Target) !LibCDirs {
const arch_name = @tagName(target.cpu.arch);
const os_name = try std.fmt.allocPrint(arena, "{s}.{d}", .{
@tagName(target.os.tag),
@@ -3808,7 +3808,7 @@ fn getZigShippedLibCIncludeDirsDarwin(arena: *Allocator, zig_lib_dir: []const u8
}
fn detectLibCIncludeDirs(
- arena: *Allocator,
+ arena: Allocator,
zig_lib_dir: []const u8,
target: Target,
is_native_abi: bool,
@@ -3933,7 +3933,7 @@ fn detectLibCIncludeDirs(
};
}
-fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
+fn detectLibCFromLibCInstallation(arena: Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
var list = try std.ArrayList([]const u8).initCapacity(arena, 4);
list.appendAssumeCapacity(lci.include_dir.?);
@@ -3965,7 +3965,7 @@ fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const
};
}
-pub fn get_libc_crt_file(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
+pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
if (comp.wantBuildGLibCFromSource() or
comp.wantBuildMuslFromSource() or
comp.wantBuildMinGWFromSource() or
@@ -4066,7 +4066,7 @@ pub fn dump_argv(argv: []const []const u8) void {
std.debug.print("{s}\n", .{argv[argv.len - 1]});
}
-pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Allocator.Error![]u8 {
+pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Allocator.Error![]u8 {
const t = trace(@src());
defer t.end();
@@ -4717,14 +4717,14 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
comp.stage1_lock = man.toOwnedLock();
}
-fn stage1LocPath(arena: *Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 {
+fn stage1LocPath(arena: Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 {
const loc = opt_loc orelse return "";
const directory = loc.directory orelse cache_directory;
return directory.join(arena, &[_][]const u8{loc.basename});
}
fn createStage1Pkg(
- arena: *Allocator,
+ arena: Allocator,
name: []const u8,
pkg: *Package,
parent_pkg: ?*stage1.Pkg,
src/glibc.zig
@@ -34,7 +34,7 @@ pub const ABI = struct {
version_table: std.AutoHashMapUnmanaged(target_util.ArchOsAbi, [*]VerList),
arena_state: std.heap.ArenaAllocator.State,
- pub fn destroy(abi: *ABI, gpa: *Allocator) void {
+ pub fn destroy(abi: *ABI, gpa: Allocator) void {
abi.version_table.deinit(gpa);
abi.arena_state.promote(gpa).deinit(); // Frees the ABI memory too.
}
@@ -59,7 +59,7 @@ pub const LoadMetaDataError = error{
/// This function will emit a log error when there is a problem with the zig installation and then return
/// `error.ZigInstallationCorrupt`.
-pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*ABI {
+pub fn loadMetaData(gpa: Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*ABI {
const tracy = trace(@src());
defer tracy.end();
@@ -433,7 +433,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
}
-fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
+fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
const arch = comp.getTarget().cpu.arch;
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
@@ -493,7 +493,7 @@ fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) !
return result.items;
}
-fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
+fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
const target = comp.getTarget();
const arch = target.cpu.arch;
const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
@@ -566,7 +566,7 @@ fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList(
}
fn add_include_dirs_arch(
- arena: *Allocator,
+ arena: Allocator,
args: *std.ArrayList([]const u8),
arch: std.Target.Cpu.Arch,
opt_nptl: ?[]const u8,
@@ -677,14 +677,14 @@ fn add_include_dirs_arch(
}
}
-fn path_from_lib(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
+fn path_from_lib(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
}
const lib_libc = "libc" ++ path.sep_str;
const lib_libc_glibc = lib_libc ++ "glibc" ++ path.sep_str;
-fn lib_path(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
+fn lib_path(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
}
@@ -692,7 +692,7 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: []u8,
- pub fn deinit(self: *BuiltSharedObjects, gpa: *Allocator) void {
+ pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
self.lock.release();
gpa.free(self.dir_path);
self.* = undefined;
@@ -915,7 +915,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
fn buildSharedLib(
comp: *Compilation,
- arena: *Allocator,
+ arena: Allocator,
zig_cache_directory: Compilation.Directory,
bin_directory: Compilation.Directory,
asm_file_basename: []const u8,
src/introspect.zig
@@ -33,7 +33,7 @@ fn testZigInstallPrefix(base_dir: fs.Dir) ?Compilation.Directory {
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
-pub fn findZigLibDir(gpa: *mem.Allocator) !Compilation.Directory {
+pub fn findZigLibDir(gpa: mem.Allocator) !Compilation.Directory {
const self_exe_path = try fs.selfExePathAlloc(gpa);
defer gpa.free(self_exe_path);
@@ -42,7 +42,7 @@ pub fn findZigLibDir(gpa: *mem.Allocator) !Compilation.Directory {
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
pub fn findZigLibDirFromSelfExe(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
self_exe_path: []const u8,
) error{ OutOfMemory, FileNotFound }!Compilation.Directory {
const cwd = fs.cwd();
@@ -61,7 +61,7 @@ pub fn findZigLibDirFromSelfExe(
}
/// Caller owns returned memory.
-pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
+pub fn resolveGlobalCacheDir(allocator: mem.Allocator) ![]u8 {
if (std.process.getEnvVarOwned(allocator, "ZIG_GLOBAL_CACHE_DIR")) |value| {
if (value.len > 0) {
return value;
src/libc_installation.zig
@@ -39,7 +39,7 @@ pub const LibCInstallation = struct {
};
pub fn parse(
- allocator: *Allocator,
+ allocator: Allocator,
libc_file: []const u8,
target: std.zig.CrossTarget,
) !LibCInstallation {
@@ -175,7 +175,7 @@ pub const LibCInstallation = struct {
}
pub const FindNativeOptions = struct {
- allocator: *Allocator,
+ allocator: Allocator,
/// If enabled, will print human-friendly errors to stderr.
verbose: bool = false,
@@ -234,7 +234,7 @@ pub const LibCInstallation = struct {
}
/// Must be the same allocator passed to `parse` or `findNative`.
- pub fn deinit(self: *LibCInstallation, allocator: *Allocator) void {
+ pub fn deinit(self: *LibCInstallation, allocator: Allocator) void {
const fields = std.meta.fields(LibCInstallation);
inline for (fields) |field| {
if (@field(self, field.name)) |payload| {
@@ -562,7 +562,7 @@ pub const LibCInstallation = struct {
};
pub const CCPrintFileNameOptions = struct {
- allocator: *Allocator,
+ allocator: Allocator,
search_basename: []const u8,
want_dirname: enum { full_path, only_dir },
verbose: bool = false,
src/link.zig
@@ -165,7 +165,7 @@ pub const File = struct {
tag: Tag,
options: Options,
file: ?fs.File,
- allocator: *Allocator,
+ allocator: Allocator,
/// When linking with LLD, this linker code will output an object file only at
/// this location, and then this path can be placed on the LLD linker line.
intermediary_basename: ?[]const u8 = null,
@@ -221,7 +221,7 @@ pub const File = struct {
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
/// and does not cause Illegal Behavior. This operation is not atomic.
- pub fn openPath(allocator: *Allocator, options: Options) !*File {
+ pub fn openPath(allocator: Allocator, options: Options) !*File {
if (options.object_format == .macho) {
return &(try MachO.openPath(allocator, options)).base;
}
src/Liveness.zig
@@ -51,7 +51,7 @@ pub const SwitchBr = struct {
else_death_count: u32,
};
-pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness {
+pub fn analyze(gpa: Allocator, air: Air, zir: Zir) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
@@ -136,7 +136,7 @@ pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices {
};
}
-pub fn deinit(l: *Liveness, gpa: *Allocator) void {
+pub fn deinit(l: *Liveness, gpa: Allocator) void {
gpa.free(l.tomb_bits);
gpa.free(l.extra);
l.special.deinit(gpa);
@@ -150,7 +150,7 @@ pub const OperandInt = std.math.Log2Int(Bpi);
/// In-progress data; on successful analysis converted into `Liveness`.
const Analysis = struct {
- gpa: *Allocator,
+ gpa: Allocator,
air: Air,
table: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
tomb_bits: []usize,
src/main.zig
@@ -165,7 +165,7 @@ pub fn main() anyerror!void {
return mainArgs(gpa, arena, args);
}
-pub fn mainArgs(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void {
+pub fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (args.len <= 1) {
std.log.info("{s}", .{usage});
fatal("expected command argument", .{});
@@ -535,7 +535,7 @@ const Emit = union(enum) {
}
};
-fn optionalStringEnvVar(arena: *Allocator, name: []const u8) !?[]const u8 {
+fn optionalStringEnvVar(arena: Allocator, name: []const u8) !?[]const u8 {
if (std.process.getEnvVarOwned(arena, name)) |value| {
return value;
} else |err| switch (err) {
@@ -554,8 +554,8 @@ const ArgMode = union(enum) {
};
fn buildOutputType(
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
all_args: []const []const u8,
arg_mode: ArgMode,
) !void {
@@ -2645,7 +2645,7 @@ fn buildOutputType(
}
fn parseCrossTargetOrReportFatalError(
- allocator: *Allocator,
+ allocator: Allocator,
opts: std.zig.CrossTarget.ParseOptions,
) !std.zig.CrossTarget {
var opts_with_diags = opts;
@@ -2686,8 +2686,8 @@ fn parseCrossTargetOrReportFatalError(
fn runOrTest(
comp: *Compilation,
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
emit_bin_loc: ?Compilation.EmitLoc,
test_exec_args: []const ?[]const u8,
self_exe_path: []const u8,
@@ -2818,7 +2818,7 @@ const AfterUpdateHook = union(enum) {
update: []const u8,
};
-fn updateModule(gpa: *Allocator, comp: *Compilation, hook: AfterUpdateHook) !void {
+fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void {
try comp.update();
var errors = try comp.getAllErrorsAlloc();
@@ -2872,7 +2872,7 @@ fn updateModule(gpa: *Allocator, comp: *Compilation, hook: AfterUpdateHook) !voi
}
}
-fn freePkgTree(gpa: *Allocator, pkg: *Package, free_parent: bool) void {
+fn freePkgTree(gpa: Allocator, pkg: *Package, free_parent: bool) void {
{
var it = pkg.table.valueIterator();
while (it.next()) |value| {
@@ -2884,7 +2884,7 @@ fn freePkgTree(gpa: *Allocator, pkg: *Package, free_parent: bool) void {
}
}
-fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !void {
+fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void {
if (!build_options.have_llvm)
fatal("cannot translate-c: compiler built without LLVM extensions", .{});
@@ -3031,7 +3031,7 @@ pub const usage_libc =
\\
;
-pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void {
+pub fn cmdLibC(gpa: Allocator, args: []const []const u8) !void {
var input_file: ?[]const u8 = null;
var target_arch_os_abi: []const u8 = "native";
{
@@ -3100,8 +3100,8 @@ pub const usage_init =
;
pub fn cmdInit(
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
args: []const []const u8,
output_mode: std.builtin.OutputMode,
) !void {
@@ -3196,7 +3196,7 @@ pub const usage_build =
\\
;
-pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void {
+pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var prominent_compile_errors: bool = false;
// We want to release all the locks before executing the child process, so we make a nice
@@ -3436,7 +3436,7 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v
}
}
-fn argvCmd(allocator: *Allocator, argv: []const []const u8) ![]u8 {
+fn argvCmd(allocator: Allocator, argv: []const []const u8) ![]u8 {
var cmd = std.ArrayList(u8).init(allocator);
defer cmd.deinit();
for (argv[0 .. argv.len - 1]) |arg| {
@@ -3448,7 +3448,7 @@ fn argvCmd(allocator: *Allocator, argv: []const []const u8) ![]u8 {
}
fn readSourceFileToEndAlloc(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
input: *const fs.File,
size_hint: ?usize,
) ![:0]u8 {
@@ -3518,14 +3518,14 @@ const Fmt = struct {
any_error: bool,
check_ast: bool,
color: Color,
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
out_buffer: std.ArrayList(u8),
const SeenMap = std.AutoHashMap(fs.File.INode, void);
};
-pub fn cmdFmt(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void {
+pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var color: Color = .auto;
var stdin_flag: bool = false;
var check_flag: bool = false;
@@ -3855,8 +3855,8 @@ fn fmtPathFile(
}
fn printErrMsgToStdErr(
- gpa: *mem.Allocator,
- arena: *mem.Allocator,
+ gpa: mem.Allocator,
+ arena: mem.Allocator,
parse_error: Ast.Error,
tree: Ast,
path: []const u8,
@@ -3938,7 +3938,7 @@ extern "c" fn ZigClang_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
extern "c" fn ZigLlvmAr_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
/// TODO https://github.com/ziglang/zig/issues/3257
-fn punt_to_clang(arena: *Allocator, args: []const []const u8) error{OutOfMemory} {
+fn punt_to_clang(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
if (!build_options.have_llvm)
fatal("`zig cc` and `zig c++` unavailable: compiler built without LLVM extensions", .{});
// Convert the args to the format Clang expects.
@@ -3952,7 +3952,7 @@ fn punt_to_clang(arena: *Allocator, args: []const []const u8) error{OutOfMemory}
}
/// TODO https://github.com/ziglang/zig/issues/3257
-fn punt_to_llvm_ar(arena: *Allocator, args: []const []const u8) error{OutOfMemory} {
+fn punt_to_llvm_ar(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
if (!build_options.have_llvm)
fatal("`zig ar`, `zig dlltool`, `zig ranlib', and `zig lib` unavailable: compiler built without LLVM extensions", .{});
@@ -3973,7 +3973,7 @@ fn punt_to_llvm_ar(arena: *Allocator, args: []const []const u8) error{OutOfMemor
/// * `lld-link` - COFF
/// * `wasm-ld` - WebAssembly
/// TODO https://github.com/ziglang/zig/issues/3257
-pub fn punt_to_lld(arena: *Allocator, args: []const []const u8) error{OutOfMemory} {
+pub fn punt_to_lld(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
if (!build_options.have_llvm)
fatal("`zig {s}` unavailable: compiler built without LLVM extensions", .{args[0]});
// Convert the args to the format LLD expects.
@@ -4009,7 +4009,7 @@ pub const ClangArgIterator = struct {
argv: []const []const u8,
next_index: usize,
root_args: ?*Args,
- allocator: *Allocator,
+ allocator: Allocator,
pub const ZigEquivalent = enum {
target,
@@ -4069,7 +4069,7 @@ pub const ClangArgIterator = struct {
argv: []const []const u8,
};
- fn init(allocator: *Allocator, argv: []const []const u8) ClangArgIterator {
+ fn init(allocator: Allocator, argv: []const []const u8) ClangArgIterator {
return .{
.next_index = 2, // `zig cc foo` this points to `foo`
.has_next = argv.len > 2,
@@ -4308,7 +4308,7 @@ test "fds" {
gimmeMoreOfThoseSweetSweetFileDescriptors();
}
-fn detectNativeTargetInfo(gpa: *Allocator, cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo {
+fn detectNativeTargetInfo(gpa: Allocator, cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo {
return std.zig.system.NativeTargetInfo.detect(gpa, cross_target);
}
@@ -4343,8 +4343,8 @@ const usage_ast_check =
;
pub fn cmdAstCheck(
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
args: []const []const u8,
) !void {
const Module = @import("Module.zig");
@@ -4513,8 +4513,8 @@ pub fn cmdAstCheck(
/// This is only enabled for debug builds.
pub fn cmdChangelist(
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
args: []const []const u8,
) !void {
const Module = @import("Module.zig");
src/mingw.zig
@@ -252,7 +252,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
fn add_cc_args(
comp: *Compilation,
- arena: *Allocator,
+ arena: Allocator,
args: *std.ArrayList([]const u8),
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
@@ -428,7 +428,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
}
/// This function body is verbose but all it does is test 3 different paths and see if a .def file exists.
-fn findDef(comp: *Compilation, allocator: *Allocator, lib_name: []const u8) ![]u8 {
+fn findDef(comp: *Compilation, allocator: Allocator, lib_name: []const u8) ![]u8 {
const target = comp.getTarget();
const lib_path = switch (target.cpu.arch) {
src/Module.zig
@@ -30,7 +30,7 @@ const target_util = @import("target.zig");
const build_options = @import("build_options");
/// General-purpose allocator. Used for both temporary and long-term storage.
-gpa: *Allocator,
+gpa: Allocator,
comp: *Compilation,
/// Where our incremental compilation metadata serialization will go.
@@ -299,10 +299,10 @@ pub const CaptureScope = struct {
pub const WipCaptureScope = struct {
scope: *CaptureScope,
finalized: bool,
- gpa: *Allocator,
- perm_arena: *Allocator,
+ gpa: Allocator,
+ perm_arena: Allocator,
- pub fn init(gpa: *Allocator, perm_arena: *Allocator, parent: ?*CaptureScope) !@This() {
+ pub fn init(gpa: Allocator, perm_arena: Allocator, parent: ?*CaptureScope) !@This() {
const scope = try perm_arena.create(CaptureScope);
scope.* = .{ .parent = parent };
return @This(){
@@ -469,7 +469,7 @@ pub const Decl = struct {
pub const DepsTable = std.AutoArrayHashMapUnmanaged(*Decl, void);
- pub fn clearName(decl: *Decl, gpa: *Allocator) void {
+ pub fn clearName(decl: *Decl, gpa: Allocator) void {
gpa.free(mem.sliceTo(decl.name, 0));
decl.name = undefined;
}
@@ -499,7 +499,7 @@ pub const Decl = struct {
}
}
- pub fn clearValues(decl: *Decl, gpa: *Allocator) void {
+ pub fn clearValues(decl: *Decl, gpa: Allocator) void {
if (decl.getFunction()) |func| {
func.deinit(gpa);
gpa.destroy(func);
@@ -636,7 +636,7 @@ pub const Decl = struct {
return decl.src_namespace.renderFullyQualifiedDebugName(unqualified_name, writer);
}
- pub fn getFullyQualifiedName(decl: Decl, gpa: *Allocator) ![:0]u8 {
+ pub fn getFullyQualifiedName(decl: Decl, gpa: Allocator) ![:0]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try decl.renderFullyQualifiedName(buffer.writer());
@@ -855,7 +855,7 @@ pub const Struct = struct {
is_comptime: bool,
};
- pub fn getFullyQualifiedName(s: *Struct, gpa: *Allocator) ![:0]u8 {
+ pub fn getFullyQualifiedName(s: *Struct, gpa: Allocator) ![:0]u8 {
return s.owner_decl.getFullyQualifiedName(gpa);
}
@@ -999,7 +999,7 @@ pub const Union = struct {
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
- pub fn getFullyQualifiedName(s: *Union, gpa: *Allocator) ![:0]u8 {
+ pub fn getFullyQualifiedName(s: *Union, gpa: Allocator) ![:0]u8 {
return s.owner_decl.getFullyQualifiedName(gpa);
}
@@ -1178,7 +1178,7 @@ pub const Opaque = struct {
};
}
- pub fn getFullyQualifiedName(s: *Opaque, gpa: *Allocator) ![:0]u8 {
+ pub fn getFullyQualifiedName(s: *Opaque, gpa: Allocator) ![:0]u8 {
return s.owner_decl.getFullyQualifiedName(gpa);
}
};
@@ -1225,7 +1225,7 @@ pub const Fn = struct {
success,
};
- pub fn deinit(func: *Fn, gpa: *Allocator) void {
+ pub fn deinit(func: *Fn, gpa: Allocator) void {
if (func.getInferredErrorSet()) |map| {
map.deinit(gpa);
}
@@ -1422,27 +1422,27 @@ pub const File = struct {
/// successful, this field is unloaded.
prev_zir: ?*Zir = null,
- pub fn unload(file: *File, gpa: *Allocator) void {
+ pub fn unload(file: *File, gpa: Allocator) void {
file.unloadTree(gpa);
file.unloadSource(gpa);
file.unloadZir(gpa);
}
- pub fn unloadTree(file: *File, gpa: *Allocator) void {
+ pub fn unloadTree(file: *File, gpa: Allocator) void {
if (file.tree_loaded) {
file.tree_loaded = false;
file.tree.deinit(gpa);
}
}
- pub fn unloadSource(file: *File, gpa: *Allocator) void {
+ pub fn unloadSource(file: *File, gpa: Allocator) void {
if (file.source_loaded) {
file.source_loaded = false;
gpa.free(file.source);
}
}
- pub fn unloadZir(file: *File, gpa: *Allocator) void {
+ pub fn unloadZir(file: *File, gpa: Allocator) void {
if (file.zir_loaded) {
file.zir_loaded = false;
file.zir.deinit(gpa);
@@ -1466,7 +1466,7 @@ pub const File = struct {
file.* = undefined;
}
- pub fn getSource(file: *File, gpa: *Allocator) ![:0]const u8 {
+ pub fn getSource(file: *File, gpa: Allocator) ![:0]const u8 {
if (file.source_loaded) return file.source;
const root_dir_path = file.pkg.root_src_directory.path orelse ".";
@@ -1499,7 +1499,7 @@ pub const File = struct {
return source;
}
- pub fn getTree(file: *File, gpa: *Allocator) !*const Ast {
+ pub fn getTree(file: *File, gpa: Allocator) !*const Ast {
if (file.tree_loaded) return &file.tree;
const source = try file.getSource(gpa);
@@ -1531,7 +1531,7 @@ pub const File = struct {
};
}
- pub fn fullyQualifiedNameZ(file: File, gpa: *Allocator) ![:0]u8 {
+ pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 {
var buf = std.ArrayList(u8).init(gpa);
defer buf.deinit();
try file.renderFullyQualifiedName(buf.writer());
@@ -1539,7 +1539,7 @@ pub const File = struct {
}
/// Returns the full path to this file relative to its package.
- pub fn fullPath(file: File, ally: *Allocator) ![]u8 {
+ pub fn fullPath(file: File, ally: Allocator) ![]u8 {
return file.pkg.root_src_directory.join(ally, &[_][]const u8{file.sub_file_path});
}
@@ -1594,7 +1594,7 @@ pub const ErrorMsg = struct {
notes: []ErrorMsg = &.{},
pub fn create(
- gpa: *Allocator,
+ gpa: Allocator,
src_loc: SrcLoc,
comptime format: []const u8,
args: anytype,
@@ -1607,13 +1607,13 @@ pub const ErrorMsg = struct {
/// Assumes the ErrorMsg struct and msg were both allocated with `gpa`,
/// as well as all notes.
- pub fn destroy(err_msg: *ErrorMsg, gpa: *Allocator) void {
+ pub fn destroy(err_msg: *ErrorMsg, gpa: Allocator) void {
err_msg.deinit(gpa);
gpa.destroy(err_msg);
}
pub fn init(
- gpa: *Allocator,
+ gpa: Allocator,
src_loc: SrcLoc,
comptime format: []const u8,
args: anytype,
@@ -1624,7 +1624,7 @@ pub const ErrorMsg = struct {
};
}
- pub fn deinit(err_msg: *ErrorMsg, gpa: *Allocator) void {
+ pub fn deinit(err_msg: *ErrorMsg, gpa: Allocator) void {
for (err_msg.notes) |*note| {
note.deinit(gpa);
}
@@ -1651,7 +1651,7 @@ pub const SrcLoc = struct {
return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node));
}
- pub fn byteOffset(src_loc: SrcLoc, gpa: *Allocator) !u32 {
+ pub fn byteOffset(src_loc: SrcLoc, gpa: Allocator) !u32 {
switch (src_loc.lazy) {
.unneeded => unreachable,
.entire_file => return 0,
@@ -2066,7 +2066,7 @@ pub const SrcLoc = struct {
pub fn byteOffsetBuiltinCallArg(
src_loc: SrcLoc,
- gpa: *Allocator,
+ gpa: Allocator,
node_off: i32,
arg_index: u32,
) !u32 {
@@ -2464,7 +2464,7 @@ pub fn deinit(mod: *Module) void {
}
}
-fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
+fn freeExportList(gpa: Allocator, export_list: []*Export) void {
for (export_list) |exp| {
gpa.free(exp.options.name);
if (exp.options.section) |s| gpa.free(s);
@@ -2871,7 +2871,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
/// * Decl.zir_index
/// * Fn.zir_body_inst
/// * Decl.zir_decl_index
-fn updateZirRefs(gpa: *Allocator, file: *File, old_zir: Zir) !void {
+fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
const new_zir = file.zir;
// Maps from old ZIR to new ZIR, struct_decl, enum_decl, etc. Any instruction which
@@ -2965,7 +2965,7 @@ fn updateZirRefs(gpa: *Allocator, file: *File, old_zir: Zir) !void {
}
pub fn mapOldZirToNew(
- gpa: *Allocator,
+ gpa: Allocator,
old_zir: Zir,
new_zir: Zir,
inst_map: *std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
@@ -4119,7 +4119,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
mod.gpa.free(kv.value);
}
-pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: *Allocator) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
@@ -4427,7 +4427,7 @@ pub fn getNextAnonNameIndex(mod: *Module) usize {
return @atomicRmw(usize, &mod.next_anon_name_index, .Add, 1, .Monotonic);
}
-pub fn makeIntType(arena: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
+pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
const int_payload = try arena.create(Type.Payload.Bits);
int_payload.* = .{
.base = .{
@@ -4459,7 +4459,7 @@ pub fn errNoteNonLazy(
}
pub fn errorUnionType(
- arena: *Allocator,
+ arena: Allocator,
error_set: Type,
payload: Type,
) Allocator.Error!Type {
@@ -4511,7 +4511,7 @@ pub const SwitchProngSrc = union(enum) {
/// the LazySrcLoc in order to emit a compile error.
pub fn resolve(
prong_src: SwitchProngSrc,
- gpa: *Allocator,
+ gpa: Allocator,
decl: *Decl,
switch_node_offset: i32,
range_expand: RangeExpand,
@@ -4605,7 +4605,7 @@ pub const PeerTypeCandidateSrc = union(enum) {
pub fn resolve(
self: PeerTypeCandidateSrc,
- gpa: *Allocator,
+ gpa: Allocator,
decl: *Decl,
candidate_i: usize,
) ?LazySrcLoc {
src/musl.zig
@@ -310,7 +310,7 @@ const Ext = enum {
o3,
};
-fn addSrcFile(arena: *Allocator, source_table: *std.StringArrayHashMap(Ext), file_path: []const u8) !void {
+fn addSrcFile(arena: Allocator, source_table: *std.StringArrayHashMap(Ext), file_path: []const u8) !void {
const ext: Ext = ext: {
if (mem.endsWith(u8, file_path, ".c")) {
if (mem.startsWith(u8, file_path, "musl/src/malloc/") or
@@ -344,7 +344,7 @@ fn addSrcFile(arena: *Allocator, source_table: *std.StringArrayHashMap(Ext), fil
fn addCcArgs(
comp: *Compilation,
- arena: *Allocator,
+ arena: Allocator,
args: *std.ArrayList([]const u8),
want_O3: bool,
) error{OutOfMemory}!void {
@@ -394,7 +394,7 @@ fn addCcArgs(
});
}
-fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
+fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
const target = comp.getTarget();
return comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "musl", "crt", archName(target.cpu.arch), basename,
src/Package.zig
@@ -21,7 +21,7 @@ root_src_directory_owned: bool = false,
/// Allocate a Package. No references to the slices passed are kept.
pub fn create(
- gpa: *Allocator,
+ gpa: Allocator,
/// Null indicates the current working directory
root_src_dir_path: ?[]const u8,
/// Relative to root_src_dir_path
@@ -49,7 +49,7 @@ pub fn create(
}
pub fn createWithDir(
- gpa: *Allocator,
+ gpa: Allocator,
directory: Compilation.Directory,
/// Relative to `directory`. If null, means `directory` is the root src dir
/// and is owned externally.
@@ -87,7 +87,7 @@ pub fn createWithDir(
/// Free all memory associated with this package. It does not destroy any packages
/// inside its table; the caller is responsible for calling destroy() on them.
-pub fn destroy(pkg: *Package, gpa: *Allocator) void {
+pub fn destroy(pkg: *Package, gpa: Allocator) void {
gpa.free(pkg.root_src_path);
if (pkg.root_src_directory_owned) {
@@ -104,7 +104,7 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
}
/// Only frees memory associated with the table.
-pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
+pub fn deinitTable(pkg: *Package, gpa: Allocator) void {
var it = pkg.table.keyIterator();
while (it.next()) |key| {
gpa.free(key.*);
@@ -113,13 +113,13 @@ pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
pkg.table.deinit(gpa);
}
-pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
+pub fn add(pkg: *Package, gpa: Allocator, name: []const u8, package: *Package) !void {
try pkg.table.ensureUnusedCapacity(gpa, 1);
const name_dupe = try gpa.dupe(u8, name);
pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
}
-pub fn addAndAdopt(parent: *Package, gpa: *Allocator, name: []const u8, child: *Package) !void {
+pub fn addAndAdopt(parent: *Package, gpa: Allocator, name: []const u8, child: *Package) !void {
assert(child.parent == null); // make up your mind, who is the parent??
child.parent = parent;
return parent.add(gpa, name, child);
src/print_air.zig
@@ -8,7 +8,7 @@ const Zir = @import("Zir.zig");
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
-pub fn dump(gpa: *Allocator, air: Air, zir: Zir, liveness: Liveness) void {
+pub fn dump(gpa: Allocator, air: Air, zir: Zir, liveness: Liveness) void {
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
@@ -60,8 +60,8 @@ pub fn dump(gpa: *Allocator, air: Air, zir: Zir, liveness: Liveness) void {
}
const Writer = struct {
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
air: Air,
zir: Zir,
liveness: Liveness,
src/print_env.zig
@@ -4,7 +4,7 @@ const introspect = @import("introspect.zig");
const Allocator = std.mem.Allocator;
const fatal = @import("main.zig").fatal;
-pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
+pub fn cmdEnv(gpa: Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
_ = args;
const self_exe_path = try std.fs.selfExePathAlloc(gpa);
defer gpa.free(self_exe_path);
src/print_targets.zig
@@ -11,7 +11,7 @@ const introspect = @import("introspect.zig");
const fatal = @import("main.zig").fatal;
pub fn cmdTargets(
- allocator: *Allocator,
+ allocator: Allocator,
args: []const []const u8,
/// Output stream
stdout: anytype,
src/print_zir.zig
@@ -10,7 +10,7 @@ const LazySrcLoc = Module.LazySrcLoc;
/// Write human-readable, debug formatted ZIR code to a file.
pub fn renderAsTextToFile(
- gpa: *Allocator,
+ gpa: Allocator,
scope_file: *Module.File,
fs_file: std.fs.File,
) !void {
@@ -61,7 +61,7 @@ pub fn renderAsTextToFile(
}
pub fn renderInstructionContext(
- gpa: *Allocator,
+ gpa: Allocator,
block: []const Zir.Inst.Index,
block_index: usize,
scope_file: *Module.File,
@@ -94,7 +94,7 @@ pub fn renderInstructionContext(
}
pub fn renderSingleInstruction(
- gpa: *Allocator,
+ gpa: Allocator,
inst: Zir.Inst.Index,
scope_file: *Module.File,
parent_decl_node: Ast.Node.Index,
@@ -120,8 +120,8 @@ pub fn renderSingleInstruction(
}
const Writer = struct {
- gpa: *Allocator,
- arena: *Allocator,
+ gpa: Allocator,
+ arena: Allocator,
file: *Module.File,
code: Zir,
indent: u32,
src/RangeSet.zig
@@ -13,7 +13,7 @@ pub const Range = struct {
src: SwitchProngSrc,
};
-pub fn init(allocator: *std.mem.Allocator) RangeSet {
+pub fn init(allocator: std.mem.Allocator) RangeSet {
return .{
.ranges = std.ArrayList(Range).init(allocator),
};
src/register_manager.zig
@@ -254,7 +254,7 @@ const MockRegister2 = enum(u2) {
fn MockFunction(comptime Register: type) type {
return struct {
- allocator: *Allocator,
+ allocator: Allocator,
register_manager: RegisterManager(Self, Register, &Register.callee_preserved_regs) = .{},
spilled: std.ArrayListUnmanaged(Register) = .{},
src/Sema.zig
@@ -7,13 +7,13 @@
mod: *Module,
/// Alias to `mod.gpa`.
-gpa: *Allocator,
+gpa: Allocator,
/// Points to the temporary arena allocator of the Sema.
/// This arena will be cleared when the sema is destroyed.
-arena: *Allocator,
+arena: Allocator,
/// Points to the arena allocator for the owner_decl.
/// This arena will persist until the decl is invalidated.
-perm_arena: *Allocator,
+perm_arena: Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
@@ -417,7 +417,7 @@ pub const Block = struct {
new_decl_arena: std.heap.ArenaAllocator,
finished: bool,
- pub fn arena(wad: *WipAnonDecl) *Allocator {
+ pub fn arena(wad: *WipAnonDecl) Allocator {
return &wad.new_decl_arena.allocator;
}
@@ -12793,7 +12793,7 @@ const ComptimePtrMutationKit = struct {
ty: Type,
decl_arena: std.heap.ArenaAllocator = undefined,
- fn beginArena(self: *ComptimePtrMutationKit, gpa: *Allocator) *Allocator {
+ fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator {
self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
return &self.decl_arena.allocator;
}
src/test.zig
@@ -680,7 +680,7 @@ pub const TestContext = struct {
}
fn runOneCase(
- allocator: *Allocator,
+ allocator: Allocator,
root_node: *std.Progress.Node,
case: Case,
zig_lib_directory: Compilation.Directory,
src/ThreadPool.zig
@@ -9,7 +9,7 @@ const ThreadPool = @This();
mutex: std.Thread.Mutex = .{},
is_running: bool = true,
-allocator: *std.mem.Allocator,
+allocator: std.mem.Allocator,
workers: []Worker,
run_queue: RunQueue = .{},
idle_queue: IdleQueue = .{},
@@ -55,7 +55,7 @@ const Worker = struct {
}
};
-pub fn init(self: *ThreadPool, allocator: *std.mem.Allocator) !void {
+pub fn init(self: *ThreadPool, allocator: std.mem.Allocator) !void {
self.* = .{
.allocator = allocator,
.workers = &[_]Worker{},
src/tracy.zig
@@ -103,18 +103,18 @@ pub inline fn traceNamed(comptime src: std.builtin.SourceLocation, comptime name
}
}
-pub fn tracyAllocator(allocator: *std.mem.Allocator) TracyAllocator(null) {
+pub fn tracyAllocator(allocator: std.mem.Allocator) TracyAllocator(null) {
return TracyAllocator(null).init(allocator);
}
pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
return struct {
allocator: std.mem.Allocator,
- parent_allocator: *std.mem.Allocator,
+ parent_allocator: std.mem.Allocator,
const Self = @This();
- pub fn init(allocator: *std.mem.Allocator) Self {
+ pub fn init(allocator: std.mem.Allocator) Self {
return .{
.parent_allocator = allocator,
.allocator = .{
@@ -124,7 +124,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
};
}
- fn allocFn(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
+ fn allocFn(allocator: std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ret_addr);
if (result) |data| {
@@ -141,7 +141,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
return result;
}
- fn resizeFn(allocator: *std.mem.Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
+ fn resizeFn(allocator: std.mem.Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
const self = @fieldParentPtr(Self, "allocator", allocator);
if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ret_addr)) |resized_len| {
src/translate_c.zig
@@ -305,8 +305,8 @@ const Scope = struct {
};
pub const Context = struct {
- gpa: *mem.Allocator,
- arena: *mem.Allocator,
+ gpa: mem.Allocator,
+ arena: mem.Allocator,
source_manager: *clang.SourceManager,
decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .{},
alias_list: AliasList,
@@ -351,7 +351,7 @@ pub const Context = struct {
};
pub fn translate(
- gpa: *mem.Allocator,
+ gpa: mem.Allocator,
args_begin: [*]?[*]const u8,
args_end: [*]?[*]const u8,
errors: *[]ClangErrMsg,
@@ -1448,7 +1448,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
}
/// @typeInfo(@TypeOf(vec_node)).Vector.<field>
-fn vectorTypeInfo(arena: *mem.Allocator, vec_node: Node, field: []const u8) TransError!Node {
+fn vectorTypeInfo(arena: mem.Allocator, vec_node: Node, field: []const u8) TransError!Node {
const typeof_call = try Tag.typeof.create(arena, vec_node);
const typeinfo_call = try Tag.typeinfo.create(arena, typeof_call);
const vector_type_info = try Tag.field_access.create(arena, .{ .lhs = typeinfo_call, .field_name = "Vector" });
@@ -1536,7 +1536,7 @@ fn transOffsetOfExpr(
/// will become very large positive numbers but that is ok since we only use this in
/// pointer arithmetic expressions, where wraparound will ensure we get the correct value.
/// node -> @bitCast(usize, @intCast(isize, node))
-fn usizeCastForWrappingPtrArithmetic(gpa: *mem.Allocator, node: Node) TransError!Node {
+fn usizeCastForWrappingPtrArithmetic(gpa: mem.Allocator, node: Node) TransError!Node {
const intcast_node = try Tag.int_cast.create(gpa, .{
.lhs = try Tag.type.create(gpa, "isize"),
.rhs = node,
@@ -5072,7 +5072,7 @@ const PatternList = struct {
};
/// Assumes that `ms` represents a tokenized function-like macro.
- fn buildArgsHash(allocator: *mem.Allocator, ms: MacroSlicer, hash: *ArgsPositionMap) MacroProcessingError!void {
+ fn buildArgsHash(allocator: mem.Allocator, ms: MacroSlicer, hash: *ArgsPositionMap) MacroProcessingError!void {
assert(ms.tokens.len > 2);
assert(ms.tokens[0].id == .Identifier);
assert(ms.tokens[1].id == .LParen);
@@ -5098,7 +5098,7 @@ const PatternList = struct {
impl: []const u8,
args_hash: ArgsPositionMap,
- fn init(self: *Pattern, allocator: *mem.Allocator, template: [2][]const u8) Error!void {
+ fn init(self: *Pattern, allocator: mem.Allocator, template: [2][]const u8) Error!void {
const source = template[0];
const impl = template[1];
@@ -5120,7 +5120,7 @@ const PatternList = struct {
};
}
- fn deinit(self: *Pattern, allocator: *mem.Allocator) void {
+ fn deinit(self: *Pattern, allocator: mem.Allocator) void {
self.args_hash.deinit(allocator);
allocator.free(self.tokens);
}
@@ -5171,7 +5171,7 @@ const PatternList = struct {
}
};
- fn init(allocator: *mem.Allocator) Error!PatternList {
+ fn init(allocator: mem.Allocator) Error!PatternList {
const patterns = try allocator.alloc(Pattern, templates.len);
for (templates) |template, i| {
try patterns[i].init(allocator, template);
@@ -5179,12 +5179,12 @@ const PatternList = struct {
return PatternList{ .patterns = patterns };
}
- fn deinit(self: *PatternList, allocator: *mem.Allocator) void {
+ fn deinit(self: *PatternList, allocator: mem.Allocator) void {
for (self.patterns) |*pattern| pattern.deinit(allocator);
allocator.free(self.patterns);
}
- fn match(self: PatternList, allocator: *mem.Allocator, ms: MacroSlicer) Error!?Pattern {
+ fn match(self: PatternList, allocator: mem.Allocator, ms: MacroSlicer) Error!?Pattern {
var args_hash: ArgsPositionMap = .{};
defer args_hash.deinit(allocator);
@@ -5211,7 +5211,7 @@ const MacroSlicer = struct {
test "Macro matching" {
const helper = struct {
const MacroFunctions = @import("std").zig.c_translation.Macros;
- fn checkMacro(allocator: *mem.Allocator, pattern_list: PatternList, source: []const u8, comptime expected_match: ?[]const u8) !void {
+ fn checkMacro(allocator: mem.Allocator, pattern_list: PatternList, source: []const u8, comptime expected_match: ?[]const u8) !void {
var tok_list = std.ArrayList(CToken).init(allocator);
defer tok_list.deinit();
try tokenizeMacro(source, &tok_list);
src/type.zig
@@ -728,7 +728,7 @@ pub const Type = extern union {
}
};
- pub fn copy(self: Type, allocator: *Allocator) error{OutOfMemory}!Type {
+ pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type {
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
return Type{ .tag_if_small_enough = self.tag_if_small_enough };
} else switch (self.ptr_otherwise.tag) {
@@ -905,7 +905,7 @@ pub const Type = extern union {
}
}
- fn copyPayloadShallow(self: Type, allocator: *Allocator, comptime T: type) error{OutOfMemory}!Type {
+ fn copyPayloadShallow(self: Type, allocator: Allocator, comptime T: type) error{OutOfMemory}!Type {
const payload = self.cast(T).?;
const new_payload = try allocator.create(T);
new_payload.* = payload.*;
@@ -1198,7 +1198,7 @@ pub const Type = extern union {
}
/// Returns a name suitable for `@typeName`.
- pub fn nameAlloc(ty: Type, arena: *Allocator) Allocator.Error![:0]const u8 {
+ pub fn nameAlloc(ty: Type, arena: Allocator) Allocator.Error![:0]const u8 {
const t = ty.tag();
switch (t) {
.inferred_alloc_const => unreachable,
@@ -1421,7 +1421,7 @@ pub const Type = extern union {
};
}
- pub fn toValue(self: Type, allocator: *Allocator) Allocator.Error!Value {
+ pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
switch (self.tag()) {
.u1 => return Value.initTag(.u1_type),
.u8 => return Value.initTag(.u8_type),
@@ -2676,7 +2676,7 @@ pub const Type = extern union {
/// For [*]T, returns *T
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
- pub fn elemPtrType(ptr_ty: Type, arena: *Allocator) !Type {
+ pub fn elemPtrType(ptr_ty: Type, arena: Allocator) !Type {
return try Type.ptr(arena, .{
.pointee_type = ptr_ty.elemType2(),
.mutable = ptr_ty.ptrIsMutable(),
@@ -2731,7 +2731,7 @@ pub const Type = extern union {
/// Asserts that the type is an optional.
/// Same as `optionalChild` but allocates the buffer if needed.
- pub fn optionalChildAlloc(ty: Type, allocator: *Allocator) !Type {
+ pub fn optionalChildAlloc(ty: Type, allocator: Allocator) !Type {
switch (ty.tag()) {
.optional => return ty.castTag(.optional).?.data,
.optional_single_mut_pointer => {
@@ -3379,7 +3379,7 @@ pub const Type = extern union {
}
/// Asserts that self.zigTypeTag() == .Int.
- pub fn minInt(self: Type, arena: *Allocator, target: Target) !Value {
+ pub fn minInt(self: Type, arena: Allocator, target: Target) !Value {
assert(self.zigTypeTag() == .Int);
const info = self.intInfo(target);
@@ -3404,7 +3404,7 @@ pub const Type = extern union {
}
/// Asserts that self.zigTypeTag() == .Int.
- pub fn maxInt(self: Type, arena: *Allocator, target: Target) !Value {
+ pub fn maxInt(self: Type, arena: Allocator, target: Target) !Value {
assert(self.zigTypeTag() == .Int);
const info = self.intInfo(target);
@@ -4008,7 +4008,7 @@ pub const Type = extern union {
return .{ .tag_if_small_enough = t };
}
- pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
+ pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
const p = try ally.create(t.Type());
p.* = .{
.base = .{ .tag = t },
@@ -4104,7 +4104,7 @@ pub const Type = extern union {
functions: std.AutoHashMapUnmanaged(*Module.Fn, void),
is_anyerror: bool,
- pub fn addErrorSet(self: *Data, gpa: *Allocator, err_set_ty: Type) !void {
+ pub fn addErrorSet(self: *Data, gpa: Allocator, err_set_ty: Type) !void {
switch (err_set_ty.tag()) {
.error_set => {
const names = err_set_ty.castTag(.error_set).?.data.names();
@@ -4225,7 +4225,7 @@ pub const Type = extern union {
pub const @"type" = initTag(.type);
pub const @"anyerror" = initTag(.anyerror);
- pub fn ptr(arena: *Allocator, d: Payload.Pointer.Data) !Type {
+ pub fn ptr(arena: Allocator, d: Payload.Pointer.Data) !Type {
assert(d.host_size == 0 or d.bit_offset < d.host_size * 8);
if (d.sentinel != null or d.@"align" != 0 or d.@"addrspace" != .generic or
@@ -4260,7 +4260,7 @@ pub const Type = extern union {
}
pub fn array(
- arena: *Allocator,
+ arena: Allocator,
len: u64,
sent: ?Value,
elem_type: Type,
@@ -4289,14 +4289,14 @@ pub const Type = extern union {
});
}
- pub fn vector(arena: *Allocator, len: u64, elem_type: Type) Allocator.Error!Type {
+ pub fn vector(arena: Allocator, len: u64, elem_type: Type) Allocator.Error!Type {
return Tag.vector.create(arena, .{
.len = len,
.elem_type = elem_type,
});
}
- pub fn optional(arena: *Allocator, child_type: Type) Allocator.Error!Type {
+ pub fn optional(arena: Allocator, child_type: Type) Allocator.Error!Type {
switch (child_type.tag()) {
.single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
arena,
@@ -4317,7 +4317,7 @@ pub const Type = extern union {
return @intCast(u16, base + @boolToInt(upper < max));
}
- pub fn smallestUnsignedInt(arena: *Allocator, max: u64) !Type {
+ pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type {
const bits = smallestUnsignedBits(max);
return switch (bits) {
1 => initTag(.u1),
src/TypedValue.zig
@@ -16,14 +16,14 @@ pub const Managed = struct {
/// If this is `null` then there is no memory management needed.
arena: ?*std.heap.ArenaAllocator.State = null,
- pub fn deinit(self: *Managed, allocator: *Allocator) void {
+ pub fn deinit(self: *Managed, allocator: Allocator) void {
if (self.arena) |a| a.promote(allocator).deinit();
self.* = undefined;
}
};
/// Assumes arena allocation. Does a recursive copy.
-pub fn copy(self: TypedValue, arena: *Allocator) error{OutOfMemory}!TypedValue {
+pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
return TypedValue{
.ty = try self.ty.copy(arena),
.val = try self.val.copy(arena),
src/value.zig
@@ -297,7 +297,7 @@ pub const Value = extern union {
};
}
- pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!Value {
+ pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!Value {
const ptr = try ally.create(t.Type());
ptr.* = .{
.base = .{ .tag = t },
@@ -363,7 +363,7 @@ pub const Value = extern union {
/// It's intentional that this function is not passed a corresponding Type, so that
/// a Value can be copied from a Sema to a Decl prior to resolving struct/union field types.
- pub fn copy(self: Value, arena: *Allocator) error{OutOfMemory}!Value {
+ pub fn copy(self: Value, arena: Allocator) error{OutOfMemory}!Value {
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
return Value{ .tag_if_small_enough = self.tag_if_small_enough };
} else switch (self.ptr_otherwise.tag) {
@@ -578,7 +578,7 @@ pub const Value = extern union {
}
}
- fn copyPayloadShallow(self: Value, arena: *Allocator, comptime T: type) error{OutOfMemory}!Value {
+ fn copyPayloadShallow(self: Value, arena: Allocator, comptime T: type) error{OutOfMemory}!Value {
const payload = self.cast(T).?;
const new_payload = try arena.create(T);
new_payload.* = payload.*;
@@ -747,7 +747,7 @@ pub const Value = extern union {
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
- pub fn toAllocatedBytes(val: Value, ty: Type, allocator: *Allocator) ![]u8 {
+ pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator) ![]u8 {
switch (val.tag()) {
.bytes => {
const bytes = val.castTag(.bytes).?.data;
@@ -1035,7 +1035,7 @@ pub const Value = extern union {
}
}
- pub fn readFromMemory(ty: Type, target: Target, buffer: []const u8, arena: *Allocator) !Value {
+ pub fn readFromMemory(ty: Type, target: Target, buffer: []const u8, arena: Allocator) !Value {
switch (ty.zigTypeTag()) {
.Int => {
const int_info = ty.intInfo(target);
@@ -1185,7 +1185,7 @@ pub const Value = extern union {
}
}
- pub fn popCount(val: Value, ty: Type, target: Target, arena: *Allocator) !Value {
+ pub fn popCount(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
assert(!val.isUndef());
const info = ty.intInfo(target);
@@ -1273,7 +1273,7 @@ pub const Value = extern union {
/// Converts an integer or a float to a float. May result in a loss of information.
/// Caller can find out by equality checking the result against the operand.
- pub fn floatCast(self: Value, arena: *Allocator, dest_ty: Type) !Value {
+ pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type) !Value {
switch (dest_ty.tag()) {
.f16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
.f32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
@@ -1678,7 +1678,7 @@ pub const Value = extern union {
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
- pub fn elemValue(val: Value, arena: *Allocator, index: usize) !Value {
+ pub fn elemValue(val: Value, arena: Allocator, index: usize) !Value {
return elemValueAdvanced(val, index, arena, undefined);
}
@@ -1691,7 +1691,7 @@ pub const Value = extern union {
pub fn elemValueAdvanced(
val: Value,
index: usize,
- arena: ?*Allocator,
+ arena: ?Allocator,
buffer: *ElemValueBuffer,
) error{OutOfMemory}!Value {
switch (val.tag()) {
@@ -1732,7 +1732,7 @@ pub const Value = extern union {
}
}
- pub fn fieldValue(val: Value, allocator: *Allocator, index: usize) error{OutOfMemory}!Value {
+ pub fn fieldValue(val: Value, allocator: Allocator, index: usize) error{OutOfMemory}!Value {
_ = allocator;
switch (val.tag()) {
.@"struct" => {
@@ -1760,7 +1760,7 @@ pub const Value = extern union {
}
/// Returns a pointer to the element value at the index.
- pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value {
+ pub fn elemPtr(self: Value, allocator: Allocator, index: usize) !Value {
switch (self.tag()) {
.elem_ptr => {
const elem_ptr = self.castTag(.elem_ptr).?.data;
@@ -1874,7 +1874,7 @@ pub const Value = extern union {
};
}
- pub fn intToFloat(val: Value, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn intToFloat(val: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
switch (val.tag()) {
.undef, .zero, .one => return val,
.the_only_possible_value => return Value.initTag(.zero), // for i0, u0
@@ -1898,7 +1898,7 @@ pub const Value = extern union {
}
}
- fn intToFloatInner(x: anytype, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+ fn intToFloatInner(x: anytype, arena: Allocator, dest_ty: Type, target: Target) !Value {
switch (dest_ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
@@ -1908,7 +1908,7 @@ pub const Value = extern union {
}
}
- fn floatToValue(float: f128, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+ fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
switch (dest_ty.floatBits(target)) {
16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
@@ -1918,7 +1918,7 @@ pub const Value = extern union {
}
}
- pub fn floatToInt(val: Value, arena: *Allocator, dest_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
+ pub fn floatToInt(val: Value, arena: Allocator, dest_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
const Limb = std.math.big.Limb;
var value = val.toFloat(f64); // TODO: f128 ?
@@ -1969,7 +1969,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
ty: Type,
- arena: *Allocator,
+ arena: Allocator,
target: Target,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -1993,7 +1993,7 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- fn fromBigInt(arena: *Allocator, big_int: BigIntConst) !Value {
+ fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
if (big_int.positive) {
if (big_int.to(u64)) |x| {
return Value.Tag.int_u64.create(arena, x);
@@ -2014,7 +2014,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
ty: Type,
- arena: *Allocator,
+ arena: Allocator,
target: Target,
) !Value {
assert(!lhs.isUndef());
@@ -2040,7 +2040,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
ty: Type,
- arena: *Allocator,
+ arena: Allocator,
target: Target,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -2069,7 +2069,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
ty: Type,
- arena: *Allocator,
+ arena: Allocator,
target: Target,
) !Value {
assert(!lhs.isUndef());
@@ -2095,7 +2095,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
ty: Type,
- arena: *Allocator,
+ arena: Allocator,
target: Target,
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -2129,7 +2129,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
ty: Type,
- arena: *Allocator,
+ arena: Allocator,
target: Target,
) !Value {
assert(!lhs.isUndef());
@@ -2185,7 +2185,7 @@ pub const Value = extern union {
}
/// operands must be integers; handles undefined.
- pub fn bitwiseNot(val: Value, ty: Type, arena: *Allocator, target: Target) !Value {
+ pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
if (val.isUndef()) return Value.initTag(.undef);
const info = ty.intInfo(target);
@@ -2205,7 +2205,7 @@ pub const Value = extern union {
}
/// operands must be integers; handles undefined.
- pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
@@ -2225,7 +2225,7 @@ pub const Value = extern union {
}
/// operands must be integers; handles undefined.
- pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: *Allocator, target: Target) !Value {
+ pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
const anded = try bitwiseAnd(lhs, rhs, arena);
@@ -2239,7 +2239,7 @@ pub const Value = extern union {
}
/// operands must be integers; handles undefined.
- pub fn bitwiseOr(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ pub fn bitwiseOr(lhs: Value, rhs: Value, arena: Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
@@ -2258,7 +2258,7 @@ pub const Value = extern union {
}
/// operands must be integers; handles undefined.
- pub fn bitwiseXor(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+ pub fn bitwiseXor(lhs: Value, rhs: Value, arena: Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
@@ -2277,7 +2277,7 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn intAdd(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn intAdd(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2293,7 +2293,7 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intSub(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn intSub(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2309,7 +2309,7 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intDiv(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn intDiv(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2334,7 +2334,7 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intDivFloor(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn intDivFloor(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2359,7 +2359,7 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}
- pub fn intRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn intRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2386,7 +2386,7 @@ pub const Value = extern union {
return fromBigInt(allocator, result_r.toConst());
}
- pub fn intMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn intMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2422,21 +2422,21 @@ pub const Value = extern union {
};
}
- pub fn floatRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn floatRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
_ = lhs;
_ = rhs;
_ = allocator;
@panic("TODO implement Value.floatRem");
}
- pub fn floatMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn floatMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
_ = lhs;
_ = rhs;
_ = allocator;
@panic("TODO implement Value.floatMod");
}
- pub fn intMul(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn intMul(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2457,7 +2457,7 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn intTrunc(val: Value, allocator: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
+ pub fn intTrunc(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
var val_space: Value.BigIntSpace = undefined;
const val_bigint = val.toBigInt(&val_space);
@@ -2471,7 +2471,7 @@ pub const Value = extern union {
return fromBigInt(allocator, result_bigint.toConst());
}
- pub fn shl(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn shl(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2494,7 +2494,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
ty: Type,
- arena: *Allocator,
+ arena: Allocator,
target: Target,
) !Value {
// TODO is this a performance issue? maybe we should try the operation without
@@ -2517,7 +2517,7 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn shr(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ pub fn shr(lhs: Value, rhs: Value, allocator: Allocator) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
@@ -2540,7 +2540,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
float_type: Type,
- arena: *Allocator,
+ arena: Allocator,
) !Value {
switch (float_type.tag()) {
.f16 => {
@@ -2571,7 +2571,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
float_type: Type,
- arena: *Allocator,
+ arena: Allocator,
) !Value {
switch (float_type.tag()) {
.f16 => {
@@ -2602,7 +2602,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
float_type: Type,
- arena: *Allocator,
+ arena: Allocator,
) !Value {
switch (float_type.tag()) {
.f16 => {
@@ -2633,7 +2633,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
float_type: Type,
- arena: *Allocator,
+ arena: Allocator,
) !Value {
switch (float_type.tag()) {
.f16 => {
@@ -2664,7 +2664,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
float_type: Type,
- arena: *Allocator,
+ arena: Allocator,
) !Value {
switch (float_type.tag()) {
.f16 => {
@@ -2695,7 +2695,7 @@ pub const Value = extern union {
lhs: Value,
rhs: Value,
float_type: Type,
- arena: *Allocator,
+ arena: Allocator,
) !Value {
switch (float_type.tag()) {
.f16 => {
src/wasi_libc.zig
@@ -243,7 +243,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
}
-fn sanitize(arena: *Allocator, file_path: []const u8) ![]const u8 {
+fn sanitize(arena: Allocator, file_path: []const u8) ![]const u8 {
// TODO do this at comptime on the comptime data rather than at runtime
// probably best to wait until self-hosted is done and our comptime execution
// is faster and uses less memory.
@@ -261,7 +261,7 @@ fn sanitize(arena: *Allocator, file_path: []const u8) ![]const u8 {
fn addCCArgs(
comp: *Compilation,
- arena: *Allocator,
+ arena: Allocator,
args: *std.ArrayList([]const u8),
want_O3: bool,
) error{OutOfMemory}!void {
@@ -292,7 +292,7 @@ fn addCCArgs(
fn addLibcBottomHalfIncludes(
comp: *Compilation,
- arena: *Allocator,
+ arena: Allocator,
args: *std.ArrayList([]const u8),
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
@@ -328,7 +328,7 @@ fn addLibcBottomHalfIncludes(
fn addLibcTopHalfIncludes(
comp: *Compilation,
- arena: *Allocator,
+ arena: Allocator,
args: *std.ArrayList([]const u8),
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
src/Zir.zig
@@ -101,7 +101,7 @@ pub fn hasCompileErrors(code: Zir) bool {
return code.extra[@enumToInt(ExtraIndex.compile_errors)] != 0;
}
-pub fn deinit(code: *Zir, gpa: *Allocator) void {
+pub fn deinit(code: *Zir, gpa: Allocator) void {
code.instructions.deinit(gpa);
gpa.free(code.string_bytes);
gpa.free(code.extra);
test/behavior/async_fn.zig
@@ -713,7 +713,7 @@ fn testAsyncAwaitTypicalUsage(
}
var global_download_frame: anyframe = undefined;
- fn fetchUrl(allocator: *std.mem.Allocator, url: []const u8) anyerror![]u8 {
+ fn fetchUrl(allocator: std.mem.Allocator, url: []const u8) anyerror![]u8 {
_ = url;
const result = try allocator.dupe(u8, "expected download text");
errdefer allocator.free(result);
@@ -727,7 +727,7 @@ fn testAsyncAwaitTypicalUsage(
}
var global_file_frame: anyframe = undefined;
- fn readFile(allocator: *std.mem.Allocator, filename: []const u8) anyerror![]u8 {
+ fn readFile(allocator: std.mem.Allocator, filename: []const u8) anyerror![]u8 {
_ = filename;
const result = try allocator.dupe(u8, "expected file text");
errdefer allocator.free(result);
@@ -912,7 +912,7 @@ test "recursive async function" {
fn recursiveAsyncFunctionTest(comptime suspending_implementation: bool) type {
return struct {
- fn fib(allocator: *std.mem.Allocator, x: u32) error{OutOfMemory}!u32 {
+ fn fib(allocator: std.mem.Allocator, x: u32) error{OutOfMemory}!u32 {
if (x <= 1) return x;
if (suspending_implementation) {
test/cli.zig
@@ -5,7 +5,7 @@ const process = std.process;
const fs = std.fs;
const ChildProcess = std.ChildProcess;
-var a: *std.mem.Allocator = undefined;
+var a: std.mem.Allocator = undefined;
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
tools/merge_anal_dumps.zig
@@ -160,7 +160,7 @@ const Dump = struct {
const ErrorMap = std.HashMap(Error, usize, Error.hash, Error.eql, 80);
const TypeMap = std.HashMap(Type, usize, Type.hash, Type.eql, 80);
- fn init(allocator: *mem.Allocator) Dump {
+ fn init(allocator: mem.Allocator) Dump {
return Dump{
.targets = std.ArrayList([]const u8).init(allocator),
.file_list = std.ArrayList([]const u8).init(allocator),
@@ -434,7 +434,7 @@ const Dump = struct {
try jw.endObject();
}
- fn a(self: Dump) *mem.Allocator {
+ fn a(self: Dump) mem.Allocator {
return self.targets.allocator;
}
tools/update_cpu_features.zig
@@ -1244,7 +1244,7 @@ fn asciiLessThan(context: void, a: []const u8, b: []const u8) bool {
return std.ascii.lessThanIgnoreCase(a, b);
}
-fn llvmNameToZigName(arena: *mem.Allocator, llvm_name: []const u8) ![]const u8 {
+fn llvmNameToZigName(arena: mem.Allocator, llvm_name: []const u8) ![]const u8 {
const duped = try arena.dupe(u8, llvm_name);
for (duped) |*byte| switch (byte.*) {
'-', '.' => byte.* = '_',
@@ -1254,7 +1254,7 @@ fn llvmNameToZigName(arena: *mem.Allocator, llvm_name: []const u8) ![]const u8 {
}
fn llvmNameToZigNameOmit(
- arena: *mem.Allocator,
+ arena: mem.Allocator,
llvm_target: LlvmTarget,
llvm_name: []const u8,
) !?[]const u8 {
@@ -1279,7 +1279,7 @@ fn hasSuperclass(obj: *json.ObjectMap, class_name: []const u8) bool {
}
fn pruneFeatures(
- arena: *mem.Allocator,
+ arena: mem.Allocator,
features_table: std.StringHashMap(Feature),
deps_set: *std.StringHashMap(void),
) !void {
tools/update_spirv_features.zig
@@ -216,7 +216,7 @@ pub fn main() !void {
/// The *.grammar.json in SPIRV-Headers should have most of these as well, but with this we're sure to get only the actually
/// registered ones.
/// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
-fn gather_extensions(allocator: *Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
+fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
defer extensions_dir.close();
@@ -286,7 +286,7 @@ fn insertVersion(versions: *std.ArrayList(Version), version: ?[]const u8) !void
try versions.append(ver);
}
-fn gatherVersions(allocator: *Allocator, registry: g.CoreRegistry) ![]const Version {
+fn gatherVersions(allocator: Allocator, registry: g.CoreRegistry) ![]const Version {
// Expected number of versions is small
var versions = std.ArrayList(Version).init(allocator);