Commit c5283eb49b

Jacob Young <jacobly0@users.noreply.github.com>
2024-07-09 03:48:57
InternPool: implement thread-safe allocated lists
1 parent 1307044
src/Zcu/PerThread.zig
@@ -5,6 +5,58 @@ tid: Id,
 
 pub const Id = if (InternPool.single_threaded) enum { main } else enum(u8) { main, _ };
 
+pub fn destroyDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+
+    {
+        _ = zcu.test_functions.swapRemove(decl_index);
+        if (zcu.global_assembly.fetchSwapRemove(decl_index)) |kv| {
+            gpa.free(kv.value);
+        }
+    }
+
+    zcu.intern_pool.destroyDecl(pt.tid, decl_index);
+
+    if (zcu.emit_h) |zcu_emit_h| {
+        const decl_emit_h = zcu_emit_h.declPtr(decl_index);
+        decl_emit_h.fwd_decl.deinit(gpa);
+        decl_emit_h.* = undefined;
+    }
+}
+
+fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const file = zcu.fileByIndex(file_index);
+    const is_builtin = file.mod.isBuiltin();
+    log.debug("deinit File {s}", .{file.sub_file_path});
+    if (is_builtin) {
+        file.unloadTree(gpa);
+        file.unloadZir(gpa);
+    } else {
+        gpa.free(file.sub_file_path);
+        file.unload(gpa);
+    }
+    file.references.deinit(gpa);
+    if (zcu.fileRootDecl(file_index).unwrap()) |root_decl| {
+        zcu.intern_pool.destroyDecl(pt.tid, root_decl);
+    }
+    if (file.prev_zir) |prev_zir| {
+        prev_zir.deinit(gpa);
+        gpa.destroy(prev_zir);
+    }
+    file.* = undefined;
+}
+
+pub fn destroyFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
+    const gpa = pt.zcu.gpa;
+    const file = pt.zcu.fileByIndex(file_index);
+    const is_builtin = file.mod.isBuiltin();
+    pt.deinitFile(file_index);
+    if (!is_builtin) gpa.destroy(file);
+}
+
 pub fn astGenFile(
     pt: Zcu.PerThread,
     file: *Zcu.File,
@@ -930,14 +982,14 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
     // Because these three things each reference each other, `undefined`
     // placeholders are used before being set after the struct type gains an
     // InternPool index.
-    const new_namespace_index = try zcu.createNamespace(.{
+    const new_namespace_index = try pt.createNamespace(.{
         .parent = .none,
         .decl_index = undefined,
         .file_scope = file_index,
     });
-    errdefer zcu.destroyNamespace(new_namespace_index);
+    errdefer pt.destroyNamespace(new_namespace_index);
 
-    const new_decl_index = try zcu.allocateNewDecl(new_namespace_index);
+    const new_decl_index = try pt.allocateNewDecl(new_namespace_index);
     const new_decl = zcu.declPtr(new_decl_index);
     errdefer @panic("TODO error handling");
 
@@ -1380,6 +1432,13 @@ pub fn embedFile(
     return pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc);
 }
 
+/// Cancel the creation of an anon decl and delete any references to it.
+/// If other decls depend on this decl, they must be aborted first.
+pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void {
+    assert(!pt.zcu.declIsRoot(decl_index));
+    pt.destroyDecl(decl_index);
+}
+
 /// Finalize the creation of an anon decl.
 pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void {
     if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) {
@@ -1674,7 +1733,7 @@ const ScanDeclIter = struct {
             break :decl_index .{ was_exported, decl_index };
         } else decl_index: {
             // Create and set up a new Decl.
-            const new_decl_index = try zcu.allocateNewDecl(namespace_index);
+            const new_decl_index = try pt.allocateNewDecl(namespace_index);
             const new_decl = zcu.declPtr(new_decl_index);
             new_decl.kind = kind;
             new_decl.name = decl_name;
@@ -1981,6 +2040,43 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All
     };
 }
 
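+/// Forwards to `InternPool.createNamespace`, passing this thread's `tid` so
+/// the new namespace is allocated from this thread's local bucket list.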
+pub fn createNamespace(pt: Zcu.PerThread, initialization: Zcu.Namespace) !Zcu.Namespace.Index {
+    return pt.zcu.intern_pool.createNamespace(pt.zcu.gpa, pt.tid, initialization);
+}
+
+pub fn destroyNamespace(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) void {
+    return pt.zcu.intern_pool.destroyNamespace(pt.tid, namespace_index);
+}
+
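+/// Allocates a blank anonymous `Decl` in this thread's bucket list. `name`
+/// and `val` are left undefined for the caller to initialize.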
+pub fn allocateNewDecl(pt: Zcu.PerThread, namespace: Zcu.Namespace.Index) !Zcu.Decl.Index {
+    const zcu = pt.zcu;
+    const gpa = zcu.gpa;
+    const decl_index = try zcu.intern_pool.createDecl(gpa, pt.tid, .{
+        .name = undefined,
+        .src_namespace = namespace,
+        .has_tv = false,
+        .owns_tv = false,
+        .val = undefined,
+        .alignment = undefined,
+        .@"linksection" = .none,
+        .@"addrspace" = .generic,
+        .analysis = .unreferenced,
+        .zir_decl_index = .none,
+        .is_pub = false,
+        .is_exported = false,
+        .kind = .anon,
+    });
+
+    if (zcu.emit_h) |zcu_emit_h| {
+        if (@intFromEnum(decl_index) >= zcu_emit_h.allocated_emit_h.len) {
+            try zcu_emit_h.allocated_emit_h.append(gpa, .{});
+            assert(@intFromEnum(decl_index) == zcu_emit_h.allocated_emit_h.len);
+        }
+    }
+
+    return decl_index;
+}
+
 fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void {
     switch (file.status) {
         .success_zir, .retryable_failure => {},
src/InternPool.zig
@@ -14,25 +14,6 @@ tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th
 /// Cached shift amount to put a `tid` in the top bits of a 32-bit value.
 tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31,
 
-/// Rather than allocating Decl objects with an Allocator, we instead allocate
-/// them with this SegmentedList. This provides four advantages:
-///  * Stable memory so that one thread can access a Decl object while another
-///    thread allocates additional Decl objects from this list.
-///  * It allows us to use u32 indexes to reference Decl objects rather than
-///    pointers, saving memory in Type, Value, and dependency sets.
-///  * Using integers to reference Decl objects rather than pointers makes
-///    serialization trivial.
-///  * It provides a unique integer to be used for anonymous symbol names, avoiding
-///    multi-threaded contention on an atomic counter.
-allocated_decls: std.SegmentedList(Module.Decl, 0) = .{},
-/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
-decls_free_list: std.ArrayListUnmanaged(DeclIndex) = .{},
-
-/// Same pattern as with `allocated_decls`.
-allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{},
-/// Same pattern as with `decls_free_list`.
-namespaces_free_list: std.ArrayListUnmanaged(NamespaceIndex) = .{},
-
 /// Some types such as enums, structs, and unions need to store mappings from field names
 /// to field index, or value to field index. In such cases, they will store the underlying
 /// field names and values directly, relying on one of these maps, stored separately,
@@ -354,10 +335,14 @@ const Local = struct {
     /// atomic access.
     mutate: struct {
         arena: std.heap.ArenaAllocator.State,
-        items: Mutate,
-        extra: Mutate,
-        limbs: Mutate,
-        strings: Mutate,
+
+        items: ListMutate,
+        extra: ListMutate,
+        limbs: ListMutate,
+        strings: ListMutate,
+
+        decls: BucketListMutate,
+        namespaces: BucketListMutate,
     } align(std.atomic.cache_line),
 
     const Shared = struct {
@@ -366,6 +351,9 @@ const Local = struct {
         limbs: Limbs,
         strings: Strings,
 
+        decls: Decls,
+        namespaces: Namespaces,
+
         pub fn getLimbs(shared: *const Local.Shared) Limbs {
             return switch (@sizeOf(Limb)) {
                 @sizeOf(u32) => shared.extra,
@@ -383,14 +371,38 @@ const Local = struct {
     };
     const Strings = List(struct { u8 });
 
-    const Mutate = struct {
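+    // Decls and Namespaces are stored in bucket lists: each bucket is an
+    // arena-allocated array of `1 << *_bucket_width` elements, so element
+    // addresses stay stable while other threads append new buckets. The
+    // `*_next_free_field` constants name the element field that is reused to
+    // thread an intrusive free list through destroyed slots.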
+    const decls_bucket_width = 8;
+    const decls_bucket_mask = (1 << decls_bucket_width) - 1;
+    const decl_next_free_field = "src_namespace";
+    const Decls = List(struct { *[1 << decls_bucket_width]Module.Decl });
+
+    const namespaces_bucket_width = 8;
+    const namespaces_bucket_mask = (1 << namespaces_bucket_width) - 1;
+    const namespace_next_free_field = "decl_index";
+    const Namespaces = List(struct { *[1 << namespaces_bucket_width]Module.Namespace });
+
+    const ListMutate = struct {
         len: u32,
 
-        const empty: Mutate = .{
+        const empty: ListMutate = .{
             .len = 0,
         };
     };
 
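+    /// Mutable state for a bucket list. `last_bucket_len` counts the slots in
+    /// use in the newest bucket and wraps to zero when that bucket fills;
+    /// `free_list` holds the head of the intrusive free list, terminated by
+    /// `free_list_sentinel`.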
+    const BucketListMutate = struct {
+        last_bucket_len: u32,
+        buckets_list: ListMutate,
+        free_list: u32,
+
+        const free_list_sentinel = std.math.maxInt(u32);
+
+        const empty: BucketListMutate = .{
+            .last_bucket_len = 0,
+            .buckets_list = ListMutate.empty,
+            .free_list = free_list_sentinel,
+        };
+    };
+
     fn List(comptime Elem: type) type {
         assert(@typeInfo(Elem) == .Struct);
         return struct {
@@ -400,7 +412,7 @@ const Local = struct {
             const Mutable = struct {
                 gpa: std.mem.Allocator,
                 arena: *std.heap.ArenaAllocator.State,
-                mutate: *Mutate,
+                mutate: *ListMutate,
                 list: *ListSelf,
 
                 const fields = std.enums.values(std.meta.FieldEnum(Elem));
@@ -664,6 +676,35 @@ const Local = struct {
             .list = &local.shared.strings,
         };
     }
+
+    /// Rather than allocating Decl objects with an Allocator, we instead allocate
+    /// them in this thread-local list of fixed-size buckets. This provides four advantages:
+    ///  * Stable memory so that one thread can access a Decl object while another
+    ///    thread allocates additional Decl objects from this list.
+    ///  * It allows us to use u32 indexes to reference Decl objects rather than
+    ///    pointers, saving memory in Type, Value, and dependency sets.
+    ///  * Using integers to reference Decl objects rather than pointers makes
+    ///    serialization trivial.
+    ///  * It provides a unique integer to be used for anonymous symbol names, avoiding
+    ///    multi-threaded contention on an atomic counter.
+    pub fn getMutableDecls(local: *Local, gpa: std.mem.Allocator) Decls.Mutable {
+        return .{
+            .gpa = gpa,
+            .arena = &local.mutate.arena,
+            .mutate = &local.mutate.decls.buckets_list,
+            .list = &local.shared.decls,
+        };
+    }
+
+    /// Same pattern as with `getMutableDecls`.
+    pub fn getMutableNamespaces(local: *Local, gpa: std.mem.Allocator) Namespaces.Mutable {
+        return .{
+            .gpa = gpa,
+            .arena = &local.mutate.arena,
+            .mutate = &local.mutate.namespaces.buckets_list,
+            .list = &local.shared.namespaces,
+        };
+    }
 };
 
 pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local {
@@ -810,6 +851,29 @@ pub const ComptimeAllocIndex = enum(u32) { _ };
 pub const DeclIndex = enum(u32) {
     _,
 
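+    /// A `DeclIndex` packs three fields into 32 bits: the owning thread's
+    /// `tid` in the top bits, then the bucket index, then the slot index in
+    /// the low `decls_bucket_width` bits.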
+    const Unwrapped = struct {
+        tid: Zcu.PerThread.Id,
+        bucket_index: u32,
+        index: u32,
+
+        fn wrap(unwrapped: Unwrapped, ip: *const InternPool) DeclIndex {
+            assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
+            assert(unwrapped.bucket_index <= ip.getIndexMask(u32) >> Local.decls_bucket_width);
+            assert(unwrapped.index <= Local.decls_bucket_mask);
+            return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 |
+                unwrapped.bucket_index << Local.decls_bucket_width |
+                unwrapped.index);
+        }
+    };
+    fn unwrap(decl_index: DeclIndex, ip: *const InternPool) Unwrapped {
+        const index = @intFromEnum(decl_index) & ip.getIndexMask(u32);
+        return .{
+            .tid = @enumFromInt(@intFromEnum(decl_index) >> ip.tid_shift_32 & ip.getTidMask()),
+            .bucket_index = index >> Local.decls_bucket_width,
+            .index = index & Local.decls_bucket_mask,
+        };
+    }
+
     pub fn toOptional(i: DeclIndex) OptionalDeclIndex {
         return @enumFromInt(@intFromEnum(i));
     }
@@ -832,6 +896,29 @@ pub const OptionalDeclIndex = enum(u32) {
 pub const NamespaceIndex = enum(u32) {
     _,
 
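+    /// Same 32-bit packing as `DeclIndex`: `tid` in the top bits, then the
+    /// bucket index, then the slot index within the bucket.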
+    const Unwrapped = struct {
+        tid: Zcu.PerThread.Id,
+        bucket_index: u32,
+        index: u32,
+
+        fn wrap(unwrapped: Unwrapped, ip: *const InternPool) NamespaceIndex {
+            assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
+            assert(unwrapped.bucket_index <= ip.getIndexMask(u32) >> Local.namespaces_bucket_width);
+            assert(unwrapped.index <= Local.namespaces_bucket_mask);
+            return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 |
+                unwrapped.bucket_index << Local.namespaces_bucket_width |
+                unwrapped.index);
+        }
+    };
+    fn unwrap(namespace_index: NamespaceIndex, ip: *const InternPool) Unwrapped {
+        const index = @intFromEnum(namespace_index) & ip.getIndexMask(u32);
+        return .{
+            .tid = @enumFromInt(@intFromEnum(namespace_index) >> ip.tid_shift_32 & ip.getTidMask()),
+            .bucket_index = index >> Local.namespaces_bucket_width,
+            .index = index & Local.namespaces_bucket_mask,
+        };
+    }
+
     pub fn toOptional(i: NamespaceIndex) OptionalNamespaceIndex {
         return @enumFromInt(@intFromEnum(i));
     }
@@ -5114,13 +5201,20 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
             .extra = Local.Extra.empty,
             .limbs = Local.Limbs.empty,
             .strings = Local.Strings.empty,
+
+            .decls = Local.Decls.empty,
+            .namespaces = Local.Namespaces.empty,
         },
         .mutate = .{
             .arena = .{},
-            .items = Local.Mutate.empty,
-            .extra = Local.Mutate.empty,
-            .limbs = Local.Mutate.empty,
-            .strings = Local.Mutate.empty,
+
+            .items = Local.ListMutate.empty,
+            .extra = Local.ListMutate.empty,
+            .limbs = Local.ListMutate.empty,
+            .strings = Local.ListMutate.empty,
+
+            .decls = Local.BucketListMutate.empty,
+            .namespaces = Local.BucketListMutate.empty,
         },
     });
 
@@ -5173,12 +5267,6 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
 }
 
 pub fn deinit(ip: *InternPool, gpa: Allocator) void {
-    ip.decls_free_list.deinit(gpa);
-    ip.allocated_decls.deinit(gpa);
-
-    ip.namespaces_free_list.deinit(gpa);
-    ip.allocated_namespaces.deinit(gpa);
-
     for (ip.maps.items) |*map| map.deinit(gpa);
     ip.maps.deinit(gpa);
 
@@ -5198,7 +5286,23 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
     ip.files.deinit(gpa);
 
     gpa.free(ip.shards);
-    for (ip.locals) |*local| local.mutate.arena.promote(gpa).deinit();
+    for (ip.locals) |*local| {
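+        // Namespaces own gpa-allocated maps that must be deinitialized before
+        // the arena backing the buckets is destroyed below.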
+        const buckets_len = local.mutate.namespaces.buckets_list.len;
+        if (buckets_len > 0) for (
+            local.shared.namespaces.view().items(.@"0")[0..buckets_len],
+            0..,
+        ) |namespace_bucket, buckets_index| {
+            // A `last_bucket_len` of zero means the newest bucket is full.
+            const bucket_len = if (buckets_index < buckets_len - 1 or
+                local.mutate.namespaces.last_bucket_len == 0)
+                namespace_bucket.len
+            else
+                local.mutate.namespaces.last_bucket_len;
+            for (namespace_bucket[0..bucket_len]) |*namespace| {
+                namespace.decls.deinit(gpa);
+                namespace.usingnamespace_set.deinit(gpa);
+            }
+        };
+        local.mutate.arena.promote(gpa).deinit();
+    }
     gpa.free(ip.locals);
 
     ip.* = undefined;
@@ -7849,7 +7953,7 @@ fn finishFuncInstance(
     section: OptionalNullTerminatedString,
 ) Allocator.Error!void {
     const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner));
-    const decl_index = try ip.createDecl(gpa, .{
+    const decl_index = try ip.createDecl(gpa, tid, .{
         .name = undefined,
         .src_namespace = fn_owner_decl.src_namespace,
         .has_tv = true,
@@ -7864,7 +7968,7 @@ fn finishFuncInstance(
         .is_exported = fn_owner_decl.is_exported,
         .kind = .anon,
     });
-    errdefer ip.destroyDecl(gpa, decl_index);
+    errdefer ip.destroyDecl(tid, decl_index);
 
     // Populate the owner_decl field which was left undefined until now.
     extra.view().items(.@"0")[
@@ -9078,15 +9182,17 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
     var items_len: usize = 0;
     var extra_len: usize = 0;
     var limbs_len: usize = 0;
+    var decls_len: usize = 0;
     for (ip.locals) |*local| {
         items_len += local.mutate.items.len;
         extra_len += local.mutate.extra.len;
         limbs_len += local.mutate.limbs.len;
+        // Each bucket is a full arena allocation of `1 << decls_bucket_width`
+        // Decls, so report allocated capacity rather than the bucket count.
+        decls_len += @as(usize, local.mutate.decls.buckets_list.len) << Local.decls_bucket_width;
     }
     const items_size = (1 + 4) * items_len;
     const extra_size = 4 * extra_len;
     const limbs_size = 8 * limbs_len;
-    const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl);
+    const decls_size = @sizeOf(Module.Decl) * decls_len;
 
     // TODO: map overhead size is not taken into account
     const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size;
@@ -9106,7 +9212,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
         extra_size,
         limbs_len,
         limbs_size,
-        ip.allocated_decls.len,
+        decls_len,
         decls_size,
     });
 
@@ -9513,64 +9619,120 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
     try bw.flush();
 }
 
-pub fn declPtr(ip: *InternPool, index: DeclIndex) *Module.Decl {
-    return ip.allocated_decls.at(@intFromEnum(index));
+pub fn declPtr(ip: *InternPool, decl_index: DeclIndex) *Module.Decl {
+    return @constCast(ip.declPtrConst(decl_index));
 }
 
-pub fn declPtrConst(ip: *const InternPool, index: DeclIndex) *const Module.Decl {
-    return ip.allocated_decls.at(@intFromEnum(index));
+pub fn declPtrConst(ip: *const InternPool, decl_index: DeclIndex) *const Module.Decl {
+    const unwrapped_decl_index = decl_index.unwrap(ip);
+    const decls = ip.getLocalShared(unwrapped_decl_index.tid).decls.acquire();
+    const decls_bucket = decls.view().items(.@"0")[unwrapped_decl_index.bucket_index];
+    return &decls_bucket[unwrapped_decl_index.index];
 }
 
-pub fn namespacePtr(ip: *InternPool, index: NamespaceIndex) *Module.Namespace {
-    return ip.allocated_namespaces.at(@intFromEnum(index));
+pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Module.Namespace {
+    const unwrapped_namespace_index = namespace_index.unwrap(ip);
+    const namespaces = ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire();
+    const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index];
+    return &namespaces_bucket[unwrapped_namespace_index.index];
 }
 
 pub fn createDecl(
     ip: *InternPool,
     gpa: Allocator,
+    tid: Zcu.PerThread.Id,
     initialization: Module.Decl,
 ) Allocator.Error!DeclIndex {
-    if (ip.decls_free_list.popOrNull()) |index| {
-        ip.allocated_decls.at(@intFromEnum(index)).* = initialization;
-        return index;
-    }
-    const ptr = try ip.allocated_decls.addOne(gpa);
-    ptr.* = initialization;
-    return @enumFromInt(ip.allocated_decls.len - 1);
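+    // Fast path: reuse a previously destroyed slot from this thread's
+    // intrusive free list.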
+    const local = ip.getLocal(tid);
+    const free_list_next = local.mutate.decls.free_list;
+    if (free_list_next != Local.BucketListMutate.free_list_sentinel) {
+        const reused_decl_index: DeclIndex = @enumFromInt(free_list_next);
+        const reused_decl = ip.declPtr(reused_decl_index);
+        local.mutate.decls.free_list = @intFromEnum(@field(reused_decl, Local.decl_next_free_field));
+        reused_decl.* = initialization;
+        return reused_decl_index;
+    }
+    const decls = local.getMutableDecls(gpa);
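+    // A `last_bucket_len` of zero means no bucket exists yet or the newest
+    // one is full; allocate a fresh bucket from the thread-local arena.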
+    if (local.mutate.decls.last_bucket_len == 0) {
+        try decls.ensureUnusedCapacity(1);
+        var arena = decls.arena.promote(decls.gpa);
+        defer decls.arena.* = arena.state;
+        decls.appendAssumeCapacity(.{try arena.allocator().create(
+            [1 << Local.decls_bucket_width]Module.Decl,
+        )});
+    }
+    const unwrapped_decl_index: DeclIndex.Unwrapped = .{
+        .tid = tid,
+        .bucket_index = decls.mutate.len - 1,
+        .index = local.mutate.decls.last_bucket_len,
+    };
+    local.mutate.decls.last_bucket_len =
+        (unwrapped_decl_index.index + 1) & Local.decls_bucket_mask;
+    const decl_index = unwrapped_decl_index.wrap(ip);
+    ip.declPtr(decl_index).* = initialization;
+    return decl_index;
 }
 
-pub fn destroyDecl(ip: *InternPool, gpa: Allocator, index: DeclIndex) void {
-    ip.declPtr(index).* = undefined;
-    ip.decls_free_list.append(gpa, index) catch {
-        // In order to keep `destroyDecl` a non-fallible function, we ignore memory
-        // allocation failures here, instead leaking the Decl until garbage collection.
-    };
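+/// Unlike the previous ArrayList-based free list, pushing onto the intrusive
+/// free list cannot fail, so destroyed Decls are no longer leaked on OOM.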
+pub fn destroyDecl(ip: *InternPool, tid: Zcu.PerThread.Id, decl_index: DeclIndex) void {
+    const local = ip.getLocal(tid);
+    const decl = ip.declPtr(decl_index);
+    decl.* = undefined;
+    @field(decl, Local.decl_next_free_field) = @enumFromInt(local.mutate.decls.free_list);
+    local.mutate.decls.free_list = @intFromEnum(decl_index);
 }
 
 pub fn createNamespace(
     ip: *InternPool,
     gpa: Allocator,
+    tid: Zcu.PerThread.Id,
     initialization: Module.Namespace,
 ) Allocator.Error!NamespaceIndex {
-    if (ip.namespaces_free_list.popOrNull()) |index| {
-        ip.allocated_namespaces.at(@intFromEnum(index)).* = initialization;
-        return index;
-    }
-    const ptr = try ip.allocated_namespaces.addOne(gpa);
-    ptr.* = initialization;
-    return @enumFromInt(ip.allocated_namespaces.len - 1);
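+    // Same scheme as `createDecl`: reuse a free-listed slot if available,
+    // otherwise append to the newest bucket.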
+    const local = ip.getLocal(tid);
+    const free_list_next = local.mutate.namespaces.free_list;
+    if (free_list_next != Local.BucketListMutate.free_list_sentinel) {
+        const reused_namespace_index: NamespaceIndex = @enumFromInt(free_list_next);
+        const reused_namespace = ip.namespacePtr(reused_namespace_index);
+        local.mutate.namespaces.free_list =
+            @intFromEnum(@field(reused_namespace, Local.namespace_next_free_field));
+        reused_namespace.* = initialization;
+        return reused_namespace_index;
+    }
+    const namespaces = local.getMutableNamespaces(gpa);
+    if (local.mutate.namespaces.last_bucket_len == 0) {
+        try namespaces.ensureUnusedCapacity(1);
+        var arena = namespaces.arena.promote(namespaces.gpa);
+        defer namespaces.arena.* = arena.state;
+        namespaces.appendAssumeCapacity(.{try arena.allocator().create(
+            [1 << Local.namespaces_bucket_width]Module.Namespace,
+        )});
+    }
+    const unwrapped_namespace_index: NamespaceIndex.Unwrapped = .{
+        .tid = tid,
+        .bucket_index = namespaces.mutate.len - 1,
+        .index = local.mutate.namespaces.last_bucket_len,
+    };
+    local.mutate.namespaces.last_bucket_len =
+        (unwrapped_namespace_index.index + 1) & Local.namespaces_bucket_mask;
+    const namespace_index = unwrapped_namespace_index.wrap(ip);
+    ip.namespacePtr(namespace_index).* = initialization;
+    return namespace_index;
 }
 
-pub fn destroyNamespace(ip: *InternPool, gpa: Allocator, index: NamespaceIndex) void {
-    ip.namespacePtr(index).* = .{
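+/// Like `destroyDecl`, non-fallible: the slot is threaded onto the intrusive
+/// free list via `namespace_next_free_field`.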
+pub fn destroyNamespace(
+    ip: *InternPool,
+    tid: Zcu.PerThread.Id,
+    namespace_index: NamespaceIndex,
+) void {
+    const local = ip.getLocal(tid);
+    const namespace = ip.namespacePtr(namespace_index);
+    namespace.* = .{
         .parent = undefined,
         .file_scope = undefined,
         .decl_index = undefined,
     };
-    ip.namespaces_free_list.append(gpa, index) catch {
-        // In order to keep `destroyNamespace` a non-fallible function, we ignore memory
-        // allocation failures here, instead leaking the Namespace until garbage collection.
-    };
+    @field(namespace, Local.namespace_next_free_field) =
+        @enumFromInt(local.mutate.namespaces.free_list);
+    local.mutate.namespaces.free_list = @intFromEnum(namespace_index);
 }
 
 const EmbeddedNulls = enum {
src/Sema.zig
@@ -2830,7 +2830,7 @@ fn zirStructDecl(
         inst,
     );
     mod.declPtr(new_decl_index).owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     if (pt.zcu.comp.debug_incremental) {
         try ip.addDependency(
@@ -2841,12 +2841,12 @@ fn zirStructDecl(
     }
 
     // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace.
-    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{
+    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .decl_index = new_decl_index,
         .file_scope = block.getFileScopeIndex(mod),
     })).toOptional() else .none;
-    errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
+    errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns);
 
     if (new_namespace_index.unwrap()) |ns| {
         const decls = sema.code.bodySlice(extra_index, decls_len);
@@ -2872,8 +2872,8 @@ fn createAnonymousDeclTypeNamed(
     const ip = &zcu.intern_pool;
     const gpa = sema.gpa;
     const namespace = block.namespace;
-    const new_decl_index = try zcu.allocateNewDecl(namespace);
-    errdefer zcu.destroyDecl(new_decl_index);
+    const new_decl_index = try pt.allocateNewDecl(namespace);
+    errdefer pt.destroyDecl(new_decl_index);
 
     switch (name_strategy) {
         .anon => {}, // handled after switch
@@ -3068,7 +3068,7 @@ fn zirEnumDecl(
     );
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
-    errdefer if (!done) mod.abortAnonDecl(new_decl_index);
+    errdefer if (!done) pt.abortAnonDecl(new_decl_index);
 
     if (pt.zcu.comp.debug_incremental) {
         try mod.intern_pool.addDependency(
@@ -3079,12 +3079,12 @@ fn zirEnumDecl(
     }
 
     // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace.
-    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{
+    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .decl_index = new_decl_index,
         .file_scope = block.getFileScopeIndex(mod),
     })).toOptional() else .none;
-    errdefer if (!done) if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
+    errdefer if (!done) if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns);
 
     if (new_namespace_index.unwrap()) |ns| {
         try pt.scanNamespace(ns, decls, new_decl);
@@ -3335,7 +3335,7 @@ fn zirUnionDecl(
         inst,
     );
     mod.declPtr(new_decl_index).owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     if (pt.zcu.comp.debug_incremental) {
         try mod.intern_pool.addDependency(
@@ -3346,12 +3346,12 @@ fn zirUnionDecl(
     }
 
     // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace.
-    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try mod.createNamespace(.{
+    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .decl_index = new_decl_index,
         .file_scope = block.getFileScopeIndex(mod),
     })).toOptional() else .none;
-    errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
+    errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns);
 
     if (new_namespace_index.unwrap()) |ns| {
         const decls = sema.code.bodySlice(extra_index, decls_len);
@@ -3425,7 +3425,7 @@ fn zirOpaqueDecl(
         inst,
     );
     mod.declPtr(new_decl_index).owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     if (pt.zcu.comp.debug_incremental) {
         try ip.addDependency(
@@ -3435,12 +3435,12 @@ fn zirOpaqueDecl(
         );
     }
 
-    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (decls_len > 0) (try mod.createNamespace(.{
+    const new_namespace_index: InternPool.OptionalNamespaceIndex = if (decls_len > 0) (try pt.createNamespace(.{
         .parent = block.namespace.toOptional(),
         .decl_index = new_decl_index,
         .file_scope = block.getFileScopeIndex(mod),
     })).toOptional() else .none;
-    errdefer if (new_namespace_index.unwrap()) |ns| mod.destroyNamespace(ns);
+    errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns);
 
     if (new_namespace_index.unwrap()) |ns| {
         const decls = sema.code.bodySlice(extra_index, decls_len);
@@ -21716,7 +21716,7 @@ fn zirReify(
                 inst,
             );
             mod.declPtr(new_decl_index).owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl_index);
+            errdefer pt.abortAnonDecl(new_decl_index);
 
             try pt.finalizeAnonDecl(new_decl_index);
 
@@ -21916,7 +21916,7 @@ fn reifyEnum(
         inst,
     );
     mod.declPtr(new_decl_index).owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     wip_ty.prepare(ip, new_decl_index, .none);
     wip_ty.setTagTy(ip, tag_ty.toIntern());
@@ -22063,7 +22063,7 @@ fn reifyUnion(
         inst,
     );
     mod.declPtr(new_decl_index).owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     const field_types = try sema.arena.alloc(InternPool.Index, fields_len);
     const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined;
@@ -22322,7 +22322,7 @@ fn reifyStruct(
         inst,
     );
     mod.declPtr(new_decl_index).owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     const struct_type = ip.loadStructType(wip_ty.index);
 
@@ -26497,8 +26497,8 @@ fn zirBuiltinExtern(
     }
     const ptr_info = ty.ptrInfo(mod);
 
-    const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace);
-    errdefer mod.destroyDecl(new_decl_index);
+    const new_decl_index = try pt.allocateNewDecl(sema.owner_decl.src_namespace);
+    errdefer pt.destroyDecl(new_decl_index);
     const new_decl = mod.declPtr(new_decl_index);
     try mod.initNewAnonDecl(
         new_decl_index,
@@ -36733,8 +36733,8 @@ fn generateUnionTagTypeNumbered(
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
 
-    const new_decl_index = try mod.allocateNewDecl(block.namespace);
-    errdefer mod.destroyDecl(new_decl_index);
+    const new_decl_index = try pt.allocateNewDecl(block.namespace);
+    errdefer pt.destroyDecl(new_decl_index);
     const fqn = try union_owner_decl.fullyQualifiedName(pt);
     const name = try ip.getOrPutStringFmt(
         gpa,
@@ -36748,7 +36748,7 @@ fn generateUnionTagTypeNumbered(
         Value.@"unreachable",
         name,
     );
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
@@ -36785,8 +36785,8 @@ fn generateUnionTagTypeSimple(
 
     const new_decl_index = new_decl_index: {
         const fqn = try union_owner_decl.fullyQualifiedName(pt);
-        const new_decl_index = try mod.allocateNewDecl(block.namespace);
-        errdefer mod.destroyDecl(new_decl_index);
+        const new_decl_index = try pt.allocateNewDecl(block.namespace);
+        errdefer pt.destroyDecl(new_decl_index);
         const name = try ip.getOrPutStringFmt(
             gpa,
             pt.tid,
@@ -36802,7 +36802,7 @@ fn generateUnionTagTypeSimple(
         mod.declPtr(new_decl_index).name_fully_qualified = true;
         break :new_decl_index new_decl_index;
     };
-    errdefer mod.abortAnonDecl(new_decl_index);
+    errdefer pt.abortAnonDecl(new_decl_index);
 
     const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{
         .decl = new_decl_index,
src/Zcu.zig
@@ -2410,6 +2410,7 @@ pub fn init(mod: *Module, thread_count: usize) !void {
 }
 
 pub fn deinit(zcu: *Zcu) void {
+    const pt: Zcu.PerThread = .{ .tid = .main, .zcu = zcu };
     const gpa = zcu.gpa;
 
     if (zcu.llvm_object) |llvm_object| {
@@ -2422,7 +2423,7 @@ pub fn deinit(zcu: *Zcu) void {
     }
     for (0..zcu.import_table.entries.len) |file_index_usize| {
         const file_index: File.Index = @enumFromInt(file_index_usize);
-        zcu.destroyFile(file_index);
+        pt.destroyFile(file_index);
     }
     zcu.import_table.deinit(gpa);
 
@@ -2497,68 +2498,9 @@ pub fn deinit(zcu: *Zcu) void {
     zcu.all_references.deinit(gpa);
     zcu.free_references.deinit(gpa);
 
-    {
-        var it = zcu.intern_pool.allocated_namespaces.iterator(0);
-        while (it.next()) |namespace| {
-            namespace.decls.deinit(gpa);
-            namespace.usingnamespace_set.deinit(gpa);
-        }
-    }
-
     zcu.intern_pool.deinit(gpa);
 }
 
-pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
-    const gpa = mod.gpa;
-    const ip = &mod.intern_pool;
-
-    {
-        _ = mod.test_functions.swapRemove(decl_index);
-        if (mod.global_assembly.fetchSwapRemove(decl_index)) |kv| {
-            gpa.free(kv.value);
-        }
-    }
-
-    ip.destroyDecl(gpa, decl_index);
-
-    if (mod.emit_h) |mod_emit_h| {
-        const decl_emit_h = mod_emit_h.declPtr(decl_index);
-        decl_emit_h.fwd_decl.deinit(gpa);
-        decl_emit_h.* = undefined;
-    }
-}
-
-fn deinitFile(zcu: *Zcu, file_index: File.Index) void {
-    const gpa = zcu.gpa;
-    const file = zcu.fileByIndex(file_index);
-    const is_builtin = file.mod.isBuiltin();
-    log.debug("deinit File {s}", .{file.sub_file_path});
-    if (is_builtin) {
-        file.unloadTree(gpa);
-        file.unloadZir(gpa);
-    } else {
-        gpa.free(file.sub_file_path);
-        file.unload(gpa);
-    }
-    file.references.deinit(gpa);
-    if (zcu.fileRootDecl(file_index).unwrap()) |root_decl| {
-        zcu.destroyDecl(root_decl);
-    }
-    if (file.prev_zir) |prev_zir| {
-        prev_zir.deinit(gpa);
-        gpa.destroy(prev_zir);
-    }
-    file.* = undefined;
-}
-
-pub fn destroyFile(zcu: *Zcu, file_index: File.Index) void {
-    const gpa = zcu.gpa;
-    const file = zcu.fileByIndex(file_index);
-    const is_builtin = file.mod.isBuiltin();
-    zcu.deinitFile(file_index);
-    if (!is_builtin) gpa.destroy(file);
-}
-
 pub fn declPtr(mod: *Module, index: Decl.Index) *Decl {
     return mod.intern_pool.declPtr(index);
 }
@@ -3269,13 +3211,6 @@ fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8)
     return bin;
 }
 
-/// Cancel the creation of an anon decl and delete any references to it.
-/// If other decls depend on this decl, they must be aborted first.
-pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
-    assert(!mod.declIsRoot(decl_index));
-    mod.destroyDecl(decl_index);
-}
-
 /// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of
 /// this `AnalUnit` will cause them to be re-created (or not).
 pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
@@ -3357,42 +3292,6 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
     gop.value_ptr.* = @intCast(ref_idx);
 }
 
-pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index {
-    return mod.intern_pool.createNamespace(mod.gpa, initialization);
-}
-
-pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
-    return mod.intern_pool.destroyNamespace(mod.gpa, index);
-}
-
-pub fn allocateNewDecl(zcu: *Zcu, namespace: Namespace.Index) !Decl.Index {
-    const gpa = zcu.gpa;
-    const decl_index = try zcu.intern_pool.createDecl(gpa, .{
-        .name = undefined,
-        .src_namespace = namespace,
-        .has_tv = false,
-        .owns_tv = false,
-        .val = undefined,
-        .alignment = undefined,
-        .@"linksection" = .none,
-        .@"addrspace" = .generic,
-        .analysis = .unreferenced,
-        .zir_decl_index = .none,
-        .is_pub = false,
-        .is_exported = false,
-        .kind = .anon,
-    });
-
-    if (zcu.emit_h) |zcu_emit_h| {
-        if (@intFromEnum(decl_index) >= zcu_emit_h.allocated_emit_h.len) {
-            try zcu_emit_h.allocated_emit_h.append(gpa, .{});
-            assert(@intFromEnum(decl_index) == zcu_emit_h.allocated_emit_h.len);
-        }
-    }
-
-    return decl_index;
-}
-
 pub fn getErrorValue(
     mod: *Module,
     name: InternPool.NullTerminatedString,