master
   1const std = @import("std.zig");
   2const debug = std.debug;
   3const assert = debug.assert;
   4const testing = std.testing;
   5const math = std.math;
   6const mem = std.mem;
   7const autoHash = std.hash.autoHash;
   8const Wyhash = std.hash.Wyhash;
   9const Allocator = mem.Allocator;
  10const hash_map = @This();
  11
  12/// An `ArrayHashMap` with default hash and equal functions.
  13///
  14/// See `AutoContext` for a description of the hash and equal implementations.
  15pub fn AutoArrayHashMap(comptime K: type, comptime V: type) type {
  16    return ArrayHashMap(K, V, AutoContext(K), !autoEqlIsCheap(K));
  17}
  18
  19/// An `ArrayHashMapUnmanaged` with default hash and equal functions.
  20///
  21/// See `AutoContext` for a description of the hash and equal implementations.
  22pub fn AutoArrayHashMapUnmanaged(comptime K: type, comptime V: type) type {
  23    return ArrayHashMapUnmanaged(K, V, AutoContext(K), !autoEqlIsCheap(K));
  24}
  25
  26/// An `ArrayHashMap` with strings as keys.
  27pub fn StringArrayHashMap(comptime V: type) type {
  28    return ArrayHashMap([]const u8, V, StringContext, true);
  29}
  30
  31/// An `ArrayHashMapUnmanaged` with strings as keys.
  32pub fn StringArrayHashMapUnmanaged(comptime V: type) type {
  33    return ArrayHashMapUnmanaged([]const u8, V, StringContext, true);
  34}
  35
  36pub const StringContext = struct {
  37    pub fn hash(self: @This(), s: []const u8) u32 {
  38        _ = self;
  39        return hashString(s);
  40    }
  41    pub fn eql(self: @This(), a: []const u8, b: []const u8, b_index: usize) bool {
  42        _ = self;
  43        _ = b_index;
  44        return eqlString(a, b);
  45    }
  46};
  47
  48pub fn eqlString(a: []const u8, b: []const u8) bool {
  49    return mem.eql(u8, a, b);
  50}
  51
  52pub fn hashString(s: []const u8) u32 {
  53    return @truncate(std.hash.Wyhash.hash(0, s));
  54}
  55
  56/// Deprecated in favor of `ArrayHashMapWithAllocator` (no code changes needed)
  57/// or `ArrayHashMapUnmanaged` (will need to update callsites to pass an
  58/// allocator). After Zig 0.14.0 is released, `ArrayHashMapWithAllocator` will
  59/// be removed and `ArrayHashMapUnmanaged` will be a deprecated alias. After
  60/// Zig 0.15.0 is released, the deprecated alias `ArrayHashMapUnmanaged` will
  61/// be removed.
  62pub const ArrayHashMap = ArrayHashMapWithAllocator;
  63
  64/// A hash table of keys and values, each stored sequentially.
  65///
  66/// Insertion order is preserved. In general, this data structure supports the same
  67/// operations as `std.ArrayList`.
  68///
  69/// Deletion operations:
  70/// * `swapRemove` - O(1)
  71/// * `orderedRemove` - O(N)
  72///
  73/// Modifying the hash map while iterating is allowed, however, one must understand
  74/// the (well defined) behavior when mixing insertions and deletions with iteration.
  75///
  76/// See `ArrayHashMapUnmanaged` for a variant of this data structure that accepts an
  77/// `Allocator` as a parameter when needed rather than storing it.
  78pub fn ArrayHashMapWithAllocator(
  79    comptime K: type,
  80    comptime V: type,
  81    /// A namespace that provides these two functions:
  82    /// * `pub fn hash(self, K) u32`
  83    /// * `pub fn eql(self, K, K, usize) bool`
  84    ///
  85    /// The final `usize` in the `eql` function represents the index of the key
  86    /// that's already inside the map.
  87    comptime Context: type,
  88    /// When `false`, this data structure is biased towards cheap `eql`
  89    /// functions and avoids storing each key's hash in the table. Setting
  90    /// `store_hash` to `true` incurs more memory cost but limits `eql` to
  91    /// being called only once per insertion/deletion (provided there are no
  92    /// hash collisions).
  93    comptime store_hash: bool,
  94) type {
  95    return struct {
  96        unmanaged: Unmanaged,
  97        allocator: Allocator,
  98        ctx: Context,
  99
 100        /// The ArrayHashMapUnmanaged type using the same settings as this managed map.
 101        pub const Unmanaged = ArrayHashMapUnmanaged(K, V, Context, store_hash);
 102
 103        /// Pointers to a key and value in the backing store of this map.
 104        /// Modifying the key is allowed only if it does not change the hash.
 105        /// Modifying the value is allowed.
 106        /// Entry pointers become invalid whenever this ArrayHashMap is modified,
 107        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
 108        pub const Entry = Unmanaged.Entry;
 109
 110        /// A KV pair which has been copied out of the backing store
 111        pub const KV = Unmanaged.KV;
 112
 113        /// The Data type used for the MultiArrayList backing this map
 114        pub const Data = Unmanaged.Data;
 115        /// The MultiArrayList type backing this map
 116        pub const DataList = Unmanaged.DataList;
 117
 118        /// The stored hash type, either u32 or void.
 119        pub const Hash = Unmanaged.Hash;
 120
 121        /// getOrPut variants return this structure, with pointers
 122        /// to the backing store and a flag to indicate whether an
 123        /// existing entry was found.
 124        /// Modifying the key is allowed only if it does not change the hash.
 125        /// Modifying the value is allowed.
 126        /// Entry pointers become invalid whenever this ArrayHashMap is modified,
 127        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
 128        pub const GetOrPutResult = Unmanaged.GetOrPutResult;
 129
 130        /// An Iterator over Entry pointers.
 131        pub const Iterator = Unmanaged.Iterator;
 132
 133        const Self = @This();
 134
 135        /// Create an ArrayHashMap instance which will use a specified allocator.
 136        pub fn init(allocator: Allocator) Self {
 137            if (@sizeOf(Context) != 0)
 138                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead.");
 139            return initContext(allocator, undefined);
 140        }
 141        pub fn initContext(allocator: Allocator, ctx: Context) Self {
 142            return .{
 143                .unmanaged = .empty,
 144                .allocator = allocator,
 145                .ctx = ctx,
 146            };
 147        }
 148
 149        /// Frees the backing allocation and leaves the map in an undefined state.
 150        /// Note that this does not free keys or values.  You must take care of that
 151        /// before calling this function, if it is needed.
 152        pub fn deinit(self: *Self) void {
 153            self.unmanaged.deinit(self.allocator);
 154            self.* = undefined;
 155        }
 156
 157        /// Puts the hash map into a state where any method call that would
 158        /// cause an existing key or value pointer to become invalidated will
 159        /// instead trigger an assertion.
 160        ///
 161        /// An additional call to `lockPointers` in such state also triggers an
 162        /// assertion.
 163        ///
 164        /// `unlockPointers` returns the hash map to the previous state.
 165        pub fn lockPointers(self: *Self) void {
 166            self.unmanaged.lockPointers();
 167        }
 168
 169        /// Undoes a call to `lockPointers`.
 170        pub fn unlockPointers(self: *Self) void {
 171            self.unmanaged.unlockPointers();
 172        }
 173
 174        /// Clears the map but retains the backing allocation for future use.
 175        pub fn clearRetainingCapacity(self: *Self) void {
 176            return self.unmanaged.clearRetainingCapacity();
 177        }
 178
 179        /// Clears the map and releases the backing allocation
 180        pub fn clearAndFree(self: *Self) void {
 181            return self.unmanaged.clearAndFree(self.allocator);
 182        }
 183
 184        /// Returns the number of KV pairs stored in this map.
 185        pub fn count(self: Self) usize {
 186            return self.unmanaged.count();
 187        }
 188
 189        /// Returns the backing array of keys in this map. Modifying the map may
 190        /// invalidate this array. Modifying this array in a way that changes
 191        /// key hashes or key equality puts the map into an unusable state until
 192        /// `reIndex` is called.
 193        pub fn keys(self: Self) []K {
 194            return self.unmanaged.keys();
 195        }
 196        /// Returns the backing array of values in this map. Modifying the map
 197        /// may invalidate this array. It is permitted to modify the values in
 198        /// this array.
 199        pub fn values(self: Self) []V {
 200            return self.unmanaged.values();
 201        }
 202
 203        /// Returns an iterator over the pairs in this map.
 204        /// Modifying the map may invalidate this iterator.
 205        pub fn iterator(self: *const Self) Iterator {
 206            return self.unmanaged.iterator();
 207        }
 208
 209        /// If key exists this function cannot fail.
 210        /// If there is an existing item with `key`, then the result
 211        /// `Entry` pointer points to it, and found_existing is true.
 212        /// Otherwise, puts a new item with undefined value, and
 213        /// the `Entry` pointer points to it. Caller should then initialize
 214        /// the value (but not the key).
 215        pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
 216            return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx);
 217        }
 218        pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) !GetOrPutResult {
 219            return self.unmanaged.getOrPutContextAdapted(self.allocator, key, ctx, self.ctx);
 220        }
 221
 222        /// If there is an existing item with `key`, then the result
 223        /// `Entry` pointer points to it, and found_existing is true.
 224        /// Otherwise, puts a new item with undefined value, and
 225        /// the `Entry` pointer points to it. Caller should then initialize
 226        /// the value (but not the key).
 227        /// If a new entry needs to be stored, this function asserts there
 228        /// is enough capacity to store it.
 229        pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
 230            return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx);
 231        }
 232        pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
 233            return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
 234        }
 235        pub fn getOrPutValue(self: *Self, key: K, value: V) !GetOrPutResult {
 236            return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx);
 237        }
 238
 239        /// Increases capacity, guaranteeing that insertions up until the
 240        /// `expected_count` will not cause an allocation, and therefore cannot fail.
 241        pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
 242            return self.unmanaged.ensureTotalCapacityContext(self.allocator, new_capacity, self.ctx);
 243        }
 244
 245        /// Increases capacity, guaranteeing that insertions up until
 246        /// `additional_count` **more** items will not cause an allocation, and
 247        /// therefore cannot fail.
 248        pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
 249            return self.unmanaged.ensureUnusedCapacityContext(self.allocator, additional_count, self.ctx);
 250        }
 251
 252        /// Returns the number of total elements which may be present before it is
 253        /// no longer guaranteed that no allocations will be performed.
 254        pub fn capacity(self: Self) usize {
 255            return self.unmanaged.capacity();
 256        }
 257
 258        /// Clobbers any existing data. To detect if a put would clobber
 259        /// existing data, see `getOrPut`.
 260        pub fn put(self: *Self, key: K, value: V) !void {
 261            return self.unmanaged.putContext(self.allocator, key, value, self.ctx);
 262        }
 263
 264        /// Inserts a key-value pair into the hash map, asserting that no previous
 265        /// entry with the same key is already present
 266        pub fn putNoClobber(self: *Self, key: K, value: V) !void {
 267            return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx);
 268        }
 269
 270        /// Asserts there is enough capacity to store the new key-value pair.
 271        /// Clobbers any existing data. To detect if a put would clobber
 272        /// existing data, see `getOrPutAssumeCapacity`.
 273        pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
 274            return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx);
 275        }
 276
 277        /// Asserts there is enough capacity to store the new key-value pair.
 278        /// Asserts that it does not clobber any existing data.
 279        /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
 280        pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
 281            return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx);
 282        }
 283
 284        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
 285        pub fn fetchPut(self: *Self, key: K, value: V) !?KV {
 286            return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx);
 287        }
 288
 289        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
 290        /// If insertion happuns, asserts there is enough capacity without allocating.
 291        pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
 292            return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx);
 293        }
 294
 295        /// Finds pointers to the key and value storage associated with a key.
 296        pub fn getEntry(self: Self, key: K) ?Entry {
 297            return self.unmanaged.getEntryContext(key, self.ctx);
 298        }
 299        pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
 300            return self.unmanaged.getEntryAdapted(key, ctx);
 301        }
 302
 303        /// Finds the index in the `entries` array where a key is stored
 304        pub fn getIndex(self: Self, key: K) ?usize {
 305            return self.unmanaged.getIndexContext(key, self.ctx);
 306        }
 307        pub fn getIndexAdapted(self: Self, key: anytype, ctx: anytype) ?usize {
 308            return self.unmanaged.getIndexAdapted(key, ctx);
 309        }
 310
 311        /// Find the value associated with a key
 312        pub fn get(self: Self, key: K) ?V {
 313            return self.unmanaged.getContext(key, self.ctx);
 314        }
 315        pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
 316            return self.unmanaged.getAdapted(key, ctx);
 317        }
 318
 319        /// Find a pointer to the value associated with a key
 320        pub fn getPtr(self: Self, key: K) ?*V {
 321            return self.unmanaged.getPtrContext(key, self.ctx);
 322        }
 323        pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
 324            return self.unmanaged.getPtrAdapted(key, ctx);
 325        }
 326
 327        /// Find the actual key associated with an adapted key
 328        pub fn getKey(self: Self, key: K) ?K {
 329            return self.unmanaged.getKeyContext(key, self.ctx);
 330        }
 331        pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K {
 332            return self.unmanaged.getKeyAdapted(key, ctx);
 333        }
 334
 335        /// Find a pointer to the actual key associated with an adapted key
 336        pub fn getKeyPtr(self: Self, key: K) ?*K {
 337            return self.unmanaged.getKeyPtrContext(key, self.ctx);
 338        }
 339        pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K {
 340            return self.unmanaged.getKeyPtrAdapted(key, ctx);
 341        }
 342
 343        /// Check whether a key is stored in the map
 344        pub fn contains(self: Self, key: K) bool {
 345            return self.unmanaged.containsContext(key, self.ctx);
 346        }
 347        pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
 348            return self.unmanaged.containsAdapted(key, ctx);
 349        }
 350
 351        /// If there is an `Entry` with a matching key, it is deleted from
 352        /// the hash map, and then returned from this function. The entry is
 353        /// removed from the underlying array by swapping it with the last
 354        /// element.
 355        pub fn fetchSwapRemove(self: *Self, key: K) ?KV {
 356            return self.unmanaged.fetchSwapRemoveContext(key, self.ctx);
 357        }
 358        pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
 359            return self.unmanaged.fetchSwapRemoveContextAdapted(key, ctx, self.ctx);
 360        }
 361
 362        /// If there is an `Entry` with a matching key, it is deleted from
 363        /// the hash map, and then returned from this function. The entry is
 364        /// removed from the underlying array by shifting all elements forward
 365        /// thereby maintaining the current ordering.
 366        pub fn fetchOrderedRemove(self: *Self, key: K) ?KV {
 367            return self.unmanaged.fetchOrderedRemoveContext(key, self.ctx);
 368        }
 369        pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
 370            return self.unmanaged.fetchOrderedRemoveContextAdapted(key, ctx, self.ctx);
 371        }
 372
 373        /// If there is an `Entry` with a matching key, it is deleted from
 374        /// the hash map. The entry is removed from the underlying array
 375        /// by swapping it with the last element.  Returns true if an entry
 376        /// was removed, false otherwise.
 377        pub fn swapRemove(self: *Self, key: K) bool {
 378            return self.unmanaged.swapRemoveContext(key, self.ctx);
 379        }
 380        pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
 381            return self.unmanaged.swapRemoveContextAdapted(key, ctx, self.ctx);
 382        }
 383
 384        /// If there is an `Entry` with a matching key, it is deleted from
 385        /// the hash map. The entry is removed from the underlying array
 386        /// by shifting all elements forward, thereby maintaining the
 387        /// current ordering.  Returns true if an entry was removed, false otherwise.
 388        pub fn orderedRemove(self: *Self, key: K) bool {
 389            return self.unmanaged.orderedRemoveContext(key, self.ctx);
 390        }
 391        pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
 392            return self.unmanaged.orderedRemoveContextAdapted(key, ctx, self.ctx);
 393        }
 394
 395        /// Deletes the item at the specified index in `entries` from
 396        /// the hash map. The entry is removed from the underlying array
 397        /// by swapping it with the last element.
 398        pub fn swapRemoveAt(self: *Self, index: usize) void {
 399            self.unmanaged.swapRemoveAtContext(index, self.ctx);
 400        }
 401
 402        /// Deletes the item at the specified index in `entries` from
 403        /// the hash map. The entry is removed from the underlying array
 404        /// by shifting all elements forward, thereby maintaining the
 405        /// current ordering.
 406        pub fn orderedRemoveAt(self: *Self, index: usize) void {
 407            self.unmanaged.orderedRemoveAtContext(index, self.ctx);
 408        }
 409
 410        /// Create a copy of the hash map which can be modified separately.
 411        /// The copy uses the same context and allocator as this instance.
 412        pub fn clone(self: Self) !Self {
 413            var other = try self.unmanaged.cloneContext(self.allocator, self.ctx);
 414            return other.promoteContext(self.allocator, self.ctx);
 415        }
 416        /// Create a copy of the hash map which can be modified separately.
 417        /// The copy uses the same context as this instance, but the specified
 418        /// allocator.
 419        pub fn cloneWithAllocator(self: Self, allocator: Allocator) !Self {
 420            var other = try self.unmanaged.cloneContext(allocator, self.ctx);
 421            return other.promoteContext(allocator, self.ctx);
 422        }
 423        /// Create a copy of the hash map which can be modified separately.
 424        /// The copy uses the same allocator as this instance, but the
 425        /// specified context.
 426        pub fn cloneWithContext(self: Self, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
 427            var other = try self.unmanaged.cloneContext(self.allocator, ctx);
 428            return other.promoteContext(self.allocator, ctx);
 429        }
 430        /// Create a copy of the hash map which can be modified separately.
 431        /// The copy uses the specified allocator and context.
 432        pub fn cloneWithAllocatorAndContext(self: Self, allocator: Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
 433            var other = try self.unmanaged.cloneContext(allocator, ctx);
 434            return other.promoteContext(allocator, ctx);
 435        }
 436
 437        /// Set the map to an empty state, making deinitialization a no-op, and
 438        /// returning a copy of the original.
 439        pub fn move(self: *Self) Self {
 440            self.unmanaged.pointer_stability.assertUnlocked();
 441            const result = self.*;
 442            self.unmanaged = .empty;
 443            return result;
 444        }
 445
 446        /// Recomputes stored hashes and rebuilds the key indexes. If the
 447        /// underlying keys have been modified directly, call this method to
 448        /// recompute the denormalized metadata necessary for the operation of
 449        /// the methods of this map that lookup entries by key.
 450        ///
 451        /// One use case for this is directly calling `entries.resize()` to grow
 452        /// the underlying storage, and then setting the `keys` and `values`
 453        /// directly without going through the methods of this map.
 454        ///
 455        /// The time complexity of this operation is O(n).
 456        pub fn reIndex(self: *Self) !void {
 457            return self.unmanaged.reIndexContext(self.allocator, self.ctx);
 458        }
 459
 460        /// Sorts the entries and then rebuilds the index.
 461        /// `sort_ctx` must have this method:
 462        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
 463        /// Uses a stable sorting algorithm.
 464        pub fn sort(self: *Self, sort_ctx: anytype) void {
 465            return self.unmanaged.sortContext(sort_ctx, self.ctx);
 466        }
 467
 468        /// Sorts the entries and then rebuilds the index.
 469        /// `sort_ctx` must have this method:
 470        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
 471        /// Uses an unstable sorting algorithm.
 472        pub fn sortUnstable(self: *Self, sort_ctx: anytype) void {
 473            return self.unmanaged.sortUnstableContext(sort_ctx, self.ctx);
 474        }
 475
 476        /// Shrinks the underlying `Entry` array to `new_len` elements and
 477        /// discards any associated index entries. Keeps capacity the same.
 478        ///
 479        /// Asserts the discarded entries remain initialized and capable of
 480        /// performing hash and equality checks. Any deinitialization of
 481        /// discarded entries must take place *after* calling this function.
 482        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
 483            return self.unmanaged.shrinkRetainingCapacityContext(new_len, self.ctx);
 484        }
 485
 486        /// Shrinks the underlying `Entry` array to `new_len` elements and
 487        /// discards any associated index entries. Reduces allocated capacity.
 488        ///
 489        /// Asserts the discarded entries remain initialized and capable of
 490        /// performing hash and equality checks. It is a bug to call this
 491        /// function if the discarded entries require deinitialization. For
 492        /// that use case, `shrinkRetainingCapacity` can be used instead.
 493        pub fn shrinkAndFree(self: *Self, new_len: usize) void {
 494            return self.unmanaged.shrinkAndFreeContext(self.allocator, new_len, self.ctx);
 495        }
 496
 497        /// Removes the last inserted `Entry` in the hash map and returns it if count is nonzero.
 498        /// Otherwise returns null.
 499        pub fn pop(self: *Self) ?KV {
 500            return self.unmanaged.popContext(self.ctx);
 501        }
 502    };
 503}
 504
 505/// A hash table of keys and values, each stored sequentially.
 506///
 507/// Insertion order is preserved. In general, this data structure supports the same
 508/// operations as `std.ArrayList`.
 509///
 510/// Deletion operations:
 511/// * `swapRemove` - O(1)
 512/// * `orderedRemove` - O(N)
 513///
 514/// Modifying the hash map while iterating is allowed, however, one must understand
 515/// the (well defined) behavior when mixing insertions and deletions with iteration.
 516///
 517/// This type does not store an `Allocator` field - the `Allocator` must be passed in
 518/// with each function call that requires it. See `ArrayHashMap` for a type that stores
 519/// an `Allocator` field for convenience.
 520///
 521/// Can be initialized directly using the default field values.
 522///
 523/// This type is designed to have low overhead for small numbers of entries. When
 524/// `store_hash` is `false` and the number of entries in the map is less than 9,
 525/// the overhead cost of using `ArrayHashMapUnmanaged` rather than `std.ArrayList` is
 526/// only a single pointer-sized integer.
 527///
 528/// Default initialization of this struct is deprecated; use `.empty` instead.
 529pub fn ArrayHashMapUnmanaged(
 530    comptime K: type,
 531    comptime V: type,
 532    /// A namespace that provides these two functions:
 533    /// * `pub fn hash(self, K) u32`
 534    /// * `pub fn eql(self, K, K, usize) bool`
 535    ///
 536    /// The final `usize` in the `eql` function represents the index of the key
 537    /// that's already inside the map.
 538    comptime Context: type,
 539    /// When `false`, this data structure is biased towards cheap `eql`
 540    /// functions and avoids storing each key's hash in the table. Setting
 541    /// `store_hash` to `true` incurs more memory cost but limits `eql` to
 542    /// being called only once per insertion/deletion (provided there are no
 543    /// hash collisions).
 544    comptime store_hash: bool,
 545) type {
 546    return struct {
 547        /// It is permitted to access this field directly.
 548        /// After any modification to the keys, consider calling `reIndex`.
 549        entries: DataList = .{},
 550
 551        /// When entries length is less than `linear_scan_max`, this remains `null`.
 552        /// Once entries length grows big enough, this field is allocated. There is
 553        /// an IndexHeader followed by an array of Index(I) structs, where I is defined
 554        /// by how many total indexes there are.
 555        index_header: ?*IndexHeader = null,
 556
 557        /// Used to detect memory safety violations.
 558        pointer_stability: std.debug.SafetyLock = .{},
 559
 560        /// A map containing no keys or values.
 561        pub const empty: Self = .{
 562            .entries = .{},
 563            .index_header = null,
 564        };
 565
 566        /// Modifying the key is allowed only if it does not change the hash.
 567        /// Modifying the value is allowed.
 568        /// Entry pointers become invalid whenever this ArrayHashMap is modified,
 569        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
 570        pub const Entry = struct {
 571            key_ptr: *K,
 572            value_ptr: *V,
 573        };
 574
 575        /// A KV pair which has been copied out of the backing store
 576        pub const KV = struct {
 577            key: K,
 578            value: V,
 579        };
 580
 581        /// The Data type used for the MultiArrayList backing this map
 582        pub const Data = struct {
 583            hash: Hash,
 584            key: K,
 585            value: V,
 586        };
 587
 588        /// The MultiArrayList type backing this map
 589        pub const DataList = std.MultiArrayList(Data);
 590
 591        /// The stored hash type, either u32 or void.
 592        pub const Hash = if (store_hash) u32 else void;
 593
 594        /// getOrPut variants return this structure, with pointers
 595        /// to the backing store and a flag to indicate whether an
 596        /// existing entry was found.
 597        /// Modifying the key is allowed only if it does not change the hash.
 598        /// Modifying the value is allowed.
 599        /// Entry pointers become invalid whenever this ArrayHashMap is modified,
 600        /// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
 601        pub const GetOrPutResult = struct {
 602            key_ptr: *K,
 603            value_ptr: *V,
 604            found_existing: bool,
 605            index: usize,
 606        };
 607
 608        /// The ArrayHashMap type using the same settings as this managed map.
 609        pub const Managed = ArrayHashMap(K, V, Context, store_hash);
 610
 611        /// Some functions require a context only if hashes are not stored.
 612        /// To keep the api simple, this type is only used internally.
 613        const ByIndexContext = if (store_hash) void else Context;
 614
 615        const Self = @This();
 616
 617        const linear_scan_max = @as(comptime_int, @max(1, @as(comptime_int, @min(
 618            std.atomic.cache_line / @as(comptime_int, @max(1, @sizeOf(Hash))),
 619            std.atomic.cache_line / @as(comptime_int, @max(1, @sizeOf(K))),
 620        ))));
 621
 622        const RemovalType = enum {
 623            swap,
 624            ordered,
 625        };
 626
 627        const Oom = Allocator.Error;
 628
 629        /// Convert from an unmanaged map to a managed map.  After calling this,
 630        /// the promoted map should no longer be used.
 631        pub fn promote(self: Self, gpa: Allocator) Managed {
 632            if (@sizeOf(Context) != 0)
 633                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
 634            return self.promoteContext(gpa, undefined);
 635        }
 636        pub fn promoteContext(self: Self, gpa: Allocator, ctx: Context) Managed {
 637            return .{
 638                .unmanaged = self,
 639                .allocator = gpa,
 640                .ctx = ctx,
 641            };
 642        }
 643
 644        pub fn init(gpa: Allocator, key_list: []const K, value_list: []const V) Oom!Self {
 645            var self: Self = .{};
 646            errdefer self.deinit(gpa);
 647            try self.reinit(gpa, key_list, value_list);
 648            return self;
 649        }
 650
 651        /// An empty `value_list` may be passed, in which case the values array becomes `undefined`.
 652        pub fn reinit(self: *Self, gpa: Allocator, key_list: []const K, value_list: []const V) Oom!void {
 653            try self.entries.resize(gpa, key_list.len);
 654            @memcpy(self.keys(), key_list);
 655            if (value_list.len == 0) {
 656                @memset(self.values(), undefined);
 657            } else {
 658                assert(key_list.len == value_list.len);
 659                @memcpy(self.values(), value_list);
 660            }
 661            try self.reIndex(gpa);
 662        }
 663
 664        /// Frees the backing allocation and leaves the map in an undefined state.
 665        /// Note that this does not free keys or values.  You must take care of that
 666        /// before calling this function, if it is needed.
 667        pub fn deinit(self: *Self, gpa: Allocator) void {
 668            self.pointer_stability.assertUnlocked();
 669            self.entries.deinit(gpa);
 670            if (self.index_header) |header| {
 671                header.free(gpa);
 672            }
 673            self.* = undefined;
 674        }
 675
 676        /// Puts the hash map into a state where any method call that would
 677        /// cause an existing key or value pointer to become invalidated will
 678        /// instead trigger an assertion.
 679        ///
 680        /// An additional call to `lockPointers` in such state also triggers an
 681        /// assertion.
 682        ///
 683        /// `unlockPointers` returns the hash map to the previous state.
 684        pub fn lockPointers(self: *Self) void {
 685            self.pointer_stability.lock();
 686        }
 687
 688        /// Undoes a call to `lockPointers`.
 689        pub fn unlockPointers(self: *Self) void {
 690            self.pointer_stability.unlock();
 691        }
 692
 693        /// Clears the map but retains the backing allocation for future use.
 694        pub fn clearRetainingCapacity(self: *Self) void {
 695            self.pointer_stability.lock();
 696            defer self.pointer_stability.unlock();
 697
 698            self.entries.len = 0;
 699            if (self.index_header) |header| {
 700                switch (header.capacityIndexType()) {
 701                    .u8 => @memset(header.indexes(u8), Index(u8).empty),
 702                    .u16 => @memset(header.indexes(u16), Index(u16).empty),
 703                    .u32 => @memset(header.indexes(u32), Index(u32).empty),
 704                }
 705            }
 706        }
 707
 708        /// Clears the map and releases the backing allocation
 709        pub fn clearAndFree(self: *Self, gpa: Allocator) void {
 710            self.pointer_stability.lock();
 711            defer self.pointer_stability.unlock();
 712
 713            self.entries.shrinkAndFree(gpa, 0);
 714            if (self.index_header) |header| {
 715                header.free(gpa);
 716                self.index_header = null;
 717            }
 718        }
 719
 720        /// Returns the number of KV pairs stored in this map.
 721        pub fn count(self: Self) usize {
 722            return self.entries.len;
 723        }
 724
 725        /// Returns the backing array of keys in this map. Modifying the map may
 726        /// invalidate this array. Modifying this array in a way that changes
 727        /// key hashes or key equality puts the map into an unusable state until
 728        /// `reIndex` is called.
 729        pub fn keys(self: Self) []K {
 730            return self.entries.items(.key);
 731        }
 732        /// Returns the backing array of values in this map. Modifying the map
 733        /// may invalidate this array. It is permitted to modify the values in
 734        /// this array.
 735        pub fn values(self: Self) []V {
 736            return self.entries.items(.value);
 737        }
 738
 739        /// Returns an iterator over the pairs in this map.
 740        /// Modifying the map may invalidate this iterator.
 741        pub fn iterator(self: Self) Iterator {
 742            const slice = self.entries.slice();
 743            return .{
 744                .keys = slice.items(.key).ptr,
 745                .values = slice.items(.value).ptr,
 746                .len = @as(u32, @intCast(slice.len)),
 747            };
 748        }
 749        pub const Iterator = struct {
 750            keys: [*]K,
 751            values: [*]V,
 752            len: u32,
 753            index: u32 = 0,
 754
 755            pub fn next(it: *Iterator) ?Entry {
 756                if (it.index >= it.len) return null;
 757                const result = Entry{
 758                    .key_ptr = &it.keys[it.index],
 759                    // workaround for #6974
 760                    .value_ptr = if (@sizeOf(*V) == 0) undefined else &it.values[it.index],
 761                };
 762                it.index += 1;
 763                return result;
 764            }
 765
 766            /// Reset the iterator to the initial index
 767            pub fn reset(it: *Iterator) void {
 768                it.index = 0;
 769            }
 770        };
 771
 772        /// If key exists this function cannot fail.
 773        /// If there is an existing item with `key`, then the result
 774        /// `Entry` pointer points to it, and found_existing is true.
 775        /// Otherwise, puts a new item with undefined value, and
 776        /// the `Entry` pointer points to it. Caller should then initialize
 777        /// the value (but not the key).
 778        pub fn getOrPut(self: *Self, gpa: Allocator, key: K) Oom!GetOrPutResult {
 779            if (@sizeOf(Context) != 0)
 780                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
 781            return self.getOrPutContext(gpa, key, undefined);
 782        }
 783        pub fn getOrPutContext(self: *Self, gpa: Allocator, key: K, ctx: Context) Oom!GetOrPutResult {
 784            const gop = try self.getOrPutContextAdapted(gpa, key, ctx, ctx);
 785            if (!gop.found_existing) {
 786                gop.key_ptr.* = key;
 787            }
 788            return gop;
 789        }
 790        pub fn getOrPutAdapted(self: *Self, gpa: Allocator, key: anytype, key_ctx: anytype) Oom!GetOrPutResult {
 791            if (@sizeOf(Context) != 0)
 792                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
 793            return self.getOrPutContextAdapted(gpa, key, key_ctx, undefined);
 794        }
 795        pub fn getOrPutContextAdapted(self: *Self, gpa: Allocator, key: anytype, key_ctx: anytype, ctx: Context) Oom!GetOrPutResult {
 796            self.ensureTotalCapacityContext(gpa, self.entries.len + 1, ctx) catch |err| {
 797                // "If key exists this function cannot fail."
 798                const index = self.getIndexAdapted(key, key_ctx) orelse return err;
 799                const slice = self.entries.slice();
 800                return GetOrPutResult{
 801                    .key_ptr = &slice.items(.key)[index],
 802                    // workaround for #6974
 803                    .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[index],
 804                    .found_existing = true,
 805                    .index = index,
 806                };
 807            };
 808            return self.getOrPutAssumeCapacityAdapted(key, key_ctx);
 809        }
 810
 811        /// If there is an existing item with `key`, then the result
 812        /// `Entry` pointer points to it, and found_existing is true.
 813        /// Otherwise, puts a new item with undefined value, and
 814        /// the `Entry` pointer points to it. Caller should then initialize
 815        /// the value (but not the key).
 816        /// If a new entry needs to be stored, this function asserts there
 817        /// is enough capacity to store it.
 818        pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
 819            if (@sizeOf(Context) != 0)
 820                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutAssumeCapacityContext instead.");
 821            return self.getOrPutAssumeCapacityContext(key, undefined);
 822        }
 823        pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult {
 824            const gop = self.getOrPutAssumeCapacityAdapted(key, ctx);
 825            if (!gop.found_existing) {
 826                gop.key_ptr.* = key;
 827            }
 828            return gop;
 829        }
 830        /// If there is an existing item with `key`, then the result
 831        /// `Entry` pointers point to it, and found_existing is true.
 832        /// Otherwise, puts a new item with undefined key and value, and
 833        /// the `Entry` pointers point to it. Caller must then initialize
 834        /// both the key and the value.
 835        /// If a new entry needs to be stored, this function asserts there
 836        /// is enough capacity to store it.
 837        pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
 838            const header = self.index_header orelse {
 839                // Linear scan.
 840                const h = if (store_hash) checkedHash(ctx, key) else {};
 841                const slice = self.entries.slice();
 842                const hashes_array = slice.items(.hash);
 843                const keys_array = slice.items(.key);
 844                for (keys_array, 0..) |*item_key, i| {
 845                    if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*, i)) {
 846                        return GetOrPutResult{
 847                            .key_ptr = item_key,
 848                            // workaround for #6974
 849                            .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[i],
 850                            .found_existing = true,
 851                            .index = i,
 852                        };
 853                    }
 854                }
 855
 856                const index = self.entries.addOneAssumeCapacity();
 857                // The slice length changed, so we directly index the pointer.
 858                if (store_hash) hashes_array.ptr[index] = h;
 859
 860                return GetOrPutResult{
 861                    .key_ptr = &keys_array.ptr[index],
 862                    // workaround for #6974
 863                    .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value).ptr[index],
 864                    .found_existing = false,
 865                    .index = index,
 866                };
 867            };
 868
 869            switch (header.capacityIndexType()) {
 870                .u8 => return self.getOrPutInternal(key, ctx, header, u8),
 871                .u16 => return self.getOrPutInternal(key, ctx, header, u16),
 872                .u32 => return self.getOrPutInternal(key, ctx, header, u32),
 873            }
 874        }
 875
 876        pub fn getOrPutValue(self: *Self, gpa: Allocator, key: K, value: V) Oom!GetOrPutResult {
 877            if (@sizeOf(Context) != 0)
 878                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
 879            return self.getOrPutValueContext(gpa, key, value, undefined);
 880        }
 881        pub fn getOrPutValueContext(self: *Self, gpa: Allocator, key: K, value: V, ctx: Context) Oom!GetOrPutResult {
 882            const res = try self.getOrPutContextAdapted(gpa, key, ctx, ctx);
 883            if (!res.found_existing) {
 884                res.key_ptr.* = key;
 885                res.value_ptr.* = value;
 886            }
 887            return res;
 888        }
 889
 890        /// Increases capacity, guaranteeing that insertions up until the
 891        /// `expected_count` will not cause an allocation, and therefore cannot fail.
 892        pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) Oom!void {
 893            if (@sizeOf(ByIndexContext) != 0)
 894                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
 895            return self.ensureTotalCapacityContext(gpa, new_capacity, undefined);
 896        }
 897        pub fn ensureTotalCapacityContext(self: *Self, gpa: Allocator, new_capacity: usize, ctx: Context) Oom!void {
 898            self.pointer_stability.lock();
 899            defer self.pointer_stability.unlock();
 900
 901            try self.entries.ensureTotalCapacity(gpa, new_capacity);
 902            if (new_capacity <= linear_scan_max) return;
 903            if (self.index_header) |header| if (new_capacity <= header.capacity()) return;
 904
 905            const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
 906            const new_header = try IndexHeader.alloc(gpa, new_bit_index);
 907
 908            if (self.index_header) |old_header| old_header.free(gpa);
 909            self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
 910            self.index_header = new_header;
 911        }
 912
 913        /// Increases capacity, guaranteeing that insertions up until
 914        /// `additional_count` **more** items will not cause an allocation, and
 915        /// therefore cannot fail.
 916        pub fn ensureUnusedCapacity(
 917            self: *Self,
 918            gpa: Allocator,
 919            additional_capacity: usize,
 920        ) Oom!void {
 921            if (@sizeOf(Context) != 0)
 922                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
 923            return self.ensureUnusedCapacityContext(gpa, additional_capacity, undefined);
 924        }
 925        pub fn ensureUnusedCapacityContext(
 926            self: *Self,
 927            gpa: Allocator,
 928            additional_capacity: usize,
 929            ctx: Context,
 930        ) Oom!void {
 931            return self.ensureTotalCapacityContext(gpa, self.count() + additional_capacity, ctx);
 932        }
 933
 934        /// Returns the number of total elements which may be present before it is
 935        /// no longer guaranteed that no allocations will be performed.
 936        pub fn capacity(self: Self) usize {
 937            const entry_cap = self.entries.capacity;
 938            const header = self.index_header orelse return @min(linear_scan_max, entry_cap);
 939            const indexes_cap = header.capacity();
 940            return @min(entry_cap, indexes_cap);
 941        }
 942
 943        /// Clobbers any existing data. To detect if a put would clobber
 944        /// existing data, see `getOrPut`.
 945        pub fn put(self: *Self, gpa: Allocator, key: K, value: V) Oom!void {
 946            if (@sizeOf(Context) != 0)
 947                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
 948            return self.putContext(gpa, key, value, undefined);
 949        }
 950        pub fn putContext(self: *Self, gpa: Allocator, key: K, value: V, ctx: Context) Oom!void {
 951            const result = try self.getOrPutContext(gpa, key, ctx);
 952            result.value_ptr.* = value;
 953        }
 954
 955        /// Inserts a key-value pair into the hash map, asserting that no previous
 956        /// entry with the same key is already present
 957        pub fn putNoClobber(self: *Self, gpa: Allocator, key: K, value: V) Oom!void {
 958            if (@sizeOf(Context) != 0)
 959                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
 960            return self.putNoClobberContext(gpa, key, value, undefined);
 961        }
 962        pub fn putNoClobberContext(self: *Self, gpa: Allocator, key: K, value: V, ctx: Context) Oom!void {
 963            const result = try self.getOrPutContext(gpa, key, ctx);
 964            assert(!result.found_existing);
 965            result.value_ptr.* = value;
 966        }
 967
 968        /// Asserts there is enough capacity to store the new key-value pair.
 969        /// Clobbers any existing data. To detect if a put would clobber
 970        /// existing data, see `getOrPutAssumeCapacity`.
 971        pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
 972            if (@sizeOf(Context) != 0)
 973                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityContext instead.");
 974            return self.putAssumeCapacityContext(key, value, undefined);
 975        }
 976        pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void {
 977            const result = self.getOrPutAssumeCapacityContext(key, ctx);
 978            result.value_ptr.* = value;
 979        }
 980
 981        /// Asserts there is enough capacity to store the new key-value pair.
 982        /// Asserts that it does not clobber any existing data.
 983        /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
 984        pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
 985            if (@sizeOf(Context) != 0)
 986                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityNoClobberContext instead.");
 987            return self.putAssumeCapacityNoClobberContext(key, value, undefined);
 988        }
 989        pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void {
 990            const result = self.getOrPutAssumeCapacityContext(key, ctx);
 991            assert(!result.found_existing);
 992            result.value_ptr.* = value;
 993        }
 994
 995        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
 996        pub fn fetchPut(self: *Self, gpa: Allocator, key: K, value: V) Oom!?KV {
 997            if (@sizeOf(Context) != 0)
 998                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
 999            return self.fetchPutContext(gpa, key, value, undefined);
1000        }
1001        pub fn fetchPutContext(self: *Self, gpa: Allocator, key: K, value: V, ctx: Context) Oom!?KV {
1002            const gop = try self.getOrPutContext(gpa, key, ctx);
1003            var result: ?KV = null;
1004            if (gop.found_existing) {
1005                result = KV{
1006                    .key = gop.key_ptr.*,
1007                    .value = gop.value_ptr.*,
1008                };
1009            }
1010            gop.value_ptr.* = value;
1011            return result;
1012        }
1013
1014        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
1015        /// If insertion happens, asserts there is enough capacity without allocating.
1016        pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
1017            if (@sizeOf(Context) != 0)
1018                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutAssumeCapacityContext instead.");
1019            return self.fetchPutAssumeCapacityContext(key, value, undefined);
1020        }
1021        pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV {
1022            const gop = self.getOrPutAssumeCapacityContext(key, ctx);
1023            var result: ?KV = null;
1024            if (gop.found_existing) {
1025                result = KV{
1026                    .key = gop.key_ptr.*,
1027                    .value = gop.value_ptr.*,
1028                };
1029            }
1030            gop.value_ptr.* = value;
1031            return result;
1032        }
1033
1034        /// Finds pointers to the key and value storage associated with a key.
1035        pub fn getEntry(self: Self, key: K) ?Entry {
1036            if (@sizeOf(Context) != 0)
1037                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getEntryContext instead.");
1038            return self.getEntryContext(key, undefined);
1039        }
1040        pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry {
1041            return self.getEntryAdapted(key, ctx);
1042        }
1043        pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
1044            const index = self.getIndexAdapted(key, ctx) orelse return null;
1045            const slice = self.entries.slice();
1046            return Entry{
1047                .key_ptr = &slice.items(.key)[index],
1048                // workaround for #6974
1049                .value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[index],
1050            };
1051        }
1052
1053        /// Finds the index in the `entries` array where a key is stored
1054        pub fn getIndex(self: Self, key: K) ?usize {
1055            if (@sizeOf(Context) != 0)
1056                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getIndexContext instead.");
1057            return self.getIndexContext(key, undefined);
1058        }
1059        pub fn getIndexContext(self: Self, key: K, ctx: Context) ?usize {
1060            return self.getIndexAdapted(key, ctx);
1061        }
1062        pub fn getIndexAdapted(self: Self, key: anytype, ctx: anytype) ?usize {
1063            const header = self.index_header orelse {
1064                // Linear scan.
1065                const h = if (store_hash) checkedHash(ctx, key) else {};
1066                const slice = self.entries.slice();
1067                const hashes_array = slice.items(.hash);
1068                const keys_array = slice.items(.key);
                for (keys_array, 0..) |*item_key, i| {
                    if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*, i)) {
                        return i;
                    }
                }
                return null;
            };
            switch (header.capacityIndexType()) {
                .u8 => return self.getIndexWithHeaderGeneric(key, ctx, header, u8),
                .u16 => return self.getIndexWithHeaderGeneric(key, ctx, header, u16),
                .u32 => return self.getIndexWithHeaderGeneric(key, ctx, header, u32),
            }
        }
        fn getIndexWithHeaderGeneric(self: Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) ?usize {
            const indexes = header.indexes(I);
            const slot = self.getSlotByKey(key, ctx, header, I, indexes) orelse return null;
            return indexes[slot].entry_index;
        }

        /// Find the value associated with a key
        pub fn get(self: Self, key: K) ?V {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getContext instead.");
            return self.getContext(key, undefined);
        }
        pub fn getContext(self: Self, key: K, ctx: Context) ?V {
            return self.getAdapted(key, ctx);
        }
        pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
            const index = self.getIndexAdapted(key, ctx) orelse return null;
            return self.values()[index];
        }

        /// Find a pointer to the value associated with a key
        pub fn getPtr(self: Self, key: K) ?*V {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getPtrContext instead.");
            return self.getPtrContext(key, undefined);
        }
        pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V {
            return self.getPtrAdapted(key, ctx);
        }
        pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
            const index = self.getIndexAdapted(key, ctx) orelse return null;
            // workaround for #6974
            return if (@sizeOf(*V) == 0) @as(*V, undefined) else &self.values()[index];
        }

        /// Find the actual key associated with an adapted key
        pub fn getKey(self: Self, key: K) ?K {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyContext instead.");
            return self.getKeyContext(key, undefined);
        }
        pub fn getKeyContext(self: Self, key: K, ctx: Context) ?K {
            return self.getKeyAdapted(key, ctx);
        }
        pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K {
            const index = self.getIndexAdapted(key, ctx) orelse return null;
            return self.keys()[index];
        }

        /// Find a pointer to the actual key associated with an adapted key
        pub fn getKeyPtr(self: Self, key: K) ?*K {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyPtrContext instead.");
            return self.getKeyPtrContext(key, undefined);
        }
        pub fn getKeyPtrContext(self: Self, key: K, ctx: Context) ?*K {
            return self.getKeyPtrAdapted(key, ctx);
        }
        pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K {
            const index = self.getIndexAdapted(key, ctx) orelse return null;
            return &self.keys()[index];
        }

        /// Check whether a key is stored in the map
        pub fn contains(self: Self, key: K) bool {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call containsContext instead.");
            return self.containsContext(key, undefined);
        }
        pub fn containsContext(self: Self, key: K, ctx: Context) bool {
            return self.containsAdapted(key, ctx);
        }
        pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
            return self.getIndexAdapted(key, ctx) != null;
        }

        /// If there is an `Entry` with a matching key, it is deleted from
        /// the hash map, and then returned from this function. The entry is
        /// removed from the underlying array by swapping it with the last
        /// element.
        pub fn fetchSwapRemove(self: *Self, key: K) ?KV {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchSwapRemoveContext instead.");
            return self.fetchSwapRemoveContext(key, undefined);
        }
        pub fn fetchSwapRemoveContext(self: *Self, key: K, ctx: Context) ?KV {
            return self.fetchSwapRemoveContextAdapted(key, ctx, ctx);
        }
        pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchSwapRemoveContextAdapted instead.");
            return self.fetchSwapRemoveContextAdapted(key, ctx, undefined);
        }
        pub fn fetchSwapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            return self.fetchRemoveByKey(key, key_ctx, if (store_hash) {} else ctx, .swap);
        }

        /// If there is an `Entry` with a matching key, it is deleted from
        /// the hash map, and then returned from this function. The entry is
        /// removed from the underlying array by shifting all elements forward
        /// thereby maintaining the current ordering.
        pub fn fetchOrderedRemove(self: *Self, key: K) ?KV {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchOrderedRemoveContext instead.");
            return self.fetchOrderedRemoveContext(key, undefined);
        }
        pub fn fetchOrderedRemoveContext(self: *Self, key: K, ctx: Context) ?KV {
            return self.fetchOrderedRemoveContextAdapted(key, ctx, ctx);
        }
        pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchOrderedRemoveContextAdapted instead.");
            return self.fetchOrderedRemoveContextAdapted(key, ctx, undefined);
        }
        pub fn fetchOrderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            return self.fetchRemoveByKey(key, key_ctx, if (store_hash) {} else ctx, .ordered);
        }

        /// If there is an `Entry` with a matching key, it is deleted from
        /// the hash map. The entry is removed from the underlying array
        /// by swapping it with the last element.  Returns true if an entry
        /// was removed, false otherwise.
        pub fn swapRemove(self: *Self, key: K) bool {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveContext instead.");
            return self.swapRemoveContext(key, undefined);
        }
        pub fn swapRemoveContext(self: *Self, key: K, ctx: Context) bool {
            return self.swapRemoveContextAdapted(key, ctx, ctx);
        }
        pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveContextAdapted instead.");
            return self.swapRemoveContextAdapted(key, ctx, undefined);
        }
        pub fn swapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            return self.removeByKey(key, key_ctx, if (store_hash) {} else ctx, .swap);
        }

        /// If there is an `Entry` with a matching key, it is deleted from
        /// the hash map. The entry is removed from the underlying array
        /// by shifting all elements forward, thereby maintaining the
        /// current ordering.  Returns true if an entry was removed, false otherwise.
        pub fn orderedRemove(self: *Self, key: K) bool {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveContext instead.");
            return self.orderedRemoveContext(key, undefined);
        }
        pub fn orderedRemoveContext(self: *Self, key: K, ctx: Context) bool {
            return self.orderedRemoveContextAdapted(key, ctx, ctx);
        }
        pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveContextAdapted instead.");
            return self.orderedRemoveContextAdapted(key, ctx, undefined);
        }
        pub fn orderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            return self.removeByKey(key, key_ctx, if (store_hash) {} else ctx, .ordered);
        }
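        // A minimal sketch of how the two removal strategies affect iteration
        // order, assuming a hypothetical map holding the keys 1, 2, 3, 4 in
        // insertion order:
        //
        //     _ = map.swapRemove(2); // keys are now 1, 4, 3: the last entry fills the hole
        //     _ = map.orderedRemove(4); // keys are now 1, 3: later entries shift forward
        //
        // `swapRemove` is O(1); `orderedRemove` is O(N) but preserves order.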

        /// Deletes the item at the specified index in `entries` from
        /// the hash map. The entry is removed from the underlying array
        /// by swapping it with the last element.
        pub fn swapRemoveAt(self: *Self, index: usize) void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call swapRemoveAtContext instead.");
            return self.swapRemoveAtContext(index, undefined);
        }
        pub fn swapRemoveAtContext(self: *Self, index: usize, ctx: Context) void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            self.removeByIndex(index, if (store_hash) {} else ctx, .swap);
        }

        /// Deletes the item at the specified index in `entries` from
        /// the hash map. The entry is removed from the underlying array
        /// by shifting all elements forward, thereby maintaining the
        /// current ordering.
        pub fn orderedRemoveAt(self: *Self, index: usize) void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveAtContext instead.");
            return self.orderedRemoveAtContext(index, undefined);
        }
        pub fn orderedRemoveAtContext(self: *Self, index: usize, ctx: Context) void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            self.removeByIndex(index, if (store_hash) {} else ctx, .ordered);
        }

        /// Remove the entries indexed by `sorted_indexes`. The indexes refer to
        /// the state of the map before any of the removals take place.
        ///
        /// This operation is O(N).
        ///
        /// Asserts that each index to be removed is in bounds.
        ///
        /// Invalidates key and element pointers beyond the first deleted index.
        pub fn orderedRemoveAtMany(self: *Self, gpa: Allocator, sorted_indexes: []const usize) Oom!void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call orderedRemoveAtManyContext instead.");
            return self.orderedRemoveAtManyContext(gpa, sorted_indexes, undefined);
        }

        pub fn orderedRemoveAtManyContext(
            self: *Self,
            gpa: Allocator,
            sorted_indexes: []const usize,
            ctx: Context,
        ) Oom!void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            self.entries.orderedRemoveMany(sorted_indexes);
            try self.reIndexContext(gpa, ctx);
        }
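        // A minimal sketch (hypothetical `gpa` and a map currently holding entries
        // at indexes 0..5): removing indexes 1 and 3 deletes the entries that were
        // at those positions *before* the call; later indexes are not re-interpreted
        // after earlier removals shift entries forward. As the parameter name
        // suggests, the indexes are expected in ascending order.
        //
        //     try map.orderedRemoveAtMany(gpa, &.{ 1, 3 });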

        /// Create a copy of the hash map which can be modified separately.
        /// The copy uses the same context as this instance, but is allocated
        /// with the provided allocator.
        pub fn clone(self: Self, gpa: Allocator) Oom!Self {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
            return self.cloneContext(gpa, undefined);
        }
        pub fn cloneContext(self: Self, gpa: Allocator, ctx: Context) Oom!Self {
            var other: Self = .{};
            other.entries = try self.entries.clone(gpa);
            errdefer other.entries.deinit(gpa);

            if (self.index_header) |header| {
                // TODO: I'm pretty sure this could be memcpy'd instead of
                // doing all this work.
                const new_header = try IndexHeader.alloc(gpa, header.bit_index);
                other.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
                other.index_header = new_header;
            }
            return other;
        }

        /// Set the map to an empty state, making deinitialization a no-op, and
        /// returning a copy of the original.
        pub fn move(self: *Self) Self {
            self.pointer_stability.assertUnlocked();
            const result = self.*;
            self.* = .empty;
            return result;
        }

        /// Recomputes stored hashes and rebuilds the key indexes. If the
        /// underlying keys have been modified directly, call this method to
        /// recompute the denormalized metadata necessary for the operation of
        /// the methods of this map that look up entries by key.
        ///
        /// One use case for this is directly calling `entries.resize()` to grow
        /// the underlying storage, and then setting the `keys` and `values`
        /// directly without going through the methods of this map.
        ///
        /// The time complexity of this operation is O(n).
        pub fn reIndex(self: *Self, gpa: Allocator) Oom!void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead.");
            return self.reIndexContext(gpa, undefined);
        }

        pub fn reIndexContext(self: *Self, gpa: Allocator, ctx: Context) Oom!void {
            // Recompute all hashes.
            if (store_hash) {
                for (self.keys(), self.entries.items(.hash)) |key, *hash| {
                    const h = checkedHash(ctx, key);
                    hash.* = h;
                }
            }
            try rebuildIndex(self, gpa, ctx);
        }
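        // A minimal sketch of the use case described above (hypothetical `gpa`,
        // `new_keys` and `new_values` slices of equal length): append entries by
        // writing the backing arrays directly, then rebuild the index.
        //
        //     const old_len = map.entries.len;
        //     try map.entries.resize(gpa, old_len + new_keys.len);
        //     @memcpy(map.keys()[old_len..], new_keys);
        //     @memcpy(map.values()[old_len..], new_values);
        //     try map.reIndex(gpa);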

        /// Modify an entry's key without reordering any entries.
        pub fn setKey(self: *Self, gpa: Allocator, index: usize, new_key: K) Oom!void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call setKeyContext instead.");
            return setKeyContext(self, gpa, index, new_key, undefined);
        }

        pub fn setKeyContext(self: *Self, gpa: Allocator, index: usize, new_key: K, ctx: Context) Oom!void {
            const key_ptr = &self.entries.items(.key)[index];
            key_ptr.* = new_key;
            if (store_hash) self.entries.items(.hash)[index] = checkedHash(ctx, key_ptr.*);
            try rebuildIndex(self, gpa, ctx);
        }

        fn rebuildIndex(self: *Self, gpa: Allocator, ctx: Context) Oom!void {
            if (self.entries.capacity <= linear_scan_max) return;

            // We're going to rebuild the index header and replace the existing one (if any). The
            // indexes should be sized such that they will be at most 60% full.
            const bit_index = try IndexHeader.findBitIndex(self.entries.capacity);
            const new_header = try IndexHeader.alloc(gpa, bit_index);
            if (self.index_header) |header| header.free(gpa);
            self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
            self.index_header = new_header;
        }

        /// Sorts the entries and then rebuilds the index.
        /// `sort_ctx` must have this method:
        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
        /// Uses a stable sorting algorithm.
        pub inline fn sort(self: *Self, sort_ctx: anytype) void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call sortContext instead.");
            return sortContextInternal(self, .stable, sort_ctx, undefined);
        }

        /// Sorts the entries and then rebuilds the index.
        /// `sort_ctx` must have this method:
        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
        /// Uses an unstable sorting algorithm.
        pub inline fn sortUnstable(self: *Self, sort_ctx: anytype) void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call sortUnstableContext instead.");
            return self.sortContextInternal(.unstable, sort_ctx, undefined);
        }

        pub inline fn sortContext(self: *Self, sort_ctx: anytype, ctx: Context) void {
            return sortContextInternal(self, .stable, sort_ctx, ctx);
        }

        pub inline fn sortUnstableContext(self: *Self, sort_ctx: anytype, ctx: Context) void {
            return sortContextInternal(self, .unstable, sort_ctx, ctx);
        }
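        // A minimal sketch of a `sort_ctx`, assuming a hypothetical map with `u32`
        // keys that should be reordered in ascending key order:
        //
        //     const SortByKey = struct {
        //         keys: []const u32,
        //         pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
        //             return ctx.keys[a_index] < ctx.keys[b_index];
        //         }
        //     };
        //     map.sort(SortByKey{ .keys = map.keys() });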

        fn sortContextInternal(
            self: *Self,
            comptime mode: std.sort.Mode,
            sort_ctx: anytype,
            ctx: Context,
        ) void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            switch (mode) {
                .stable => self.entries.sort(sort_ctx),
                .unstable => self.entries.sortUnstable(sort_ctx),
            }
            const header = self.index_header orelse return;
            header.reset();
            self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, header);
        }

        /// Shrinks the underlying `Entry` array to `new_len` elements and
        /// discards any associated index entries. Keeps capacity the same.
        ///
        /// Asserts the discarded entries remain initialized and capable of
        /// performing hash and equality checks. Any deinitialization of
        /// discarded entries must take place *after* calling this function.
        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkRetainingCapacityContext instead.");
            return self.shrinkRetainingCapacityContext(new_len, undefined);
        }

        /// Shrinks the underlying `Entry` array to `new_len` elements and
        /// discards any associated index entries. Keeps capacity the same.
        ///
        /// Asserts the discarded entries remain initialized and capable of
        /// performing hash and equality checks. Any deinitialization of
        /// discarded entries must take place *after* calling this function.
        pub fn shrinkRetainingCapacityContext(self: *Self, new_len: usize, ctx: Context) void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            // Remove index entries from the new length onwards.
            // Explicitly choose to ONLY remove index entries and not the underlying array list
            // entries as we're going to remove them in the subsequent shrink call.
            if (self.index_header) |header| {
                var i: usize = new_len;
                while (i < self.entries.len) : (i += 1)
                    self.removeFromIndexByIndex(i, if (store_hash) {} else ctx, header);
            }
            self.entries.shrinkRetainingCapacity(new_len);
        }

        /// Shrinks the underlying `Entry` array to `new_len` elements and
        /// discards any associated index entries. Reduces allocated capacity.
        ///
        /// Asserts the discarded entries remain initialized and capable of
        /// performing hash and equality checks. It is a bug to call this
        /// function if the discarded entries require deinitialization. For
        /// that use case, `shrinkRetainingCapacity` can be used instead.
        pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead.");
            return self.shrinkAndFreeContext(gpa, new_len, undefined);
        }

        /// Shrinks the underlying `Entry` array to `new_len` elements and
        /// discards any associated index entries. Reduces allocated capacity.
        ///
        /// Asserts the discarded entries remain initialized and capable of
        /// performing hash and equality checks. It is a bug to call this
        /// function if the discarded entries require deinitialization. For
        /// that use case, `shrinkRetainingCapacityContext` can be used
        /// instead.
        pub fn shrinkAndFreeContext(self: *Self, gpa: Allocator, new_len: usize, ctx: Context) void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            // Remove index entries from the new length onwards.
            // Explicitly choose to ONLY remove index entries and not the underlying array list
            // entries as we're going to remove them in the subsequent shrink call.
            if (self.index_header) |header| {
                var i: usize = new_len;
                while (i < self.entries.len) : (i += 1)
                    self.removeFromIndexByIndex(i, if (store_hash) {} else ctx, header);
            }
            self.entries.shrinkAndFree(gpa, new_len);
        }

        /// Removes the last inserted `Entry` in the hash map and returns it,
        /// or returns null if the map is empty.
        pub fn pop(self: *Self) ?KV {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call popContext instead.");
            return self.popContext(undefined);
        }
        pub fn popContext(self: *Self, ctx: Context) ?KV {
            if (self.entries.len == 0) return null;
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();

            const item = self.entries.get(self.entries.len - 1);
            if (self.index_header) |header|
                self.removeFromIndexByIndex(self.entries.len - 1, if (store_hash) {} else ctx, header);
            self.entries.len -= 1;
            return .{
                .key = item.key,
                .value = item.value,
            };
        }
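        // A minimal usage sketch (assuming a hypothetical populated `map` with an
        // inferable context): drain the map in reverse insertion order without
        // any allocation.
        //
        //     while (map.pop()) |kv| {
        //         std.log.debug("{} -> {}", .{ kv.key, kv.value });
        //     }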

        fn fetchRemoveByKey(
            self: *Self,
            key: anytype,
            key_ctx: anytype,
            ctx: ByIndexContext,
            comptime removal_type: RemovalType,
        ) ?KV {
            const header = self.index_header orelse {
                // Linear scan.
                const key_hash = if (store_hash) key_ctx.hash(key) else {};
                const slice = self.entries.slice();
                const hashes_array = if (store_hash) slice.items(.hash) else {};
                const keys_array = slice.items(.key);
                for (keys_array, 0..) |*item_key, i| {
                    const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
                    if (hash_match and key_ctx.eql(key, item_key.*, i)) {
                        const removed_entry: KV = .{
                            .key = keys_array[i],
                            .value = slice.items(.value)[i],
                        };
                        switch (removal_type) {
                            .swap => self.entries.swapRemove(i),
                            .ordered => self.entries.orderedRemove(i),
                        }
                        return removed_entry;
                    }
                }
                return null;
            };
            return switch (header.capacityIndexType()) {
                .u8 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u8, removal_type),
                .u16 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u16, removal_type),
                .u32 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u32, removal_type),
            };
        }
        fn fetchRemoveByKeyGeneric(
            self: *Self,
            key: anytype,
            key_ctx: anytype,
            ctx: ByIndexContext,
            header: *IndexHeader,
            comptime I: type,
            comptime removal_type: RemovalType,
        ) ?KV {
            const indexes = header.indexes(I);
            const entry_index = self.removeFromIndexByKey(key, key_ctx, header, I, indexes) orelse return null;
            const slice = self.entries.slice();
            const removed_entry: KV = .{
                .key = slice.items(.key)[entry_index],
                .value = slice.items(.value)[entry_index],
            };
            self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type);
            return removed_entry;
        }

        fn removeByKey(
            self: *Self,
            key: anytype,
            key_ctx: anytype,
            ctx: ByIndexContext,
            comptime removal_type: RemovalType,
        ) bool {
            const header = self.index_header orelse {
                // Linear scan.
                const key_hash = if (store_hash) key_ctx.hash(key) else {};
                const slice = self.entries.slice();
                const hashes_array = if (store_hash) slice.items(.hash) else {};
                const keys_array = slice.items(.key);
                for (keys_array, 0..) |*item_key, i| {
                    const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
                    if (hash_match and key_ctx.eql(key, item_key.*, i)) {
                        switch (removal_type) {
                            .swap => self.entries.swapRemove(i),
                            .ordered => self.entries.orderedRemove(i),
                        }
                        return true;
                    }
                }
                return false;
            };
            return switch (header.capacityIndexType()) {
                .u8 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u8, removal_type),
                .u16 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u16, removal_type),
                .u32 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u32, removal_type),
            };
        }
        fn removeByKeyGeneric(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) bool {
            const indexes = header.indexes(I);
            const entry_index = self.removeFromIndexByKey(key, key_ctx, header, I, indexes) orelse return false;
            self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type);
            return true;
        }

        fn removeByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, comptime removal_type: RemovalType) void {
            assert(entry_index < self.entries.len);
            const header = self.index_header orelse {
                switch (removal_type) {
                    .swap => self.entries.swapRemove(entry_index),
                    .ordered => self.entries.orderedRemove(entry_index),
                }
                return;
            };
            switch (header.capacityIndexType()) {
                .u8 => self.removeByIndexGeneric(entry_index, ctx, header, u8, removal_type),
                .u16 => self.removeByIndexGeneric(entry_index, ctx, header, u16, removal_type),
                .u32 => self.removeByIndexGeneric(entry_index, ctx, header, u32, removal_type),
            }
        }
        fn removeByIndexGeneric(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) void {
            const indexes = header.indexes(I);
            self.removeFromIndexByIndexGeneric(entry_index, ctx, header, I, indexes);
            self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type);
        }

        fn removeFromArrayAndUpdateIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I), comptime removal_type: RemovalType) void {
            const last_index = self.entries.len - 1; // overflow => remove from empty map
            switch (removal_type) {
                .swap => {
                    if (last_index != entry_index) {
                        // Because of the swap remove, now we need to update the index that was
                        // pointing to the last entry and is now pointing to this removed item slot.
                        self.updateEntryIndex(header, last_index, entry_index, ctx, I, indexes);
                    }
                    // updateEntryIndex reads from the old entry index,
                    // so it needs to run before removal.
                    self.entries.swapRemove(entry_index);
                },
                .ordered => {
                    var i: usize = entry_index;
                    while (i < last_index) : (i += 1) {
                        // Because of the ordered remove, everything from the entry index onwards has
                        // been shifted forward so we'll need to update the index entries.
                        self.updateEntryIndex(header, i + 1, i, ctx, I, indexes);
                    }
                    // updateEntryIndex reads from the old entry index,
                    // so it needs to run before removal.
                    self.entries.orderedRemove(entry_index);
                },
            }
        }

        fn updateEntryIndex(
            self: *Self,
            header: *IndexHeader,
            old_entry_index: usize,
            new_entry_index: usize,
            ctx: ByIndexContext,
            comptime I: type,
            indexes: []Index(I),
        ) void {
            const slot = self.getSlotByIndex(old_entry_index, ctx, header, I, indexes);
            indexes[slot].entry_index = @as(I, @intCast(new_entry_index));
        }

        fn removeFromIndexByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader) void {
            switch (header.capacityIndexType()) {
                .u8 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u8, header.indexes(u8)),
                .u16 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u16, header.indexes(u16)),
                .u32 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u32, header.indexes(u32)),
            }
        }
        fn removeFromIndexByIndexGeneric(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
            const slot = self.getSlotByIndex(entry_index, ctx, header, I, indexes);
            removeSlot(slot, header, I, indexes);
        }

        fn removeFromIndexByKey(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type, indexes: []Index(I)) ?usize {
            const slot = self.getSlotByKey(key, ctx, header, I, indexes) orelse return null;
            const removed_entry_index = indexes[slot].entry_index;
            removeSlot(slot, header, I, indexes);
            return removed_entry_index;
        }

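        // Removes `removed_slot` from the index using backward-shift deletion:
        // entries in the following probe run are moved back one slot (their
        // distance reduced by one) until an empty slot, or an entry already at
        // its ideal position, is reached.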
        fn removeSlot(removed_slot: usize, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
            const start_index = removed_slot +% 1;
            const end_index = start_index +% indexes.len;

            var last_slot = removed_slot;
            var index: usize = start_index;
            while (index != end_index) : (index +%= 1) {
                const slot = header.constrainIndex(index);
                const slot_data = indexes[slot];
                if (slot_data.isEmpty() or slot_data.distance_from_start_index == 0) {
                    indexes[last_slot].setEmpty();
                    return;
                }
                indexes[last_slot] = .{
                    .entry_index = slot_data.entry_index,
                    .distance_from_start_index = slot_data.distance_from_start_index - 1,
                };
                last_slot = slot;
            }
            unreachable;
        }

        fn getSlotByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) usize {
            const slice = self.entries.slice();
            const h = if (store_hash) slice.items(.hash)[entry_index] else checkedHash(ctx, slice.items(.key)[entry_index]);
            const start_index = safeTruncate(usize, h);
            const end_index = start_index +% indexes.len;

            var index = start_index;
            var distance_from_start_index: I = 0;
            while (index != end_index) : ({
                index +%= 1;
                distance_from_start_index += 1;
            }) {
                const slot = header.constrainIndex(index);
                const slot_data = indexes[slot];

                // This is the fundamental property of the array hash map index.  If this
                // assert fails, it probably means that the entry was not in the index.
                assert(!slot_data.isEmpty());
                assert(slot_data.distance_from_start_index >= distance_from_start_index);

                if (slot_data.entry_index == entry_index) {
                    return slot;
                }
            }
            unreachable;
        }

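        // Insertion below follows the Robin Hood probing scheme: walk the probe
        // sequence from the hash's ideal slot; if an occupied slot holds an entry
        // that is closer to its own ideal slot than we currently are, steal that
        // slot and continue inserting the displaced index further down the run.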
        /// Must `ensureTotalCapacity`/`ensureUnusedCapacity` before calling this.
        fn getOrPutInternal(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) GetOrPutResult {
            const slice = self.entries.slice();
            const hashes_array = if (store_hash) slice.items(.hash) else {};
            const keys_array = slice.items(.key);
            const values_array = slice.items(.value);
            const indexes = header.indexes(I);

            const h = checkedHash(ctx, key);
            const start_index = safeTruncate(usize, h);
            const end_index = start_index +% indexes.len;

            var index = start_index;
            var distance_from_start_index: I = 0;
            while (index != end_index) : ({
                index +%= 1;
                distance_from_start_index += 1;
            }) {
                var slot = header.constrainIndex(index);
                var slot_data = indexes[slot];

                // If the slot is empty, there can be no more items in this run.
                // We didn't find a matching item, so this must be new.
                // Put it in the empty slot.
                if (slot_data.isEmpty()) {
                    const new_index = self.entries.addOneAssumeCapacity();
                    indexes[slot] = .{
                        .distance_from_start_index = distance_from_start_index,
                        .entry_index = @as(I, @intCast(new_index)),
                    };

                    // update the hash if applicable
                    if (store_hash) hashes_array.ptr[new_index] = h;

                    return .{
                        .found_existing = false,
                        .key_ptr = &keys_array.ptr[new_index],
                        // workaround for #6974
                        .value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array.ptr[new_index],
                        .index = new_index,
                    };
                }

                // This pointer survives the following append because we call
                // entries.ensureTotalCapacity before getOrPutInternal.
                const i = slot_data.entry_index;
                const hash_match = if (store_hash) h == hashes_array[i] else true;
                if (hash_match and checkedEql(ctx, key, keys_array[i], i)) {
                    return .{
                        .found_existing = true,
                        .key_ptr = &keys_array[slot_data.entry_index],
                        // workaround for #6974
                        .value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array[slot_data.entry_index],
                        .index = slot_data.entry_index,
                    };
                }

                // If the entry is closer to its target than our current distance,
                // the entry we are looking for does not exist.  It would be in
                // this slot instead if it was here.  So stop looking, and switch
                // to insert mode.
                if (slot_data.distance_from_start_index < distance_from_start_index) {
                    // In this case, we did not find the item. We will put a new entry.
                    // However, we will use this index for the new entry, and move
                    // the previous index down the line, to keep the max distance_from_start_index
                    // as small as possible.
                    const new_index = self.entries.addOneAssumeCapacity();
                    if (store_hash) hashes_array.ptr[new_index] = h;
                    indexes[slot] = .{
                        .entry_index = @as(I, @intCast(new_index)),
                        .distance_from_start_index = distance_from_start_index,
                    };
                    distance_from_start_index = slot_data.distance_from_start_index;
                    var displaced_index = slot_data.entry_index;

                    // Find somewhere to put the index we replaced by shifting
                    // following indexes backwards.
                    index +%= 1;
                    distance_from_start_index += 1;
                    while (index != end_index) : ({
                        index +%= 1;
                        distance_from_start_index += 1;
                    }) {
                        slot = header.constrainIndex(index);
                        slot_data = indexes[slot];
                        if (slot_data.isEmpty()) {
                            indexes[slot] = .{
                                .entry_index = displaced_index,
                                .distance_from_start_index = distance_from_start_index,
                            };
                            return .{
                                .found_existing = false,
                                .key_ptr = &keys_array.ptr[new_index],
                                // workaround for #6974
                                .value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array.ptr[new_index],
                                .index = new_index,
                            };
                        }

                        if (slot_data.distance_from_start_index < distance_from_start_index) {
                            indexes[slot] = .{
                                .entry_index = displaced_index,
                                .distance_from_start_index = distance_from_start_index,
                            };
                            displaced_index = slot_data.entry_index;
                            distance_from_start_index = slot_data.distance_from_start_index;
                        }
                    }
                    unreachable;
                }
            }
            unreachable;
        }

        fn getSlotByKey(self: Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type, indexes: []Index(I)) ?usize {
            const slice = self.entries.slice();
            const hashes_array = if (store_hash) slice.items(.hash) else {};
            const keys_array = slice.items(.key);
            const h = checkedHash(ctx, key);

            const start_index = safeTruncate(usize, h);
            const end_index = start_index +% indexes.len;

            var index = start_index;
            var distance_from_start_index: I = 0;
            while (index != end_index) : ({
                index +%= 1;
                distance_from_start_index += 1;
            }) {
                const slot = header.constrainIndex(index);
                const slot_data = indexes[slot];
                if (slot_data.isEmpty() or slot_data.distance_from_start_index < distance_from_start_index)
                    return null;

                const i = slot_data.entry_index;
                const hash_match = if (store_hash) h == hashes_array[i] else true;
                if (hash_match and checkedEql(ctx, key, keys_array[i], i))
                    return slot;
            }
            unreachable;
        }

        fn insertAllEntriesIntoNewHeader(self: *Self, ctx: ByIndexContext, header: *IndexHeader) void {
            switch (header.capacityIndexType()) {
                .u8 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u8),
                .u16 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u16),
                .u32 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u32),
            }
        }
        fn insertAllEntriesIntoNewHeaderGeneric(self: *Self, ctx: ByIndexContext, header: *IndexHeader, comptime I: type) void {
            const slice = self.entries.slice();
            const items = if (store_hash) slice.items(.hash) else slice.items(.key);
            const indexes = header.indexes(I);

            entry_loop: for (items, 0..) |key, i| {
                const h = if (store_hash) key else checkedHash(ctx, key);
                const start_index = safeTruncate(usize, h);
                const end_index = start_index +% indexes.len;
                var index = start_index;
                var entry_index = @as(I, @intCast(i));
                var distance_from_start_index: I = 0;
                while (index != end_index) : ({
                    index +%= 1;
                    distance_from_start_index += 1;
                }) {
                    const slot = header.constrainIndex(index);
                    const next_index = indexes[slot];
                    if (next_index.isEmpty()) {
                        indexes[slot] = .{
                            .distance_from_start_index = distance_from_start_index,
                            .entry_index = entry_index,
                        };
                        continue :entry_loop;
                    }
                    if (next_index.distance_from_start_index < distance_from_start_index) {
                        indexes[slot] = .{
                            .distance_from_start_index = distance_from_start_index,
                            .entry_index = entry_index,
                        };
                        distance_from_start_index = next_index.distance_from_start_index;
                        entry_index = next_index.entry_index;
                    }
                }
                unreachable;
            }
        }

        fn checkedHash(ctx: anytype, key: anytype) u32 {
            // If you get a compile error on the next line, it means that your
            // generic hash function doesn't accept your key.
            return ctx.hash(key);
        }

        fn checkedEql(ctx: anytype, a: anytype, b: K, b_index: usize) bool {
            // If you get a compile error on the next line, it means that your
            // generic eql function doesn't accept (self, adapt key, K, index).
            return ctx.eql(a, b, b_index);
        }

        fn dumpState(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8) void {
            if (@sizeOf(ByIndexContext) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call dumpStateContext instead.");
            self.dumpStateContext(keyFmt, valueFmt, undefined);
        }
        fn dumpStateContext(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8, ctx: Context) void {
            const p = std.debug.print;
            p("{s}:\n", .{@typeName(Self)});
            const slice = self.entries.slice();
            const hash_status = if (store_hash) "stored" else "computed";
            p("  len={} capacity={} hashes {s}\n", .{ slice.len, slice.capacity, hash_status });
            var i: usize = 0;
            const mask: u32 = if (self.index_header) |header| header.mask() else ~@as(u32, 0);
            while (i < slice.len) : (i += 1) {
                const hash = if (store_hash) slice.items(.hash)[i] else checkedHash(ctx, slice.items(.key)[i]);
                if (store_hash) {
                    p(
                        "  [{}]: key=" ++ keyFmt ++ " value=" ++ valueFmt ++ " hash=0x{x} slot=[0x{x}]\n",
                        .{ i, slice.items(.key)[i], slice.items(.value)[i], hash, hash & mask },
                    );
                } else {
                    p(
                        "  [{}]: key=" ++ keyFmt ++ " value=" ++ valueFmt ++ " slot=[0x{x}]\n",
                        .{ i, slice.items(.key)[i], slice.items(.value)[i], hash & mask },
                    );
                }
            }
            if (self.index_header) |header| {
                p("\n", .{});
                switch (header.capacityIndexType()) {
                    .u8 => dumpIndex(header, u8),
                    .u16 => dumpIndex(header, u16),
                    .u32 => dumpIndex(header, u32),
                }
            }
        }
        fn dumpIndex(header: *IndexHeader, comptime I: type) void {
            const p = std.debug.print;
            p("  index len=0x{x} type={}\n", .{ header.length(), header.capacityIndexType() });
            const indexes = header.indexes(I);
            if (indexes.len == 0) return;
            var is_empty = false;
            for (indexes, 0..) |idx, i| {
                if (idx.isEmpty()) {
                    is_empty = true;
                } else {
                    if (is_empty) {
                        is_empty = false;
                        p("  ...\n", .{});
                    }
                    p("  [0x{x}]: [{}] +{}\n", .{ i, idx.entry_index, idx.distance_from_start_index });
                }
            }
            if (is_empty) {
                p("  ...\n", .{});
            }
        }
    };
}

const CapacityIndexType = enum { u8, u16, u32 };

fn capacityIndexType(bit_index: u8) CapacityIndexType {
    if (bit_index <= 8)
        return .u8;
    if (bit_index <= 16)
        return .u16;
    assert(bit_index <= 32);
    return .u32;
}

fn capacityIndexSize(bit_index: u8) usize {
    switch (capacityIndexType(bit_index)) {
        .u8 => return @sizeOf(Index(u8)),
        .u16 => return @sizeOf(Index(u16)),
        .u32 => return @sizeOf(Index(u32)),
    }
}

/// @truncate fails if the target type is wider than the type of the
/// value being truncated.  This causes problems when one of the types
/// is usize, which may be larger or smaller than u32 on different
/// systems.  This version of truncate is safe to use if either
/// parameter has dynamic size, and will perform widening conversion
/// when needed.  Both arguments must have the same signedness.
fn safeTruncate(comptime T: type, val: anytype) T {
    if (@bitSizeOf(T) >= @bitSizeOf(@TypeOf(val)))
        return val;
    return @as(T, @truncate(val));
}
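// For example (hypothetical values, on a 64-bit target): `safeTruncate(u32,
// @as(usize, 0x1_2345_6789))` yields 0x2345_6789, while `safeTruncate(usize,
// @as(u32, 7))` simply widens to 7, where a bare @truncate would be a compile
// error because the destination type is wider than the source.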

/// A single entry in the lookup acceleration structure.  These structs
/// are found in an array after the IndexHeader.  Hashes index into this
/// array, and linear probing is used for collisions.
fn Index(comptime I: type) type {
    return extern struct {
        const Self = @This();

        /// The index of this entry in the backing store.  If the index is
        /// empty, this is empty_sentinel.
        entry_index: I,

        /// The distance between this slot and its ideal placement.  This is
        /// used to keep maximum scan length small.  This value is undefined
        /// if the index is empty.
        distance_from_start_index: I,

        /// The special entry_index value marking an empty slot.
        const empty_sentinel = ~@as(I, 0);

        /// A constant empty index
        const empty = Self{
            .entry_index = empty_sentinel,
            .distance_from_start_index = undefined,
        };

        /// Checks if a slot is empty
        fn isEmpty(idx: Self) bool {
            return idx.entry_index == empty_sentinel;
        }

        /// Sets a slot to empty
        fn setEmpty(idx: *Self) void {
            idx.entry_index = empty_sentinel;
            idx.distance_from_start_index = undefined;
        }
    };
}

/// The byte size of the index must fit in a usize.  It is a power of two
/// length * the size of an Index(u32).  An Index(u32) is 8 bytes (3 bits of
/// the exponent), and max_usize + 1 is not representable, so we need to
/// subtract out 4 bits.
const max_representable_index_len = @bitSizeOf(usize) - 4;
const max_bit_index = @min(32, max_representable_index_len);
const min_bit_index = 5;
const max_capacity = (1 << max_bit_index) - 1;
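// Each index size class admits at most 60% (3/5) of its slot count, bounding the
// load factor of the probing table; the largest class is special-cased to
// max_capacity.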
const index_capacities = blk: {
    var caps: [max_bit_index + 1]u32 = undefined;
    for (caps[0..max_bit_index], 0..) |*item, i| {
        item.* = (1 << i) * 3 / 5;
    }
    caps[max_bit_index] = max_capacity;
    break :blk caps;
};

/// This struct is trailed by two arrays of length indexes_len
/// of integers, whose integer size is determined by indexes_len.
/// These arrays are indexed by constrainIndex(hash).  The
/// entryIndexes array contains the index in the dense backing store
/// where the entry's data can be found.  Entries which are not in
/// use have their index value set to emptySentinel(I).
/// The entryDistances array stores the distance between an entry
/// and its ideal hash bucket.  This is used when adding elements
/// to balance the maximum scan length.
const IndexHeader = struct {
    /// This field tracks the total number of items in the arrays following
    /// this header.  It is the bit index of the power of two number of indices.
    /// This value is between min_bit_index and max_bit_index, inclusive.
    bit_index: u8 align(@alignOf(u32)),

    /// Map from an incrementing index to an index slot in the attached arrays.
    fn constrainIndex(header: IndexHeader, i: usize) usize {
        // This is an optimization for modulo of power of two integers;
        // it requires `indexes_len` to always be a power of two.
        return @as(usize, @intCast(i & header.mask()));
    }

    /// Returns the attached array of indexes.  I must match the type
    /// returned by capacityIndexType.
    fn indexes(header: *IndexHeader, comptime I: type) []Index(I) {
        const start_ptr: [*]Index(I) = @ptrCast(@alignCast(@as([*]u8, @ptrCast(header)) + @sizeOf(IndexHeader)));
        return start_ptr[0..header.length()];
    }

    /// Returns the type used for the index arrays.
    fn capacityIndexType(header: IndexHeader) CapacityIndexType {
        return hash_map.capacityIndexType(header.bit_index);
    }

    fn capacity(self: IndexHeader) u32 {
        return index_capacities[self.bit_index];
    }
    fn length(self: IndexHeader) usize {
        return @as(usize, 1) << @as(math.Log2Int(usize), @intCast(self.bit_index));
    }
    fn mask(self: IndexHeader) u32 {
        return @as(u32, @intCast(self.length() - 1));
    }

    fn findBitIndex(desired_capacity: usize) Allocator.Error!u8 {
        if (desired_capacity > max_capacity) return error.OutOfMemory;
        var new_bit_index: u8 = @intCast(std.math.log2_int_ceil(usize, desired_capacity));
        if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1;
        if (new_bit_index < min_bit_index) new_bit_index = min_bit_index;
        assert(desired_capacity <= index_capacities[new_bit_index]);
        return new_bit_index;
    }

    /// Allocates an index header, and fills the entryIndexes array with empty.
    /// The distance array contents are undefined.
    fn alloc(gpa: Allocator, new_bit_index: u8) Allocator.Error!*IndexHeader {
        const len = @as(usize, 1) << @as(math.Log2Int(usize), @intCast(new_bit_index));
        const index_size = hash_map.capacityIndexSize(new_bit_index);
        const nbytes = @sizeOf(IndexHeader) + index_size * len;
        const bytes = try gpa.alignedAlloc(u8, .of(IndexHeader), nbytes);
        @memset(bytes[@sizeOf(IndexHeader)..], 0xff);
        const result: *IndexHeader = @ptrCast(@alignCast(bytes.ptr));
        result.* = .{
            .bit_index = new_bit_index,
        };
        return result;
    }

    /// Releases the memory for a header and its associated arrays.
    fn free(header: *IndexHeader, gpa: Allocator) void {
        const index_size = hash_map.capacityIndexSize(header.bit_index);
        const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header);
        const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];
        gpa.free(slice);
    }

    /// Puts an IndexHeader into the state that it would be in after being freshly allocated.
    fn reset(header: *IndexHeader) void {
        const index_size = hash_map.capacityIndexSize(header.bit_index);
        const ptr: [*]align(@alignOf(IndexHeader)) u8 = @ptrCast(header);
        const nbytes = @sizeOf(IndexHeader) + header.length() * index_size;
        @memset(ptr[@sizeOf(IndexHeader)..nbytes], 0xff);
    }

    // Verify that the header has sufficient alignment to produce aligned arrays.
    comptime {
        if (@alignOf(u32) > @alignOf(IndexHeader))
            @compileError("IndexHeader must have an alignment at least as large as its indexes!");
2188    }
2189};

test "basic hash map usage" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    try testing.expect((try map.fetchPut(1, 11)) == null);
    try testing.expect((try map.fetchPut(2, 22)) == null);
    try testing.expect((try map.fetchPut(3, 33)) == null);
    try testing.expect((try map.fetchPut(4, 44)) == null);

    try map.putNoClobber(5, 55);
    try testing.expect((try map.fetchPut(5, 66)).?.value == 55);
    try testing.expect((try map.fetchPut(5, 55)).?.value == 66);

    const gop1 = try map.getOrPut(5);
    try testing.expect(gop1.found_existing == true);
    try testing.expect(gop1.value_ptr.* == 55);
    try testing.expect(gop1.index == 4);
    gop1.value_ptr.* = 77;
    try testing.expect(map.getEntry(5).?.value_ptr.* == 77);

    const gop2 = try map.getOrPut(99);
    try testing.expect(gop2.found_existing == false);
    try testing.expect(gop2.index == 5);
    gop2.value_ptr.* = 42;
    try testing.expect(map.getEntry(99).?.value_ptr.* == 42);

    const gop3 = try map.getOrPutValue(5, 5);
    try testing.expect(gop3.value_ptr.* == 77);

    const gop4 = try map.getOrPutValue(100, 41);
    try testing.expect(gop4.value_ptr.* == 41);

    try testing.expect(map.contains(2));
    try testing.expect(map.getEntry(2).?.value_ptr.* == 22);
    try testing.expect(map.get(2).? == 22);

    const rmv1 = map.fetchSwapRemove(2);
    try testing.expect(rmv1.?.key == 2);
    try testing.expect(rmv1.?.value == 22);
    try testing.expect(map.fetchSwapRemove(2) == null);
    try testing.expect(map.swapRemove(2) == false);
    try testing.expect(map.getEntry(2) == null);
    try testing.expect(map.get(2) == null);

    // `swapRemove` backfills the removed slot with the last entry, so key 100
    // now sits at index 1 while key 5 keeps its original index.
    try testing.expect(map.getIndex(100).? == 1);
    const gop5 = try map.getOrPut(5);
    try testing.expect(gop5.found_existing == true);
    try testing.expect(gop5.value_ptr.* == 77);
    try testing.expect(gop5.index == 4);

    // Whereas an `orderedRemove` shifts every later entry down one slot,
    // so key 5's index moves from 4 to 3.
    const rmv2 = map.fetchOrderedRemove(100);
    try testing.expect(rmv2.?.key == 100);
    try testing.expect(rmv2.?.value == 41);
    try testing.expect(map.fetchOrderedRemove(100) == null);
    try testing.expect(map.orderedRemove(100) == false);
    try testing.expect(map.getEntry(100) == null);
    try testing.expect(map.get(100) == null);
    const gop6 = try map.getOrPut(5);
    try testing.expect(gop6.found_existing == true);
    try testing.expect(gop6.value_ptr.* == 77);
    try testing.expect(gop6.index == 3);

    try testing.expect(map.swapRemove(3));
}

test "iterator hash map" {
    var reset_map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer reset_map.deinit();

    // test ensureTotalCapacity with a 0 parameter
    try reset_map.ensureTotalCapacity(0);

    try reset_map.putNoClobber(0, 11);
    try reset_map.putNoClobber(1, 22);
    try reset_map.putNoClobber(2, 33);

    const keys = [_]i32{
        0, 2, 1,
    };

    const values = [_]i32{
        11, 33, 22,
    };

    var buffer = [_]i32{
        0, 0, 0,
    };

    var it = reset_map.iterator();
    const first_entry = it.next().?;
    it.reset();

    var count: usize = 0;
    while (it.next()) |entry| : (count += 1) {
        buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*;
    }
    try testing.expect(count == 3);
    try testing.expect(it.next() == null);

    for (buffer, 0..) |_, i| {
        try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]);
    }

    it.reset();
    count = 0;
    while (it.next()) |entry| {
        buffer[@as(usize, @intCast(entry.key_ptr.*))] = entry.value_ptr.*;
        count += 1;
        if (count >= 2) break;
    }

    for (buffer[0..2], 0..) |_, i| {
        try testing.expect(buffer[@as(usize, @intCast(keys[i]))] == values[i]);
    }

    it.reset();
    const entry = it.next().?;
    try testing.expect(entry.key_ptr.* == first_entry.key_ptr.*);
    try testing.expect(entry.value_ptr.* == first_entry.value_ptr.*);
}

test "ensure capacity" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    try map.ensureTotalCapacity(20);
    const initial_capacity = map.capacity();
    try testing.expect(initial_capacity >= 20);
    var i: i32 = 0;
    while (i < 20) : (i += 1) {
        try testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null);
    }
    // shouldn't have resized from fetchPutAssumeCapacity
    try testing.expect(initial_capacity == map.capacity());
}

test "ensure capacity leak" {
    try testing.checkAllAllocationFailures(std.testing.allocator, struct {
        pub fn f(allocator: Allocator) !void {
            var map = AutoArrayHashMap(i32, i32).init(allocator);
            defer map.deinit();

            var i: i32 = 0;
            // put more than `linear_scan_max` in so index_header gets allocated.
            while (i <= 20) : (i += 1) try map.put(i, i);
        }
    }.f, .{});
}
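
// Illustrative sketch (not part of the upstream test suite): the index header
// only exists once the entry count outgrows the small linear-scan threshold
// that the comment in the test above alludes to; the exact threshold value is
// not assumed here.
test "index header appears only past the linear-scan threshold (sketch)" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    // A single entry is always served by linear scanning, so no index header yet.
    try map.put(0, 0);
    try testing.expect(map.unmanaged.index_header == null);

    // The same count the test above uses is comfortably past the threshold.
    var i: i32 = 1;
    while (i <= 20) : (i += 1) try map.put(i, i);
    try testing.expect(map.unmanaged.index_header != null);
}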

test "big map" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    var i: i32 = 0;
    while (i < 8) : (i += 1) {
        try map.put(i, i + 10);
    }

    i = 0;
    while (i < 8) : (i += 1) {
        try testing.expectEqual(@as(?i32, i + 10), map.get(i));
    }
    while (i < 16) : (i += 1) {
        try testing.expectEqual(@as(?i32, null), map.get(i));
    }

    i = 4;
    while (i < 12) : (i += 1) {
        try map.put(i, i + 12);
    }

    i = 0;
    while (i < 4) : (i += 1) {
        try testing.expectEqual(@as(?i32, i + 10), map.get(i));
    }
    while (i < 12) : (i += 1) {
        try testing.expectEqual(@as(?i32, i + 12), map.get(i));
    }
    while (i < 16) : (i += 1) {
        try testing.expectEqual(@as(?i32, null), map.get(i));
    }

    i = 0;
    while (i < 4) : (i += 1) {
        try testing.expect(map.orderedRemove(i));
    }
    while (i < 8) : (i += 1) {
        try testing.expect(map.swapRemove(i));
    }

    i = 0;
    while (i < 8) : (i += 1) {
        try testing.expectEqual(@as(?i32, null), map.get(i));
    }
    while (i < 12) : (i += 1) {
        try testing.expectEqual(@as(?i32, i + 12), map.get(i));
    }
    while (i < 16) : (i += 1) {
        try testing.expectEqual(@as(?i32, null), map.get(i));
    }
}

test "clone" {
    var original = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer original.deinit();

    // put more than `linear_scan_max` so we can test that the index header is properly cloned
    var i: u8 = 0;
    while (i < 10) : (i += 1) {
        try original.putNoClobber(i, i * 10);
    }

    var copy = try original.clone();
    defer copy.deinit();

    i = 0;
    while (i < 10) : (i += 1) {
        try testing.expect(original.get(i).? == i * 10);
        try testing.expect(copy.get(i).? == i * 10);
        try testing.expect(original.getPtr(i).? != copy.getPtr(i).?);
    }

    while (i < 20) : (i += 1) {
        try testing.expect(original.get(i) == null);
        try testing.expect(copy.get(i) == null);
    }
}

test "shrink" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    // This test is more interesting if we insert enough entries to allocate the index header.
    const num_entries = 200;
    var i: i32 = 0;
    while (i < num_entries) : (i += 1)
        try testing.expect((try map.fetchPut(i, i * 10)) == null);

    try testing.expect(map.unmanaged.index_header != null);
    try testing.expect(map.count() == num_entries);

    // Test `shrinkRetainingCapacity`.
    map.shrinkRetainingCapacity(17);
    try testing.expect(map.count() == 17);
    try testing.expect(map.capacity() >= num_entries);
    i = 0;
    while (i < num_entries) : (i += 1) {
        const gop = try map.getOrPut(i);
        if (i < 17) {
            try testing.expect(gop.found_existing == true);
            try testing.expect(gop.value_ptr.* == i * 10);
        } else try testing.expect(gop.found_existing == false);
    }

    // Test `shrinkAndFree`.
    map.shrinkAndFree(15);
    try testing.expect(map.count() == 15);
    try testing.expect(map.capacity() == 15);
    i = 0;
    while (i < num_entries) : (i += 1) {
        const gop = try map.getOrPut(i);
        if (i < 15) {
            try testing.expect(gop.found_existing == true);
            try testing.expect(gop.value_ptr.* == i * 10);
        } else try testing.expect(gop.found_existing == false);
    }
}

test "pop()" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    // Insert just enough entries so that the map expands. Afterwards,
    // pop all entries out of the map.

    var i: i32 = 0;
    while (i < 9) : (i += 1) {
        try testing.expect((try map.fetchPut(i, i)) == null);
    }

    while (map.pop()) |pop| {
        try testing.expect(pop.key == i - 1 and pop.value == i - 1);
        i -= 1;
    }

    try testing.expect(map.count() == 0);
}

test "reIndex" {
    var map = ArrayHashMap(i32, i32, AutoContext(i32), true).init(std.testing.allocator);
    defer map.deinit();

    // Populate via the API.
    const num_indexed_entries = 200;
    var i: i32 = 0;
    while (i < num_indexed_entries) : (i += 1)
        try testing.expect((try map.fetchPut(i, i * 10)) == null);

    // Make sure we allocated an index header.
    try testing.expect(map.unmanaged.index_header != null);

    // Now write to the arrays directly.
    const num_unindexed_entries = 20;
    try map.unmanaged.entries.resize(std.testing.allocator, num_indexed_entries + num_unindexed_entries);
    for (map.keys()[num_indexed_entries..], map.values()[num_indexed_entries..], num_indexed_entries..) |*key, *value, j| {
        key.* = @intCast(j);
        value.* = @intCast(j * 10);
    }

    // After reindexing, we should see everything.
    try map.reIndex();
    i = 0;
    while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) {
        const gop = try map.getOrPut(i);
        try testing.expect(gop.found_existing == true);
        try testing.expect(gop.value_ptr.* == i * 10);
        try testing.expect(gop.index == i);
    }
}

test "auto store_hash" {
    const HasCheapEql = AutoArrayHashMap(i32, i32);
    const HasExpensiveEql = AutoArrayHashMap([32]i32, i32);
    try testing.expect(@FieldType(HasCheapEql.Data, "hash") == void);
    try testing.expect(@FieldType(HasExpensiveEql.Data, "hash") != void);

    const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32);
    const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32);
    try testing.expect(@FieldType(HasCheapEqlUn.Data, "hash") == void);
    try testing.expect(@FieldType(HasExpensiveEqlUn.Data, "hash") != void);
}

test "sort" {
    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    for ([_]i32{ 8, 3, 12, 10, 2, 4, 9, 5, 6, 13, 14, 15, 16, 1, 11, 17, 7 }) |x| {
        try map.put(x, x * 3);
    }

    const C = struct {
        keys: []i32,

        pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
            return ctx.keys[a_index] < ctx.keys[b_index];
        }
    };

    map.sort(C{ .keys = map.keys() });

    var x: i32 = 1;
    for (map.keys(), 0..) |key, i| {
        try testing.expect(key == x);
        try testing.expect(map.values()[i] == x * 3);
        x += 1;
    }
}

test "0 sized key" {
    var map = AutoArrayHashMap(u0, i32).init(std.testing.allocator);
    defer map.deinit();

    try testing.expectEqual(map.get(0), null);

    try map.put(0, 5);
    try testing.expectEqual(map.get(0), 5);

    try map.put(0, 10);
    try testing.expectEqual(map.get(0), 10);

    try testing.expectEqual(map.swapRemove(0), true);
    try testing.expectEqual(map.get(0), null);
}

test "0 sized key and 0 sized value" {
    var map = AutoArrayHashMap(u0, u0).init(std.testing.allocator);
    defer map.deinit();

    try testing.expectEqual(map.get(0), null);

    try map.put(0, 0);
    try testing.expectEqual(map.get(0), 0);

    try testing.expectEqual(map.swapRemove(0), true);
    try testing.expectEqual(map.get(0), null);
}

test "setKey storehash true" {
    const gpa = std.testing.allocator;

    var map: ArrayHashMapUnmanaged(i32, i32, AutoContext(i32), true) = .empty;
    defer map.deinit(gpa);

    try map.put(gpa, 12, 34);
    try map.put(gpa, 56, 78);

    try map.setKey(gpa, 0, 42);
    try testing.expectEqual(2, map.count());
    try testing.expectEqual(false, map.contains(12));
    try testing.expectEqual(34, map.get(42));
    try testing.expectEqual(78, map.get(56));
}

test "setKey storehash false" {
    const gpa = std.testing.allocator;

    var map: ArrayHashMapUnmanaged(i32, i32, AutoContext(i32), false) = .empty;
    defer map.deinit(gpa);

    try map.put(gpa, 12, 34);
    try map.put(gpa, 56, 78);

    try map.setKey(gpa, 0, 42);
    try testing.expectEqual(2, map.count());
    try testing.expectEqual(false, map.contains(12));
    try testing.expectEqual(34, map.get(42));
    try testing.expectEqual(78, map.get(56));
}

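/// Returns a `Context.hash`-style function for pointer keys that hashes the
/// pointer's address rather than the value it points to.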
pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
    return struct {
        fn hash(ctx: Context, key: K) u32 {
            _ = ctx;
            return getAutoHashFn(usize, void)({}, @intFromPtr(key));
        }
    }.hash;
}

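/// Returns an equality function that simply compares two keys with `==`.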
pub fn getTrivialEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
    return struct {
        fn eql(ctx: Context, a: K, b: K) bool {
            _ = ctx;
            return a == b;
        }
    }.eql;
}

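/// The default hashing context: keys are hashed with `getAutoHashFn` (Wyhash
/// via `std.hash.autoHash`) and compared with `getAutoEqlFn` (`std.meta.eql`).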
pub fn AutoContext(comptime K: type) type {
    return struct {
        pub const hash = getAutoHashFn(K, @This());
        pub const eql = getAutoEqlFn(K, @This());
    };
}

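/// Builds the default hash function for `K`: a single Wyhash pass over the raw
/// bytes when `K` has a unique representation, otherwise `std.hash.autoHash`.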
pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
    return struct {
        fn hash(ctx: Context, key: K) u32 {
            _ = ctx;
            if (std.meta.hasUniqueRepresentation(K)) {
                return @truncate(Wyhash.hash(0, std.mem.asBytes(&key)));
            } else {
                var hasher = Wyhash.init(0);
                autoHash(&hasher, key);
                return @truncate(hasher.final());
            }
        }
    }.hash;
}

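/// Builds the default equality function for `K`, comparing keys with
/// `std.meta.eql` and ignoring the stored element index.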
pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K, usize) bool) {
    return struct {
        fn eql(ctx: Context, a: K, b: K, b_index: usize) bool {
            _ = b_index;
            _ = ctx;
            return std.meta.eql(a, b);
        }
    }.eql;
}

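/// True for key types whose default equality check is a cheap scalar
/// comparison; `AutoArrayHashMap` uses this to decide whether storing each
/// element's hash alongside the entries is worthwhile.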
pub fn autoEqlIsCheap(comptime K: type) bool {
    return switch (@typeInfo(K)) {
        .bool,
        .int,
        .float,
        .pointer,
        .comptime_float,
        .comptime_int,
        .@"enum",
        .@"fn",
        .error_set,
        .@"anyframe",
        .enum_literal,
        => true,
        else => false,
    };
}

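/// Like `getAutoHashFn`, but hashes with an explicit `std.hash.Strategy`
/// (e.g. following pointers) via `std.hash.autoHashStrat`.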
pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime strategy: std.hash.Strategy) (fn (Context, K) u32) {
    return struct {
        fn hash(ctx: Context, key: K) u32 {
            _ = ctx;
            var hasher = Wyhash.init(0);
            std.hash.autoHashStrat(&hasher, key, strategy);
            return @as(u32, @truncate(hasher.final()));
        }
    }.hash;
}

test "orderedRemoveAtMany" {
    const gpa = testing.allocator;

    var map: AutoArrayHashMapUnmanaged(usize, void) = .empty;
    defer map.deinit(gpa);

    for (0..10) |n| {
        try map.put(gpa, n, {});
    }

    try map.orderedRemoveAtMany(gpa, &.{ 1, 5, 5, 7, 9 });
    try testing.expectEqualSlices(usize, &.{ 0, 2, 3, 4, 6, 8 }, map.keys());

    try map.orderedRemoveAtMany(gpa, &.{0});
    try testing.expectEqualSlices(usize, &.{ 2, 3, 4, 6, 8 }, map.keys());

    try map.orderedRemoveAtMany(gpa, &.{});
    try testing.expectEqualSlices(usize, &.{ 2, 3, 4, 6, 8 }, map.keys());

    try map.orderedRemoveAtMany(gpa, &.{ 1, 2, 3, 4 });
    try testing.expectEqualSlices(usize, &.{2}, map.keys());

    try map.orderedRemoveAtMany(gpa, &.{0});
    try testing.expectEqualSlices(usize, &.{}, map.keys());
}
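
// Illustrative sketch (not part of the upstream test suite): a hand-written
// context only needs `hash` and `eql`; this one treats ASCII case as
// insignificant. Every identifier in this test is invented for the example.
test "custom context sketch: case-insensitive string keys" {
    const CaseInsensitiveContext = struct {
        pub fn hash(self: @This(), s: []const u8) u32 {
            _ = self;
            // Hash the lower-cased bytes so "Hello" and "HELLO" land in the same slot.
            var hasher = Wyhash.init(0);
            for (s) |c| hasher.update(&[_]u8{std.ascii.toLower(c)});
            return @truncate(hasher.final());
        }
        pub fn eql(self: @This(), a: []const u8, b: []const u8, b_index: usize) bool {
            _ = self;
            _ = b_index;
            return std.ascii.eqlIgnoreCase(a, b);
        }
    };

    var map = ArrayHashMap([]const u8, u32, CaseInsensitiveContext, true).init(std.testing.allocator);
    defer map.deinit();

    try map.put("Hello", 1);
    try testing.expectEqual(@as(?u32, 1), map.get("HELLO"));
    try testing.expectEqual(@as(?u32, null), map.get("world"));
}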