const std = @import("std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const autoHash = std.hash.autoHash;
const math = std.math;
const mem = std.mem;
const Allocator = mem.Allocator;
const Wyhash = std.hash.Wyhash;
const Alignment = std.mem.Alignment;

pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u64) {
    comptime {
        assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updating
        if (K == []const u8) {
            @compileError("std.hash.autoHash does not allow slices here (" ++
                @typeName(K) ++
                ") because the intent is unclear. " ++
                "Consider using std.StringHashMap for hashing the contents of []const u8. " ++
                "Alternatively, consider using std.hash.autoHashStrat or providing your own hash function instead.");
        }
    }

    return struct {
        fn hash(ctx: Context, key: K) u64 {
            _ = ctx;
            if (std.meta.hasUniqueRepresentation(K)) {
                return Wyhash.hash(0, std.mem.asBytes(&key));
            } else {
                var hasher = Wyhash.init(0);
                autoHash(&hasher, key);
                return hasher.final();
            }
        }
    }.hash;
}

pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
    return struct {
        fn eql(ctx: Context, a: K, b: K) bool {
            _ = ctx;
            return std.meta.eql(a, b);
        }
    }.eql;
}

pub fn AutoHashMap(comptime K: type, comptime V: type) type {
    return HashMap(K, V, AutoContext(K), default_max_load_percentage);
}

pub fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type {
    return HashMapUnmanaged(K, V, AutoContext(K), default_max_load_percentage);
}

pub fn AutoContext(comptime K: type) type {
    return struct {
        pub const hash = getAutoHashFn(K, @This());
        pub const eql = getAutoEqlFn(K, @This());
    };
}
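
// Illustrative sketch (not part of the original file): basic usage of
// `AutoHashMap` with the `std.testing` allocator.
test "AutoHashMap usage sketch" {
    var map = AutoHashMap(u32, []const u8).init(std.testing.allocator);
    defer map.deinit();

    try map.put(1, "one");
    try map.put(2, "two");

    // `get` returns a copy of the value, or null if the key is absent.
    try std.testing.expectEqualStrings("one", map.get(1).?);
    try std.testing.expect(map.get(3) == null);
    try std.testing.expectEqual(@as(u32, 2), map.count());
}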

/// Builtin hashmap for strings as keys.
/// Key memory is managed by the caller.  Keys and values
/// will not automatically be freed.
pub fn StringHashMap(comptime V: type) type {
    return HashMap([]const u8, V, StringContext, default_max_load_percentage);
}

/// Key memory is managed by the caller.  Keys and values
/// will not automatically be freed.
pub fn StringHashMapUnmanaged(comptime V: type) type {
    return HashMapUnmanaged([]const u8, V, StringContext, default_max_load_percentage);
}

pub const StringContext = struct {
    pub fn hash(self: @This(), s: []const u8) u64 {
        _ = self;
        return hashString(s);
    }
    pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
        _ = self;
        return eqlString(a, b);
    }
};

pub fn eqlString(a: []const u8, b: []const u8) bool {
    return mem.eql(u8, a, b);
}

pub fn hashString(s: []const u8) u64 {
    return std.hash.Wyhash.hash(0, s);
}
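
// Illustrative sketch (not part of the original file): `StringHashMap` hashes
// the string contents; the caller keeps ownership of the key memory.
test "StringHashMap usage sketch" {
    var ages = StringHashMap(u32).init(std.testing.allocator);
    defer ages.deinit();

    try ages.put("alice", 30);
    try ages.put("bob", 25);

    try std.testing.expectEqual(@as(u32, 30), ages.get("alice").?);
    try std.testing.expect(!ages.contains("carol"));
}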

pub const StringIndexContext = struct {
    bytes: *const std.ArrayList(u8),

    pub fn eql(_: @This(), a: u32, b: u32) bool {
        return a == b;
    }

    pub fn hash(ctx: @This(), key: u32) u64 {
        return hashString(mem.sliceTo(ctx.bytes.items[key..], 0));
    }
};

pub const StringIndexAdapter = struct {
    bytes: *const std.ArrayList(u8),

    pub fn eql(ctx: @This(), a: []const u8, b: u32) bool {
        return mem.eql(u8, a, mem.sliceTo(ctx.bytes.items[b..], 0));
    }

    pub fn hash(_: @This(), adapted_key: []const u8) u64 {
        assert(mem.indexOfScalar(u8, adapted_key, 0) == null);
        return hashString(adapted_key);
    }
};
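
// Illustrative sketch (not part of the original file): string interning with
// `StringIndexContext`/`StringIndexAdapter`. Stored keys are `u32` offsets
// into a shared buffer of NUL-terminated strings, while lookups can use the
// string itself. Assumes the unmanaged `std.ArrayList` API.
test "StringIndexAdapter interning sketch" {
    const gpa = std.testing.allocator;
    var bytes: std.ArrayList(u8) = .empty;
    defer bytes.deinit(gpa);
    var map: HashMapUnmanaged(u32, void, StringIndexContext, default_max_load_percentage) = .empty;
    defer map.deinit(gpa);

    const name: []const u8 = "hello";
    const gop = try map.getOrPutContextAdapted(
        gpa,
        name,
        StringIndexAdapter{ .bytes = &bytes },
        StringIndexContext{ .bytes = &bytes },
    );
    if (!gop.found_existing) {
        // Record the offset of the interned copy, then append it NUL-terminated.
        gop.key_ptr.* = @intCast(bytes.items.len);
        try bytes.appendSlice(gpa, name);
        try bytes.append(gpa, 0);
    }
}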

pub const default_max_load_percentage = 80;

/// General purpose hash table.
/// No order is guaranteed and any modification invalidates live iterators.
/// It provides fast operations (lookup, insertion, deletion) with quite high
/// load factors (up to 80% by default) for low memory usage.
/// For a hash map that can be initialized directly and does not store an
/// Allocator field, see `HashMapUnmanaged`.
/// If iterating over the table entries is a strong use case and needs to be fast,
/// prefer the alternative `std.ArrayHashMap`.
/// Context must be a struct type with two member functions:
///   hash(self, K) u64
///   eql(self, K, K) bool
/// Adapted variants of many functions are provided.  These variants
/// take a pseudo key instead of a key.  Their context must have the functions:
///   hash(self, PseudoKey) u64
///   eql(self, PseudoKey, K) bool
pub fn HashMap(
    comptime K: type,
    comptime V: type,
    comptime Context: type,
    comptime max_load_percentage: u64,
) type {
    return struct {
        unmanaged: Unmanaged,
        allocator: Allocator,
        ctx: Context,

        /// The type of the unmanaged hash map underlying this wrapper
        pub const Unmanaged = HashMapUnmanaged(K, V, Context, max_load_percentage);
        /// An entry, containing pointers to a key and value stored in the map
        pub const Entry = Unmanaged.Entry;
        /// A copy of a key and value which are no longer in the map
        pub const KV = Unmanaged.KV;
        /// The integer type that is the result of hashing
        pub const Hash = Unmanaged.Hash;
        /// The iterator type returned by iterator()
        pub const Iterator = Unmanaged.Iterator;

        pub const KeyIterator = Unmanaged.KeyIterator;
        pub const ValueIterator = Unmanaged.ValueIterator;

        /// The integer type used to store the size of the map
        pub const Size = Unmanaged.Size;
        /// The type returned from getOrPut and variants
        pub const GetOrPutResult = Unmanaged.GetOrPutResult;

        const Self = @This();

        /// Create a managed hash map with an empty context.
        /// If the context is not zero-sized, you must use
        /// initContext(allocator, ctx) instead.
        pub fn init(allocator: Allocator) Self {
            if (@sizeOf(Context) != 0) {
                @compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
            }
            return .{
                .unmanaged = .empty,
                .allocator = allocator,
                .ctx = undefined, // ctx is zero-sized so this is safe.
            };
        }

        /// Create a managed hash map with a context
        pub fn initContext(allocator: Allocator, ctx: Context) Self {
            return .{
                .unmanaged = .empty,
                .allocator = allocator,
                .ctx = ctx,
            };
        }

        /// Puts the hash map into a state where any method call that would
        /// cause an existing key or value pointer to become invalidated will
        /// instead trigger an assertion.
        ///
        /// An additional call to `lockPointers` in such state also triggers an
        /// assertion.
        ///
        /// `unlockPointers` returns the hash map to the previous state.
        pub fn lockPointers(self: *Self) void {
            self.unmanaged.lockPointers();
        }

        /// Undoes a call to `lockPointers`.
        pub fn unlockPointers(self: *Self) void {
            self.unmanaged.unlockPointers();
        }

        /// Release the backing array and invalidate this map.
        /// This does *not* deinit keys, values, or the context!
        /// If your keys or values need to be released, ensure
        /// that this is done before calling this function.
        pub fn deinit(self: *Self) void {
            self.unmanaged.deinit(self.allocator);
            self.* = undefined;
        }

        /// Empty the map, but keep the backing allocation for future use.
        /// This does *not* free keys or values! Be sure to
        /// release them if they need deinitialization before
        /// calling this function.
        pub fn clearRetainingCapacity(self: *Self) void {
            return self.unmanaged.clearRetainingCapacity();
        }

        /// Empty the map and release the backing allocation.
        /// This does *not* free keys or values! Be sure to
        /// release them if they need deinitialization before
        /// calling this function.
        pub fn clearAndFree(self: *Self) void {
            return self.unmanaged.clearAndFree(self.allocator);
        }

        /// Return the number of items in the map.
        pub fn count(self: Self) Size {
            return self.unmanaged.count();
        }

        /// Create an iterator over the entries in the map.
        /// The iterator is invalidated if the map is modified.
        pub fn iterator(self: *const Self) Iterator {
            return self.unmanaged.iterator();
        }

        /// Create an iterator over the keys in the map.
        /// The iterator is invalidated if the map is modified.
        pub fn keyIterator(self: Self) KeyIterator {
            return self.unmanaged.keyIterator();
        }

        /// Create an iterator over the values in the map.
        /// The iterator is invalidated if the map is modified.
        pub fn valueIterator(self: Self) ValueIterator {
            return self.unmanaged.valueIterator();
        }

        /// If key exists, this function cannot fail.
        /// If there is an existing item with `key`, then the result's
        /// `Entry` pointers point to it, and found_existing is true.
        /// Otherwise, puts a new item with undefined value, and
        /// the `Entry` pointers point to it. Caller should then initialize
        /// the value (but not the key).
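        ///
        /// Example (illustrative sketch, not from the original docs): counting
        /// occurrences with a single lookup, assuming `counts` maps word to count:
        ///
        ///   const gop = try counts.getOrPut(word);
        ///   if (!gop.found_existing) gop.value_ptr.* = 0;
        ///   gop.value_ptr.* += 1;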
        pub fn getOrPut(self: *Self, key: K) Allocator.Error!GetOrPutResult {
            return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx);
        }

        /// If key exists, this function cannot fail.
        /// If there is an existing item with `key`, then the result's
        /// `Entry` pointers point to it, and found_existing is true.
        /// Otherwise, puts a new item with undefined key and value, and
        /// the `Entry` pointers point to it. Caller must then initialize
        /// the key and value.
        pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) Allocator.Error!GetOrPutResult {
            return self.unmanaged.getOrPutContextAdapted(self.allocator, key, ctx, self.ctx);
        }

        /// If there is an existing item with `key`, then the result's
        /// `Entry` pointers point to it, and found_existing is true.
        /// Otherwise, puts a new item with undefined value, and
        /// the `Entry` pointers point to it. Caller should then initialize
        /// the value (but not the key).
        /// If a new entry needs to be stored, this function asserts there
        /// is enough capacity to store it.
        pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
            return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx);
        }

        /// If there is an existing item with `key`, then the result's
        /// `Entry` pointers point to it, and found_existing is true.
        /// Otherwise, puts a new item with undefined value, and
        /// the `Entry` pointers point to it. Caller must then initialize
        /// the key and value.
        /// If a new entry needs to be stored, this function asserts there
        /// is enough capacity to store it.
        pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
            return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
        }

        pub fn getOrPutValue(self: *Self, key: K, value: V) Allocator.Error!Entry {
            return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx);
        }

        /// Increases capacity, guaranteeing that insertions up until the
        /// `expected_count` will not cause an allocation, and therefore cannot fail.
        pub fn ensureTotalCapacity(self: *Self, expected_count: Size) Allocator.Error!void {
            return self.unmanaged.ensureTotalCapacityContext(self.allocator, expected_count, self.ctx);
        }

        /// Increases capacity, guaranteeing that insertions up until
        /// `additional_count` **more** items will not cause an allocation, and
        /// therefore cannot fail.
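        ///
        /// Example (illustrative sketch, not from the original docs): reserve
        /// up front so a batch of inserts cannot fail midway:
        ///
        ///   try map.ensureUnusedCapacity(@intCast(items.len));
        ///   for (items) |kv| map.putAssumeCapacity(kv.key, kv.value);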
        pub fn ensureUnusedCapacity(self: *Self, additional_count: Size) Allocator.Error!void {
            return self.unmanaged.ensureUnusedCapacityContext(self.allocator, additional_count, self.ctx);
        }

        /// Returns the number of total elements which may be present before it is
        /// no longer guaranteed that no allocations will be performed.
        pub fn capacity(self: Self) Size {
            return self.unmanaged.capacity();
        }

        /// Clobbers any existing data. To detect if a put would clobber
        /// existing data, see `getOrPut`.
        pub fn put(self: *Self, key: K, value: V) Allocator.Error!void {
            return self.unmanaged.putContext(self.allocator, key, value, self.ctx);
        }

        /// Inserts a key-value pair into the hash map, asserting that no previous
        /// entry with the same key is already present
        pub fn putNoClobber(self: *Self, key: K, value: V) Allocator.Error!void {
            return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx);
        }

        /// Asserts there is enough capacity to store the new key-value pair.
        /// Clobbers any existing data. To detect if a put would clobber
        /// existing data, see `getOrPutAssumeCapacity`.
        pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
            return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx);
        }

        /// Asserts there is enough capacity to store the new key-value pair.
        /// Asserts that it does not clobber any existing data.
        /// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
        pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
            return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx);
        }

        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
        pub fn fetchPut(self: *Self, key: K, value: V) Allocator.Error!?KV {
            return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx);
        }

        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
        /// If insertion happens, asserts there is enough capacity without allocating.
        pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
            return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx);
        }

        /// Removes a value from the map and returns the removed kv pair.
        pub fn fetchRemove(self: *Self, key: K) ?KV {
            return self.unmanaged.fetchRemoveContext(key, self.ctx);
        }

        pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
            return self.unmanaged.fetchRemoveAdapted(key, ctx);
        }

        /// Finds the value associated with a key in the map
        pub fn get(self: Self, key: K) ?V {
            return self.unmanaged.getContext(key, self.ctx);
        }
        pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
            return self.unmanaged.getAdapted(key, ctx);
        }

        pub fn getPtr(self: Self, key: K) ?*V {
            return self.unmanaged.getPtrContext(key, self.ctx);
        }
        pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
            return self.unmanaged.getPtrAdapted(key, ctx);
        }

        /// Finds the actual key associated with an adapted key in the map
        pub fn getKey(self: Self, key: K) ?K {
            return self.unmanaged.getKeyContext(key, self.ctx);
        }
        pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K {
            return self.unmanaged.getKeyAdapted(key, ctx);
        }

        pub fn getKeyPtr(self: Self, key: K) ?*K {
            return self.unmanaged.getKeyPtrContext(key, self.ctx);
        }
        pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K {
            return self.unmanaged.getKeyPtrAdapted(key, ctx);
        }

        /// Finds the key and value associated with a key in the map
        pub fn getEntry(self: Self, key: K) ?Entry {
            return self.unmanaged.getEntryContext(key, self.ctx);
        }

        pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
            return self.unmanaged.getEntryAdapted(key, ctx);
        }

        /// Check if the map contains a key
        pub fn contains(self: Self, key: K) bool {
            return self.unmanaged.containsContext(key, self.ctx);
        }

        pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
            return self.unmanaged.containsAdapted(key, ctx);
        }

        /// If there is an `Entry` with a matching key, it is deleted from
        /// the hash map, and this function returns true.  Otherwise this
        /// function returns false.
        ///
        /// TODO: answer the question in these doc comments, does this
        /// increase the unused capacity by one?
        pub fn remove(self: *Self, key: K) bool {
            return self.unmanaged.removeContext(key, self.ctx);
        }

        /// TODO: answer the question in these doc comments, does this
        /// increase the unused capacity by one?
        pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
            return self.unmanaged.removeAdapted(key, ctx);
        }

        /// Delete the entry with key pointed to by key_ptr from the hash map.
        /// key_ptr is assumed to be a valid pointer to a key that is present
        /// in the hash map.
        ///
        /// TODO: answer the question in these doc comments, does this
        /// increase the unused capacity by one?
        pub fn removeByPtr(self: *Self, key_ptr: *K) void {
            self.unmanaged.removeByPtr(key_ptr);
        }

        /// Creates a copy of this map, using the same allocator
        pub fn clone(self: Self) Allocator.Error!Self {
            var other = try self.unmanaged.cloneContext(self.allocator, self.ctx);
            return other.promoteContext(self.allocator, self.ctx);
        }

        /// Creates a copy of this map, using a specified allocator
        pub fn cloneWithAllocator(self: Self, new_allocator: Allocator) Allocator.Error!Self {
            var other = try self.unmanaged.cloneContext(new_allocator, self.ctx);
            return other.promoteContext(new_allocator, self.ctx);
        }

        /// Creates a copy of this map, using a specified context
        pub fn cloneWithContext(self: Self, new_ctx: anytype) Allocator.Error!HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
            var other = try self.unmanaged.cloneContext(self.allocator, new_ctx);
            return other.promoteContext(self.allocator, new_ctx);
        }

        /// Creates a copy of this map, using a specified allocator and context.
        pub fn cloneWithAllocatorAndContext(
            self: Self,
            new_allocator: Allocator,
            new_ctx: anytype,
        ) Allocator.Error!HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
            var other = try self.unmanaged.cloneContext(new_allocator, new_ctx);
            return other.promoteContext(new_allocator, new_ctx);
        }

        /// Set the map to an empty state, making deinitialization a no-op, and
        /// returning a copy of the original.
        pub fn move(self: *Self) Self {
            self.unmanaged.pointer_stability.assertUnlocked();
            const result = self.*;
            self.unmanaged = .empty;
            return result;
        }

        /// Rehash the map, in-place.
        ///
        /// Over time, with the current tombstone-based implementation, a
        /// long-lived HashMap subjected to repeated inserts and deletes can
        /// accumulate tombstone entries, which degrades performance through
        /// excessive probing.
        ///
        /// After this function is called, there are no tombstones in
        /// the HashMap, each entry has been rehashed, and any existing
        /// key/value pointers into the HashMap are invalidated.
        pub fn rehash(self: *Self) void {
            self.unmanaged.rehash(self.ctx);
        }
    };
}
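
// Illustrative sketch (not part of the original file): the unmanaged variant
// stores no allocator, so every allocating call takes one explicitly.
test "AutoHashMapUnmanaged usage sketch" {
    const gpa = std.testing.allocator;
    var map: AutoHashMapUnmanaged(u32, u32) = .empty;
    defer map.deinit(gpa);

    try map.ensureUnusedCapacity(gpa, 2);
    // With capacity reserved, these inserts cannot fail and need no `try`.
    map.putAssumeCapacity(1, 10);
    map.putAssumeCapacity(2, 20);
    try std.testing.expectEqual(@as(u32, 20), map.get(2).?);
}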

/// A HashMap based on open addressing and linear probing.
/// A lookup or modification typically incurs only 2 cache misses.
/// No order is guaranteed and any modification invalidates live iterators.
/// It achieves good performance with quite high load factors (by default,
/// grow is triggered at 80% full) and only one byte of overhead per element.
/// The struct itself is only 16 bytes for a small footprint. This comes at
/// the price of handling size with u32, which should be reasonable enough
/// for almost all uses.
/// Deletions are achieved with tombstones.
///
/// Default initialization of this struct is deprecated; use `.empty` instead.
pub fn HashMapUnmanaged(
    comptime K: type,
    comptime V: type,
    comptime Context: type,
    comptime max_load_percentage: u64,
) type {
    if (max_load_percentage <= 0 or max_load_percentage >= 100)
        @compileError("max_load_percentage must be between 0 and 100.");
    return struct {
        const Self = @This();

        // This is actually a midway pointer to the single buffer containing
        // a `Header` field, the `Metadata`s, and the keys and values arrays.
        // At `-@sizeOf(Header)` is the Header field.
        // The keys and values arrays follow the metadata; they are located
        // via `self.header().keys` and `self.header().values`.
        // This means that the hashmap only holds one live allocation, to
        // reduce memory fragmentation and struct size.
        /// Pointer to the metadata.
        metadata: ?[*]Metadata = null,

        /// Current number of elements in the hashmap.
        size: Size = 0,

        // Having a countdown to grow reduces the number of instructions to
        // execute when determining if the hashmap has enough capacity already.
        /// Number of available slots before a grow is needed to satisfy the
        /// `max_load_percentage`.
        available: Size = 0,

        /// Used to detect memory safety violations.
        pointer_stability: std.debug.SafetyLock = .{},

        // This is purely empirical and not a /very smart magic constant™/.
        /// Capacity of the first grow when bootstrapping the hashmap.
        const minimal_capacity = 8;

        /// A map containing no keys or values.
        pub const empty: Self = .{
            .metadata = null,
            .size = 0,
            .available = 0,
        };

        // This hashmap is specially designed for sizes that fit in a u32.
        pub const Size = u32;

        // u64 hashes guarantee us that the fingerprint bits will never be used
        // to compute the index of a slot, maximizing the use of entropy.
        pub const Hash = u64;

        pub const Entry = struct {
            key_ptr: *K,
            value_ptr: *V,
        };

        pub const KV = struct {
            key: K,
            value: V,
        };

        const Header = struct {
            values: [*]V,
            keys: [*]K,
            capacity: Size,
        };

        /// Metadata for a slot. It can be in three states: empty, used or
        /// tombstone. Tombstones indicate that an entry was previously used,
        /// they are a simple way to handle removal.
        /// To this state, we add 7 bits from the slot's key hash. These are
        /// used as a fast way to disambiguate between entries without
        /// having to use the equality function. If two fingerprints are
        /// different, we know that we don't have to compare the keys at all.
        /// The 7 bits are the highest ones from a 64 bit hash. This way, not
        /// only do we use the `log2(capacity)` lowest bits from the hash to determine
        /// a slot index, but we use 7 more bits to quickly resolve collisions
        /// when multiple elements with different hashes end up wanting to be in the same slot.
        /// Not using the equality function means we don't have to read into
        /// the entries array, likely avoiding a cache miss and a potentially
        /// costly function call.
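        ///
        /// Worked example (illustrative): with capacity 64, the slot index is
        /// `hash & 63` (the 6 lowest bits) and the fingerprint is `hash >> 57`
        /// (the 7 highest bits), so the two never draw on the same entropy.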
        const Metadata = packed struct {
            const FingerPrint = u7;

            const free: FingerPrint = 0;
            const tombstone: FingerPrint = 1;

            fingerprint: FingerPrint = free,
            used: u1 = 0,

            const slot_free = @as(u8, @bitCast(Metadata{ .fingerprint = free }));
            const slot_tombstone = @as(u8, @bitCast(Metadata{ .fingerprint = tombstone }));

            pub fn isUsed(self: Metadata) bool {
                return self.used == 1;
            }

            pub fn isTombstone(self: Metadata) bool {
                return @as(u8, @bitCast(self)) == slot_tombstone;
            }

            pub fn isFree(self: Metadata) bool {
                return @as(u8, @bitCast(self)) == slot_free;
            }

            pub fn takeFingerprint(hash: Hash) FingerPrint {
                const hash_bits = @typeInfo(Hash).int.bits;
                const fp_bits = @typeInfo(FingerPrint).int.bits;
                return @as(FingerPrint, @truncate(hash >> (hash_bits - fp_bits)));
            }

            pub fn fill(self: *Metadata, fp: FingerPrint) void {
                self.used = 1;
                self.fingerprint = fp;
            }

            pub fn remove(self: *Metadata) void {
                self.used = 0;
                self.fingerprint = tombstone;
            }
        };

        comptime {
            assert(@sizeOf(Metadata) == 1);
            assert(@alignOf(Metadata) == 1);
        }

        pub const Iterator = struct {
            hm: *const Self,
            index: Size = 0,

            pub fn next(it: *Iterator) ?Entry {
                assert(it.index <= it.hm.capacity());
                if (it.hm.size == 0) return null;

                const cap = it.hm.capacity();
                const end = it.hm.metadata.? + cap;
                var metadata = it.hm.metadata.? + it.index;

                while (metadata != end) : ({
                    metadata += 1;
                    it.index += 1;
                }) {
                    if (metadata[0].isUsed()) {
                        const key = &it.hm.keys()[it.index];
                        const value = &it.hm.values()[it.index];
                        it.index += 1;
                        return Entry{ .key_ptr = key, .value_ptr = value };
                    }
                }

                return null;
            }
        };

        pub const KeyIterator = FieldIterator(K);
        pub const ValueIterator = FieldIterator(V);

        fn FieldIterator(comptime T: type) type {
            return struct {
                len: usize,
                metadata: [*]const Metadata,
                items: [*]T,

                pub fn next(self: *@This()) ?*T {
                    while (self.len > 0) {
                        self.len -= 1;
                        const used = self.metadata[0].isUsed();
                        const item = &self.items[0];
                        self.metadata += 1;
                        self.items += 1;
                        if (used) {
                            return item;
                        }
                    }
                    return null;
                }
            };
        }

        pub const GetOrPutResult = struct {
            key_ptr: *K,
            value_ptr: *V,
            found_existing: bool,
        };

        pub const Managed = HashMap(K, V, Context, max_load_percentage);

        pub fn promote(self: Self, allocator: Allocator) Managed {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
            return promoteContext(self, allocator, undefined);
        }

        pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
            return .{
                .unmanaged = self,
                .allocator = allocator,
                .ctx = ctx,
            };
        }

        /// Puts the hash map into a state where any method call that would
        /// cause an existing key or value pointer to become invalidated will
        /// instead trigger an assertion.
        ///
        /// An additional call to `lockPointers` in such state also triggers an
        /// assertion.
        ///
        /// `unlockPointers` returns the hash map to the previous state.
        pub fn lockPointers(self: *Self) void {
            self.pointer_stability.lock();
        }

        /// Undoes a call to `lockPointers`.
        pub fn unlockPointers(self: *Self) void {
            self.pointer_stability.unlock();
        }

        fn isUnderMaxLoadPercentage(size: Size, cap: Size) bool {
            return size * 100 < max_load_percentage * cap;
        }

        pub fn deinit(self: *Self, allocator: Allocator) void {
            self.pointer_stability.assertUnlocked();
            self.deallocate(allocator);
            self.* = undefined;
        }

        fn capacityForSize(size: Size) Size {
            var new_cap: u32 = @intCast((@as(u64, size) * 100) / max_load_percentage + 1);
            new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable;
            return new_cap;
        }

        pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_size: Size) Allocator.Error!void {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
            return ensureTotalCapacityContext(self, allocator, new_size, undefined);
        }
        pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_size: Size, ctx: Context) Allocator.Error!void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();
            if (new_size > self.size)
                try self.growIfNeeded(allocator, new_size - self.size, ctx);
        }

        pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_size: Size) Allocator.Error!void {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureUnusedCapacityContext instead.");
            return ensureUnusedCapacityContext(self, allocator, additional_size, undefined);
        }
        pub fn ensureUnusedCapacityContext(self: *Self, allocator: Allocator, additional_size: Size, ctx: Context) Allocator.Error!void {
            return ensureTotalCapacityContext(self, allocator, self.count() + additional_size, ctx);
        }

        pub fn clearRetainingCapacity(self: *Self) void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();
            if (self.metadata) |_| {
                self.initMetadatas();
                self.size = 0;
                self.available = @truncate((self.capacity() * max_load_percentage) / 100);
            }
        }

        pub fn clearAndFree(self: *Self, allocator: Allocator) void {
            self.pointer_stability.lock();
            defer self.pointer_stability.unlock();
            self.deallocate(allocator);
            self.size = 0;
            self.available = 0;
        }

        pub fn count(self: Self) Size {
            return self.size;
        }

        fn header(self: Self) *Header {
            return @ptrCast(@as([*]Header, @ptrCast(@alignCast(self.metadata.?))) - 1);
        }

        fn keys(self: Self) [*]K {
            return self.header().keys;
        }

        fn values(self: Self) [*]V {
            return self.header().values;
        }

        pub fn capacity(self: Self) Size {
            if (self.metadata == null) return 0;

            return self.header().capacity;
        }

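        /// Example (illustrative sketch, not from the original docs):
        ///
        ///   var it = map.iterator();
        ///   while (it.next()) |entry| {
        ///       std.debug.print("{} -> {}\n", .{ entry.key_ptr.*, entry.value_ptr.* });
        ///   }
        ///
        /// The iterator is invalidated by any modification of the map.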
        pub fn iterator(self: *const Self) Iterator {
            return .{ .hm = self };
        }

        pub fn keyIterator(self: Self) KeyIterator {
            if (self.metadata) |metadata| {
                return .{
                    .len = self.capacity(),
                    .metadata = metadata,
                    .items = self.keys(),
                };
            } else {
                return .{
                    .len = 0,
                    .metadata = undefined,
                    .items = undefined,
                };
            }
        }

        pub fn valueIterator(self: Self) ValueIterator {
            if (self.metadata) |metadata| {
                return .{
                    .len = self.capacity(),
                    .metadata = metadata,
                    .items = self.values(),
                };
            } else {
                return .{
                    .len = 0,
                    .metadata = undefined,
                    .items = undefined,
                };
            }
        }

        /// Insert an entry in the map. Assumes it is not already present.
        pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) Allocator.Error!void {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
            return self.putNoClobberContext(allocator, key, value, undefined);
        }
        pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) Allocator.Error!void {
            {
                self.pointer_stability.lock();
                defer self.pointer_stability.unlock();
                try self.growIfNeeded(allocator, 1, ctx);
            }
            self.putAssumeCapacityNoClobberContext(key, value, ctx);
        }

        /// Asserts there is enough capacity to store the new key-value pair.
        /// Clobbers any existing data. To detect if a put would clobber
        /// existing data, see `getOrPutAssumeCapacity`.
        pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityContext instead.");
            return self.putAssumeCapacityContext(key, value, undefined);
        }
        pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void {
            const gop = self.getOrPutAssumeCapacityContext(key, ctx);
            gop.value_ptr.* = value;
        }

        /// Insert an entry in the map. Assumes it is not already present,
        /// and that no allocation is needed.
        pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putAssumeCapacityNoClobberContext instead.");
            return self.putAssumeCapacityNoClobberContext(key, value, undefined);
        }
        pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void {
            assert(!self.containsContext(key, ctx));

            const hash: Hash = ctx.hash(key);
            const mask = self.capacity() - 1;
            var idx: usize = @truncate(hash & mask);

            var metadata = self.metadata.? + idx;
            while (metadata[0].isUsed()) {
                idx = (idx + 1) & mask;
                metadata = self.metadata.? + idx;
            }

            assert(self.available > 0);
            self.available -= 1;

            const fingerprint = Metadata.takeFingerprint(hash);
            metadata[0].fill(fingerprint);
            self.keys()[idx] = key;
            self.values()[idx] = value;

            self.size += 1;
        }

        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
        pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) Allocator.Error!?KV {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
            return self.fetchPutContext(allocator, key, value, undefined);
        }
        pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) Allocator.Error!?KV {
            const gop = try self.getOrPutContext(allocator, key, ctx);
            var result: ?KV = null;
            if (gop.found_existing) {
                result = KV{
                    .key = gop.key_ptr.*,
                    .value = gop.value_ptr.*,
                };
            }
            gop.value_ptr.* = value;
            return result;
        }

        /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
        /// If insertion happens, asserts there is enough capacity without allocating.
        pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutAssumeCapacityContext instead.");
            return self.fetchPutAssumeCapacityContext(key, value, undefined);
        }
        pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV {
            const gop = self.getOrPutAssumeCapacityContext(key, ctx);
            var result: ?KV = null;
            if (gop.found_existing) {
                result = KV{
                    .key = gop.key_ptr.*,
                    .value = gop.value_ptr.*,
                };
            }
            gop.value_ptr.* = value;
            return result;
        }

        /// If there is an `Entry` with a matching key, it is deleted from
        /// the hash map, and then returned from this function.
        pub fn fetchRemove(self: *Self, key: K) ?KV {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchRemoveContext instead.");
            return self.fetchRemoveContext(key, undefined);
        }
        pub fn fetchRemoveContext(self: *Self, key: K, ctx: Context) ?KV {
            return self.fetchRemoveAdapted(key, ctx);
        }
        pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
            if (self.getIndex(key, ctx)) |idx| {
                const old_key = &self.keys()[idx];
                const old_val = &self.values()[idx];
                const result = KV{
                    .key = old_key.*,
                    .value = old_val.*,
                };
                self.metadata.?[idx].remove();
                old_key.* = undefined;
                old_val.* = undefined;
                self.size -= 1;
                self.available += 1;
                return result;
            }

            return null;
        }

        /// Find the index containing the data for the given key.
        fn getIndex(self: Self, key: anytype, ctx: anytype) ?usize {
            if (self.size == 0) {
                // We use cold instead of unlikely to force a jump to this case,
                // no matter the weight of the opposing side.
                @branchHint(.cold);
                return null;
            }

            // If you get a compile error on this line, it means that your generic hash
            // function is invalid for these parameters.
            const hash: Hash = ctx.hash(key);

            const mask = self.capacity() - 1;
            const fingerprint = Metadata.takeFingerprint(hash);
            // Don't loop indefinitely when there are no empty slots.
            var limit = self.capacity();
            var idx = @as(usize, @truncate(hash & mask));

            var metadata = self.metadata.? + idx;
            while (!metadata[0].isFree() and limit != 0) {
                if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
                    const test_key = &self.keys()[idx];

                    if (ctx.eql(key, test_key.*)) {
                        return idx;
                    }
                }

                limit -= 1;
                idx = (idx + 1) & mask;
                metadata = self.metadata.? + idx;
            }

            return null;
        }

        pub fn getEntry(self: Self, key: K) ?Entry {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getEntryContext instead.");
            return self.getEntryContext(key, undefined);
        }
        pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry {
            return self.getEntryAdapted(key, ctx);
        }
        pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
            if (self.getIndex(key, ctx)) |idx| {
                return Entry{
                    .key_ptr = &self.keys()[idx],
                    .value_ptr = &self.values()[idx],
                };
            }
            return null;
        }

        /// Insert an entry if the associated key is not already present, otherwise update preexisting value.
        pub fn put(self: *Self, allocator: Allocator, key: K, value: V) Allocator.Error!void {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
            return self.putContext(allocator, key, value, undefined);
        }
        pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) Allocator.Error!void {
            const result = try self.getOrPutContext(allocator, key, ctx);
            result.value_ptr.* = value;
        }

        /// Get an optional pointer to the actual key associated with adapted key, if present.
        pub fn getKeyPtr(self: Self, key: K) ?*K {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyPtrContext instead.");
            return self.getKeyPtrContext(key, undefined);
        }
        pub fn getKeyPtrContext(self: Self, key: K, ctx: Context) ?*K {
            return self.getKeyPtrAdapted(key, ctx);
        }
        pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K {
            if (self.getIndex(key, ctx)) |idx| {
                return &self.keys()[idx];
            }
            return null;
        }

        /// Get a copy of the actual key associated with adapted key, if present.
        pub fn getKey(self: Self, key: K) ?K {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getKeyContext instead.");
            return self.getKeyContext(key, undefined);
        }
        pub fn getKeyContext(self: Self, key: K, ctx: Context) ?K {
            return self.getKeyAdapted(key, ctx);
        }
        pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K {
            if (self.getIndex(key, ctx)) |idx| {
                return self.keys()[idx];
            }
            return null;
        }

        /// Get an optional pointer to the value associated with key, if present.
        pub fn getPtr(self: Self, key: K) ?*V {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getPtrContext instead.");
            return self.getPtrContext(key, undefined);
        }
        pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V {
            return self.getPtrAdapted(key, ctx);
        }
        pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
            if (self.getIndex(key, ctx)) |idx| {
                return &self.values()[idx];
            }
            return null;
        }

        /// Get a copy of the value associated with key, if present.
        pub fn get(self: Self, key: K) ?V {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getContext instead.");
            return self.getContext(key, undefined);
        }
        pub fn getContext(self: Self, key: K, ctx: Context) ?V {
            return self.getAdapted(key, ctx);
        }
        pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
            if (self.getIndex(key, ctx)) |idx| {
                return self.values()[idx];
            }
            return null;
        }

        pub fn getOrPut(self: *Self, allocator: Allocator, key: K) Allocator.Error!GetOrPutResult {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
            return self.getOrPutContext(allocator, key, undefined);
        }
        pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) Allocator.Error!GetOrPutResult {
            const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
            if (!gop.found_existing) {
                gop.key_ptr.* = key;
            }
            return gop;
        }
        pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) Allocator.Error!GetOrPutResult {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
            return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
        }
        pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) Allocator.Error!GetOrPutResult {
            {
                self.pointer_stability.lock();
                defer self.pointer_stability.unlock();
                self.growIfNeeded(allocator, 1, ctx) catch |err| {
                    // If allocation fails, try to do the lookup anyway.
                    // If we find an existing item, we can return it.
                    // Otherwise return the error, we could not add another.
                    const index = self.getIndex(key, key_ctx) orelse return err;
                    return GetOrPutResult{
                        .key_ptr = &self.keys()[index],
                        .value_ptr = &self.values()[index],
                        .found_existing = true,
                    };
                };
            }
            return self.getOrPutAssumeCapacityAdapted(key, key_ctx);
        }

        pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutAssumeCapacityContext instead.");
            return self.getOrPutAssumeCapacityContext(key, undefined);
        }
        pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult {
            const result = self.getOrPutAssumeCapacityAdapted(key, ctx);
            if (!result.found_existing) {
                result.key_ptr.* = key;
            }
            return result;
        }
        pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
            // If you get a compile error on this line, it means that your generic hash
            // function is invalid for these parameters.
            const hash: Hash = ctx.hash(key);

            const mask = self.capacity() - 1;
            const fingerprint = Metadata.takeFingerprint(hash);
            var limit = self.capacity();
            var idx = @as(usize, @truncate(hash & mask));

            var first_tombstone_idx: usize = self.capacity(); // invalid index
            var metadata = self.metadata.? + idx;
            while (!metadata[0].isFree() and limit != 0) {
                if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
                    const test_key = &self.keys()[idx];
                    // If you get a compile error on this line, it means that your generic eql
                    // function is invalid for these parameters.
                    if (ctx.eql(key, test_key.*)) {
                        return GetOrPutResult{
                            .key_ptr = test_key,
                            .value_ptr = &self.values()[idx],
                            .found_existing = true,
                        };
                    }
                } else if (first_tombstone_idx == self.capacity() and metadata[0].isTombstone()) {
                    first_tombstone_idx = idx;
                }

                limit -= 1;
                idx = (idx + 1) & mask;
                metadata = self.metadata.? + idx;
            }

            if (first_tombstone_idx < self.capacity()) {
                // A cheap attempt to shorten probe sequences after deletions: recycle a tombstone.
1179                idx = first_tombstone_idx;
1180                metadata = self.metadata.? + idx;
1181            }
1182            // We're using a slot previously free or a tombstone.
1183            self.available -= 1;
1184
1185            metadata[0].fill(fingerprint);
1186            const new_key = &self.keys()[idx];
1187            const new_value = &self.values()[idx];
1188            new_key.* = undefined;
1189            new_value.* = undefined;
1190            self.size += 1;
1191
1192            return GetOrPutResult{
1193                .key_ptr = new_key,
1194                .value_ptr = new_value,
1195                .found_existing = false,
1196            };
1197        }
1198
1199        pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) Allocator.Error!Entry {
1200            if (@sizeOf(Context) != 0)
1201                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
1202            return self.getOrPutValueContext(allocator, key, value, undefined);
1203        }
1204        pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) Allocator.Error!Entry {
1205            const res = try self.getOrPutAdapted(allocator, key, ctx);
1206            if (!res.found_existing) {
1207                res.key_ptr.* = key;
1208                res.value_ptr.* = value;
1209            }
1210            return Entry{ .key_ptr = res.key_ptr, .value_ptr = res.value_ptr };
1211        }
1212
1213        /// Return true if there is a value associated with key in the map.
1214        pub fn contains(self: Self, key: K) bool {
1215            if (@sizeOf(Context) != 0)
1216                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call containsContext instead.");
1217            return self.containsContext(key, undefined);
1218        }
1219        pub fn containsContext(self: Self, key: K, ctx: Context) bool {
1220            return self.containsAdapted(key, ctx);
1221        }
1222        pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
1223            return self.getIndex(key, ctx) != null;
1224        }
1225
1226        fn removeByIndex(self: *Self, idx: usize) void {
1227            self.metadata.?[idx].remove();
1228            self.keys()[idx] = undefined;
1229            self.values()[idx] = undefined;
1230            self.size -= 1;
1231            self.available += 1;
1232        }
1233
1234        /// If there is an `Entry` with a matching key, it is deleted from
1235        /// the hash map, and this function returns true.  Otherwise this
1236        /// function returns false.
1237        ///
1238        /// TODO: answer the question in these doc comments, does this
1239        /// increase the unused capacity by one?
        pub fn remove(self: *Self, key: K) bool {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call removeContext instead.");
            return self.removeContext(key, undefined);
        }

        /// Like `remove`, a successful removal increases the unused capacity
        /// by one.
        pub fn removeContext(self: *Self, key: K, ctx: Context) bool {
            return self.removeAdapted(key, ctx);
        }

        /// Like `remove`, a successful removal increases the unused capacity
        /// by one.
        pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
            if (self.getIndex(key, ctx)) |idx| {
                self.removeByIndex(idx);
                return true;
            }

            return false;
        }

        /// Delete the entry with key pointed to by key_ptr from the hash map.
        /// key_ptr is assumed to be a valid pointer to a key that is present
        /// in the hash map.
        ///
        /// Like `remove`, this increases the unused capacity by one.
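        ///
        /// A sketch of typical use (hypothetical `key`): obtain the pointer
        /// via `getKeyPtr` so the key is hashed only once for the lookup and
        /// the removal:
        ///
        ///     if (map.getKeyPtr(key)) |kp| map.removeByPtr(kp);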
        pub fn removeByPtr(self: *Self, key_ptr: *K) void {
            // if @sizeOf(K) == 0 then there is at most one item in the hash
            // map, which is assumed to exist as key_ptr must be valid.  This
            // item must be at index 0.
            const idx = if (@sizeOf(K) > 0)
                (key_ptr - self.keys())
            else
                0;

            self.removeByIndex(idx);
        }

        fn initMetadatas(self: *Self) void {
            @memset(@as([*]u8, @ptrCast(self.metadata.?))[0 .. @sizeOf(Metadata) * self.capacity()], 0);
        }

        // This counts the number of occupied slots (not counting tombstones), which is
        // what has to stay under the max_load_percentage of capacity.
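        //
        // A worked example (a sketch, assuming the default max_load_percentage
        // of 80): with capacity() == 16, max_load == (16 * 80) / 100 == 12.
        // If available == 5, then load() == 12 - 5 == 7 occupied slots.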
        fn load(self: Self) Size {
            const max_load = (self.capacity() * max_load_percentage) / 100;
            assert(max_load >= self.available);
            return @as(Size, @truncate(max_load - self.available));
        }

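        // Grows to a capacity that can hold the current load plus `new_count`
        // additional entries while staying within max_load_percentage.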
        fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) Allocator.Error!void {
            if (new_count > self.available) {
                try self.grow(allocator, capacityForSize(self.load() + new_count), ctx);
            }
        }

        pub fn clone(self: Self, allocator: Allocator) Allocator.Error!Self {
            if (@sizeOf(Context) != 0)
                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
            return self.cloneContext(allocator, @as(Context, undefined));
        }
        pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) Allocator.Error!HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
            var other: HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) = .empty;
            if (self.size == 0)
                return other;

            const new_cap = capacityForSize(self.size);
            try other.allocate(allocator, new_cap);
            other.initMetadatas();
            other.available = @truncate((new_cap * max_load_percentage) / 100);

            var i: Size = 0;
            const metadata = self.metadata.?;
            const keys_ptr = self.keys();
            const values_ptr = self.values();
            while (i < self.capacity()) : (i += 1) {
                if (metadata[i].isUsed()) {
                    other.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], new_ctx);
                    if (other.size == self.size)
                        break;
                }
            }

            return other;
        }

        /// Set the map to an empty state, making deinitialization a no-op, and
        /// returning a copy of the original.
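        ///
        /// A sketch of typical use (hypothetical names, not from the original
        /// docs): transferring ownership out of a scope that would otherwise
        /// deinit the map:
        ///
        ///     var tmp: AutoHashMapUnmanaged(u32, u32) = .empty;
        ///     errdefer tmp.deinit(allocator);
        ///     // ... populate tmp ...
        ///     const owned = tmp.move(); // tmp is now empty; its deinit is a no-op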
        pub fn move(self: *Self) Self {
            self.pointer_stability.assertUnlocked();
            const result = self.*;
            self.* = .empty;
            return result;
        }

        /// Rehash the map, in-place.
        ///
        /// With the current tombstone-based implementation, a long-lived
        /// HashMap with repeated inserts and deletes can accumulate tombstone
        /// entries, and that buildup degrades performance through excessive
        /// probing.
        ///
        /// After this function is called, there will be no tombstones in
        /// the HashMap, each of the entries is rehashed and any existing
        /// key/value pointers into the HashMap are invalidated.
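        ///
        /// A sketch of the churn pattern that benefits (hypothetical names):
        ///
        ///     while (churning) {
        ///         try map.put(allocator, k, v);
        ///         _ = map.remove(k); // leaves a tombstone behind
        ///     }
        ///     map.rehash(ctx); // clears every tombstone, in place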
        pub fn rehash(self: *Self, ctx: anytype) void {
            const mask = self.capacity() - 1;

            const metadata = self.metadata.?;
            const keys_ptr = self.keys();
            const values_ptr = self.values();
            var curr: Size = 0;

            // While we re-hash every slot, we use the fingerprint to mark
            // used buckets as either free (still needing to be rehashed) or
            // tombstone (already rehashed).

            while (curr < self.capacity()) : (curr += 1) {
                metadata[curr].fingerprint = Metadata.free;
            }

            // Now iterate over all the buckets, rehashing them

            curr = 0;
            while (curr < self.capacity()) {
                if (!metadata[curr].isUsed()) {
                    assert(metadata[curr].isFree());
                    curr += 1;
                    continue;
                }

                const hash = ctx.hash(keys_ptr[curr]);
                const fingerprint = Metadata.takeFingerprint(hash);
                var idx = @as(usize, @truncate(hash & mask));

                // For each bucket, rehash to an index that is either:
                // 1) before the cursor, probed into a free slot, or
                // 2) equal to the cursor, requiring no move, or
                // 3) ahead of the cursor, probing over already-rehashed entries.

                while ((idx < curr and metadata[idx].isUsed()) or
                    (idx > curr and metadata[idx].fingerprint == Metadata.tombstone))
                {
                    idx = (idx + 1) & mask;
                }

                if (idx < curr) {
                    assert(metadata[idx].isFree());
                    metadata[idx].fill(fingerprint);
                    keys_ptr[idx] = keys_ptr[curr];
                    values_ptr[idx] = values_ptr[curr];

                    metadata[curr].used = 0;
                    assert(metadata[curr].isFree());
                    keys_ptr[curr] = undefined;
                    values_ptr[curr] = undefined;

                    curr += 1;
                } else if (idx == curr) {
                    metadata[idx].fingerprint = fingerprint;
                    curr += 1;
                } else {
                    assert(metadata[idx].fingerprint != Metadata.tombstone);
                    metadata[idx].fingerprint = Metadata.tombstone;
                    if (metadata[idx].isUsed()) {
                        std.mem.swap(K, &keys_ptr[curr], &keys_ptr[idx]);
                        std.mem.swap(V, &values_ptr[curr], &values_ptr[idx]);
                    } else {
                        metadata[idx].used = 1;
                        keys_ptr[idx] = keys_ptr[curr];
                        values_ptr[idx] = values_ptr[curr];

                        metadata[curr].fingerprint = Metadata.free;
                        metadata[curr].used = 0;
                        keys_ptr[curr] = undefined;
                        values_ptr[curr] = undefined;

                        curr += 1;
                    }
                }
            }
        }

        fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) Allocator.Error!void {
            @branchHint(.cold);
            const new_cap = @max(new_capacity, minimal_capacity);
            assert(new_cap > self.capacity());
            assert(std.math.isPowerOfTwo(new_cap));

            var map: Self = .{};
            try map.allocate(allocator, new_cap);
            errdefer comptime unreachable; // asserts that no error can be returned past this point
            map.pointer_stability.lock();
            map.initMetadatas();
            map.available = @truncate((new_cap * max_load_percentage) / 100);

            if (self.size != 0) {
                const old_capacity = self.capacity();
                for (
                    self.metadata.?[0..old_capacity],
                    self.keys()[0..old_capacity],
                    self.values()[0..old_capacity],
                ) |m, k, v| {
                    if (!m.isUsed()) continue;
                    map.putAssumeCapacityNoClobberContext(k, v, ctx);
                    if (map.size == self.size) break;
                }
            }

            self.size = 0;
            self.pointer_stability = .{};
            std.mem.swap(Self, self, &map);
            map.deinit(allocator);
        }

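        // The whole map lives in a single allocation with the layout sketched
        // below; the exact padding depends on the alignments of K and V:
        //
        //   [Header][Metadata * capacity][pad][K * capacity][pad][V * capacity][pad]
        //
        // `self.metadata` points just past the Header, so the Header sits
        // immediately before the metadata array.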
        fn allocate(self: *Self, allocator: Allocator, new_capacity: Size) Allocator.Error!void {
            const header_align = @alignOf(Header);
            const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
            const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
            const max_align: Alignment = comptime .fromByteUnits(@max(header_align, key_align, val_align));

            const new_cap: usize = new_capacity;
            const meta_size = @sizeOf(Header) + new_cap * @sizeOf(Metadata);
            comptime assert(@alignOf(Metadata) == 1);

            const keys_start = std.mem.alignForward(usize, meta_size, key_align);
            const keys_end = keys_start + new_cap * @sizeOf(K);

            const vals_start = std.mem.alignForward(usize, keys_end, val_align);
            const vals_end = vals_start + new_cap * @sizeOf(V);

            const total_size = max_align.forward(vals_end);

            const slice = try allocator.alignedAlloc(u8, max_align, total_size);
            const ptr: [*]u8 = @ptrCast(slice.ptr);

            const metadata = ptr + @sizeOf(Header);

            const hdr = @as(*Header, @ptrCast(@alignCast(ptr)));
            if (@sizeOf([*]V) != 0) {
                hdr.values = @ptrCast(@alignCast((ptr + vals_start)));
            }
            if (@sizeOf([*]K) != 0) {
                hdr.keys = @ptrCast(@alignCast((ptr + keys_start)));
            }
            hdr.capacity = new_capacity;
            self.metadata = @ptrCast(@alignCast(metadata));
        }

        fn deallocate(self: *Self, allocator: Allocator) void {
            if (self.metadata == null) return;

            const header_align = @alignOf(Header);
            const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
            const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
            const max_align = comptime @max(header_align, key_align, val_align);

            const cap: usize = self.capacity();
            const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata);
            comptime assert(@alignOf(Metadata) == 1);

            const keys_start = std.mem.alignForward(usize, meta_size, key_align);
            const keys_end = keys_start + cap * @sizeOf(K);

            const vals_start = std.mem.alignForward(usize, keys_end, val_align);
            const vals_end = vals_start + cap * @sizeOf(V);

            const total_size = std.mem.alignForward(usize, vals_end, max_align);

            const slice = @as([*]align(max_align) u8, @ptrCast(@alignCast(self.header())))[0..total_size];
            allocator.free(slice);

            self.metadata = null;
            self.available = 0;
        }

        /// This function is used in the debugger pretty formatters in tools/ to fetch the
        /// header type to facilitate fancy debug printing for this type.
        fn dbHelper(self: *Self, hdr: *Header, entry: *Entry) void {
            _ = self;
            _ = hdr;
            _ = entry;
        }

        comptime {
            if (!builtin.strip_debug_info) _ = switch (builtin.zig_backend) {
                .stage2_llvm => &dbHelper,
                .stage2_x86_64 => KV,
                else => {},
            };
        }
    };
}

const testing = std.testing;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;

test "basic usage" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    const count = 5;
    var i: u32 = 0;
    var total: u32 = 0;
    while (i < count) : (i += 1) {
        try map.put(i, i);
        total += i;
    }

    var sum: u32 = 0;
    var it = map.iterator();
    while (it.next()) |kv| {
        sum += kv.key_ptr.*;
    }
    try expectEqual(total, sum);

    i = 0;
    sum = 0;
    while (i < count) : (i += 1) {
        try expectEqual(i, map.get(i).?);
        sum += map.get(i).?;
    }
    try expectEqual(total, sum);
}

test "ensureTotalCapacity" {
    var map = AutoHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    try map.ensureTotalCapacity(20);
    const initial_capacity = map.capacity();
    try testing.expect(initial_capacity >= 20);
    var i: i32 = 0;
    while (i < 20) : (i += 1) {
        try testing.expect(map.fetchPutAssumeCapacity(i, i + 10) == null);
    }
    // shouldn't resize from fetchPutAssumeCapacity
    try testing.expect(initial_capacity == map.capacity());
}

test "ensureUnusedCapacity with tombstones" {
    var map = AutoHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    var i: i32 = 0;
    while (i < 100) : (i += 1) {
        try map.ensureUnusedCapacity(1);
        map.putAssumeCapacity(i, i);
        _ = map.remove(i);
    }
}

test "clearRetainingCapacity" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    map.clearRetainingCapacity();

    try map.put(1, 1);
    try expectEqual(map.get(1).?, 1);
    try expectEqual(map.count(), 1);

    map.clearRetainingCapacity();
    map.putAssumeCapacity(1, 1);
    try expectEqual(map.get(1).?, 1);
    try expectEqual(map.count(), 1);

    const cap = map.capacity();
    try expect(cap > 0);

    map.clearRetainingCapacity();
    map.clearRetainingCapacity();
    try expectEqual(map.count(), 0);
    try expectEqual(map.capacity(), cap);
    try expect(!map.contains(1));
}

test "grow" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    const growTo = 12456;

    var i: u32 = 0;
    while (i < growTo) : (i += 1) {
        try map.put(i, i);
    }
    try expectEqual(map.count(), growTo);

    i = 0;
    var it = map.iterator();
    while (it.next()) |kv| {
        try expectEqual(kv.key_ptr.*, kv.value_ptr.*);
        i += 1;
    }
    try expectEqual(i, growTo);

    i = 0;
    while (i < growTo) : (i += 1) {
        try expectEqual(map.get(i).?, i);
    }
}

test "clone" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    var a = try map.clone();
    defer a.deinit();

    try expectEqual(a.count(), 0);

    try a.put(1, 1);
    try a.put(2, 2);
    try a.put(3, 3);

    var b = try a.clone();
    defer b.deinit();

    try expectEqual(b.count(), 3);
    try expectEqual(b.get(1).?, 1);
    try expectEqual(b.get(2).?, 2);
    try expectEqual(b.get(3).?, 3);

    var original = AutoHashMap(i32, i32).init(std.testing.allocator);
    defer original.deinit();

    var i: u8 = 0;
    while (i < 10) : (i += 1) {
        try original.putNoClobber(i, i * 10);
    }

    var copy = try original.clone();
    defer copy.deinit();

    i = 0;
    while (i < 10) : (i += 1) {
        try testing.expect(copy.get(i).? == i * 10);
    }
}

test "ensureTotalCapacity with existing elements" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    try map.put(0, 0);
    try expectEqual(map.count(), 1);
    try expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity);

    try map.ensureTotalCapacity(65);
    try expectEqual(map.count(), 1);
    try expectEqual(map.capacity(), 128);
}

test "ensureTotalCapacity satisfies max load factor" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    try map.ensureTotalCapacity(127);
    try expectEqual(map.capacity(), 256);
}

test "remove" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    var i: u32 = 0;
    while (i < 16) : (i += 1) {
        try map.put(i, i);
    }

    i = 0;
    while (i < 16) : (i += 1) {
        if (i % 3 == 0) {
            _ = map.remove(i);
        }
    }
    try expectEqual(map.count(), 10);
    var it = map.iterator();
    while (it.next()) |kv| {
        try expectEqual(kv.key_ptr.*, kv.value_ptr.*);
        try expect(kv.key_ptr.* % 3 != 0);
    }

    i = 0;
    while (i < 16) : (i += 1) {
        if (i % 3 == 0) {
            try expect(!map.contains(i));
        } else {
            try expectEqual(map.get(i).?, i);
        }
    }
}

test "reverse removes" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    var i: u32 = 0;
    while (i < 16) : (i += 1) {
        try map.putNoClobber(i, i);
    }

    i = 16;
    while (i > 0) : (i -= 1) {
        _ = map.remove(i - 1);
        try expect(!map.contains(i - 1));
        var j: u32 = 0;
        while (j < i - 1) : (j += 1) {
            try expectEqual(map.get(j).?, j);
        }
    }

    try expectEqual(map.count(), 0);
}

test "multiple removes on same metadata" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    var i: u32 = 0;
    while (i < 16) : (i += 1) {
        try map.put(i, i);
    }

    _ = map.remove(7);
    _ = map.remove(15);
    _ = map.remove(14);
    _ = map.remove(13);
    try expect(!map.contains(7));
    try expect(!map.contains(15));
    try expect(!map.contains(14));
    try expect(!map.contains(13));

    i = 0;
    while (i < 13) : (i += 1) {
        if (i == 7) {
            try expect(!map.contains(i));
        } else {
            try expectEqual(map.get(i).?, i);
        }
    }

    try map.put(15, 15);
    try map.put(13, 13);
    try map.put(14, 14);
    try map.put(7, 7);
    i = 0;
    while (i < 16) : (i += 1) {
        try expectEqual(map.get(i).?, i);
    }
}

test "put and remove loop in random order" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    var keys = std.array_list.Managed(u32).init(std.testing.allocator);
    defer keys.deinit();

    const size = 32;
    const iterations = 100;

    var i: u32 = 0;
    while (i < size) : (i += 1) {
        try keys.append(i);
    }
    var prng = std.Random.DefaultPrng.init(std.testing.random_seed);
    const random = prng.random();

    i = 0;
    while (i < iterations) : (i += 1) {
        random.shuffle(u32, keys.items);

        for (keys.items) |key| {
            try map.put(key, key);
        }
        try expectEqual(map.count(), size);

        for (keys.items) |key| {
            _ = map.remove(key);
        }
        try expectEqual(map.count(), 0);
    }
}

test "remove many elements in random order" {
    const Map = AutoHashMap(u32, u32);
    const n = 1000 * 100;
    var map = Map.init(std.heap.page_allocator);
    defer map.deinit();

    var keys = std.array_list.Managed(u32).init(std.heap.page_allocator);
    defer keys.deinit();

    var i: u32 = 0;
    while (i < n) : (i += 1) {
        keys.append(i) catch unreachable;
    }

    var prng = std.Random.DefaultPrng.init(std.testing.random_seed);
    const random = prng.random();
    random.shuffle(u32, keys.items);

    for (keys.items) |key| {
        map.put(key, key) catch unreachable;
    }

    random.shuffle(u32, keys.items);
    i = 0;
    while (i < n) : (i += 1) {
        const key = keys.items[i];
        _ = map.remove(key);
    }
}

test "put" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    var i: u32 = 0;
    while (i < 16) : (i += 1) {
        try map.put(i, i);
    }

    i = 0;
    while (i < 16) : (i += 1) {
        try expectEqual(map.get(i).?, i);
    }

    i = 0;
    while (i < 16) : (i += 1) {
        try map.put(i, i * 16 + 1);
    }

    i = 0;
    while (i < 16) : (i += 1) {
        try expectEqual(map.get(i).?, i * 16 + 1);
    }
}

test "putAssumeCapacity" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    try map.ensureTotalCapacity(20);
    var i: u32 = 0;
    while (i < 20) : (i += 1) {
        map.putAssumeCapacityNoClobber(i, i);
    }

    i = 0;
    var sum = i;
    while (i < 20) : (i += 1) {
        sum += map.getPtr(i).?.*;
    }
    try expectEqual(sum, 190);

    i = 0;
    while (i < 20) : (i += 1) {
        map.putAssumeCapacity(i, 1);
    }

    i = 0;
    sum = i;
    while (i < 20) : (i += 1) {
        sum += map.get(i).?;
    }
    try expectEqual(sum, 20);
}

test "repeat putAssumeCapacity/remove" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    try map.ensureTotalCapacity(20);
    const limit = map.unmanaged.available;

    var i: u32 = 0;
    while (i < limit) : (i += 1) {
        map.putAssumeCapacityNoClobber(i, i);
    }

    // Repeatedly delete/insert an entry without resizing the map.
    // Put to different keys so entries don't land in the just-freed slot.
    i = 0;
    while (i < 10 * limit) : (i += 1) {
        try testing.expect(map.remove(i));
        if (i % 2 == 0) {
            map.putAssumeCapacityNoClobber(limit + i, i);
        } else {
            map.putAssumeCapacity(limit + i, i);
        }
    }

    i = 9 * limit;
    while (i < 10 * limit) : (i += 1) {
        try expectEqual(map.get(limit + i), i);
    }
    try expectEqual(map.unmanaged.available, 0);
    try expectEqual(map.unmanaged.count(), limit);
}

test "getOrPut" {
    var map = AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    var i: u32 = 0;
    while (i < 10) : (i += 1) {
        try map.put(i * 2, 2);
    }

    i = 0;
    while (i < 20) : (i += 1) {
        _ = try map.getOrPutValue(i, 1);
    }

    i = 0;
    var sum = i;
    while (i < 20) : (i += 1) {
        sum += map.get(i).?;
    }

    try expectEqual(sum, 30);
}

test "basic hash map usage" {
    var map = AutoHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

    try testing.expect((try map.fetchPut(1, 11)) == null);
    try testing.expect((try map.fetchPut(2, 22)) == null);
    try testing.expect((try map.fetchPut(3, 33)) == null);
    try testing.expect((try map.fetchPut(4, 44)) == null);

    try map.putNoClobber(5, 55);
    try testing.expect((try map.fetchPut(5, 66)).?.value == 55);
    try testing.expect((try map.fetchPut(5, 55)).?.value == 66);

    const gop1 = try map.getOrPut(5);
    try testing.expect(gop1.found_existing == true);
    try testing.expect(gop1.value_ptr.* == 55);
    gop1.value_ptr.* = 77;
    try testing.expect(map.getEntry(5).?.value_ptr.* == 77);

    const gop2 = try map.getOrPut(99);
    try testing.expect(gop2.found_existing == false);
    gop2.value_ptr.* = 42;
    try testing.expect(map.getEntry(99).?.value_ptr.* == 42);

    const gop3 = try map.getOrPutValue(5, 5);
    try testing.expect(gop3.value_ptr.* == 77);

    const gop4 = try map.getOrPutValue(100, 41);
    try testing.expect(gop4.value_ptr.* == 41);

    try testing.expect(map.contains(2));
    try testing.expect(map.getEntry(2).?.value_ptr.* == 22);
    try testing.expect(map.get(2).? == 22);

    const rmv1 = map.fetchRemove(2);
    try testing.expect(rmv1.?.key == 2);
    try testing.expect(rmv1.?.value == 22);
    try testing.expect(map.fetchRemove(2) == null);
    try testing.expect(map.remove(2) == false);
    try testing.expect(map.getEntry(2) == null);
    try testing.expect(map.get(2) == null);

    try testing.expect(map.remove(3) == true);
}

test "getOrPutAdapted" {
    const AdaptedContext = struct {
        fn eql(self: @This(), adapted_key: []const u8, test_key: u64) bool {
            _ = self;
            return std.fmt.parseInt(u64, adapted_key, 10) catch unreachable == test_key;
        }
        fn hash(self: @This(), adapted_key: []const u8) u64 {
            _ = self;
            const key = std.fmt.parseInt(u64, adapted_key, 10) catch unreachable;
            return (AutoContext(u64){}).hash(key);
        }
    };
    var map = AutoHashMap(u64, u64).init(testing.allocator);
    defer map.deinit();

    const keys = [_][]const u8{
        "1231",
        "4564",
        "7894",
        "1132",
        "65235",
        "95462",
        "0112305",
        "00658",
        "0",
        "2",
    };

    var real_keys: [keys.len]u64 = undefined;

    inline for (keys, 0..) |key_str, i| {
        const result = try map.getOrPutAdapted(key_str, AdaptedContext{});
        try testing.expect(!result.found_existing);
        real_keys[i] = std.fmt.parseInt(u64, key_str, 10) catch unreachable;
        result.key_ptr.* = real_keys[i];
        result.value_ptr.* = i * 2;
    }

    try testing.expectEqual(map.count(), keys.len);

    inline for (keys, 0..) |key_str, i| {
        const result = map.getOrPutAssumeCapacityAdapted(key_str, AdaptedContext{});
        try testing.expect(result.found_existing);
        try testing.expectEqual(real_keys[i], result.key_ptr.*);
        try testing.expectEqual(@as(u64, i) * 2, result.value_ptr.*);
        try testing.expectEqual(real_keys[i], map.getKeyAdapted(key_str, AdaptedContext{}).?);
    }
}

test "ensureUnusedCapacity" {
    var map = AutoHashMap(u64, u64).init(testing.allocator);
    defer map.deinit();

    try map.ensureUnusedCapacity(32);
    const capacity = map.capacity();
    try map.ensureUnusedCapacity(32);

    // Repeated ensureUnusedCapacity() calls with no insertions between
    // should not change the capacity.
    try testing.expectEqual(capacity, map.capacity());
}

test "removeByPtr" {
    var map = AutoHashMap(i32, u64).init(testing.allocator);
    defer map.deinit();

    var i: i32 = undefined;

    i = 0;
    while (i < 10) : (i += 1) {
        try map.put(i, 0);
    }

    try testing.expect(map.count() == 10);

    i = 0;
    while (i < 10) : (i += 1) {
        const key_ptr = map.getKeyPtr(i);
        try testing.expect(key_ptr != null);

        if (key_ptr) |ptr| {
            map.removeByPtr(ptr);
        }
    }

    try testing.expect(map.count() == 0);
}

test "removeByPtr 0 sized key" {
    var map = AutoHashMap(u0, u64).init(testing.allocator);
    defer map.deinit();

    try map.put(0, 0);

    try testing.expect(map.count() == 1);

    const key_ptr = map.getKeyPtr(0);
    try testing.expect(key_ptr != null);

    if (key_ptr) |ptr| {
        map.removeByPtr(ptr);
    }

    try testing.expect(map.count() == 0);
}

test "repeat fetchRemove" {
    var map: AutoHashMapUnmanaged(u64, void) = .empty;
    defer map.deinit(testing.allocator);

    try map.ensureTotalCapacity(testing.allocator, 4);

    map.putAssumeCapacity(0, {});
    map.putAssumeCapacity(1, {});
    map.putAssumeCapacity(2, {});
    map.putAssumeCapacity(3, {});

    // fetchRemove() should make slots available.
    var i: usize = 0;
    while (i < 10) : (i += 1) {
        try testing.expect(map.fetchRemove(3) != null);
        map.putAssumeCapacity(3, {});
    }

    try testing.expect(map.get(0) != null);
    try testing.expect(map.get(1) != null);
    try testing.expect(map.get(2) != null);
    try testing.expect(map.get(3) != null);
}

test "getOrPut allocation failure" {
    var map: std.StringHashMapUnmanaged(void) = .empty;
    try testing.expectError(error.OutOfMemory, map.getOrPut(std.testing.failing_allocator, "hello"));
}

test "rehash" {
    var map = AutoHashMap(usize, usize).init(std.testing.allocator);
    defer map.deinit();

    var prng = std.Random.DefaultPrng.init(0);
    const random = prng.random();

    const count = 4 * random.intRangeLessThan(u32, 100_000, 500_000);

    for (0..count) |i| {
        try map.put(i, i);
        if (i % 3 == 0) {
            try expectEqual(map.remove(i), true);
        }
    }

    map.rehash();

    try expectEqual(map.count(), count * 2 / 3);

    for (0..count) |i| {
        if (i % 3 == 0) {
            try expectEqual(map.get(i), null);
        } else {
            try expectEqual(map.get(i).?, i);
        }
    }
}
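
// A small additional check (a sketch, not part of the original suite):
// `move` transfers ownership of the allocation and resets the source to
// `.empty`, so deinit on the moved-from map becomes a no-op.
test "move" {
    var map: AutoHashMapUnmanaged(u32, u32) = .empty;
    try map.put(testing.allocator, 1, 10);

    var moved = map.move();
    defer moved.deinit(testing.allocator);
    // Deinit on the moved-from map is a harmless no-op.
    defer map.deinit(testing.allocator);

    try expectEqual(@as(u32, 10), moved.get(1).?);
    try expect(map.count() == 0);
    try expect(moved.count() == 1);
}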