//! Structure-of-arrays list type; see `MultiArrayList` below.
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const meta = std.meta;
const mem = std.mem;
const Allocator = mem.Allocator;
const testing = std.testing;

/// A MultiArrayList stores a list of a struct or tagged union type.
/// Instead of storing a single list of items, MultiArrayList
/// stores separate lists for each field of the struct or
/// lists of tags and bare unions.
/// This allows for memory savings if the struct or union has padding,
/// and also improves cache usage if only some fields or just tags
/// are needed for a computation. The primary API for accessing fields is
/// the `slice()` function, which computes the start pointers
/// for the array of each field. From the slice you can call
/// `.items(.<field_name>)` to obtain a slice of field values.
/// For unions you can call `.items(.tags)` or `.items(.data)`.
pub fn MultiArrayList(comptime T: type) type {
    return struct {
        /// Single backing allocation. The per-field arrays are laid out
        /// back-to-back inside it, ordered by field alignment (descending)
        /// so every array start is sufficiently aligned (see `sizes` and
        /// `slice`). Undefined when `capacity == 0`.
        bytes: [*]align(@alignOf(T)) u8 = undefined,
        /// Number of elements currently in the list.
        len: usize = 0,
        /// Number of elements the current allocation can hold.
        capacity: usize = 0,

        /// An empty list, owning no allocation.
        pub const empty: Self = .{
            .bytes = undefined,
            .len = 0,
            .capacity = 0,
        };

        /// The element type as actually stored: a struct `T` is stored
        /// as-is; a tagged union is split into a wrapper holding the tag
        /// and the bare (untagged) payload, so tags and payloads can live
        /// in separate arrays.
        const Elem = switch (@typeInfo(T)) {
            .@"struct" => T,
            .@"union" => |u| struct {
                /// `T` with its tag stripped: same layout, fields, and
                /// per-field alignment, but no tag storage.
                pub const Bare = Bare: {
                    var field_names: [u.fields.len][]const u8 = undefined;
                    var field_types: [u.fields.len]type = undefined;
                    var field_attrs: [u.fields.len]std.builtin.Type.UnionField.Attributes = undefined;
                    for (u.fields, &field_names, &field_types, &field_attrs) |field, *name, *Type, *attrs| {
                        name.* = field.name;
                        Type.* = field.type;
                        attrs.* = .{ .@"align" = field.alignment };
                    }
                    break :Bare @Union(u.layout, null, &field_names, &field_types, &field_attrs);
                };
                pub const Tag =
                    u.tag_type orelse @compileError("MultiArrayList does not support untagged unions");
                tags: Tag,
                data: Bare,

                /// Split a full union value into its active tag and bare payload.
                pub fn fromT(outer: T) @This() {
                    const tag = meta.activeTag(outer);
                    return .{
                        .tags = tag,
                        .data = switch (tag) {
                            inline else => |t| @unionInit(Bare, @tagName(t), @field(outer, @tagName(t))),
                        },
                    };
                }
                /// Recombine a tag and a bare payload into the full union value.
                pub fn toT(tag: Tag, bare: Bare) T {
                    return switch (tag) {
                        inline else => |t| @unionInit(T, @tagName(t), @field(bare, @tagName(t))),
                    };
                }
            },
            else => @compileError("MultiArrayList only supports structs and tagged unions"),
        };

        /// Enum naming each stored field of `Elem` (for unions: `.tags` and `.data`).
        pub const Field = meta.FieldEnum(Elem);

        /// A MultiArrayList.Slice contains cached start pointers for each field in the list.
        /// These pointers are not normally stored to reduce the size of the list in memory.
        /// If you are accessing multiple fields, call slice() first to compute the pointers,
        /// and then get the field arrays from the slice.
        pub const Slice = struct {
            /// This array is indexed by the field index which can be obtained
            /// by using @intFromEnum() on the Field enum
            ptrs: [fields.len][*]u8,
            len: usize,
            capacity: usize,

            pub const empty: Slice = .{
                .ptrs = undefined,
                .len = 0,
                .capacity = 0,
            };

            /// Returns the array of values for one field as a mutable slice.
            pub fn items(self: Slice, comptime field: Field) []FieldType(field) {
                const F = FieldType(field);
                if (self.capacity == 0) {
                    // `ptrs` is undefined for an unallocated list; return a
                    // valid empty slice instead of touching it.
                    return &[_]F{};
                }
                const byte_ptr = self.ptrs[@intFromEnum(field)];
                const casted_ptr: [*]F = if (@sizeOf(F) == 0)
                    undefined
                else
                    @ptrCast(@alignCast(byte_ptr));
                return casted_ptr[0..self.len];
            }

            /// Overwrite the element at `index`, scattering each field of
            /// `elem` into its per-field array.
            pub fn set(self: *Slice, index: usize, elem: T) void {
                const e = switch (@typeInfo(T)) {
                    .@"struct" => elem,
                    .@"union" => Elem.fromT(elem),
                    else => unreachable,
                };
                inline for (fields, 0..) |field_info, i| {
                    self.items(@as(Field, @enumFromInt(i)))[index] = @field(e, field_info.name);
                }
            }

            /// Gather all fields of the element at `index` into one value.
            pub fn get(self: Slice, index: usize) T {
                var result: Elem = undefined;
                inline for (fields, 0..) |field_info, i| {
                    @field(result, field_info.name) = self.items(@as(Field, @enumFromInt(i)))[index];
                }
                return switch (@typeInfo(T)) {
                    .@"struct" => result,
                    .@"union" => Elem.toT(result.tags, result.data),
                    else => unreachable,
                };
            }

            /// Reconstitute the owning `MultiArrayList` from this slice.
            /// Only valid for a slice produced by `slice()` (not `subslice`).
            pub fn toMultiArrayList(self: Slice) Self {
                if (self.ptrs.len == 0 or self.capacity == 0) {
                    return .{};
                }
                // `sizes.fields[0]` is the index of the field whose array is
                // laid out first, i.e. at the start of the allocation.
                const unaligned_ptr = self.ptrs[sizes.fields[0]];
                const aligned_ptr: [*]align(@alignOf(Elem)) u8 = @alignCast(unaligned_ptr);
                return .{
                    .bytes = aligned_ptr,
                    .len = self.len,
                    .capacity = self.capacity,
                };
            }

            /// Free the backing allocation via the reconstituted list.
            pub fn deinit(self: *Slice, gpa: Allocator) void {
                var other = self.toMultiArrayList();
                other.deinit(gpa);
                self.* = undefined;
            }

            /// Returns a `Slice` representing a range of elements in `s`, analogous to `arr[off..][0..len]`.
            /// It is illegal to call `deinit` or `toMultiArrayList` on the returned `Slice`.
            /// Asserts that `off + len <= s.len`.
            pub fn subslice(s: Slice, off: usize, len: usize) Slice {
                assert(off + len <= s.len);
                var ptrs: [fields.len][*]u8 = undefined;
                inline for (s.ptrs, &ptrs, fields) |in, *out, field| {
                    out.* = in + (off * @sizeOf(field.type));
                }
                return .{
                    .ptrs = ptrs,
                    .len = len,
                    .capacity = len,
                };
            }

            /// This function is used in the debugger pretty formatters in tools/ to fetch the
            /// child field order and entry type to facilitate fancy debug printing for this type.
            fn dbHelper(self: *Slice, child: *Elem, field: *Field, entry: *Entry) void {
                _ = self;
                _ = child;
                _ = field;
                _ = entry;
            }
        };

        const Self = @This();

        /// The stored fields of `Elem`.
        const fields = meta.fields(Elem);
        /// `sizes.bytes` is an array of @sizeOf each T field. Sorted by alignment, descending.
        /// `sizes.fields` is an array mapping from `sizes.bytes` array index to field index.
        const sizes = blk: {
            const Data = struct {
                size: usize,
                size_index: usize,
                alignment: usize,
            };
            var data: [fields.len]Data = undefined;
            for (fields, 0..) |field_info, i| {
                data[i] = .{
                    .size = @sizeOf(field_info.type),
                    .size_index = i,
                    // Zero-sized fields take no space; treat as alignment 1
                    // so they sort last and never perturb the layout.
                    .alignment = if (@sizeOf(field_info.type) == 0) 1 else field_info.alignment,
                };
            }
            const Sort = struct {
                fn lessThan(context: void, lhs: Data, rhs: Data) bool {
                    _ = context;
                    return lhs.alignment > rhs.alignment;
                }
            };
            @setEvalBranchQuota(3 * fields.len * std.math.log2(fields.len));
            mem.sort(Data, &data, {}, Sort.lessThan);
            var sizes_bytes: [fields.len]usize = undefined;
            var field_indexes: [fields.len]usize = undefined;
            for (data, 0..) |elem, i| {
                sizes_bytes[i] = elem.size;
                field_indexes[i] = elem.size_index;
            }
            break :blk .{
                .bytes = sizes_bytes,
                .fields = field_indexes,
            };
        };

        /// Release all allocated memory.
        pub fn deinit(self: *Self, gpa: Allocator) void {
            gpa.free(self.allocatedBytes());
            self.* = undefined;
        }

        /// The caller owns the returned memory. Empties this MultiArrayList.
        pub fn toOwnedSlice(self: *Self) Slice {
            const result = self.slice();
            self.* = .{};
            return result;
        }

        /// Compute pointers to the start of each field of the array.
        /// If you need to access multiple fields, calling this may
        /// be more efficient than calling `items()` multiple times.
        pub fn slice(self: Self) Slice {
            var result: Slice = .{
                .ptrs = undefined,
                .len = self.len,
                .capacity = self.capacity,
            };
            // Walk the allocation in layout order (alignment-descending),
            // recording where each field's array begins.
            var ptr: [*]u8 = self.bytes;
            for (sizes.bytes, sizes.fields) |field_size, i| {
                result.ptrs[i] = ptr;
                ptr += field_size * self.capacity;
            }
            return result;
        }

        /// Get the slice of values for a specified field.
        /// If you need multiple fields, consider calling slice()
        /// instead.
        pub fn items(self: Self, comptime field: Field) []FieldType(field) {
            return self.slice().items(field);
        }

        /// Overwrite one array element with new data.
        pub fn set(self: *Self, index: usize, elem: T) void {
            var slices = self.slice();
            slices.set(index, elem);
        }

        /// Obtain all the data for one array element.
        pub fn get(self: Self, index: usize) T {
            return self.slice().get(index);
        }

        /// Extend the list by 1 element. Allocates more memory as necessary.
        pub fn append(self: *Self, gpa: Allocator, elem: T) !void {
            try self.ensureUnusedCapacity(gpa, 1);
            self.appendAssumeCapacity(elem);
        }

        /// Extend the list by 1 element, but asserting `self.capacity`
        /// is sufficient to hold an additional item.
        pub fn appendAssumeCapacity(self: *Self, elem: T) void {
            assert(self.len < self.capacity);
            self.len += 1;
            self.set(self.len - 1, elem);
        }

        /// Extend the list by 1 element, returning the newly reserved
        /// index with uninitialized data.
        /// Allocates more memory as necessary.
        pub fn addOne(self: *Self, gpa: Allocator) Allocator.Error!usize {
            try self.ensureUnusedCapacity(gpa, 1);
            return self.addOneAssumeCapacity();
        }

        /// Extend the list by 1 element, asserting `self.capacity`
        /// is sufficient to hold an additional item. Returns the
        /// newly reserved index with uninitialized data.
        pub fn addOneAssumeCapacity(self: *Self) usize {
            assert(self.len < self.capacity);
            const index = self.len;
            self.len += 1;
            return index;
        }

        /// Remove and return the last element from the list, or return `null` if list is empty.
        /// Invalidates pointers to fields of the removed element.
        pub fn pop(self: *Self) ?T {
            if (self.len == 0) return null;
            const val = self.get(self.len - 1);
            self.len -= 1;
            return val;
        }

        /// Inserts an item into an ordered list. Shifts all elements
        /// after and including the specified index back by one and
        /// sets the given index to the specified element. May reallocate
        /// and invalidate iterators.
        pub fn insert(self: *Self, gpa: Allocator, index: usize, elem: T) !void {
            try self.ensureUnusedCapacity(gpa, 1);
            self.insertAssumeCapacity(index, elem);
        }

        /// Inserts an item into an ordered list which has room for it.
        /// Shifts all elements after and including the specified index
        /// back by one and sets the given index to the specified element.
        /// Will not reallocate the array, does not invalidate iterators.
        pub fn insertAssumeCapacity(self: *Self, index: usize, elem: T) void {
            assert(self.len < self.capacity);
            assert(index <= self.len);
            self.len += 1;
            const entry = switch (@typeInfo(T)) {
                .@"struct" => elem,
                .@"union" => Elem.fromT(elem),
                else => unreachable,
            };
            const slices = self.slice();
            inline for (fields, 0..) |field_info, field_index| {
                const field_slice = slices.items(@as(Field, @enumFromInt(field_index)));
                // Shift the tail up by one, back-to-front to avoid
                // clobbering elements not yet moved.
                var i: usize = self.len - 1;
                while (i > index) : (i -= 1) {
                    field_slice[i] = field_slice[i - 1];
                }
                field_slice[index] = @field(entry, field_info.name);
            }
        }

        /// Remove the specified item from the list, swapping the last
        /// item in the list into its position. Fast, but does not
        /// retain list ordering.
        pub fn swapRemove(self: *Self, index: usize) void {
            const slices = self.slice();
            inline for (fields, 0..) |_, i| {
                const field_slice = slices.items(@as(Field, @enumFromInt(i)));
                field_slice[index] = field_slice[self.len - 1];
                field_slice[self.len - 1] = undefined;
            }
            self.len -= 1;
        }

        /// Remove the specified item from the list, shifting items
        /// after it to preserve order.
        pub fn orderedRemove(self: *Self, index: usize) void {
            const slices = self.slice();
            inline for (fields, 0..) |_, field_index| {
                const field_slice = slices.items(@as(Field, @enumFromInt(field_index)));
                var i = index;
                while (i < self.len - 1) : (i += 1) {
                    field_slice[i] = field_slice[i + 1];
                }
                field_slice[i] = undefined;
            }
            self.len -= 1;
        }

        /// Remove the elements indexed by `sorted_indexes`. The indexes to be
        /// removed correspond to the array list before deletion.
        ///
        /// Asserts:
        /// * Each index to be removed is in bounds.
        /// * The indexes to be removed are sorted ascending.
        ///
        /// Duplicates in `sorted_indexes` are allowed.
        ///
        /// This operation is O(N).
        ///
        /// Invalidates element pointers beyond the first deleted index.
        pub fn orderedRemoveMany(self: *Self, sorted_indexes: []const usize) void {
            if (sorted_indexes.len == 0) return;
            const slices = self.slice();
            // `shift` counts how many removed elements precede the span
            // currently being moved, i.e. how far left it must slide.
            var shift: usize = 1;
            for (sorted_indexes[0 .. sorted_indexes.len - 1], sorted_indexes[1..]) |removed, end| {
                if (removed == end) continue; // allows duplicates in `sorted_indexes`
                const start = removed + 1;
                const len = end - start; // safety checks `sorted_indexes` are sorted
                inline for (fields, 0..) |_, field_index| {
                    const field_slice = slices.items(@enumFromInt(field_index));
                    @memmove(field_slice[start - shift ..][0..len], field_slice[start..][0..len]); // safety checks initial `sorted_indexes` are in range
                }
                shift += 1;
            }
            // Move the final span: everything after the last removed index.
            const start = sorted_indexes[sorted_indexes.len - 1] + 1;
            const end = self.len;
            const len = end - start; // safety checks final `sorted_indexes` are in range
            inline for (fields, 0..) |_, field_index| {
                const field_slice = slices.items(@enumFromInt(field_index));
                @memmove(field_slice[start - shift ..][0..len], field_slice[start..][0..len]);
            }
            self.len = end - shift;
        }

        /// Adjust the list's length to `new_len`.
        /// Does not initialize added items, if any.
        pub fn resize(self: *Self, gpa: Allocator, new_len: usize) !void {
            try self.ensureTotalCapacity(gpa, new_len);
            self.len = new_len;
        }

        /// Attempt to reduce allocated capacity to `new_len`.
        /// If `new_len` is greater than zero, this may fail to reduce the capacity,
        /// but the data remains intact and the length is updated to new_len.
        pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void {
            if (new_len == 0) return clearAndFree(self, gpa);

            assert(new_len <= self.capacity);
            assert(new_len <= self.len);

            // Allocation failure is tolerated: keep the old (larger) buffer
            // and just poison the now-unused tail.
            const other_bytes = gpa.alignedAlloc(u8, .of(Elem), capacityInBytes(new_len)) catch {
                const self_slice = self.slice();
                inline for (fields, 0..) |field_info, i| {
                    if (@sizeOf(field_info.type) != 0) {
                        const field = @as(Field, @enumFromInt(i));
                        const dest_slice = self_slice.items(field)[new_len..];
                        // We use memset here for more efficient codegen in safety-checked,
                        // valgrind-enabled builds. Otherwise the valgrind client request
                        // will be repeated for every element.
                        @memset(dest_slice, undefined);
                    }
                }
                self.len = new_len;
                return;
            };
            var other = Self{
                .bytes = other_bytes.ptr,
                .capacity = new_len,
                .len = new_len,
            };
            self.len = new_len;
            const self_slice = self.slice();
            const other_slice = other.slice();
            inline for (fields, 0..) |field_info, i| {
                if (@sizeOf(field_info.type) != 0) {
                    const field = @as(Field, @enumFromInt(i));
                    @memcpy(other_slice.items(field), self_slice.items(field));
                }
            }
            gpa.free(self.allocatedBytes());
            self.* = other;
        }

        /// Remove all elements and free the backing allocation.
        pub fn clearAndFree(self: *Self, gpa: Allocator) void {
            gpa.free(self.allocatedBytes());
            self.* = .{};
        }

        /// Reduce length to `new_len`.
        /// Invalidates pointers to elements `items[new_len..]`.
        /// Keeps capacity the same.
        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
            self.len = new_len;
        }

        /// Invalidates all element pointers.
        pub fn clearRetainingCapacity(self: *Self) void {
            self.len = 0;
        }

        /// Modify the array so that it can hold at least `new_capacity` items.
        /// Implements super-linear growth to achieve amortized O(1) append operations.
        /// Invalidates element pointers if additional memory is needed.
        pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) Allocator.Error!void {
            if (self.capacity >= new_capacity) return;
            return self.setCapacity(gpa, growCapacity(new_capacity));
        }

        /// Starting capacity chosen so the first allocation spans roughly
        /// one cache line of the largest field.
        const init_capacity: comptime_int = init: {
            var max: comptime_int = 1;
            for (fields) |field| max = @max(max, @sizeOf(field.type));
            break :init @max(1, std.atomic.cache_line / max);
        };

        /// Given a lower bound of required memory capacity, returns a larger value
        /// with super-linear growth.
        pub fn growCapacity(minimum: usize) usize {
            // Saturating add: grow by ~1.5x plus the initial capacity.
            return minimum +| (minimum / 2 + init_capacity);
        }

        /// Modify the array so that it can hold at least `additional_count` **more** items.
        /// Invalidates pointers if additional memory is needed.
        pub fn ensureUnusedCapacity(self: *Self, gpa: Allocator, additional_count: usize) !void {
            return self.ensureTotalCapacity(gpa, self.len + additional_count);
        }

        /// Modify the array so that it can hold exactly `new_capacity` items.
        /// Invalidates pointers if additional memory is needed.
        /// `new_capacity` must be greater or equal to `len`.
        pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
            assert(new_capacity >= self.len);
            const new_bytes = try gpa.alignedAlloc(u8, .of(Elem), capacityInBytes(new_capacity));
            if (self.len == 0) {
                // Nothing to copy; just swap allocations.
                gpa.free(self.allocatedBytes());
                self.bytes = new_bytes.ptr;
                self.capacity = new_capacity;
                return;
            }
            var other = Self{
                .bytes = new_bytes.ptr,
                .capacity = new_capacity,
                .len = self.len,
            };
            // Copy field-by-field: array offsets differ between the old and
            // new capacities, so a single bulk copy would be wrong.
            const self_slice = self.slice();
            const other_slice = other.slice();
            inline for (fields, 0..) |field_info, i| {
                if (@sizeOf(field_info.type) != 0) {
                    const field = @as(Field, @enumFromInt(i));
                    @memcpy(other_slice.items(field), self_slice.items(field));
                }
            }
            gpa.free(self.allocatedBytes());
            self.* = other;
        }

        /// Create a copy of this list with a new backing store,
        /// using the specified allocator.
        pub fn clone(self: Self, gpa: Allocator) !Self {
            var result = Self{};
            errdefer result.deinit(gpa);
            try result.ensureTotalCapacity(gpa, self.len);
            result.len = self.len;
            const self_slice = self.slice();
            const result_slice = result.slice();
            inline for (fields, 0..) |field_info, i| {
                if (@sizeOf(field_info.type) != 0) {
                    const field = @as(Field, @enumFromInt(i));
                    @memcpy(result_slice.items(field), self_slice.items(field));
                }
            }
            return result;
        }

        /// `ctx` has the following method:
        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
        fn sortInternal(self: Self, a: usize, b: usize, ctx: anytype, comptime mode: std.sort.Mode) void {
            // Adapter satisfying the swap/lessThan interface of
            // mem.sortContext: a swap must exchange every field array entry.
            const sort_context: struct {
                sub_ctx: @TypeOf(ctx),
                slice: Slice,

                pub fn swap(sc: @This(), a_index: usize, b_index: usize) void {
                    inline for (fields, 0..) |field_info, i| {
                        if (@sizeOf(field_info.type) != 0) {
                            const field: Field = @enumFromInt(i);
                            const ptr = sc.slice.items(field);
                            mem.swap(field_info.type, &ptr[a_index], &ptr[b_index]);
                        }
                    }
                }

                pub fn lessThan(sc: @This(), a_index: usize, b_index: usize) bool {
                    return sc.sub_ctx.lessThan(a_index, b_index);
                }
            } = .{
                .sub_ctx = ctx,
                .slice = self.slice(),
            };

            switch (mode) {
                .stable => mem.sortContext(a, b, sort_context),
                .unstable => mem.sortUnstableContext(a, b, sort_context),
            }
        }

        /// This function guarantees a stable sort, i.e the relative order of equal elements is preserved during sorting.
        /// Read more about stable sorting here: https://en.wikipedia.org/wiki/Sorting_algorithm#Stability
        /// If this guarantee does not matter, `sortUnstable` might be a faster alternative.
        /// `ctx` has the following method:
        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
        pub fn sort(self: Self, ctx: anytype) void {
            self.sortInternal(0, self.len, ctx, .stable);
        }

        /// Sorts only the subsection of items between indices `a` and `b` (excluding `b`)
        /// This function guarantees a stable sort, i.e the relative order of equal elements is preserved during sorting.
        /// Read more about stable sorting here: https://en.wikipedia.org/wiki/Sorting_algorithm#Stability
        /// If this guarantee does not matter, `sortSpanUnstable` might be a faster alternative.
        /// `ctx` has the following method:
        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
        pub fn sortSpan(self: Self, a: usize, b: usize, ctx: anytype) void {
            self.sortInternal(a, b, ctx, .stable);
        }

        /// This function does NOT guarantee a stable sort, i.e the relative order of equal elements may change during sorting.
        /// Due to the weaker guarantees of this function, this may be faster than the stable `sort` method.
        /// Read more about stable sorting here: https://en.wikipedia.org/wiki/Sorting_algorithm#Stability
        /// `ctx` has the following method:
        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
        pub fn sortUnstable(self: Self, ctx: anytype) void {
            self.sortInternal(0, self.len, ctx, .unstable);
        }

        /// Sorts only the subsection of items between indices `a` and `b` (excluding `b`)
        /// This function does NOT guarantee a stable sort, i.e the relative order of equal elements may change during sorting.
        /// Due to the weaker guarantees of this function, this may be faster than the stable `sortSpan` method.
        /// Read more about stable sorting here: https://en.wikipedia.org/wiki/Sorting_algorithm#Stability
        /// `ctx` has the following method:
        /// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
        pub fn sortSpanUnstable(self: Self, a: usize, b: usize, ctx: anytype) void {
            self.sortInternal(a, b, ctx, .unstable);
        }

        /// Total bytes needed to store `capacity` elements (sum of all
        /// per-field array sizes).
        pub fn capacityInBytes(capacity: usize) usize {
            comptime var elem_bytes: usize = 0;
            inline for (sizes.bytes) |size| elem_bytes += size;
            return elem_bytes * capacity;
        }

        /// The full backing allocation as a byte slice (for freeing/resizing).
        fn allocatedBytes(self: Self) []align(@alignOf(Elem)) u8 {
            return self.bytes[0..capacityInBytes(self.capacity)];
        }

        /// The type stored for a given field.
        fn FieldType(comptime field: Field) type {
            return @FieldType(Elem, @tagName(field));
        }

        /// A struct of `<field>_ptr` pointers, one per field, in layout order.
        /// Used only by the debugger pretty-printer helpers below.
        const Entry = entry: {
            var field_names: [fields.len][]const u8 = undefined;
            var field_types: [fields.len]type = undefined;
            var field_attrs: [fields.len]std.builtin.Type.StructField.Attributes = undefined;
            for (sizes.fields, &field_names, &field_types, &field_attrs) |i, *name, *Type, *attrs| {
                name.* = fields[i].name ++ "_ptr";
                Type.* = *fields[i].type;
                attrs.* = .{
                    .@"comptime" = fields[i].is_comptime,
                    .@"align" = fields[i].alignment,
                };
            }
            break :entry @Struct(.@"extern", null, &field_names, &field_types, &field_attrs);
        };
        /// This function is used in the debugger pretty formatters in tools/ to fetch the
        /// child field order and entry type to facilitate fancy debug printing for this type.
        fn dbHelper(self: *Self, child: *Elem, field: *Field, entry: *Entry) void {
            _ = self;
            _ = child;
            _ = field;
            _ = entry;
        }

        comptime {
            // Keep the debug helpers alive so debug info references them.
            if (builtin.zig_backend == .stage2_llvm and !builtin.strip_debug_info) {
                _ = &dbHelper;
                _ = &Slice.dbHelper;
            }
        }
    };
}

// Exercises the struct path: append, growth, shrink, addOne/set, and pop.
test "basic usage" {
    const ally = testing.allocator;

    const Foo = struct {
        a: u32,
        b: []const u8,
        c: u8,
    };

    var list = MultiArrayList(Foo){};
    defer list.deinit(ally);

    // An unallocated list must still yield valid (empty) field slices.
    try testing.expectEqual(@as(usize, 0), list.items(.a).len);

    try list.ensureTotalCapacity(ally, 2);

    list.appendAssumeCapacity(.{
        .a = 1,
        .b = "foobar",
        .c = 'a',
    });

    list.appendAssumeCapacity(.{
        .a = 2,
        .b = "zigzag",
        .c = 'b',
    });

    try testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2 });
    try testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b' });

    try testing.expectEqual(@as(usize, 2), list.items(.b).len);
    try testing.expectEqualStrings("foobar", list.items(.b)[0]);
    try testing.expectEqualStrings("zigzag", list.items(.b)[1]);

    try list.append(ally, .{
        .a = 3,
        .b = "fizzbuzz",
        .c = 'c',
    });

    try testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2, 3 });
    try testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b', 'c' });

    try testing.expectEqual(@as(usize, 3), list.items(.b).len);
    try testing.expectEqualStrings("foobar", list.items(.b)[0]);
    try testing.expectEqualStrings("zigzag", list.items(.b)[1]);
    try testing.expectEqualStrings("fizzbuzz", list.items(.b)[2]);

    // Add 6 more things to force a capacity increase.
    var i: usize = 0;
    while (i < 6) : (i += 1) {
        try list.append(ally, .{
            .a = @as(u32, @intCast(4 + i)),
            .b = "whatever",
            .c = @as(u8, @intCast('d' + i)),
        });
    }

    try testing.expectEqualSlices(
        u32,
        &[_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
        list.items(.a),
    );
    try testing.expectEqualSlices(
        u8,
        &[_]u8{ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i' },
        list.items(.c),
    );

    // Shrinking must preserve the first `new_len` elements of every field.
    list.shrinkAndFree(ally, 3);

    try testing.expectEqualSlices(u32, list.items(.a), &[_]u32{ 1, 2, 3 });
    try testing.expectEqualSlices(u8, list.items(.c), &[_]u8{ 'a', 'b', 'c' });

    try testing.expectEqual(@as(usize, 3), list.items(.b).len);
    try testing.expectEqualStrings("foobar", list.items(.b)[0]);
    try testing.expectEqualStrings("zigzag", list.items(.b)[1]);
    try testing.expectEqualStrings("fizzbuzz", list.items(.b)[2]);

    list.set(try list.addOne(ally), .{
        .a = 4,
        .b = "xnopyt",
        .c = 'd',
    });
    // Pop back down to empty, checking each popped element on the way.
    try testing.expectEqualStrings("xnopyt", list.pop().?.b);
    try testing.expectEqual(@as(?u8, 'c'), if (list.pop()) |elem| elem.c else null);
    try testing.expectEqual(@as(u32, 2), list.pop().?.a);
    try testing.expectEqual(@as(u8, 'a'), list.pop().?.c);
    try testing.expectEqual(@as(?Foo, null), list.pop());

    list.clearRetainingCapacity();
    try testing.expectEqual(0, list.len);
    try testing.expect(list.capacity > 0);

    list.clearAndFree(ally);
    try testing.expectEqual(0, list.len);
    try testing.expectEqual(0, list.capacity);
}

// This was observed to fail on aarch64 with LLVM 11, when the capacityInBytes
// function used the @reduce code path.
test "regression test for @reduce bug" {
    const ally = testing.allocator;
    var list = MultiArrayList(struct {
        tag: std.zig.Token.Tag,
        start: u32,
    }){};
    defer list.deinit(ally);

    try list.ensureTotalCapacity(ally, 20);

    // Token stream of a small Zig program; appending past 20 elements
    // forces at least one reallocation.
    try list.append(ally, .{ .tag = .keyword_const, .start = 0 });
    try list.append(ally, .{ .tag = .identifier, .start = 6 });
    try list.append(ally, .{ .tag = .equal, .start = 10 });
    try list.append(ally, .{ .tag = .builtin, .start = 12 });
    try list.append(ally, .{ .tag = .l_paren, .start = 19 });
    try list.append(ally, .{ .tag = .string_literal, .start = 20 });
    try list.append(ally, .{ .tag = .r_paren, .start = 25 });
    try list.append(ally, .{ .tag = .semicolon, .start = 26 });
    try list.append(ally, .{ .tag = .keyword_pub, .start = 29 });
    try list.append(ally, .{ .tag = .keyword_fn, .start = 33 });
    try list.append(ally, .{ .tag = .identifier, .start = 36 });
    try list.append(ally, .{ .tag = .l_paren, .start = 40 });
    try list.append(ally, .{ .tag = .r_paren, .start = 41 });
    try list.append(ally, .{ .tag = .identifier, .start = 43 });
    try list.append(ally, .{ .tag = .bang, .start = 51 });
    try list.append(ally, .{ .tag = .identifier, .start = 52 });
    try list.append(ally, .{ .tag = .l_brace, .start = 57 });
    try list.append(ally, .{ .tag = .identifier, .start = 63 });
    try list.append(ally, .{ .tag = .period, .start = 66 });
    try list.append(ally, .{ .tag = .identifier, .start = 67 });
    try list.append(ally, .{ .tag = .period, .start = 70 });
    try list.append(ally, .{ .tag = .identifier, .start = 71 });
    try list.append(ally, .{ .tag = .l_paren, .start = 75 });
    try list.append(ally, .{ .tag = .string_literal, .start = 76 });
    try list.append(ally, .{ .tag = .comma, .start = 113 });
    try list.append(ally, .{ .tag = .period, .start = 115 });
    try list.append(ally, .{ .tag = .l_brace, .start = 116 });
    try list.append(ally, .{ .tag = .r_brace, .start = 117 });
    try list.append(ally, .{ .tag = .r_paren, .start = 118 });
    try list.append(ally, .{ .tag = .semicolon, .start = 119 });
    try list.append(ally, .{ .tag = .r_brace, .start = 121 });
    try list.append(ally, .{ .tag = .eof, .start = 123 });

    // Verify every tag survived the reallocation(s).
    const tags = list.items(.tag);
    try testing.expectEqual(tags[1], .identifier);
    try testing.expectEqual(tags[2], .equal);
    try testing.expectEqual(tags[3], .builtin);
    try testing.expectEqual(tags[4], .l_paren);
    try testing.expectEqual(tags[5], .string_literal);
    try testing.expectEqual(tags[6], .r_paren);
    try testing.expectEqual(tags[7], .semicolon);
    try testing.expectEqual(tags[8], .keyword_pub);
    try testing.expectEqual(tags[9], .keyword_fn);
    try testing.expectEqual(tags[10], .identifier);
    try testing.expectEqual(tags[11], .l_paren);
    try testing.expectEqual(tags[12], .r_paren);
    try testing.expectEqual(tags[13], .identifier);
    try testing.expectEqual(tags[14], .bang);
    try testing.expectEqual(tags[15], .identifier);
    try testing.expectEqual(tags[16], .l_brace);
    try testing.expectEqual(tags[17], .identifier);
    try testing.expectEqual(tags[18], .period);
    try testing.expectEqual(tags[19], .identifier);
    try testing.expectEqual(tags[20], .period);
    try testing.expectEqual(tags[21], .identifier);
    try testing.expectEqual(tags[22], .l_paren);
    try testing.expectEqual(tags[23], .string_literal);
    try testing.expectEqual(tags[24], .comma);
    try testing.expectEqual(tags[25], .period);
    try testing.expectEqual(tags[26], .l_brace);
    try testing.expectEqual(tags[27], .r_brace);
    try testing.expectEqual(tags[28], .r_paren);
    try testing.expectEqual(tags[29], .semicolon);
    try testing.expectEqual(tags[30], .r_brace);
    try testing.expectEqual(tags[31], .eof);
}

// Verifies that reserved capacity can be filled, reset via `len = 0`,
// refilled, and grown again, with field arrays staying consistent.
test "ensure capacity on empty list" {
    const gpa = testing.allocator;

    const Pair = struct {
        a: u32,
        b: u8,
    };

    var list: MultiArrayList(Pair) = .empty;
    defer list.deinit(gpa);

    try list.ensureTotalCapacity(gpa, 2);

    // Round 1: fill the freshly reserved capacity.
    for ([_]Pair{ .{ .a = 1, .b = 2 }, .{ .a = 3, .b = 4 } }) |elem| {
        list.appendAssumeCapacity(elem);
    }
    try testing.expectEqualSlices(u32, &[_]u32{ 1, 3 }, list.items(.a));
    try testing.expectEqualSlices(u8, &[_]u8{ 2, 4 }, list.items(.b));

    // Round 2: reset the length and reuse the same capacity.
    list.len = 0;
    for ([_]Pair{ .{ .a = 5, .b = 6 }, .{ .a = 7, .b = 8 } }) |elem| {
        list.appendAssumeCapacity(elem);
    }
    try testing.expectEqualSlices(u32, &[_]u32{ 5, 7 }, list.items(.a));
    try testing.expectEqualSlices(u8, &[_]u8{ 6, 8 }, list.items(.b));

    // Round 3: reset again, then grow the capacity before appending.
    list.len = 0;
    try list.ensureTotalCapacity(gpa, 16);
    for ([_]Pair{ .{ .a = 9, .b = 10 }, .{ .a = 11, .b = 12 } }) |elem| {
        list.appendAssumeCapacity(elem);
    }
    try testing.expectEqualSlices(u32, &[_]u32{ 9, 11 }, list.items(.a));
    try testing.expectEqualSlices(u8, &[_]u8{ 10, 12 }, list.items(.b));
}

// Verifies ordered insertion: first into an empty list (allocating),
// then at the end using pre-reserved capacity.
test "insert elements" {
    const gpa = testing.allocator;

    const Item = struct {
        a: u8,
        b: u32,
    };

    var list: MultiArrayList(Item) = .empty;
    defer list.deinit(gpa);

    try list.insert(gpa, 0, .{ .a = 1, .b = 2 });
    try list.ensureUnusedCapacity(gpa, 1);
    list.insertAssumeCapacity(1, .{ .a = 2, .b = 3 });

    try testing.expectEqualSlices(u8, &[_]u8{ 1, 2 }, list.items(.a));
    try testing.expectEqualSlices(u32, &[_]u32{ 2, 3 }, list.items(.b));
}

test "union" {
    const ally = testing.allocator;

    const Foo = union(enum) {
        a: u32,
        b: []const u8,
    };

    var list = MultiArrayList(Foo){};
    defer list.deinit(ally);

    try testing.expectEqual(@as(usize, 0), list.items(.tags).len);

    try list.ensureTotalCapacity(ally, 3);

    list.appendAssumeCapacity(.{ .a = 1 });
    list.appendAssumeCapacity(.{ .b = "zigzag" });

    // expectEqualSlices takes the expected slice before the actual one.
    try testing.expectEqualSlices(meta.Tag(Foo), &.{ .a, .b }, list.items(.tags));
    try testing.expectEqual(@as(usize, 2), list.items(.tags).len);

    list.appendAssumeCapacity(.{ .b = "foobar" });
    try testing.expectEqualStrings("zigzag", list.items(.data)[1].b);
    try testing.expectEqualStrings("foobar", list.items(.data)[2].b);

    // Add 6 more things to force a capacity increase.
    for (0..6) |i| {
        try list.append(ally, .{ .a = @as(u32, @intCast(4 + i)) });
    }

    try testing.expectEqualSlices(
        meta.Tag(Foo),
        &.{ .a, .b, .b, .a, .a, .a, .a, .a, .a },
        list.items(.tags),
    );
    try testing.expectEqual(Foo{ .a = 1 }, list.get(0));
    try testing.expectEqual(Foo{ .b = "zigzag" }, list.get(1));
    try testing.expectEqual(Foo{ .b = "foobar" }, list.get(2));
    try testing.expectEqual(Foo{ .a = 4 }, list.get(3));
    try testing.expectEqual(Foo{ .a = 5 }, list.get(4));
    try testing.expectEqual(Foo{ .a = 6 }, list.get(5));
    try testing.expectEqual(Foo{ .a = 7 }, list.get(6));
    try testing.expectEqual(Foo{ .a = 8 }, list.get(7));
    try testing.expectEqual(Foo{ .a = 9 }, list.get(8));

    // Shrinking back to 3 must preserve the first three elements intact.
    list.shrinkAndFree(ally, 3);

    try testing.expectEqual(@as(usize, 3), list.items(.tags).len);
    try testing.expectEqualSlices(meta.Tag(Foo), &.{ .a, .b, .b }, list.items(.tags));

    try testing.expectEqual(Foo{ .a = 1 }, list.get(0));
    try testing.expectEqual(Foo{ .b = "zigzag" }, list.get(1));
    try testing.expectEqual(Foo{ .b = "foobar" }, list.get(2));
}
935
test "sorting a span" {
    var list: MultiArrayList(struct { score: u32, chr: u8 }) = .{};
    defer list.deinit(testing.allocator);

    try list.ensureTotalCapacity(testing.allocator, 42);
    for (
        // zig fmt: off
        [42]u8{ 'b', 'a', 'c', 'a', 'b', 'c', 'b', 'c', 'b', 'a', 'b', 'a', 'b', 'c', 'b', 'a', 'a', 'c', 'c', 'a', 'c', 'b', 'a', 'c', 'a', 'b', 'b', 'c', 'c', 'b', 'a', 'b', 'a', 'b', 'c', 'b', 'a', 'a', 'c', 'c', 'a', 'c' },
        [42]u32{ 1, 1, 1, 2, 2, 2, 3, 3, 4, 3, 5, 4, 6, 4, 7, 5, 6, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 9, 10, 11, 10, 12, 11, 13, 11, 14, 12, 13, 12, 13, 14, 14 },
        // zig fmt: on
    ) |chr, score| {
        list.appendAssumeCapacity(.{ .chr = chr, .score = score });
    }

    // Sort only indices [6, 21) by character; sortSpan is stable, so equal
    // characters keep their relative (ascending score) order.
    const sliced = list.slice();
    list.sortSpan(6, 21, struct {
        chars: []const u8,

        fn lessThan(ctx: @This(), a: usize, b: usize) bool {
            return ctx.chars[a] < ctx.chars[b];
        }
    }{ .chars = sliced.items(.chr) });

    // The sorted span consists of three runs of five: 'a' then 'b' then 'c',
    // each carrying scores 3..7 in order.
    var j: u32 = 6;
    var c: u8 = 'a';

    while (j < 21) {
        const i = j;
        j += 5;
        var n: u32 = 3;
        for (sliced.items(.chr)[i..j], sliced.items(.score)[i..j]) |chr, score| {
            // expectEqual takes the expected value first.
            try testing.expectEqual(n, score);
            try testing.expectEqual(c, chr);
            n += 1;
        }
        c += 1;
    }
}
975
test "0 sized struct field" {
    const gpa = testing.allocator;

    // u0 occupies no storage, so only the f32 field consumes bytes.
    const Elem = struct {
        a: u0,
        b: f32,
    };

    var list: MultiArrayList(Elem) = .empty;
    defer list.deinit(gpa);

    try testing.expectEqualSlices(u0, &[_]u0{}, list.items(.a));
    try testing.expectEqualSlices(f32, &[_]f32{}, list.items(.b));

    try list.append(gpa, .{ .a = 0, .b = 42.0 });
    try testing.expectEqualSlices(u0, &[_]u0{0}, list.items(.a));
    try testing.expectEqualSlices(f32, &[_]f32{42.0}, list.items(.b));

    try list.insert(gpa, 0, .{ .a = 0, .b = -1.0 });
    try testing.expectEqualSlices(u0, &[_]u0{ 0, 0 }, list.items(.a));
    try testing.expectEqualSlices(f32, &[_]f32{ -1.0, 42.0 }, list.items(.b));

    list.swapRemove(list.len - 1);
    try testing.expectEqualSlices(u0, &[_]u0{0}, list.items(.a));
    try testing.expectEqualSlices(f32, &[_]f32{-1.0}, list.items(.b));
}
1002
test "0 sized struct" {
    const gpa = testing.allocator;

    // The whole element is zero-sized; the list should still track length.
    const Elem = struct {
        a: u0,
    };

    var list: MultiArrayList(Elem) = .empty;
    defer list.deinit(gpa);

    try testing.expectEqualSlices(u0, &[_]u0{}, list.items(.a));

    try list.append(gpa, .{ .a = 0 });
    try testing.expectEqualSlices(u0, &[_]u0{0}, list.items(.a));

    try list.insert(gpa, 0, .{ .a = 0 });
    try testing.expectEqualSlices(u0, &[_]u0{ 0, 0 }, list.items(.a));

    list.swapRemove(list.len - 1);
    try testing.expectEqualSlices(u0, &[_]u0{0}, list.items(.a));
}
1024
test "struct with many fields" {
    const ManyFields = struct {
        // Builds an extern struct with `count` u32 fields named a0, a1, ...
        fn Type(count: comptime_int) type {
            @setEvalBranchQuota(50000);
            var names: [count][]const u8 = undefined;
            for (0..count) |idx| {
                names[idx] = std.fmt.comptimePrint("a{d}", .{idx});
            }
            return @Struct(.@"extern", null, &names, &@splat(u32), &@splat(.{}));
        }

        fn doTest(gpa: std.mem.Allocator, count: comptime_int) !void {
            var list: MultiArrayList(Type(count)) = .empty;
            defer list.deinit(gpa);

            // One element is enough to exercise per-field pointer computation.
            try list.resize(gpa, 1);
            list.items(.a0)[0] = 42;
        }
    };

    inline for (.{ 25, 50, 100, 200 }) |count| {
        try ManyFields.doTest(testing.allocator, count);
    }
}
1048
test "orderedRemoveMany" {
    const ally = testing.allocator;

    var list: MultiArrayList(struct { x: usize }) = .empty;
    defer list.deinit(ally);

    var n: usize = 0;
    while (n < 10) : (n += 1) {
        try list.append(ally, .{ .x = n });
    }

    // Duplicate indices (5 appears twice) remove the element only once.
    list.orderedRemoveMany(&.{ 1, 5, 5, 7, 9 });
    try testing.expectEqualSlices(usize, &.{ 0, 2, 3, 4, 6, 8 }, list.items(.x));

    list.orderedRemoveMany(&.{0});
    try testing.expectEqualSlices(usize, &.{ 2, 3, 4, 6, 8 }, list.items(.x));

    // An empty index list is a no-op.
    list.orderedRemoveMany(&.{});
    try testing.expectEqualSlices(usize, &.{ 2, 3, 4, 6, 8 }, list.items(.x));

    list.orderedRemoveMany(&.{ 1, 2, 3, 4 });
    try testing.expectEqualSlices(usize, &.{2}, list.items(.x));

    list.orderedRemoveMany(&.{0});
    try testing.expectEqualSlices(usize, &.{}, list.items(.x));
}