const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Zcu = @import("Zcu.zig");
const InternPool = @import("InternPool.zig");
const Type = @import("Type.zig");
const Value = @import("Value.zig");

/// We use a tagged union here because while it wastes a few bytes for some tags, having a fixed
/// size for the type makes the common `aggregate` representation more efficient.
/// For aggregates, the sentinel value, if any, *is* stored.
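/// (For instance, an aggregate of type `[2:0]u8` stores 3 elements, the sentinel included.)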
pub const MutableValue = union(enum) {
    /// An interned value.
    interned: InternPool.Index,
    /// An error union value which is a payload (not an error).
    eu_payload: SubValue,
    /// An optional value which is a payload (not `null`).
    opt_payload: SubValue,
    /// An aggregate consisting of a single repeated value.
    repeated: SubValue,
    /// An aggregate of `u8` consisting of "plain" bytes (no lazy or undefined elements).
    bytes: Bytes,
    /// An aggregate with arbitrary sub-values.
    aggregate: Aggregate,
    /// A slice, containing a pointer and length.
    slice: Slice,
    /// An instance of a union.
    un: Union,

    pub const SubValue = struct {
        ty: InternPool.Index,
        child: *MutableValue,
    };
    pub const Bytes = struct {
        ty: InternPool.Index,
        data: []u8,
    };
    pub const Aggregate = struct {
        ty: InternPool.Index,
        elems: []MutableValue,
    };
    pub const Slice = struct {
        ty: InternPool.Index,
        /// Must have the appropriate many-ptr type.
        /// TODO: we want this to be an `InternPool.Index`, but `Sema.beginComptimePtrMutation` doesn't support it.
        ptr: *MutableValue,
        /// Must be of type `usize`.
        /// TODO: we want this to be an `InternPool.Index`, but `Sema.beginComptimePtrMutation` doesn't support it.
        len: *MutableValue,
    };
    pub const Union = struct {
        ty: InternPool.Index,
        tag: InternPool.Index,
        payload: *MutableValue,
    };

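    /// Recursively interns all sub-values, then the value itself, returning the resulting `Value`.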
    pub fn intern(mv: MutableValue, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
        return Value.fromInterned(switch (mv) {
            .interned => |ip_index| ip_index,
            .eu_payload => |sv| try pt.intern(.{ .error_union = .{
                .ty = sv.ty,
                .val = .{ .payload = (try sv.child.intern(pt, arena)).toIntern() },
            } }),
            .opt_payload => |sv| try pt.intern(.{ .opt = .{
                .ty = sv.ty,
                .val = (try sv.child.intern(pt, arena)).toIntern(),
            } }),
            .repeated => |sv| return pt.aggregateSplatValue(.fromInterned(sv.ty), try sv.child.intern(pt, arena)),
            .bytes => |b| try pt.intern(.{ .aggregate = .{
                .ty = b.ty,
                .storage = .{ .bytes = try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, pt.tid, b.data, .maybe_embedded_nulls) },
            } }),
            .aggregate => |a| {
                const elems = try arena.alloc(InternPool.Index, a.elems.len);
                for (a.elems, elems) |mut_elem, *interned_elem| {
                    interned_elem.* = (try mut_elem.intern(pt, arena)).toIntern();
                }
                return pt.aggregateValue(.fromInterned(a.ty), elems);
            },
            .slice => |s| try pt.intern(.{ .slice = .{
                .ty = s.ty,
                .ptr = (try s.ptr.intern(pt, arena)).toIntern(),
                .len = (try s.len.intern(pt, arena)).toIntern(),
            } }),
            .un => |u| try pt.internUnion(.{
                .ty = u.ty,
                .tag = u.tag,
                .val = (try u.payload.intern(pt, arena)).toIntern(),
            }),
        });
    }

    /// Un-interns the top level of this `MutableValue`, if applicable.
    /// * Non-error error unions use `eu_payload`
    /// * Non-null optionals use `opt_payload`
    /// * Slices use `slice`
    /// * Unions use `un`
    /// * Aggregates use `repeated` or `bytes` or `aggregate`
    /// If `!allow_bytes`, the `bytes` representation will not be used.
    /// If `!allow_repeated`, the `repeated` representation will not be used.
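    /// For example, an interned non-null optional becomes `.opt_payload` wrapping an `.interned` child.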
    pub fn unintern(
        mv: *MutableValue,
        pt: Zcu.PerThread,
        arena: Allocator,
        allow_bytes: bool,
        allow_repeated: bool,
    ) Allocator.Error!void {
        const zcu = pt.zcu;
        const ip = &zcu.intern_pool;
        switch (mv.*) {
            .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                .opt => |opt| if (opt.val != .none) {
                    const mut_payload = try arena.create(MutableValue);
                    mut_payload.* = .{ .interned = opt.val };
                    mv.* = .{ .opt_payload = .{
                        .ty = opt.ty,
                        .child = mut_payload,
                    } };
                },
                .error_union => |eu| switch (eu.val) {
                    .err_name => {},
                    .payload => |payload| {
                        const mut_payload = try arena.create(MutableValue);
                        mut_payload.* = .{ .interned = payload };
                        mv.* = .{ .eu_payload = .{
                            .ty = eu.ty,
                            .child = mut_payload,
                        } };
                    },
                },
                .slice => |slice| {
                    const ptr = try arena.create(MutableValue);
                    const len = try arena.create(MutableValue);
                    ptr.* = .{ .interned = slice.ptr };
                    len.* = .{ .interned = slice.len };
                    mv.* = .{ .slice = .{
                        .ty = slice.ty,
                        .ptr = ptr,
                        .len = len,
                    } };
                },
                .un => |un| {
                    const payload = try arena.create(MutableValue);
                    payload.* = .{ .interned = un.val };
                    mv.* = .{ .un = .{
                        .ty = un.ty,
                        .tag = un.tag,
                        .payload = payload,
                    } };
                },
                .aggregate => |agg| switch (agg.storage) {
                    .bytes => |bytes| {
                        const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(agg.ty));
                        assert(ip.childType(agg.ty) == .u8_type);
                        if (allow_bytes) {
                            const arena_bytes = try arena.alloc(u8, len);
                            @memcpy(arena_bytes, bytes.toSlice(len, ip));
                            mv.* = .{ .bytes = .{
                                .ty = agg.ty,
                                .data = arena_bytes,
                            } };
                        } else {
                            const mut_elems = try arena.alloc(MutableValue, len);
                            for (bytes.toSlice(len, ip), mut_elems) |b, *mut_elem| {
                                mut_elem.* = .{ .interned = try pt.intern(.{ .int = .{
                                    .ty = .u8_type,
                                    .storage = .{ .u64 = b },
                                } }) };
                            }
                            mv.* = .{ .aggregate = .{
                                .ty = agg.ty,
                                .elems = mut_elems,
                            } };
                        }
                    },
                    .elems => |elems| {
                        assert(elems.len == ip.aggregateTypeLenIncludingSentinel(agg.ty));
                        const mut_elems = try arena.alloc(MutableValue, elems.len);
                        for (elems, mut_elems) |interned_elem, *mut_elem| {
                            mut_elem.* = .{ .interned = interned_elem };
                        }
                        mv.* = .{ .aggregate = .{
                            .ty = agg.ty,
                            .elems = mut_elems,
                        } };
                    },
                    .repeated_elem => |val| {
                        if (allow_repeated) {
                            const repeated_val = try arena.create(MutableValue);
                            repeated_val.* = .{ .interned = val };
                            mv.* = .{ .repeated = .{
                                .ty = agg.ty,
                                .child = repeated_val,
                            } };
                        } else {
                            const len = ip.aggregateTypeLenIncludingSentinel(agg.ty);
                            const mut_elems = try arena.alloc(MutableValue, @intCast(len));
                            @memset(mut_elems, .{ .interned = val });
                            mv.* = .{ .aggregate = .{
                                .ty = agg.ty,
                                .elems = mut_elems,
                            } };
                        }
                    },
                },
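                // An interned `undef` is expanded into undef sub-values so that
                // individual fields/elements can be mutated independently.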
                .undef => |ty_ip| switch (Type.fromInterned(ty_ip).zigTypeTag(zcu)) {
                    .@"struct", .array, .vector => |type_tag| {
                        const ty = Type.fromInterned(ty_ip);
                        const opt_sent = ty.sentinel(zcu);
                        if (type_tag == .@"struct" or opt_sent != null or !allow_repeated) {
                            const len_no_sent = ip.aggregateTypeLen(ty_ip);
                            const elems = try arena.alloc(MutableValue, @intCast(len_no_sent + @intFromBool(opt_sent != null)));
                            switch (type_tag) {
                                .array, .vector => {
                                    const elem_ty = ip.childType(ty_ip);
                                    const undef_elem = try pt.intern(.{ .undef = elem_ty });
                                    @memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
                                },
                                .@"struct" => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
                                    const field_ty = ty.fieldType(i, zcu).toIntern();
                                    mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) };
                                },
                                else => unreachable,
                            }
                            if (opt_sent) |s| elems[@intCast(len_no_sent)] = .{ .interned = s.toIntern() };
                            mv.* = .{ .aggregate = .{
                                .ty = ty_ip,
                                .elems = elems,
                            } };
                        } else {
                            const repeated_val = try arena.create(MutableValue);
                            repeated_val.* = .{
                                .interned = try pt.intern(.{ .undef = ip.childType(ty_ip) }),
                            };
                            mv.* = .{ .repeated = .{
                                .ty = ty_ip,
                                .child = repeated_val,
                            } };
                        }
                    },
                    .@"union" => {
                        const payload = try arena.create(MutableValue);
                        const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(pt);
                        payload.* = .{ .interned = try pt.intern(.{ .undef = backing_ty.toIntern() }) };
                        mv.* = .{ .un = .{
                            .ty = ty_ip,
                            .tag = .none,
                            .payload = payload,
                        } };
                    },
                    .pointer => {
                        const ptr_ty = ip.indexToKey(ty_ip).ptr_type;
                        if (ptr_ty.flags.size != .slice) return;
                        const ptr = try arena.create(MutableValue);
                        const len = try arena.create(MutableValue);
                        ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) };
                        len.* = .{ .interned = .undef_usize };
                        mv.* = .{ .slice = .{
                            .ty = ty_ip,
                            .ptr = ptr,
                            .len = len,
                        } };
                    },
                    else => {},
                },
                else => {},
            },
            .bytes => |bytes| if (!allow_bytes) {
                const elems = try arena.alloc(MutableValue, bytes.data.len);
                for (bytes.data, elems) |byte, *interned_byte| {
                    interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{
                        .ty = .u8_type,
                        .storage = .{ .u64 = byte },
                    } }) };
                }
                mv.* = .{ .aggregate = .{
                    .ty = bytes.ty,
                    .elems = elems,
                } };
            },
            else => {},
        }
    }

    /// Get a pointer to the `MutableValue` associated with a field/element.
    /// The returned pointer may be written through to mutate the field value.
    /// The returned pointer is valid until the representation of `mv` changes.
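    /// (For instance, a later `setElem` or `unintern` call may switch representations, invalidating it.)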
    pub fn elem(
        mv: *MutableValue,
        pt: Zcu.PerThread,
        arena: Allocator,
        field_idx: usize,
    ) Allocator.Error!*MutableValue {
        const zcu = pt.zcu;
        const ip = &zcu.intern_pool;
        // Convert to the `aggregate` representation.
        switch (mv.*) {
            .eu_payload, .opt_payload, .un => unreachable,
            .interned => {
                try mv.unintern(pt, arena, false, false);
            },
            .bytes => |bytes| {
                const elems = try arena.alloc(MutableValue, bytes.data.len);
                for (bytes.data, elems) |byte, *interned_byte| {
                    interned_byte.* = .{ .interned = try pt.intern(.{ .int = .{
                        .ty = .u8_type,
                        .storage = .{ .u64 = byte },
                    } }) };
                }
                mv.* = .{ .aggregate = .{
                    .ty = bytes.ty,
                    .elems = elems,
                } };
            },
            .repeated => |repeated| {
                const len = ip.aggregateTypeLenIncludingSentinel(repeated.ty);
                const elems = try arena.alloc(MutableValue, @intCast(len));
                @memset(elems, repeated.child.*);
                mv.* = .{ .aggregate = .{
                    .ty = repeated.ty,
                    .elems = elems,
                } };
            },
            .slice, .aggregate => {},
        }
        switch (mv.*) {
            .aggregate => |*agg| return &agg.elems[field_idx],
            .slice => |*slice| return switch (field_idx) {
                Value.slice_ptr_index => slice.ptr,
                Value.slice_len_index => slice.len,
                else => unreachable,
            },
            else => unreachable,
        }
    }

    /// Modify a single field of a `MutableValue` which represents an aggregate or slice, leaving
    /// other fields untouched. When an entire field must be modified, this should be used in
    /// preference to `elem` to allow for an optimal representation.
    /// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
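    /// For example, overwriting one element of a `repeated` aggregate of `u8` with a different
    /// trivial integer switches to the compact `bytes` representation where possible.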
    pub fn setElem(
        mv: *MutableValue,
        pt: Zcu.PerThread,
        arena: Allocator,
        field_idx: usize,
        field_val: MutableValue,
    ) Allocator.Error!void {
        const zcu = pt.zcu;
        const ip = &zcu.intern_pool;
        const is_trivial_int = field_val.isTrivialInt(zcu);
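        // `bytes` can only hold plain integer values, so permit that representation
        // only when the incoming field value is a trivial integer.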
        try mv.unintern(pt, arena, is_trivial_int, true);
        switch (mv.*) {
            .interned,
            .eu_payload,
            .opt_payload,
            .un,
            => unreachable,
            .slice => |*s| switch (field_idx) {
                Value.slice_ptr_index => s.ptr.* = field_val,
                Value.slice_len_index => s.len.* = field_val,
                else => unreachable,
            },
            .bytes => |b| {
                assert(is_trivial_int);
                assert(field_val.typeOf(zcu).toIntern() == .u8_type);
                b.data[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
            },
            .repeated => |r| {
                if (field_val.eqlTrivial(r.child.*)) return;
                // We must switch to either the `aggregate` or the `bytes` representation.
                const len_inc_sent = ip.aggregateTypeLenIncludingSentinel(r.ty);
                if (Type.fromInterned(r.ty).zigTypeTag(zcu) != .@"struct" and
                    is_trivial_int and
                    Type.fromInterned(r.ty).childType(zcu).toIntern() == .u8_type and
                    r.child.isTrivialInt(zcu))
                {
                    // We can use the `bytes` representation.
                    const bytes = try arena.alloc(u8, @intCast(len_inc_sent));
                    const repeated_byte = Value.fromInterned(r.child.interned).toUnsignedInt(zcu);
                    @memset(bytes, @intCast(repeated_byte));
                    bytes[field_idx] = @intCast(Value.fromInterned(field_val.interned).toUnsignedInt(zcu));
                    mv.* = .{ .bytes = .{
                        .ty = r.ty,
                        .data = bytes,
                    } };
                } else {
                    // We must use the `aggregate` representation.
                    const mut_elems = try arena.alloc(MutableValue, @intCast(len_inc_sent));
                    @memset(mut_elems, r.child.*);
                    mut_elems[field_idx] = field_val;
                    mv.* = .{ .aggregate = .{
                        .ty = r.ty,
                        .elems = mut_elems,
                    } };
                }
            },
            .aggregate => |a| {
                a.elems[field_idx] = field_val;
                const is_struct = Type.fromInterned(a.ty).zigTypeTag(zcu) == .@"struct";
                // Attempt to switch to a more efficient representation.
                const is_repeated = for (a.elems) |e| {
                    if (!e.eqlTrivial(field_val)) break false;
                } else true;
                if (!is_struct and is_repeated) {
                    // Switch to `repeated` repr
                    const mut_repeated = try arena.create(MutableValue);
                    mut_repeated.* = field_val;
                    mv.* = .{ .repeated = .{
                        .ty = a.ty,
                        .child = mut_repeated,
                    } };
                } else if (!is_struct and is_trivial_int and Type.fromInterned(a.ty).childType(zcu).toIntern() == .u8_type) {
                    // See if we can switch to `bytes` repr
                    for (a.elems) |e| {
                        switch (e) {
                            else => break,
                            .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                                else => break,
                                .int => |int| switch (int.storage) {
                                    .u64, .i64, .big_int => {},
                                    .lazy_align, .lazy_size => break,
                                },
                            },
                        }
                    } else {
                        const bytes = try arena.alloc(u8, a.elems.len);
                        for (a.elems, bytes) |elem_val, *b| {
                            b.* = @intCast(Value.fromInterned(elem_val.interned).toUnsignedInt(zcu));
                        }
                        mv.* = .{ .bytes = .{
                            .ty = a.ty,
                            .data = bytes,
                        } };
                    }
                }
            },
        }
    }

    /// Get the value of a single field of a `MutableValue` which represents an aggregate or slice.
    /// For slices, uses `Value.slice_ptr_index` and `Value.slice_len_index`.
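    /// For example, passing `Value.slice_len_index` for a slice returns its length as a `MutableValue`.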
    pub fn getElem(
        mv: MutableValue,
        pt: Zcu.PerThread,
        field_idx: usize,
    ) Allocator.Error!MutableValue {
        return switch (mv) {
            .eu_payload,
            .opt_payload,
            => unreachable,
            .interned => |ip_index| {
                const ty = Type.fromInterned(pt.zcu.intern_pool.typeOf(ip_index));
                switch (ty.zigTypeTag(pt.zcu)) {
                    .array, .vector => return .{ .interned = (try Value.fromInterned(ip_index).elemValue(pt, field_idx)).toIntern() },
                    .@"struct", .@"union" => return .{ .interned = (try Value.fromInterned(ip_index).fieldValue(pt, field_idx)).toIntern() },
                    .pointer => {
                        assert(ty.isSlice(pt.zcu));
                        return switch (field_idx) {
                            Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
                            Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) {
                                .undef => .undef_usize,
                                .slice => |s| s.len,
                                else => unreachable,
                            } },
                            else => unreachable,
                        };
                    },
                    else => unreachable,
                }
            },
            .un => |un| {
                // TODO assert the tag is correct
                return un.payload.*;
            },
            .slice => |s| switch (field_idx) {
                Value.slice_ptr_index => s.ptr.*,
                Value.slice_len_index => s.len.*,
                else => unreachable,
            },
            .bytes => |b| .{ .interned = try pt.intern(.{ .int = .{
                .ty = .u8_type,
                .storage = .{ .u64 = b.data[field_idx] },
            } }) },
            .repeated => |r| r.child.*,
            .aggregate => |a| a.elems[field_idx],
        };
    }

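    /// Whether `mv` is an interned integer with resolved storage (not `lazy_align`/`lazy_size`).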
    fn isTrivialInt(mv: MutableValue, zcu: *Zcu) bool {
        return switch (mv) {
            else => false,
            .interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
                else => false,
                .int => |int| switch (int.storage) {
                    .u64, .i64, .big_int => true,
                    .lazy_align, .lazy_size => false,
                },
            },
        };
    }

    pub fn typeOf(mv: MutableValue, zcu: *Zcu) Type {
        return switch (mv) {
            .interned => |ip_index| Type.fromInterned(zcu.intern_pool.typeOf(ip_index)),
            inline else => |x| Type.fromInterned(x.ty),
        };
    }

    pub fn unpackOptional(mv: MutableValue, zcu: *Zcu) union(enum) {
        undef,
        null,
        payload: MutableValue,
    } {
        return switch (mv) {
            .opt_payload => |pl| return .{ .payload = pl.child.* },
            .interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
                .undef => return .undef,
                .opt => |opt| if (opt.val == .none) .null else .{ .payload = .{ .interned = opt.val } },
                else => unreachable,
            },
            else => unreachable,
        };
    }

    pub fn unpackErrorUnion(mv: MutableValue, zcu: *Zcu) union(enum) {
        undef,
        err: InternPool.NullTerminatedString,
        payload: MutableValue,
    } {
        return switch (mv) {
            .eu_payload => |pl| return .{ .payload = pl.child.* },
            .interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
                .undef => return .undef,
                .error_union => |eu| switch (eu.val) {
                    .err_name => |name| .{ .err = name },
                    .payload => |pl| .{ .payload = .{ .interned = pl } },
                },
                else => unreachable,
            },
            else => unreachable,
        };
    }

    /// Fast equality checking which may return false negatives.
    /// Used for deciding when to switch aggregate representations without fully
    /// interning many values.
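    /// For instance, equal values in `.interned` and `.bytes` representations compare
    /// unequal here, because the tags differ.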
    fn eqlTrivial(a: MutableValue, b: MutableValue) bool {
        const Tag = @typeInfo(MutableValue).@"union".tag_type.?;
        if (@as(Tag, a) != @as(Tag, b)) return false;
        return switch (a) {
            .interned => |a_ip| a_ip == b.interned,
            .eu_payload => |a_pl| a_pl.ty == b.eu_payload.ty and a_pl.child.eqlTrivial(b.eu_payload.child.*),
            .opt_payload => |a_pl| a_pl.ty == b.opt_payload.ty and a_pl.child.eqlTrivial(b.opt_payload.child.*),
            .repeated => |a_rep| a_rep.ty == b.repeated.ty and a_rep.child.eqlTrivial(b.repeated.child.*),
            .bytes => |a_bytes| a_bytes.ty == b.bytes.ty and std.mem.eql(u8, a_bytes.data, b.bytes.data),
            .aggregate => |a_agg| {
                const b_agg = b.aggregate;
                if (a_agg.ty != b_agg.ty) return false;
                if (a_agg.elems.len != b_agg.elems.len) return false;
                for (a_agg.elems, b_agg.elems) |a_elem, b_elem| {
                    if (!a_elem.eqlTrivial(b_elem)) return false;
                }
                return true;
            },
            .slice => |a_slice| a_slice.ty == b.slice.ty and
                a_slice.ptr.interned == b.slice.ptr.interned and
                a_slice.len.interned == b.slice.len.interned,
            .un => |a_un| a_un.ty == b.un.ty and a_un.tag == b.un.tag and a_un.payload.eqlTrivial(b.un.payload.*),
        };
    }
};
571};