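/// The result of a comptime load: either the loaded value, or the reason the
/// load could not be performed at comptime. For example (illustrative): loading
/// through a pointer to a `comptime var` yields `.success` with the current
/// value, loading through a pointer to a runtime `var` yields `.runtime_load`,
/// and loading the payload of an error union currently holding an error
/// yields `.err_payload`.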
pub const ComptimeLoadResult = union(enum) {
    success: MutableValue,

    runtime_load,
    undef,
    err_payload: InternPool.NullTerminatedString,
    null_payload,
    inactive_union_field,
    needed_well_defined: Type,
    out_of_bounds: Type,
    exceeds_host_size,
};

pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value) !ComptimeLoadResult {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
    // TODO: host size for vectors is terrible
    const host_bits = switch (ptr_info.flags.vector_index) {
        .none => ptr_info.packed_offset.host_size * 8,
        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
    };
    const bit_offset = if (host_bits != 0) bit_offset: {
        const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
        const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
            .none => 0,
            else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
                .little => child_bits * @intFromEnum(idx),
                .big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian
            },
        };
        if (child_bits + bit_offset > host_bits) {
            return .exceeds_host_size;
        }
        break :bit_offset bit_offset;
    } else 0;
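    // Worked example (illustrative): for a pointer to element 1 of a vector of
    // `u8` with `host_size = 4`, `child_bits = 8` and `host_bits = 32`. Assuming
    // a zero packed bit offset, the element starts at bit 8 on little-endian
    // targets (`8 * 1`) and at bit 16 on big-endian targets (`32 - 8 * (1 + 1)`),
    // since element order is reversed. An element whose bits would extend past
    // `host_bits` is rejected with `.exceeds_host_size`.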
    return loadComptimePtrInner(sema, block, src, ptr, bit_offset, host_bits, Type.fromInterned(ptr_info.child), 0);
}

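/// The result of a comptime store: either success, or the reason the store
/// could not be performed at comptime. For example (illustrative): storing
/// through a pointer at a `comptime` struct field succeeds only if the stored
/// value matches the field's fixed value; otherwise the result is
/// `.comptime_field_mismatch` carrying the expected value.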
pub const ComptimeStoreResult = union(enum) {
    success,

    runtime_store,
    comptime_field_mismatch: Value,
    undef,
    err_payload: InternPool.NullTerminatedString,
    null_payload,
    inactive_union_field,
    needed_well_defined: Type,
    out_of_bounds: Type,
    exceeds_host_size,
};

/// Perform a comptime store of value `store_val` to a pointer.
/// The pointer's type is ignored.
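/// Call-site sketch (hypothetical; the real callers in Sema map each failure
/// variant to a compile error):
///
///     switch (try storeComptimePtr(sema, block, src, ptr_val, store_val)) {
///         .success => {},
///         .runtime_store => return sema.fail(block, src, "...", .{}),
///         else => ..., // remaining variants produce analogous errors
///     }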
pub fn storeComptimePtr(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr: Value,
    store_val: Value,
) !ComptimeStoreResult {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
    assert(store_val.typeOf(zcu).toIntern() == ptr_info.child);

    {
        const store_ty: Type = .fromInterned(ptr_info.child);
        if (!try store_ty.comptimeOnlySema(pt) and !try store_ty.hasRuntimeBitsIgnoreComptimeSema(pt)) {
            // zero-bit store; nothing to do
            return .success;
        }
    }

    // TODO: host size for vectors is terrible
    const host_bits = switch (ptr_info.flags.vector_index) {
        .none => ptr_info.packed_offset.host_size * 8,
        else => ptr_info.packed_offset.host_size * Type.fromInterned(ptr_info.child).bitSize(zcu),
    };
    const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
        .none => 0,
        else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
            .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
            .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian
        },
    };
    const pseudo_store_ty = if (host_bits > 0) t: {
        const need_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
        if (need_bits + bit_offset > host_bits) {
            return .exceeds_host_size;
        }
        break :t try pt.intType(.unsigned, @intCast(host_bits));
    } else Type.fromInterned(ptr_info.child);

    const strat = try prepareComptimePtrStore(sema, block, src, ptr, pseudo_store_ty, 0);

    // Propagate errors and handle comptime fields.
    switch (strat) {
        .direct, .index, .flat_index, .reinterpret => {},
        .comptime_field => {
            // To "store" to a comptime field, just perform a load of the field
            // and see if the store value matches.
            const expected_mv = switch (try loadComptimePtr(sema, block, src, ptr)) {
                .success => |mv| mv,
                .runtime_load => unreachable, // this is a comptime field
                .exceeds_host_size => unreachable, // checked above
                .undef => return .undef,
                .err_payload => |err| return .{ .err_payload = err },
                .null_payload => return .null_payload,
                .inactive_union_field => return .inactive_union_field,
                .needed_well_defined => |ty| return .{ .needed_well_defined = ty },
                .out_of_bounds => |ty| return .{ .out_of_bounds = ty },
            };
            const expected = try expected_mv.intern(pt, sema.arena);
            if (store_val.toIntern() != expected.toIntern()) {
                return .{ .comptime_field_mismatch = expected };
            }
            return .success;
        },
        .runtime_store => return .runtime_store,
        .undef => return .undef,
        .err_payload => |err| return .{ .err_payload = err },
        .null_payload => return .null_payload,
        .inactive_union_field => return .inactive_union_field,
        .needed_well_defined => |ty| return .{ .needed_well_defined = ty },
        .out_of_bounds => |ty| return .{ .out_of_bounds = ty },
    }

    // Check the store is not inside a runtime condition
    try checkComptimeVarStore(sema, block, src, strat.alloc());

    if (host_bits == 0) {
        // We can attempt a direct store depending on the strategy.
        switch (strat) {
            .direct => |direct| {
                const want_ty = direct.val.typeOf(zcu);
                const coerced_store_val = try pt.getCoerced(store_val, want_ty);
                direct.val.* = .{ .interned = coerced_store_val.toIntern() };
                return .success;
            },
            .index => |index| {
                const want_ty = index.val.typeOf(zcu).childType(zcu);
                const coerced_store_val = try pt.getCoerced(store_val, want_ty);
                try index.val.setElem(pt, sema.arena, @intCast(index.elem_index), .{ .interned = coerced_store_val.toIntern() });
                return .success;
            },
            .flat_index => |flat| {
                const store_elems = store_val.typeOf(zcu).arrayBase(zcu)[1];
                const flat_elems = try sema.arena.alloc(InternPool.Index, @intCast(store_elems));
                {
                    var next_idx: u64 = 0;
                    var skip: u64 = 0;
                    try flattenArray(sema, .{ .interned = store_val.toIntern() }, &skip, &next_idx, flat_elems);
                }
                for (flat_elems, 0..) |elem, idx| {
                    // TODO: recursiveIndex in a loop does a lot of redundant work!
                    // Better would be to gather all the store targets into an array.
                    var index: u64 = flat.flat_elem_index + idx;
                    const val_ptr, const final_idx = (try recursiveIndex(sema, flat.val, &index)).?;
                    try val_ptr.setElem(pt, sema.arena, @intCast(final_idx), .{ .interned = elem });
                }
                return .success;
            },
            .reinterpret => {},
            else => unreachable,
        }
    }

    // Either there is a bit offset, or the strategy required reinterpreting.
    // Therefore, we must perform a bitcast.

    const val_ptr: *MutableValue, const byte_offset: u64 = switch (strat) {
        .direct => |direct| .{ direct.val, 0 },
        .index => |index| .{
            index.val,
            index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu),
        },
        .flat_index => |flat| .{ flat.val, flat.flat_elem_index * flat.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu) },
        .reinterpret => |reinterpret| .{ reinterpret.val, reinterpret.byte_offset },
        else => unreachable,
    };

    if (!val_ptr.typeOf(zcu).hasWellDefinedLayout(zcu)) {
        return .{ .needed_well_defined = val_ptr.typeOf(zcu) };
    }

    if (!store_val.typeOf(zcu).hasWellDefinedLayout(zcu)) {
        return .{ .needed_well_defined = store_val.typeOf(zcu) };
    }

    const new_val = try sema.bitCastSpliceVal(
        try val_ptr.intern(pt, sema.arena),
        store_val,
        byte_offset,
        host_bits,
        bit_offset,
    ) orelse return .runtime_store;
    val_ptr.* = .{ .interned = new_val.toIntern() };
    return .success;
}

/// Perform a comptime load of type `load_ty` from a pointer.
/// The pointer's type is ignored.
fn loadComptimePtrInner(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
    bit_offset: u64,
    host_bits: u64,
    load_ty: Type,
    /// If `load_ty` is an array, this is the number of array elements to skip
    /// before `load_ty`. Otherwise, it is ignored and may be `undefined`.
    array_offset: u64,
) !ComptimeLoadResult {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
        .undef => return .undef,
        .ptr => |ptr| ptr,
        else => unreachable,
    };

    const base_val: MutableValue = switch (ptr.base_addr) {
        .nav => |nav| val: {
            try sema.ensureNavResolved(block, src, nav, .fully);
            const val = ip.getNav(nav).status.fully_resolved.val;
            switch (ip.indexToKey(val)) {
                .variable => return .runtime_load,
                // We let `.@"extern"` through here if it's a function.
                // This allows you to alias `extern fn`s.
                .@"extern" => |e| if (Type.fromInterned(e.ty).zigTypeTag(zcu) == .@"fn")
                    break :val .{ .interned = val }
                else
                    return .runtime_load,
                else => break :val .{ .interned = val },
            }
        },
        .comptime_alloc => |alloc_index| sema.getComptimeAlloc(alloc_index).val,
        .uav => |uav| .{ .interned = uav.val },
        .comptime_field => |val| .{ .interned = val },
        .int => return .runtime_load,
        .eu_payload => |base_ptr_ip| val: {
            const base_ptr = Value.fromInterned(base_ptr_ip);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);
            switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, base_ty, undefined)) {
                .success => |eu_val| switch (eu_val.unpackErrorUnion(zcu)) {
                    .undef => return .undef,
                    .err => |err| return .{ .err_payload = err },
                    .payload => |payload| break :val payload,
                },
                else => |err| return err,
            }
        },
        .opt_payload => |base_ptr_ip| val: {
            const base_ptr = Value.fromInterned(base_ptr_ip);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);
            switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, base_ty, undefined)) {
                .success => |eu_val| switch (eu_val.unpackOptional(zcu)) {
                    .undef => return .undef,
                    .null => return .null_payload,
                    .payload => |payload| break :val payload,
                },
                else => |err| return err,
            }
        },
        .arr_elem => |base_index| val: {
            const base_ptr = Value.fromInterned(base_index.base);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);

            // We have a comptime-only array. This case is a little nasty.
            // To avoid loading too much data, we want to figure out how many elements we need.
            // If `load_ty` and the array share a base type, we'll load the correct number of elements.
            // Otherwise, we'll be reinterpreting (which we can't do, since it's comptime-only); just
            // load a single element and let the logic below emit its error.
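            // For example (illustrative): for a load of `[2]type` through `&arr[1]`
            // where `arr: [4]type`, the base types match, so `want_ty` becomes
            // `[2]type` and the recursive call loads exactly elements 1 and 2.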

            const load_one_ty, const load_count = load_ty.arrayBase(zcu);
            const count = if (load_one_ty.toIntern() == base_ty.toIntern()) load_count else 1;

            const want_ty = try pt.arrayType(.{
                .len = count,
                .child = base_ty.toIntern(),
            });

            switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, want_ty, base_index.index)) {
                .success => |arr_val| break :val arr_val,
                else => |err| return err,
            }
        },
        .field => |base_index| val: {
            const base_ptr = Value.fromInterned(base_index.base);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);

            // Field of a slice, or of an auto-layout struct or union.
            const agg_val = switch (try loadComptimePtrInner(sema, block, src, base_ptr, 0, 0, base_ty, undefined)) {
                .success => |val| val,
                else => |err| return err,
            };

            const agg_ty = agg_val.typeOf(zcu);
            switch (agg_ty.zigTypeTag(zcu)) {
                .@"struct", .pointer => break :val try agg_val.getElem(pt, @intCast(base_index.index)),
                .@"union" => {
                    const tag_val: Value, const payload_mv: MutableValue = switch (agg_val) {
                        .un => |un| .{ Value.fromInterned(un.tag), un.payload.* },
                        .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                            .undef => return .undef,
                            .un => |un| .{ Value.fromInterned(un.tag), .{ .interned = un.val } },
                            else => unreachable,
                        },
                        else => unreachable,
                    };
                    const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
                    if (tag_ty.enumTagFieldIndex(tag_val, zcu).? != base_index.index) {
                        return .inactive_union_field;
                    }
                    break :val payload_mv;
                },
                else => unreachable,
            }
        },
327 };
328
329 if (ptr.byte_offset == 0 and host_bits == 0) {
330 if (load_ty.zigTypeTag(zcu) != .array or array_offset == 0) {
331 if (.ok == try sema.coerceInMemoryAllowed(
332 block,
333 load_ty,
334 base_val.typeOf(zcu),
335 false,
336 zcu.getTarget(),
337 src,
338 src,
339 null,
340 )) {
341 // We already have a value which is IMC to the desired type.
342 return .{ .success = base_val };
343 }
344 }
345 }
346
347 restructure_array: {
348 if (host_bits != 0) break :restructure_array;
349
350 // We might also be changing the length of an array, or restructuring it.
351 // e.g. [1][2][3]T -> [3][2]T.
352 // This case is important because it's permitted for types with ill-defined layouts.
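        // Worked example (illustrative): loading `[3][2]u16` through a pointer to a
        // `[1][2][3]u16` value. Both share the base type `u16` with 6 base elements,
        // so the value is flattened into 6 interned elements and rebuilt in the
        // shape of the load type, with no memory reinterpretation involved.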

        const load_one_ty, const load_count = load_ty.arrayBase(zcu);

        const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
            if (try load_one_ty.comptimeOnlySema(pt)) break :restructure_array;
            const elem_len = try load_one_ty.abiSizeSema(pt);
            if (ptr.byte_offset % elem_len != 0) break :restructure_array;
            break :idx @divExact(ptr.byte_offset, elem_len);
        };

        const val_one_ty, const val_count = base_val.typeOf(zcu).arrayBase(zcu);
        if (.ok == try sema.coerceInMemoryAllowed(
            block,
            load_one_ty,
            val_one_ty,
            false,
            zcu.getTarget(),
            src,
            src,
            null,
        )) {
            // Changing the length of an array.
            const skip_base: u64 = extra_base_index + if (load_ty.zigTypeTag(zcu) == .array) skip: {
                break :skip load_ty.childType(zcu).arrayBase(zcu)[1] * array_offset;
            } else 0;
            if (skip_base + load_count > val_count) return .{ .out_of_bounds = base_val.typeOf(zcu) };
            const elems = try sema.arena.alloc(InternPool.Index, @intCast(load_count));
            var skip: u64 = skip_base;
            var next_idx: u64 = 0;
            try flattenArray(sema, base_val, &skip, &next_idx, elems);
            next_idx = 0;
            const val = try unflattenArray(sema, load_ty, elems, &next_idx);
            return .{ .success = .{ .interned = val.toIntern() } };
        }
    }

    // We need to reinterpret memory, which is only possible if neither the load
    // type nor the type of the base value has an ill-defined layout.
    if (!load_ty.hasWellDefinedLayout(zcu)) {
        return .{ .needed_well_defined = load_ty };
    }

    if (!base_val.typeOf(zcu).hasWellDefinedLayout(zcu)) {
        return .{ .needed_well_defined = base_val.typeOf(zcu) };
    }

    var cur_val = base_val;
    var cur_offset = ptr.byte_offset;

    if (load_ty.zigTypeTag(zcu) == .array and array_offset > 0) {
        cur_offset += try load_ty.childType(zcu).abiSizeSema(pt) * array_offset;
    }

    const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try load_ty.abiSizeSema(pt);

    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
        return .{ .out_of_bounds = cur_val.typeOf(zcu) };
    }

    // In the worst case, we can reinterpret the entire value - however, that's
    // pretty wasteful. If the memory region we're interested in refers to one
    // field or array element, let's just look at that.
    while (true) {
        const cur_ty = cur_val.typeOf(zcu);
        switch (cur_ty.zigTypeTag(zcu)) {
            .noreturn,
            .type,
            .comptime_int,
            .comptime_float,
            .null,
            .undefined,
            .enum_literal,
            .@"opaque",
            .@"fn",
            .error_union,
            => unreachable, // ill-defined layout
            .int,
            .float,
            .bool,
            .void,
            .pointer,
            .error_set,
            .@"anyframe",
            .frame,
            .@"enum",
            .vector,
            => break, // terminal types (no sub-values)
            .optional => break, // this can only be a pointer-like optional so is terminal
            .array => {
                const elem_ty = cur_ty.childType(zcu);
                const elem_size = try elem_ty.abiSizeSema(pt);
                const elem_idx = cur_offset / elem_size;
                const next_elem_off = elem_size * (elem_idx + 1);
                if (cur_offset + need_bytes <= next_elem_off) {
                    // We can look at a single array element.
                    cur_val = try cur_val.getElem(pt, @intCast(elem_idx));
                    cur_offset -= elem_idx * elem_size;
                } else {
                    break;
                }
            },
            .@"struct" => switch (cur_ty.containerLayout(zcu)) {
                .auto => unreachable, // ill-defined layout
                .@"packed" => break, // let the bitcast logic handle this
                .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
                    const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
                    if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                        cur_val = try cur_val.getElem(pt, field_idx);
                        cur_offset -= start_off;
                        break;
                    }
                } else break, // pointer spans multiple fields
            },
            .@"union" => switch (cur_ty.containerLayout(zcu)) {
                .auto => unreachable, // ill-defined layout
                .@"packed" => break, // let the bitcast logic handle this
                .@"extern" => {
                    // TODO: we have to let bitcast logic handle this for now.
                    // Otherwise, we might traverse into a union field which doesn't allow pointers.
                    // Figure out a solution!
                    if (true) break;
                    const payload: MutableValue = switch (cur_val) {
                        .un => |un| un.payload.*,
                        .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                            .un => |un| .{ .interned = un.val },
                            .undef => return .undef,
                            else => unreachable,
                        },
                        else => unreachable,
                    };
                    // The payload always has offset 0. If it's big enough
                    // to represent the whole load type, we can use it.
                    if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                        cur_val = payload;
                    } else {
                        break;
                    }
                },
            },
        }
    }

    // Fast path: check again if we're now at the type we want to load.
    // If so, just return the loaded value.
    if (cur_offset == 0 and host_bits == 0 and cur_val.typeOf(zcu).toIntern() == load_ty.toIntern()) {
        return .{ .success = cur_val };
    }

    const result_val = try sema.bitCastVal(
        try cur_val.intern(pt, sema.arena),
        load_ty,
        cur_offset,
        host_bits,
        bit_offset,
    ) orelse return .runtime_load;
    return .{ .success = .{ .interned = result_val.toIntern() } };
}
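/// For example (illustrative), given `comptime var arr: [4]u32`:
/// a `u32` store through `&arr[1]` resolves to `.index` (element 1 of `arr`);
/// a `[2]u32` store through `&arr[1]` resolves to `.flat_index`, since the
/// array is restructured; and a `u16` store at byte offset 2 resolves to
/// `.reinterpret`, since it must go through bitcast logic.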
const ComptimeStoreStrategy = union(enum) {
    /// The store should be performed directly to this value, which `store_ty`
    /// is in-memory coercible to.
    direct: struct {
        alloc: ComptimeAllocIndex,
        val: *MutableValue,
    },
    /// The store should be performed at the index `elem_index` into `val`,
    /// which is an array.
    /// This strategy exists to avoid the need to convert the parent value
    /// to the `aggregate` representation when `repeated` or `bytes` may
    /// suffice.
    index: struct {
        alloc: ComptimeAllocIndex,
        val: *MutableValue,
        elem_index: u64,
    },
    /// The store should be performed on this array value, but it is being
    /// restructured, e.g. [3][2][1]T -> [2][3]T.
    /// This includes the case where it is a sub-array, e.g. [3]T -> [2]T.
    /// This is only returned if `store_ty` is an array type, and its array
    /// base type is IMC to that of the type of `val`.
    flat_index: struct {
        alloc: ComptimeAllocIndex,
        val: *MutableValue,
        flat_elem_index: u64,
    },
    /// This value should be reinterpreted using bitcast logic to perform the
    /// store. Only returned if `store_ty` and the type of `val` both have
    /// well-defined layouts.
    reinterpret: struct {
        alloc: ComptimeAllocIndex,
        val: *MutableValue,
        byte_offset: u64,
    },

    comptime_field,
    runtime_store,
    undef,
    err_payload: InternPool.NullTerminatedString,
    null_payload,
    inactive_union_field,
    needed_well_defined: Type,
    out_of_bounds: Type,

    fn alloc(strat: ComptimeStoreStrategy) ComptimeAllocIndex {
        return switch (strat) {
            inline .direct, .index, .flat_index, .reinterpret => |info| info.alloc,
            .comptime_field,
            .runtime_store,
            .undef,
            .err_payload,
            .null_payload,
            .inactive_union_field,
            .needed_well_defined,
            .out_of_bounds,
            => unreachable,
        };
    }
};

/// Decide the strategy we will use to perform a comptime store of type `store_ty` to a pointer.
/// The pointer's type is ignored.
fn prepareComptimePtrStore(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    ptr_val: Value,
    store_ty: Type,
    /// If `store_ty` is an array, this is the number of array elements to skip
    /// before `store_ty`. Otherwise, it is ignored and may be `undefined`.
    array_offset: u64,
) !ComptimeStoreStrategy {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    const ptr = switch (ip.indexToKey(ptr_val.toIntern())) {
        .undef => return .undef,
        .ptr => |ptr| ptr,
        else => unreachable,
    };

    // `base_strat` will not be an error case.
    const base_strat: ComptimeStoreStrategy = switch (ptr.base_addr) {
        .nav, .uav, .int => return .runtime_store,
        .comptime_field => return .comptime_field,
        .comptime_alloc => |alloc_index| .{ .direct = .{
            .alloc = alloc_index,
            .val = &sema.getComptimeAlloc(alloc_index).val,
        } },
        .eu_payload => |base_ptr_ip| base_val: {
            const base_ptr = Value.fromInterned(base_ptr_ip);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);
            const eu_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
                .direct => |direct| .{ direct.val, direct.alloc },
                .index => |index| .{
                    try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
                    index.alloc,
                },
                .flat_index => unreachable, // base_ty is not an array
                .reinterpret => unreachable, // base_ty has ill-defined layout
                else => |err| return err,
            };
            try eu_val_ptr.unintern(pt, sema.arena, false, false);
            switch (eu_val_ptr.*) {
                .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                    .undef => return .undef,
                    .error_union => |eu| return .{ .err_payload = eu.val.err_name },
                    else => unreachable,
                },
                .eu_payload => |data| break :base_val .{ .direct = .{
                    .val = data.child,
                    .alloc = alloc,
                } },
                else => unreachable,
            }
        },
        .opt_payload => |base_ptr_ip| base_val: {
            const base_ptr = Value.fromInterned(base_ptr_ip);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);
            const opt_val_ptr, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
                .direct => |direct| .{ direct.val, direct.alloc },
                .index => |index| .{
                    try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
                    index.alloc,
                },
                .flat_index => unreachable, // base_ty is not an array
                .reinterpret => unreachable, // base_ty has ill-defined layout
                else => |err| return err,
            };
            try opt_val_ptr.unintern(pt, sema.arena, false, false);
            switch (opt_val_ptr.*) {
                .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
                    .undef => return .undef,
                    .opt => return .null_payload,
                    else => unreachable,
                },
                .opt_payload => |data| break :base_val .{ .direct = .{
                    .val = data.child,
                    .alloc = alloc,
                } },
                else => unreachable,
            }
        },
        .arr_elem => |base_index| base_val: {
            const base_ptr = Value.fromInterned(base_index.base);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);

            // We have a comptime-only array. This case is a little nasty.
            // To avoid messing with too much data, we want to figure out how many elements we need to store.
            // If `store_ty` and the array share a base type, we'll store the correct number of elements.
            // Otherwise, we'll be reinterpreting (which we can't do, since it's comptime-only); just
            // store a single element and let the logic below emit its error.

            const store_one_ty, const store_count = store_ty.arrayBase(zcu);
            const count = if (store_one_ty.toIntern() == base_ty.toIntern()) store_count else 1;

            const want_ty = try pt.arrayType(.{
                .len = count,
                .child = base_ty.toIntern(),
            });

            const result = try prepareComptimePtrStore(sema, block, src, base_ptr, want_ty, base_index.index);
            switch (result) {
                .direct, .index, .flat_index => break :base_val result,
                .reinterpret => unreachable, // comptime-only array so ill-defined layout
                else => |err| return err,
            }
        },
        .field => |base_index| strat: {
            const base_ptr = Value.fromInterned(base_index.base);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);

            // Field of a slice, or of an auto-layout struct or union.
            const agg_val, const alloc = switch (try prepareComptimePtrStore(sema, block, src, base_ptr, base_ty, undefined)) {
                .direct => |direct| .{ direct.val, direct.alloc },
                .index => |index| .{
                    try index.val.elem(pt, sema.arena, @intCast(index.elem_index)),
                    index.alloc,
                },
                .flat_index => unreachable, // base_ty is not an array
                .reinterpret => unreachable, // base_ty has ill-defined layout
                else => |err| return err,
            };

            const agg_ty = agg_val.typeOf(zcu);
            switch (agg_ty.zigTypeTag(zcu)) {
                .@"struct", .pointer => break :strat .{ .direct = .{
                    .val = try agg_val.elem(pt, sema.arena, @intCast(base_index.index)),
                    .alloc = alloc,
                } },
                .@"union" => {
                    if (agg_val.* == .interned and Value.fromInterned(agg_val.interned).isUndef(zcu)) {
                        return .undef;
                    }
                    try agg_val.unintern(pt, sema.arena, false, false);
                    const un = agg_val.un;
                    const tag_ty = agg_ty.unionTagTypeHypothetical(zcu);
                    if (tag_ty.enumTagFieldIndex(Value.fromInterned(un.tag), zcu).? != base_index.index) {
                        return .inactive_union_field;
                    }
                    break :strat .{ .direct = .{
                        .val = un.payload,
                        .alloc = alloc,
                    } };
                },
                else => unreachable,
            }
        },
    };

    if (ptr.byte_offset == 0) {
        if (store_ty.zigTypeTag(zcu) != .array or array_offset == 0) direct: {
            const base_val_ty = switch (base_strat) {
                .direct => |direct| direct.val.typeOf(zcu),
                .index => |index| index.val.typeOf(zcu).childType(zcu),
                .flat_index, .reinterpret => break :direct,
                else => unreachable,
            };
            if (.ok == try sema.coerceInMemoryAllowed(
                block,
                base_val_ty,
                store_ty,
                true,
                zcu.getTarget(),
                src,
                src,
                null,
            )) {
                // The base strategy already gets us a value which the desired type is IMC to.
                return base_strat;
            }
        }
    }

    restructure_array: {
        // We might also be changing the length of an array, or restructuring it.
        // e.g. [1][2][3]T -> [3][2]T.
        // This case is important because it's permitted for types with ill-defined layouts.

        const store_one_ty, const store_count = store_ty.arrayBase(zcu);
        const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
            if (try store_one_ty.comptimeOnlySema(pt)) break :restructure_array;
            const elem_len = try store_one_ty.abiSizeSema(pt);
            if (ptr.byte_offset % elem_len != 0) break :restructure_array;
            break :idx @divExact(ptr.byte_offset, elem_len);
        };

        const base_val, const base_elem_offset, const oob_ty = switch (base_strat) {
            .direct => |direct| .{ direct.val, 0, direct.val.typeOf(zcu) },
            .index => |index| restructure_info: {
                const elem_ty = index.val.typeOf(zcu).childType(zcu);
                const elem_off = elem_ty.arrayBase(zcu)[1] * index.elem_index;
                break :restructure_info .{ index.val, elem_off, elem_ty };
            },
            .flat_index => |flat| .{ flat.val, flat.flat_elem_index, flat.val.typeOf(zcu) },
            .reinterpret => break :restructure_array,
            else => unreachable,
        };
        const val_one_ty, const val_count = base_val.typeOf(zcu).arrayBase(zcu);
        if (.ok != try sema.coerceInMemoryAllowed(block, val_one_ty, store_one_ty, true, zcu.getTarget(), src, src, null)) {
            break :restructure_array;
        }
        if (base_elem_offset + extra_base_index + store_count > val_count) return .{ .out_of_bounds = oob_ty };

        if (store_ty.zigTypeTag(zcu) == .array) {
            const skip = store_ty.childType(zcu).arrayBase(zcu)[1] * array_offset;
            return .{ .flat_index = .{
                .alloc = base_strat.alloc(),
                .val = base_val,
                .flat_elem_index = skip + base_elem_offset + extra_base_index,
            } };
        }

        // `base_val` must be an array, since otherwise the "direct reinterpret" logic above would have noticed it.
        assert(base_val.typeOf(zcu).zigTypeTag(zcu) == .array);

        var index: u64 = base_elem_offset + extra_base_index;
        const arr_val, const arr_index = (try recursiveIndex(sema, base_val, &index)).?;
        return .{ .index = .{
            .alloc = base_strat.alloc(),
            .val = arr_val,
            .elem_index = arr_index,
        } };
    }

    // We need to reinterpret memory, which is only possible if neither the store
    // type nor the type of the base value has an ill-defined layout.

    if (!store_ty.hasWellDefinedLayout(zcu)) {
        return .{ .needed_well_defined = store_ty };
    }

    var cur_val: *MutableValue, var cur_offset: u64 = switch (base_strat) {
        .direct => |direct| .{ direct.val, 0 },
        // It's okay to do `abiSize` - the comptime-only case will be caught below.
        .index => |index| .{ index.val, index.elem_index * try index.val.typeOf(zcu).childType(zcu).abiSizeSema(pt) },
        .flat_index => |flat_index| .{
            flat_index.val,
            // It's okay to do `abiSize` - the comptime-only case will be caught below.
            flat_index.flat_elem_index * try flat_index.val.typeOf(zcu).arrayBase(zcu)[0].abiSizeSema(pt),
        },
        .reinterpret => |r| .{ r.val, r.byte_offset },
        else => unreachable,
    };
    cur_offset += ptr.byte_offset;

    if (!cur_val.typeOf(zcu).hasWellDefinedLayout(zcu)) {
        return .{ .needed_well_defined = cur_val.typeOf(zcu) };
    }

    if (store_ty.zigTypeTag(zcu) == .array and array_offset > 0) {
        cur_offset += try store_ty.childType(zcu).abiSizeSema(pt) * array_offset;
    }

    const need_bytes = try store_ty.abiSizeSema(pt);

    if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
        return .{ .out_of_bounds = cur_val.typeOf(zcu) };
    }

    // In the worst case, we can reinterpret the entire value - however, that's
    // pretty wasteful. If the memory region we're interested in refers to one
    // field or array element, let's just look at that.
    while (true) {
        const cur_ty = cur_val.typeOf(zcu);
        switch (cur_ty.zigTypeTag(zcu)) {
            .noreturn,
            .type,
            .comptime_int,
            .comptime_float,
            .null,
            .undefined,
            .enum_literal,
            .@"opaque",
            .@"fn",
            .error_union,
            => unreachable, // ill-defined layout
            .int,
            .float,
            .bool,
            .void,
            .pointer,
            .error_set,
            .@"anyframe",
            .frame,
            .@"enum",
            .vector,
            => break, // terminal types (no sub-values)
            .optional => break, // this can only be a pointer-like optional so is terminal
            .array => {
                const elem_ty = cur_ty.childType(zcu);
                const elem_size = try elem_ty.abiSizeSema(pt);
                const elem_idx = cur_offset / elem_size;
                const next_elem_off = elem_size * (elem_idx + 1);
                if (cur_offset + need_bytes <= next_elem_off) {
                    // We can look at a single array element.
                    cur_val = try cur_val.elem(pt, sema.arena, @intCast(elem_idx));
                    cur_offset -= elem_idx * elem_size;
                } else {
                    break;
                }
            },
            .@"struct" => switch (cur_ty.containerLayout(zcu)) {
                .auto => unreachable, // ill-defined layout
                .@"packed" => break, // let the bitcast logic handle this
                .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
                    const start_off = cur_ty.structFieldOffset(field_idx, zcu);
                    const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
                    if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
                        cur_val = try cur_val.elem(pt, sema.arena, field_idx);
                        cur_offset -= start_off;
                        break;
                    }
                } else break, // pointer spans multiple fields
            },
            .@"union" => switch (cur_ty.containerLayout(zcu)) {
                .auto => unreachable, // ill-defined layout
                .@"packed" => break, // let the bitcast logic handle this
                .@"extern" => {
                    // TODO: we have to let bitcast logic handle this for now.
                    // Otherwise, we might traverse into a union field which doesn't allow pointers.
                    // Figure out a solution!
                    if (true) break;
                    try cur_val.unintern(pt, sema.arena, false, false);
                    const payload = switch (cur_val.*) {
                        .un => |un| un.payload,
                        else => unreachable,
                    };
                    // The payload always has offset 0. If it's big enough
                    // to represent the whole store type, we can use it.
                    if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
                        cur_val = payload;
                    } else {
                        break;
                    }
                },
            },
        }
    }

    // Fast path: check again if we're now at the type we want to store.
    // If so, we can use the `direct` strategy.
    if (cur_offset == 0 and cur_val.typeOf(zcu).toIntern() == store_ty.toIntern()) {
        return .{ .direct = .{
            .alloc = base_strat.alloc(),
            .val = cur_val,
        } };
    }

    return .{ .reinterpret = .{
        .alloc = base_strat.alloc(),
        .val = cur_val,
        .byte_offset = cur_offset,
    } };
}

/// Given a potentially-nested array value, recursively flatten all of its elements into the given
/// output array. The result can be used by `unflattenArray` to restructure array values.
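/// For example (illustrative): flattening `[2][2]u8{ .{ 1, 2 }, .{ 3, 4 } }` with
/// `skip.* == 1` into a 2-element `out` buffer yields `{ 2, 3 }`: one base
/// element is skipped, and flattening stops once `out` is full.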
fn flattenArray(
    sema: *Sema,
    val: MutableValue,
    skip: *u64,
    next_idx: *u64,
    out: []InternPool.Index,
) Allocator.Error!void {
    if (next_idx.* == out.len) return;

    const zcu = sema.pt.zcu;

    const ty = val.typeOf(zcu);
    const base_elem_count = ty.arrayBase(zcu)[1];
    if (skip.* >= base_elem_count) {
        skip.* -= base_elem_count;
        return;
    }

    if (ty.zigTypeTag(zcu) != .array) {
        out[@intCast(next_idx.*)] = (try val.intern(sema.pt, sema.arena)).toIntern();
        next_idx.* += 1;
        return;
    }

    const arr_base_elem_count = ty.childType(zcu).arrayBase(zcu)[1];
    for (0..@intCast(ty.arrayLen(zcu))) |elem_idx| {
        // Optimization: the `getElem` here may be expensive since we might intern an
        // element of the `bytes` representation, so avoid doing it unnecessarily.
        if (next_idx.* == out.len) return;
        if (skip.* >= arr_base_elem_count) {
            skip.* -= arr_base_elem_count;
            continue;
        }
        try flattenArray(sema, try val.getElem(sema.pt, elem_idx), skip, next_idx, out);
    }
    if (ty.sentinel(zcu)) |s| {
        try flattenArray(sema, .{ .interned = s.toIntern() }, skip, next_idx, out);
    }
}

/// Given a sequence of non-array elements, "unflatten" them into the given array type.
/// Asserts that values of `elems` are in-memory coercible to the array base type of `ty`.
fn unflattenArray(
    sema: *Sema,
    ty: Type,
    elems: []const InternPool.Index,
    next_idx: *u64,
) Allocator.Error!Value {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const arena = sema.arena;

    if (ty.zigTypeTag(zcu) != .array) {
        const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
        next_idx.* += 1;
        return pt.getCoerced(val, ty);
    }

    const elem_ty = ty.childType(zcu);
    const buf = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
    for (buf) |*elem| {
        elem.* = (try unflattenArray(sema, elem_ty, elems, next_idx)).toIntern();
    }
    if (ty.sentinel(zcu) != null) {
        // TODO: validate sentinel
        _ = try unflattenArray(sema, elem_ty, elems, next_idx);
    }
    return pt.aggregateValue(ty, buf);
}

/// Given a `MutableValue` representing a potentially-nested array, treats `index` as an index into
/// the array's base type. For instance, given a [3][3]T, the index 5 represents 'val[1][2]'.
/// The final level of array is not dereferenced. This allows use sites to use `setElem` to prevent
/// unnecessary `MutableValue` representation changes.
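/// For example (illustrative): with `mv` of type `[3][3]T` and `index.* == 5`,
/// this consumes the 3 base elements of `mv[0]`, descends into `mv[1]`, and
/// returns that inner `[3]T` together with the final index 2, which the caller
/// can pass directly to `setElem`.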
fn recursiveIndex(
    sema: *Sema,
    mv: *MutableValue,
    index: *u64,
) !?struct { *MutableValue, u64 } {
    const pt = sema.pt;

    const ty = mv.typeOf(pt.zcu);
    assert(ty.zigTypeTag(pt.zcu) == .array);

    const ty_base_elems = ty.arrayBase(pt.zcu)[1];
    if (index.* >= ty_base_elems) {
        index.* -= ty_base_elems;
        return null;
    }

    const elem_ty = ty.childType(pt.zcu);
    if (elem_ty.zigTypeTag(pt.zcu) != .array) {
        assert(index.* < ty.arrayLenIncludingSentinel(pt.zcu)); // should be handled by initial check
        return .{ mv, index.* };
    }

    for (0..@intCast(ty.arrayLenIncludingSentinel(pt.zcu))) |elem_index| {
        if (try recursiveIndex(sema, try mv.elem(pt, sema.arena, elem_index), index)) |result| {
            return result;
        }
    }
    unreachable; // should be handled by initial check
}
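
/// Emits a compile error if a store to the comptime-known allocation `alloc_index`
/// happens under a runtime condition or inside a non-inline loop. For example
/// (illustrative), both stores here are rejected, assuming `runtime_cond` is
/// runtime-known:
///
///     comptime var x = 0;
///     if (runtime_cond) x += 1; // depends on runtime condition
///     while (runtime_cond) x += 1; // non-inline loop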
1036
1037fn checkComptimeVarStore(
1038 sema: *Sema,
1039 block: *Block,
1040 src: LazySrcLoc,
1041 alloc_index: ComptimeAllocIndex,
1042) !void {
1043 const runtime_index = sema.getComptimeAlloc(alloc_index).runtime_index;
1044 if (@intFromEnum(runtime_index) < @intFromEnum(block.runtime_index)) {
1045 if (block.runtime_cond) |cond_src| {
1046 const msg = msg: {
1047 const msg = try sema.errMsg(src, "store to comptime variable depends on runtime condition", .{});
1048 errdefer msg.destroy(sema.gpa);
1049 try sema.errNote(cond_src, msg, "runtime condition here", .{});
1050 break :msg msg;
1051 };
1052 return sema.failWithOwnedErrorMsg(block, msg);
1053 }
1054 if (block.runtime_loop) |loop_src| {
1055 const msg = msg: {
1056 const msg = try sema.errMsg(src, "cannot store to comptime variable in non-inline loop", .{});
1057 errdefer msg.destroy(sema.gpa);
1058 try sema.errNote(loop_src, msg, "non-inline loop here", .{});
1059 break :msg msg;
1060 };
1061 return sema.failWithOwnedErrorMsg(block, msg);
1062 }
1063 unreachable;
1064 }
1065}

const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;

const InternPool = @import("../InternPool.zig");
const ComptimeAllocIndex = InternPool.ComptimeAllocIndex;
const Sema = @import("../Sema.zig");
const Block = Sema.Block;
const MutableValue = @import("../mutable_value.zig").MutableValue;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");
const LazySrcLoc = Zcu.LazySrcLoc;