//! This file contains logic for bit-casting arbitrary values at comptime, including splicing
//! bits together for comptime stores of bit-pointers. The strategy is to "flatten" values to
//! a sequence of values in *packed* memory, and then unflatten through a combination of special
//! cases (particularly for pointers and `undefined` values) and in-memory buffer reinterprets.
//!
//! This is a little awkward on big-endian targets, as non-packed data structures (e.g. `extern struct`)
//! have their fields reversed when represented as packed memory on such targets.

/// If `host_bits` is `0`, attempts to convert the memory at offset
/// `byte_offset` into `val` to a non-packed value of type `dest_ty`,
/// ignoring `bit_offset`.
///
/// Otherwise, `byte_offset` is an offset in bytes into `val` to a
/// non-packed value consisting of `host_bits` bits. A value of type
/// `dest_ty` will be interpreted at a packed offset of `bit_offset`
/// into this value.
///
/// Returns `null` if the operation must be performed at runtime.
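///
/// As an illustrative sketch of the parameter semantics (not a description of any particular
/// caller): reading field `b` of an `extern struct { a: u8, b: u16 }` would correspond to
/// `dest_ty == u16`, `byte_offset == 2`, `host_bits == 0`, while a packed load of a `u7` field
/// at bit 5 of a 32-bit backing integer would correspond to `dest_ty == u7`, `host_bits == 32`,
/// `bit_offset == 5`.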
pub fn bitCast(
    sema: *Sema,
    val: Value,
    dest_ty: Type,
    byte_offset: u64,
    host_bits: u64,
    bit_offset: u64,
) CompileError!?Value {
    return bitCastInner(sema, val, dest_ty, byte_offset, host_bits, bit_offset) catch |err| switch (err) {
        error.ReinterpretDeclRef => return null,
        error.IllDefinedMemoryLayout => unreachable,
        error.Unimplemented => @panic("unimplemented bitcast"),
        else => |e| return e,
    };
}

/// Uses bitcasting to splice the value `splice_val` into `val`,
/// replacing overlapping bits and returning the modified value.
///
/// If `host_bits` is `0`, splices `splice_val` at an offset
/// `byte_offset` bytes into the virtual memory of `val`, ignoring
/// `bit_offset`.
///
/// Otherwise, `byte_offset` is an offset in bytes into `val` to
/// a non-packed value consisting of `host_bits` bits. The value
/// `splice_val` will be placed at a packed offset of `bit_offset`
/// into this value.
pub fn bitCastSplice(
    sema: *Sema,
    val: Value,
    splice_val: Value,
    byte_offset: u64,
    host_bits: u64,
    bit_offset: u64,
) CompileError!?Value {
    return bitCastSpliceInner(sema, val, splice_val, byte_offset, host_bits, bit_offset) catch |err| switch (err) {
        error.ReinterpretDeclRef => return null,
        error.IllDefinedMemoryLayout => unreachable,
        error.Unimplemented => @panic("unimplemented bitcast"),
        else => |e| return e,
    };
}

const BitCastError = CompileError || error{ ReinterpretDeclRef, IllDefinedMemoryLayout, Unimplemented };

fn bitCastInner(
    sema: *Sema,
    val: Value,
    dest_ty: Type,
    byte_offset: u64,
    host_bits: u64,
    bit_offset: u64,
) BitCastError!Value {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const endian = zcu.getTarget().cpu.arch.endian();

    if (dest_ty.toIntern() == val.typeOf(zcu).toIntern() and bit_offset == 0) {
        return val;
    }

    const val_ty = val.typeOf(zcu);

    try val_ty.resolveLayout(pt);
    try dest_ty.resolveLayout(pt);

    assert(val_ty.hasWellDefinedLayout(zcu));

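    // `abi_pad_bits` is the difference between the value's ABI size and the number of bits we
    // actually flatten (the host integer when `host_bits != 0`, otherwise the value's own bits).
    // `host_pad_bits` is any extra room inside the host integer beyond the value's own bits.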
    const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
    else
        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };

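    // Number of bits of the flattened (packed) representation to skip before the bits of the
    // result. Packed memory starts at the least significant end, so on big-endian targets this
    // is measured from the end of the value's ABI representation.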
    const skip_bits = switch (endian) {
        .little => bit_offset + byte_offset * 8,
        .big => if (host_bits > 0)
            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
        else
            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - dest_ty.bitSize(zcu),
    };

    var unpack: UnpackValueBits = .{
        .pt = sema.pt,
        .arena = sema.arena,
        .skip_bits = skip_bits,
        .remaining_bits = dest_ty.bitSize(zcu),
        .unpacked = std.array_list.Managed(InternPool.Index).init(sema.arena),
    };
    switch (endian) {
        .little => {
            try unpack.add(val);
            try unpack.padding(abi_pad_bits);
        },
        .big => {
            try unpack.padding(abi_pad_bits);
            try unpack.add(val);
        },
    }
    try unpack.padding(host_pad_bits);

    var pack: PackValueBits = .{
        .pt = sema.pt,
        .arena = sema.arena,
        .unpacked = unpack.unpacked.items,
    };
    return pack.get(dest_ty);
}

fn bitCastSpliceInner(
    sema: *Sema,
    val: Value,
    splice_val: Value,
    byte_offset: u64,
    host_bits: u64,
    bit_offset: u64,
) BitCastError!Value {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const endian = zcu.getTarget().cpu.arch.endian();
    const val_ty = val.typeOf(zcu);
    const splice_val_ty = splice_val.typeOf(zcu);

    try val_ty.resolveLayout(pt);
    try splice_val_ty.resolveLayout(pt);

    const splice_bits = splice_val_ty.bitSize(zcu);

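    // Bit index within `val`'s packed representation at which `splice_val`'s bits begin.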
    const splice_offset = switch (endian) {
        .little => bit_offset + byte_offset * 8,
        .big => if (host_bits > 0)
            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - host_bits + bit_offset
        else
            val_ty.abiSize(zcu) * 8 - byte_offset * 8 - splice_bits,
    };

    assert(splice_offset + splice_bits <= val_ty.abiSize(zcu) * 8);

    const abi_pad_bits, const host_pad_bits = if (host_bits > 0)
        .{ val_ty.abiSize(zcu) * 8 - host_bits, host_bits - val_ty.bitSize(zcu) }
    else
        .{ val_ty.abiSize(zcu) * 8 - val_ty.bitSize(zcu), 0 };

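    // Flatten in three phases: the bits of `val` before the splice point, then `splice_val`
    // itself, then the bits of `val` after the spliced range.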
    var unpack: UnpackValueBits = .{
        .pt = pt,
        .arena = sema.arena,
        .skip_bits = 0,
        .remaining_bits = splice_offset,
        .unpacked = std.array_list.Managed(InternPool.Index).init(sema.arena),
    };
    switch (endian) {
        .little => {
            try unpack.add(val);
            try unpack.padding(abi_pad_bits);
        },
        .big => {
            try unpack.padding(abi_pad_bits);
            try unpack.add(val);
        },
    }
    try unpack.padding(host_pad_bits);

    unpack.remaining_bits = splice_bits;
    try unpack.add(splice_val);

    unpack.skip_bits = splice_offset + splice_bits;
    unpack.remaining_bits = val_ty.abiSize(zcu) * 8 - splice_offset - splice_bits;
    switch (endian) {
        .little => {
            try unpack.add(val);
            try unpack.padding(abi_pad_bits);
        },
        .big => {
            try unpack.padding(abi_pad_bits);
            try unpack.add(val);
        },
    }
    try unpack.padding(host_pad_bits);

    var pack: PackValueBits = .{
        .pt = pt,
        .arena = sema.arena,
        .unpacked = unpack.unpacked.items,
    };
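    // On big-endian targets the flattened sequence begins with the padding bits that were
    // emitted before `val`, so skip them before reading the result back as `val_ty`.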
    switch (endian) {
        .little => {},
        .big => try pack.padding(abi_pad_bits),
    }
    return pack.get(val_ty);
}

/// Recurses through struct fields, array elements, etc., to get a sequence of "primitive" values
/// which are bit-packed in memory to represent a single value. `unpacked` represents a series
/// of values in *packed* memory - therefore, on big-endian targets, the first element of this
/// list contains bits from the *final* byte of the value.
const UnpackValueBits = struct {
    pt: Zcu.PerThread,
    arena: Allocator,
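    /// Leading bits of packed memory to drop before anything is appended to `unpacked`.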
    skip_bits: u64,
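    /// Bits still to be collected; once this reaches zero, further `add` calls are no-ops.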
    remaining_bits: u64,
    extra_bits: u64 = undefined,
    unpacked: std.array_list.Managed(InternPool.Index),

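    /// Flattens `val` into `unpacked`, recursing through aggregates and inserting undefined
    /// padding where the in-memory layout requires it.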
    fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void {
        const pt = unpack.pt;
        const zcu = pt.zcu;
        const endian = zcu.getTarget().cpu.arch.endian();
        const ip = &zcu.intern_pool;

        if (unpack.remaining_bits == 0) {
            return;
        }

        const ty = val.typeOf(zcu);
        const bit_size = ty.bitSize(zcu);

        if (unpack.skip_bits >= bit_size) {
            unpack.skip_bits -= bit_size;
            return;
        }

        switch (ip.indexToKey(val.toIntern())) {
            .int_type,
            .ptr_type,
            .array_type,
            .vector_type,
            .opt_type,
            .anyframe_type,
            .error_union_type,
            .simple_type,
            .struct_type,
            .tuple_type,
            .union_type,
            .opaque_type,
            .enum_type,
            .func_type,
            .error_set_type,
            .inferred_error_set_type,
            .variable,
            .@"extern",
            .func,
            .err,
            .error_union,
            .enum_literal,
            .slice,
            .memoized_call,
            => unreachable, // ill-defined layout or not real values

            .undef,
            .int,
            .enum_tag,
            .simple_value,
            .empty_enum_value,
            .float,
            .ptr,
            .opt,
            => try unpack.primitive(val),

            .aggregate => switch (ty.zigTypeTag(zcu)) {
                .vector => {
                    const len: usize = @intCast(ty.arrayLen(zcu));
                    for (0..len) |i| {
                        // We reverse vector elements in packed memory on BE targets.
                        const real_idx = switch (endian) {
                            .little => i,
                            .big => len - i - 1,
                        };
                        const elem_val = try val.elemValue(pt, real_idx);
                        try unpack.add(elem_val);
                    }
                },
                .array => {
                    // Each element is padded up to its ABI size. Padding bits are undefined.
                    // The final element does not have trailing padding.
                    // Elements are reversed in packed memory on BE targets.
                    const elem_ty = ty.childType(zcu);
                    const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
                    const len = ty.arrayLen(zcu);
                    const maybe_sent = ty.sentinel(zcu);

                    if (endian == .big) if (maybe_sent) |s| {
                        try unpack.add(s);
                        if (len != 0) try unpack.padding(pad_bits);
                    };

                    for (0..@intCast(len)) |i| {
                        // We reverse array elements in packed memory on BE targets.
                        const real_idx = switch (endian) {
                            .little => i,
                            .big => len - i - 1,
                        };
                        const elem_val = try val.elemValue(pt, @intCast(real_idx));
                        try unpack.add(elem_val);
                        if (i != len - 1) try unpack.padding(pad_bits);
                    }

                    if (endian == .little) if (maybe_sent) |s| {
                        if (len != 0) try unpack.padding(pad_bits);
                        try unpack.add(s);
                    };
                },
                .@"struct" => switch (ty.containerLayout(zcu)) {
                    .auto => unreachable, // ill-defined layout
                    .@"extern" => switch (endian) {
                        .little => {
                            var cur_bit_off: u64 = 0;
                            var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                            while (it.next()) |field_idx| {
                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
                                const pad_bits = want_bit_off - cur_bit_off;
                                const field_val = try val.fieldValue(pt, field_idx);
                                try unpack.padding(pad_bits);
                                try unpack.add(field_val);
                                cur_bit_off = want_bit_off + field_val.typeOf(zcu).bitSize(zcu);
                            }
                            // Add trailing padding bits.
                            try unpack.padding(bit_size - cur_bit_off);
                        },
                        .big => {
                            var cur_bit_off: u64 = bit_size;
                            var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
                            while (it.next()) |field_idx| {
                                const field_val = try val.fieldValue(pt, field_idx);
                                const field_ty = field_val.typeOf(zcu);
                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
                                const pad_bits = cur_bit_off - want_bit_off;
                                try unpack.padding(pad_bits);
                                try unpack.add(field_val);
                                cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
                            }
                            assert(cur_bit_off == 0);
                        },
                    },
                    .@"packed" => {
                        // Just add all fields in order. There are no padding bits.
                        // This is identical between LE and BE targets.
                        for (0..ty.structFieldCount(zcu)) |i| {
                            const field_val = try val.fieldValue(pt, i);
                            try unpack.add(field_val);
                        }
                    },
                },
                else => unreachable,
            },

            .un => |un| {
                // We actually don't care about the tag here!
                // Instead, we just need to write the payload value, plus any necessary padding.
                // This correctly handles the case where `tag == .none`, since the payload is then
                // either an integer or a byte array, both of which we can unpack.
                const payload_val = Value.fromInterned(un.val);
                const pad_bits = bit_size - payload_val.typeOf(zcu).bitSize(zcu);
                if (endian == .little or ty.containerLayout(zcu) == .@"packed") {
                    try unpack.add(payload_val);
                    try unpack.padding(pad_bits);
                } else {
                    try unpack.padding(pad_bits);
                    try unpack.add(payload_val);
                }
            },
        }
    }

    fn padding(unpack: *UnpackValueBits, pad_bits: u64) BitCastError!void {
        if (pad_bits == 0) return;
        const pt = unpack.pt;
        // Figure out how many full bytes and leftover bits there are.
        const bytes = pad_bits / 8;
        const bits = pad_bits % 8;
        // Add undef u8 values for the bytes...
        const undef_u8 = try pt.undefValue(Type.u8);
        for (0..@intCast(bytes)) |_| {
            try unpack.primitive(undef_u8);
        }
        // ...and an undef int for the leftover bits.
        if (bits == 0) return;
        const bits_ty = try pt.intType(.unsigned, @intCast(bits));
        const bits_val = try pt.undefValue(bits_ty);
        try unpack.primitive(bits_val);
    }

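    /// Appends a single primitive value to `unpacked`, splitting it when only part of it falls
    /// within the window described by `skip_bits` and `remaining_bits`.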
    fn primitive(unpack: *UnpackValueBits, val: Value) BitCastError!void {
        const pt = unpack.pt;
        const zcu = pt.zcu;

        if (unpack.remaining_bits == 0) {
            return;
        }

        const ty = val.typeOf(pt.zcu);
        const bit_size = ty.bitSize(zcu);

        // Note that this skips all zero-bit types.
        if (unpack.skip_bits >= bit_size) {
            unpack.skip_bits -= bit_size;
            return;
        }

        if (unpack.skip_bits > 0) {
            const skip = unpack.skip_bits;
            unpack.skip_bits = 0;
            return unpack.splitPrimitive(val, skip, bit_size - skip);
        }

        if (unpack.remaining_bits < bit_size) {
            return unpack.splitPrimitive(val, 0, unpack.remaining_bits);
        }

        unpack.remaining_bits -|= bit_size;

        try unpack.unpacked.append(val.toIntern());
    }

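    /// Extracts `bit_count` bits of `val`, starting `bit_offset` bits into its packed
    /// representation, by round-tripping the value through a packed in-memory buffer.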
    fn splitPrimitive(unpack: *UnpackValueBits, val: Value, bit_offset: u64, bit_count: u64) BitCastError!void {
        const pt = unpack.pt;
        const zcu = pt.zcu;
        const ty = val.typeOf(pt.zcu);

        const val_bits = ty.bitSize(zcu);
        assert(bit_offset + bit_count <= val_bits);

        switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
            // In the `ptr` case, this will return `error.ReinterpretDeclRef`
            // if we're trying to split a non-integer pointer value.
            .int, .float, .enum_tag, .ptr, .opt => {
                // This @intCast is okay because no primitive can exceed the size of a u16.
                const int_ty = try unpack.pt.intType(.unsigned, @intCast(bit_count));
                const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8));
                try val.writeToPackedMemory(ty, unpack.pt, buf, 0);
                const sub_val = try Value.readFromPackedMemory(int_ty, unpack.pt, buf, @intCast(bit_offset), unpack.arena);
                try unpack.primitive(sub_val);
            },
            .undef => try unpack.padding(bit_count),
            // The only values here with runtime bits are `true` and `false`.
            // These are both 1 bit, so will never need truncating.
            .simple_value => unreachable,
            .empty_enum_value => unreachable, // zero-bit
            else => unreachable, // zero-bit or not primitives
        }
    }
};

/// Given a sequence of bit-packed values in packed memory (see `UnpackValueBits`),
/// reconstructs a value of an arbitrary type, with correct handling of `undefined`
/// values and of pointers which align in virtual memory.
const PackValueBits = struct {
    pt: Zcu.PerThread,
    arena: Allocator,
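    /// Offset in bits into `unpacked[0]` at which the next read begins.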
    bit_offset: u64 = 0,
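    /// The remaining flattened values, ordered as packed memory (see `UnpackValueBits`).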
    unpacked: []const InternPool.Index,

    fn get(pack: *PackValueBits, ty: Type) BitCastError!Value {
        const pt = pack.pt;
        const zcu = pt.zcu;
        const endian = zcu.getTarget().cpu.arch.endian();
        const ip = &zcu.intern_pool;
        const arena = pack.arena;
        switch (ty.zigTypeTag(zcu)) {
            .vector => {
                // Elements are bit-packed.
                const len = ty.arrayLen(zcu);
                const elem_ty = ty.childType(zcu);
                const elems = try arena.alloc(InternPool.Index, @intCast(len));
                // We reverse vector elements in packed memory on BE targets.
                switch (endian) {
                    .little => for (elems) |*elem| {
                        elem.* = (try pack.get(elem_ty)).toIntern();
                    },
                    .big => {
                        var i = elems.len;
                        while (i > 0) {
                            i -= 1;
                            elems[i] = (try pack.get(elem_ty)).toIntern();
                        }
                    },
                }
                return pt.aggregateValue(ty, elems);
            },
            .array => {
                // Each element is padded up to its ABI size. The final element does not have trailing padding.
                const len = ty.arrayLen(zcu);
                const elem_ty = ty.childType(zcu);
                const maybe_sent = ty.sentinel(zcu);
                const pad_bits = elem_ty.abiSize(zcu) * 8 - elem_ty.bitSize(zcu);
                const elems = try arena.alloc(InternPool.Index, @intCast(len));

                if (endian == .big and maybe_sent != null) {
                    // TODO: validate sentinel was preserved!
                    try pack.padding(elem_ty.bitSize(zcu));
                    if (len != 0) try pack.padding(pad_bits);
                }

                for (0..elems.len) |i| {
                    const real_idx = switch (endian) {
                        .little => i,
                        .big => len - i - 1,
                    };
                    elems[@intCast(real_idx)] = (try pack.get(elem_ty)).toIntern();
                    if (i != len - 1) try pack.padding(pad_bits);
                }

                if (endian == .little and maybe_sent != null) {
                    // TODO: validate sentinel was preserved!
                    if (len != 0) try pack.padding(pad_bits);
                    try pack.padding(elem_ty.bitSize(zcu));
                }

                return pt.aggregateValue(ty, elems);
            },
            .@"struct" => switch (ty.containerLayout(zcu)) {
                .auto => unreachable, // ill-defined layout
                .@"extern" => {
                    const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
                    @memset(elems, .none);
                    switch (endian) {
                        .little => {
                            var cur_bit_off: u64 = 0;
                            var it = zcu.typeToStruct(ty).?.iterateRuntimeOrder(ip);
                            while (it.next()) |field_idx| {
                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
                                try pack.padding(want_bit_off - cur_bit_off);
                                const field_ty = ty.fieldType(field_idx, zcu);
                                elems[field_idx] = (try pack.get(field_ty)).toIntern();
                                cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
                            }
                            try pack.padding(ty.bitSize(zcu) - cur_bit_off);
                        },
                        .big => {
                            var cur_bit_off: u64 = ty.bitSize(zcu);
                            var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
                            while (it.next()) |field_idx| {
                                const field_ty = ty.fieldType(field_idx, zcu);
                                const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
                                try pack.padding(cur_bit_off - want_bit_off);
                                elems[field_idx] = (try pack.get(field_ty)).toIntern();
                                cur_bit_off = want_bit_off - field_ty.bitSize(zcu);
                            }
                            assert(cur_bit_off == 0);
                        },
                    }
                    // Any fields which do not have runtime bits should be OPV or comptime fields.
                    // Fill those values now.
                    for (elems, 0..) |*elem, field_idx| {
                        if (elem.* != .none) continue;
                        const val = (try ty.structFieldValueComptime(pt, field_idx)).?;
                        elem.* = val.toIntern();
                    }
                    return pt.aggregateValue(ty, elems);
                },
                .@"packed" => {
                    // All fields are in order with no padding.
                    // This is identical between LE and BE targets.
                    const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
                    for (elems, 0..) |*elem, i| {
                        const field_ty = ty.fieldType(i, zcu);
                        elem.* = (try pack.get(field_ty)).toIntern();
                    }
                    return pt.aggregateValue(ty, elems);
                },
            },
            .@"union" => {
                // We will attempt to read as the backing representation. If this emits
                // `error.ReinterpretDeclRef`, we will try each union field, preferring larger ones.
                // We will also attempt smaller fields when we get `undefined`, as if some bits are
                // defined we want to include them.
                // TODO: this is very very bad. We need a more sophisticated union representation.

                const prev_unpacked = pack.unpacked;
                const prev_bit_offset = pack.bit_offset;

                const backing_ty = try ty.unionBackingType(pt);

                backing: {
                    const backing_val = pack.get(backing_ty) catch |err| switch (err) {
                        error.ReinterpretDeclRef => {
                            pack.unpacked = prev_unpacked;
                            pack.bit_offset = prev_bit_offset;
                            break :backing;
                        },
                        else => |e| return e,
                    };
                    if (backing_val.isUndef(zcu)) {
                        pack.unpacked = prev_unpacked;
                        pack.bit_offset = prev_bit_offset;
                        break :backing;
                    }
                    return Value.fromInterned(try pt.internUnion(.{
                        .ty = ty.toIntern(),
                        .tag = .none,
                        .val = backing_val.toIntern(),
                    }));
                }

                const field_order = try pack.arena.alloc(u32, ty.unionTagTypeHypothetical(zcu).enumFieldCount(zcu));
                for (field_order, 0..) |*f, i| f.* = @intCast(i);
                // Sort `field_order` to put the fields with the largest bit sizes first.
                const SizeSortCtx = struct {
                    zcu: *Zcu,
                    field_types: []const InternPool.Index,
                    fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
                        const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
                        const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
                        return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
                    }
                };
                std.mem.sortUnstable(u32, field_order, SizeSortCtx{
                    .zcu = zcu,
                    .field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
                }, SizeSortCtx.lessThan);

                const padding_after = endian == .little or ty.containerLayout(zcu) == .@"packed";

                for (field_order) |field_idx| {
                    const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
                    const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
                    if (!padding_after) try pack.padding(pad_bits);
                    const field_val = pack.get(field_ty) catch |err| switch (err) {
                        error.ReinterpretDeclRef => {
                            pack.unpacked = prev_unpacked;
                            pack.bit_offset = prev_bit_offset;
                            continue;
                        },
                        else => |e| return e,
                    };
                    if (padding_after) try pack.padding(pad_bits);
                    if (field_val.isUndef(zcu)) {
                        pack.unpacked = prev_unpacked;
                        pack.bit_offset = prev_bit_offset;
                        continue;
                    }
                    const tag_val = try pt.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
                    return Value.fromInterned(try pt.internUnion(.{
                        .ty = ty.toIntern(),
                        .tag = tag_val.toIntern(),
                        .val = field_val.toIntern(),
                    }));
                }

                // No field could represent the value. Just do whatever happens when we try to read
                // the backing type - either `undefined` or `error.ReinterpretDeclRef`.
                const backing_val = try pack.get(backing_ty);
                return Value.fromInterned(try pt.internUnion(.{
                    .ty = ty.toIntern(),
                    .tag = .none,
                    .val = backing_val.toIntern(),
                }));
            },
            else => return pack.primitive(ty),
        }
    }

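    /// Discards `pad_bits` bits from the front of the stream without materializing them.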
    fn padding(pack: *PackValueBits, pad_bits: u64) BitCastError!void {
        _ = pack.prepareBits(pad_bits);
    }

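    /// Reads a value of `want_ty` from the stream. If every contributing bit is undefined, the
    /// result is `undefined`; a lone pointer value is passed through by coercion; otherwise the
    /// bits are reinterpreted through a packed in-memory buffer.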
    fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
        const pt = pack.pt;
        const zcu = pt.zcu;
        const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));

        for (vals) |val| {
            if (!Value.fromInterned(val).isUndef(zcu)) break;
        } else {
            // All bits of the value are `undefined`.
            return pt.undefValue(want_ty);
        }

        // TODO: we need to decide how to handle partially-undef values here.
        // Currently, a value with some undefined bits becomes `0xAA` so that we
        // preserve the well-defined bits, because we can't currently represent
        // a partially-undefined primitive (e.g. an int with some undef bits).
        // In future, we probably want to take one of these two routes:
        // * Define that if any bits are `undefined`, the entire value is `undefined`.
        //   This is a major breaking change, and probably a footgun.
        // * Introduce tracking for partially-undef values at comptime.
        //   This would complicate a lot of operations in Sema, such as basic
        //   arithmetic.
        // This design complexity is tracked by #19634.

        ptr_cast: {
            if (vals.len != 1) break :ptr_cast;
            const val = Value.fromInterned(vals[0]);
            if (!val.typeOf(zcu).isPtrAtRuntime(zcu)) break :ptr_cast;
            if (!want_ty.isPtrAtRuntime(zcu)) break :ptr_cast;
            return pt.getCoerced(val, want_ty);
        }

        // Reinterpret via an in-memory buffer.

        var buf_bits: u64 = 0;
        for (vals) |ip_val| {
            const val = Value.fromInterned(ip_val);
            const ty = val.typeOf(pt.zcu);
            buf_bits += ty.bitSize(zcu);
        }

        const buf = try pack.arena.alloc(u8, @intCast((buf_bits + 7) / 8));
        // We will skip writing undefined values, so mark the buffer as `0xAA` so we get "undefined" bits.
        @memset(buf, 0xAA);
        var cur_bit_off: usize = 0;
        for (vals) |ip_val| {
            const val = Value.fromInterned(ip_val);
            const ty = val.typeOf(zcu);
            if (!val.isUndef(zcu)) {
                try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
            }
            cur_bit_off += @intCast(ty.bitSize(zcu));
        }

        return Value.readFromPackedMemory(want_ty, pt, buf, @intCast(bit_offset), pack.arena);
    }

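    /// Collects enough leading values from `unpacked` to cover `need_bits` bits starting at the
    /// current `bit_offset`, and returns that slice together with the bit offset of the first
    /// requested bit within it. The stream is advanced past the consumed bits; if the last value
    /// was only partially consumed, it remains at the head of `unpacked` with `bit_offset`
    /// pointing at the first unconsumed bit.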
    fn prepareBits(pack: *PackValueBits, need_bits: u64) struct { []const InternPool.Index, u64 } {
        if (need_bits == 0) return .{ &.{}, 0 };

        const pt = pack.pt;
        const zcu = pt.zcu;

        var bits: u64 = 0;
        var len: usize = 0;
        while (bits < pack.bit_offset + need_bits) {
            bits += Value.fromInterned(pack.unpacked[len]).typeOf(pt.zcu).bitSize(zcu);
            len += 1;
        }

        const result_vals = pack.unpacked[0..len];
        const result_offset = pack.bit_offset;

        const extra_bits = bits - pack.bit_offset - need_bits;
        if (extra_bits == 0) {
            pack.unpacked = pack.unpacked[len..];
            pack.bit_offset = 0;
        } else {
            pack.unpacked = pack.unpacked[len - 1 ..];
            pack.bit_offset = Value.fromInterned(pack.unpacked[0]).typeOf(pt.zcu).bitSize(zcu) - extra_bits;
        }

        return .{ result_vals, result_offset };
    }
};

const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;

const Sema = @import("../Sema.zig");
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const CompileError = Zcu.CompileError;