1const build_options = @import("build_options");
2const builtin = @import("builtin");
3
4const std = @import("std");
5const Io = std.Io;
6const assert = std.debug.assert;
7const BigIntConst = std.math.big.int.Const;
8const BigIntMutable = std.math.big.int.Mutable;
9const Target = std.Target;
10const Allocator = std.mem.Allocator;
11
12const Type = @import("Type.zig");
13const Zcu = @import("Zcu.zig");
14const Sema = @import("Sema.zig");
15const InternPool = @import("InternPool.zig");
16const print_value = @import("print_value.zig");
17const Value = @This();
18
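/// The only state a `Value` carries: an index into the `InternPool`. All queries below
/// resolve this index through the pool.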
19ip_index: InternPool.Index,
20
21pub fn format(val: Value, writer: *std.Io.Writer) !void {
22 _ = val;
23 _ = writer;
24 @compileError("do not format values directly; use either fmtDebug or fmtValue");
25}
26
27/// This is a debug function. In order to print values in a meaningful way
28/// we also need access to the type.
29pub fn dump(start_val: Value, w: *std.Io.Writer) std.Io.Writer.Error!void {
30 try w.print("(interned: {})", .{start_val.toIntern()});
31}
32
33pub fn fmtDebug(val: Value) std.fmt.Alt(Value, dump) {
34 return .{ .data = val };
35}
36
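/// Returns a formatter that prints `val` via `print_value.format` without a `Sema`,
/// using a printing depth of 3.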
37pub fn fmtValue(val: Value, pt: Zcu.PerThread) std.fmt.Alt(print_value.FormatContext, print_value.format) {
38 return .{ .data = .{
39 .val = val,
40 .pt = pt,
41 .opt_sema = null,
42 .depth = 3,
43 } };
44}
45
46pub fn fmtValueSema(val: Value, pt: Zcu.PerThread, sema: *Sema) std.fmt.Alt(print_value.FormatContext, print_value.formatSema) {
47 return .{ .data = .{
48 .val = val,
49 .pt = pt,
50 .opt_sema = sema,
51 .depth = 3,
52 } };
53}
54
55pub fn fmtValueSemaFull(ctx: print_value.FormatContext) std.fmt.Alt(print_value.FormatContext, print_value.formatSema) {
56 return .{ .data = ctx };
57}
58
59/// Converts `val` to a null-terminated string stored in the InternPool.
60/// Asserts `val` is an array of `u8`.
61pub fn toIpString(val: Value, ty: Type, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
62 const zcu = pt.zcu;
63 assert(ty.zigTypeTag(zcu) == .array);
64 assert(ty.childType(zcu).toIntern() == .u8_type);
65 const ip = &zcu.intern_pool;
66 switch (zcu.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
67 .bytes => |bytes| return bytes.toNullTerminatedString(ty.arrayLen(zcu), ip),
68 .elems => return arrayToIpString(val, ty.arrayLen(zcu), pt),
69 .repeated_elem => |elem| {
70 const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(zcu));
71 const len: u32 = @intCast(ty.arrayLen(zcu));
72 const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(zcu.gpa);
73 try string_bytes.appendNTimes(.{byte}, len);
74 return ip.getOrPutTrailingString(zcu.gpa, pt.tid, len, .no_embedded_nulls);
75 },
76 }
77}
78
79/// Asserts that the value is representable as an array of bytes.
80/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
81pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, pt: Zcu.PerThread) ![]u8 {
82 const zcu = pt.zcu;
83 const ip = &zcu.intern_pool;
84 return switch (ip.indexToKey(val.toIntern())) {
85 .enum_literal => |enum_literal| allocator.dupe(u8, enum_literal.toSlice(ip)),
86 .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(zcu), allocator, pt),
87 .aggregate => |aggregate| switch (aggregate.storage) {
88 .bytes => |bytes| try allocator.dupe(u8, bytes.toSlice(ty.arrayLenIncludingSentinel(zcu), ip)),
89 .elems => try arrayToAllocatedBytes(val, ty.arrayLen(zcu), allocator, pt),
90 .repeated_elem => |elem| {
91 const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(zcu));
92 const result = try allocator.alloc(u8, @intCast(ty.arrayLen(zcu)));
93 @memset(result, byte);
94 return result;
95 },
96 },
97 else => unreachable,
98 };
99}
100
101fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, pt: Zcu.PerThread) ![]u8 {
102 const result = try allocator.alloc(u8, @intCast(len));
103 for (result, 0..) |*elem, i| {
104 const elem_val = try val.elemValue(pt, i);
105 elem.* = @intCast(elem_val.toUnsignedInt(pt.zcu));
106 }
107 return result;
108}
109
110fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
111 const zcu = pt.zcu;
112 const gpa = zcu.gpa;
113 const ip = &zcu.intern_pool;
114 const len: u32 = @intCast(len_u64);
115 const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa);
116 try string_bytes.ensureUnusedCapacity(len);
117 for (0..len) |i| {
118 // I don't think elemValue has the possibility to affect ip.string_bytes. Let's
119 // assert just to be sure.
120 const prev_len = string_bytes.mutate.len;
121 const elem_val = try val.elemValue(pt, i);
122 assert(string_bytes.mutate.len == prev_len);
123 const byte: u8 = @intCast(elem_val.toUnsignedInt(zcu));
124 string_bytes.appendAssumeCapacity(.{byte});
125 }
126 return ip.getOrPutTrailingString(gpa, pt.tid, len, .no_embedded_nulls);
127}
128
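/// Wraps an `InternPool.Index` as a `Value`. Asserts the index is not `.none`.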
129pub fn fromInterned(i: InternPool.Index) Value {
130 assert(i != .none);
131 return .{ .ip_index = i };
132}
133
134pub fn toIntern(val: Value) InternPool.Index {
135 assert(val.ip_index != .none);
136 return val.ip_index;
137}
138
139/// Asserts that the value is representable as a type.
140pub fn toType(self: Value) Type {
141 return Type.fromInterned(self.toIntern());
142}
143
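/// Returns the integer tag value backing the enum value `val` of type `ty`.
/// Values that are already integers are returned unchanged.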
144pub fn intFromEnum(val: Value, ty: Type, pt: Zcu.PerThread) Allocator.Error!Value {
145 const ip = &pt.zcu.intern_pool;
146 const enum_ty = ip.typeOf(val.toIntern());
147 return switch (ip.indexToKey(enum_ty)) {
148 // Assume it is already an integer and return it directly.
149 .simple_type, .int_type => val,
150 .enum_literal => |enum_literal| {
151 const field_index = ty.enumFieldIndex(enum_literal, pt.zcu).?;
152 switch (ip.indexToKey(ty.toIntern())) {
153 // Assume it is already an integer and return it directly.
154 .simple_type, .int_type => return val,
155 .enum_type => {
156 const enum_type = ip.loadEnumType(ty.toIntern());
157 if (enum_type.values.len != 0) {
158 return Value.fromInterned(enum_type.values.get(ip)[field_index]);
159 } else {
160 // Field index and integer values are the same.
161 return pt.intValue(Type.fromInterned(enum_type.tag_ty), field_index);
162 }
163 },
164 else => unreachable,
165 }
166 },
167 .enum_type => try pt.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)),
168 else => unreachable,
169 };
170}
171
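/// Resolution strategy threaded through the `*Sema`/`*Inner`/`*Advanced` helpers below:
/// `.normal` expects layouts to already be resolved, while `.sema` may resolve them on demand
/// (see the `strat == .sema` branches in `toBigIntAdvanced` and `getUnsignedIntInner`).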
172pub const ResolveStrat = Type.ResolveStrat;
173
174/// Asserts the value is an integer.
175pub fn toBigInt(val: Value, space: *BigIntSpace, zcu: *Zcu) BigIntConst {
176 return val.toBigIntAdvanced(space, .normal, zcu, {}) catch unreachable;
177}
178
179pub fn toBigIntSema(val: Value, space: *BigIntSpace, pt: Zcu.PerThread) !BigIntConst {
180 return try val.toBigIntAdvanced(space, .sema, pt.zcu, pt.tid);
181}
182
183/// Asserts the value is an integer.
184pub fn toBigIntAdvanced(
185 val: Value,
186 space: *BigIntSpace,
187 comptime strat: ResolveStrat,
188 zcu: *Zcu,
189 tid: strat.Tid(),
190) Zcu.SemaError!BigIntConst {
191 const ip = &zcu.intern_pool;
192 return switch (val.toIntern()) {
193 .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(),
194 .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(),
195 .null_value => BigIntMutable.init(&space.limbs, 0).toConst(),
196 else => switch (ip.indexToKey(val.toIntern())) {
197 .int => |int| switch (int.storage) {
198 .u64, .i64, .big_int => int.storage.toBigInt(space),
199 .lazy_align, .lazy_size => |ty| {
200 if (strat == .sema) try Type.fromInterned(ty).resolveLayout(strat.pt(zcu, tid));
201 const x = switch (int.storage) {
202 else => unreachable,
203 .lazy_align => Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0,
204 .lazy_size => Type.fromInterned(ty).abiSize(zcu),
205 };
206 return BigIntMutable.init(&space.limbs, x).toConst();
207 },
208 },
209 .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, strat, zcu, tid),
210 .opt, .ptr => BigIntMutable.init(
211 &space.limbs,
212 (try val.getUnsignedIntInner(strat, zcu, tid)).?,
213 ).toConst(),
214 .err => |err| BigIntMutable.init(&space.limbs, ip.getErrorValueIfExists(err.name).?).toConst(),
215 else => unreachable,
216 },
217 };
218}
219
220pub fn isFuncBody(val: Value, zcu: *Zcu) bool {
221 return zcu.intern_pool.isFuncBody(val.toIntern());
222}
223
224pub fn getFunction(val: Value, zcu: *Zcu) ?InternPool.Key.Func {
225 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
226 .func => |x| x,
227 else => null,
228 };
229}
230
231pub fn getVariable(val: Value, mod: *Zcu) ?InternPool.Key.Variable {
232 return switch (mod.intern_pool.indexToKey(val.toIntern())) {
233 .variable => |variable| variable,
234 else => null,
235 };
236}
237
238/// If the value fits in a u64, return it, otherwise null.
239/// Asserts not undefined.
240pub fn getUnsignedInt(val: Value, zcu: *const Zcu) ?u64 {
241 return getUnsignedIntInner(val, .normal, zcu, {}) catch unreachable;
242}
243
244/// Asserts the value is an integer and it fits in a u64
245pub fn toUnsignedInt(val: Value, zcu: *const Zcu) u64 {
246 return getUnsignedInt(val, zcu).?;
247}
248
249pub fn getUnsignedIntSema(val: Value, pt: Zcu.PerThread) !?u64 {
250 return try val.getUnsignedIntInner(.sema, pt.zcu, pt.tid);
251}
252
253/// If the value fits in a u64, return it, otherwise null.
254/// Asserts not undefined.
255pub fn getUnsignedIntInner(
256 val: Value,
257 comptime strat: ResolveStrat,
258 zcu: strat.ZcuPtr(),
259 tid: strat.Tid(),
260) !?u64 {
261 return switch (val.toIntern()) {
262 .undef => unreachable,
263 .bool_false => 0,
264 .bool_true => 1,
265 else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
266 .undef => unreachable,
267 .int => |int| switch (int.storage) {
268 .big_int => |big_int| big_int.toInt(u64) catch null,
269 .u64 => |x| x,
270 .i64 => |x| std.math.cast(u64, x),
271 .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentInner(strat.toLazy(), zcu, tid)).scalar.toByteUnits() orelse 0,
272 .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeInner(strat.toLazy(), zcu, tid)).scalar,
273 },
274 .ptr => |ptr| switch (ptr.base_addr) {
275 .int => ptr.byte_offset,
276 .field => |field| {
277 const base_addr = (try Value.fromInterned(field.base).getUnsignedIntInner(strat, zcu, tid)) orelse return null;
278 const struct_ty = Value.fromInterned(field.base).typeOf(zcu).childType(zcu);
279 if (strat == .sema) {
280 const pt = strat.pt(zcu, tid);
281 try struct_ty.resolveLayout(pt);
282 }
283 return base_addr + struct_ty.structFieldOffset(@intCast(field.index), zcu) + ptr.byte_offset;
284 },
285 else => null,
286 },
287 .opt => |opt| switch (opt.val) {
288 .none => 0,
289 else => |payload| Value.fromInterned(payload).getUnsignedIntInner(strat, zcu, tid),
290 },
291 .enum_tag => |enum_tag| return Value.fromInterned(enum_tag.int).getUnsignedIntInner(strat, zcu, tid),
292 else => null,
293 },
294 };
295}
296
297/// Asserts the value is an integer and it fits in a u64
298pub fn toUnsignedIntSema(val: Value, pt: Zcu.PerThread) !u64 {
299 return (try getUnsignedIntInner(val, .sema, pt.zcu, pt.tid)).?;
300}
301
302/// Asserts the value is an integer and it fits in an i64
303pub fn toSignedInt(val: Value, zcu: *const Zcu) i64 {
304 return switch (val.toIntern()) {
305 .bool_false => 0,
306 .bool_true => 1,
307 else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
308 .int => |int| switch (int.storage) {
309 .big_int => |big_int| big_int.toInt(i64) catch unreachable,
310 .i64 => |x| x,
311 .u64 => |x| @intCast(x),
312 .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0),
313 .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(zcu)),
314 },
315 else => unreachable,
316 },
317 };
318}
319
320pub fn toBool(val: Value) bool {
321 return switch (val.toIntern()) {
322 .bool_true => true,
323 .bool_false => false,
324 else => unreachable,
325 };
326}
327
328/// Write a Value's contents to `buffer`.
329///
330/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
331/// the end of the value in memory.
332pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
333 ReinterpretDeclRef,
334 IllDefinedMemoryLayout,
335 Unimplemented,
336 OutOfMemory,
337}!void {
338 const zcu = pt.zcu;
339 const target = zcu.getTarget();
340 const endian = target.cpu.arch.endian();
341 const ip = &zcu.intern_pool;
342 const ty = val.typeOf(zcu);
343 if (val.isUndef(zcu)) {
344 const size: usize = @intCast(ty.abiSize(zcu));
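// Undefined values are written as the 0xaa byte pattern.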
345 @memset(buffer[0..size], 0xaa);
346 return;
347 }
348 switch (ty.zigTypeTag(zcu)) {
349 .void => {},
350 .bool => {
351 buffer[0] = @intFromBool(val.toBool());
352 },
353 .int, .@"enum", .error_set, .pointer => |tag| {
354 const int_ty = if (tag == .pointer) int_ty: {
355 if (ty.isSlice(zcu)) return error.IllDefinedMemoryLayout;
356 if (ip.getBackingAddrTag(val.toIntern()).? != .int) return error.ReinterpretDeclRef;
357 break :int_ty Type.usize;
358 } else ty;
359 const int_info = int_ty.intInfo(zcu);
360 const bits = int_info.bits;
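// Round the bit count up to whole bytes; the u17 intermediate avoids overflow when
// `bits` is near its u16 maximum.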
361 const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
362
363 var bigint_buffer: BigIntSpace = undefined;
364 const bigint = val.toBigInt(&bigint_buffer, zcu);
365 bigint.writeTwosComplement(buffer[0..byte_count], endian);
366 },
367 .float => switch (ty.floatBits(target)) {
368 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, zcu)), endian),
369 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, zcu)), endian),
370 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, zcu)), endian),
371 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, zcu)), endian),
372 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, zcu)), endian),
373 else => unreachable,
374 },
375 .array => {
376 const len = ty.arrayLen(zcu);
377 const elem_ty = ty.childType(zcu);
378 const elem_size: usize = @intCast(elem_ty.abiSize(zcu));
379 var elem_i: usize = 0;
380 var buf_off: usize = 0;
381 while (elem_i < len) : (elem_i += 1) {
382 const elem_val = try val.elemValue(pt, elem_i);
383 try elem_val.writeToMemory(pt, buffer[buf_off..]);
384 buf_off += elem_size;
385 }
386 },
387 .vector => {
388 // We use byte_count instead of abi_size here, so that any padding bytes
389 // follow the data bytes, on both big- and little-endian systems.
390 const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
391 return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
392 },
393 .@"struct" => {
394 const struct_type = zcu.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
395 switch (struct_type.layout) {
396 .auto => return error.IllDefinedMemoryLayout,
397 .@"extern" => for (0..struct_type.field_types.len) |field_index| {
398 const off: usize = @intCast(ty.structFieldOffset(field_index, zcu));
399 const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
400 .bytes => |bytes| {
401 buffer[off] = bytes.at(field_index, ip);
402 continue;
403 },
404 .elems => |elems| elems[field_index],
405 .repeated_elem => |elem| elem,
406 });
407 try writeToMemory(field_val, pt, buffer[off..]);
408 },
409 .@"packed" => {
410 const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
411 return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
412 },
413 }
414 },
415 .@"union" => switch (ty.containerLayout(zcu)) {
416 .auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already
417 .@"extern" => {
418 if (val.unionTag(zcu)) |union_tag| {
419 const union_obj = zcu.typeToUnion(ty).?;
420 const field_index = zcu.unionTagFieldIndex(union_obj, union_tag).?;
421 const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
422 const field_val = try val.fieldValue(pt, field_index);
423 const byte_count: usize = @intCast(field_type.abiSize(zcu));
424 return writeToMemory(field_val, pt, buffer[0..byte_count]);
425 } else {
426 const backing_ty = try ty.unionBackingType(pt);
427 const byte_count: usize = @intCast(backing_ty.abiSize(zcu));
428 return writeToMemory(val.unionValue(zcu), pt, buffer[0..byte_count]);
429 }
430 },
431 .@"packed" => {
432 const backing_ty = try ty.unionBackingType(pt);
433 const byte_count: usize = @intCast(backing_ty.abiSize(zcu));
434 return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
435 },
436 },
437 .optional => {
438 if (!ty.isPtrLikeOptional(zcu)) return error.IllDefinedMemoryLayout;
439 const opt_val = val.optionalValue(zcu);
440 if (opt_val) |some| {
441 return some.writeToMemory(pt, buffer);
442 } else {
443 return writeToMemory(try pt.intValue(Type.usize, 0), pt, buffer);
444 }
445 },
446 else => return error.Unimplemented,
447 }
448}
449
450/// Write a Value's contents to `buffer`.
451///
452/// Both the start and the end of the provided buffer must be tight, since
453/// big-endian packed memory layouts start at the end of the buffer.
454pub fn writeToPackedMemory(
455 val: Value,
456 ty: Type,
457 pt: Zcu.PerThread,
458 buffer: []u8,
459 bit_offset: usize,
460) error{ ReinterpretDeclRef, OutOfMemory }!void {
461 const zcu = pt.zcu;
462 const ip = &zcu.intern_pool;
463 const target = zcu.getTarget();
464 const endian = target.cpu.arch.endian();
465 if (val.isUndef(zcu)) {
466 const bit_size: usize = @intCast(ty.bitSize(zcu));
467 if (bit_size != 0) {
468 std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
469 }
470 return;
471 }
472 switch (ty.zigTypeTag(zcu)) {
473 .void => {},
474 .bool => {
475 const byte_index = switch (endian) {
476 .little => bit_offset / 8,
477 .big => buffer.len - bit_offset / 8 - 1,
478 };
479 if (val.toBool()) {
480 buffer[byte_index] |= (@as(u8, 1) << @as(u3, @intCast(bit_offset % 8)));
481 } else {
482 buffer[byte_index] &= ~(@as(u8, 1) << @as(u3, @intCast(bit_offset % 8)));
483 }
484 },
485 .int, .@"enum" => {
486 if (buffer.len == 0) return;
487 const bits = ty.intInfo(zcu).bits;
488 if (bits == 0) return;
489
490 switch (ip.indexToKey((try val.intFromEnum(ty, pt)).toIntern()).int.storage) {
491 inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian),
492 .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian),
493 .lazy_align => |lazy_align| {
494 const num = Type.fromInterned(lazy_align).abiAlignment(zcu).toByteUnits() orelse 0;
495 std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
496 },
497 .lazy_size => |lazy_size| {
498 const num = Type.fromInterned(lazy_size).abiSize(zcu);
499 std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
500 },
501 }
502 },
503 .float => switch (ty.floatBits(target)) {
504 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, zcu)), endian),
505 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, zcu)), endian),
506 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, zcu)), endian),
507 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, zcu)), endian),
508 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, zcu)), endian),
509 else => unreachable,
510 },
511 .vector => {
512 const elem_ty = ty.childType(zcu);
513 const elem_bit_size: u16 = @intCast(elem_ty.bitSize(zcu));
514 const len: usize = @intCast(ty.arrayLen(zcu));
515
516 var bits: u16 = 0;
517 var elem_i: usize = 0;
518 while (elem_i < len) : (elem_i += 1) {
519 // On big-endian systems, LLVM reverses the element order of vectors by default
520 const tgt_elem_i = if (endian == .big) len - elem_i - 1 else elem_i;
521 const elem_val = try val.elemValue(pt, tgt_elem_i);
522 try elem_val.writeToPackedMemory(elem_ty, pt, buffer, bit_offset + bits);
523 bits += elem_bit_size;
524 }
525 },
526 .@"struct" => {
527 const struct_type = ip.loadStructType(ty.toIntern());
528 // Sema is supposed to have emitted a compile error already in the case of Auto,
529 // and Extern is handled in non-packed writeToMemory.
530 assert(struct_type.layout == .@"packed");
531 var bits: u16 = 0;
532 for (0..struct_type.field_types.len) |i| {
533 const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
534 .bytes => unreachable,
535 .elems => |elems| elems[i],
536 .repeated_elem => |elem| elem,
537 });
538 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
539 const field_bits: u16 = @intCast(field_ty.bitSize(zcu));
540 try field_val.writeToPackedMemory(field_ty, pt, buffer, bit_offset + bits);
541 bits += field_bits;
542 }
543 },
544 .@"union" => {
545 const union_obj = zcu.typeToUnion(ty).?;
546 switch (union_obj.flagsUnordered(ip).layout) {
547 .auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory
548 .@"packed" => {
549 if (val.unionTag(zcu)) |union_tag| {
550 const field_index = zcu.unionTagFieldIndex(union_obj, union_tag).?;
551 const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
552 const field_val = try val.fieldValue(pt, field_index);
553 return field_val.writeToPackedMemory(field_type, pt, buffer, bit_offset);
554 } else {
555 const backing_ty = try ty.unionBackingType(pt);
556 return val.unionValue(zcu).writeToPackedMemory(backing_ty, pt, buffer, bit_offset);
557 }
558 },
559 }
560 },
561 .pointer => {
562 assert(!ty.isSlice(zcu)); // No well defined layout.
563 if (ip.getBackingAddrTag(val.toIntern()).? != .int) return error.ReinterpretDeclRef;
564 return val.writeToPackedMemory(Type.usize, pt, buffer, bit_offset);
565 },
566 .optional => {
567 assert(ty.isPtrLikeOptional(zcu));
568 const child = ty.optionalChild(zcu);
569 const opt_val = val.optionalValue(zcu);
570 if (opt_val) |some| {
571 return some.writeToPackedMemory(child, pt, buffer, bit_offset);
572 } else {
573 return writeToPackedMemory(try pt.intValue(Type.usize, 0), Type.usize, pt, buffer, bit_offset);
574 }
575 },
576 else => @panic("TODO implement writeToPackedMemory for more types"),
577 }
578}
579
580/// Load a Value from the contents of `buffer`, where `ty` is an unsigned integer type.
581///
582/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
583/// the end of the value in memory.
584pub fn readUintFromMemory(
585 ty: Type,
586 pt: Zcu.PerThread,
587 buffer: []const u8,
588 arena: Allocator,
589) Allocator.Error!Value {
590 const zcu = pt.zcu;
591 const endian = zcu.getTarget().cpu.arch.endian();
592
593 assert(ty.isUnsignedInt(zcu));
594 const bits = ty.intInfo(zcu).bits;
595 const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
596
597 assert(buffer.len >= byte_count);
598
599 if (bits <= 64) {
600 const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
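// Shift the unused high bits out and back in to zero everything above `bits`.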
601 const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
602 return pt.intValue(ty, result);
603 } else {
604 const Limb = std.math.big.Limb;
605 const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb);
606 const limbs_buffer = try arena.alloc(Limb, limb_count);
607
608 var bigint: BigIntMutable = .init(limbs_buffer, 0);
609 bigint.readTwosComplement(buffer[0..byte_count], bits, endian, .unsigned);
610 return pt.intValue_big(ty, bigint.toConst());
611 }
612}
613
614/// Load a Value from the contents of `buffer`.
615///
616/// Both the start and the end of the provided buffer must be tight, since
617/// big-endian packed memory layouts start at the end of the buffer.
618pub fn readFromPackedMemory(
619 ty: Type,
620 pt: Zcu.PerThread,
621 buffer: []const u8,
622 bit_offset: usize,
623 arena: Allocator,
624) error{
625 IllDefinedMemoryLayout,
626 OutOfMemory,
627}!Value {
628 const zcu = pt.zcu;
629 const ip = &zcu.intern_pool;
630 const target = zcu.getTarget();
631 const endian = target.cpu.arch.endian();
632 switch (ty.zigTypeTag(zcu)) {
633 .void => return Value.void,
634 .bool => {
635 const byte = switch (endian) {
636 .big => buffer[buffer.len - bit_offset / 8 - 1],
637 .little => buffer[bit_offset / 8],
638 };
639 if (((byte >> @as(u3, @intCast(bit_offset % 8))) & 1) == 0) {
640 return Value.false;
641 } else {
642 return Value.true;
643 }
644 },
645 .int => {
646 if (buffer.len == 0) return pt.intValue(ty, 0);
647 const int_info = ty.intInfo(zcu);
648 const bits = int_info.bits;
649 if (bits == 0) return pt.intValue(ty, 0);
650
651 // Fast path for integers <= u64
652 if (bits <= 64) switch (int_info.signedness) {
653 // Use different backing types for unsigned vs signed to avoid the need to go via
654 // a larger type like `i128`.
655 .unsigned => return pt.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
656 .signed => return pt.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
657 };
658
659 // Slow path, we have to construct a big-int
660 const abi_size: usize = @intCast(ty.abiSize(zcu));
661 const Limb = std.math.big.Limb;
662 const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
663 const limbs_buffer = try arena.alloc(Limb, limb_count);
664
665 var bigint = BigIntMutable.init(limbs_buffer, 0);
666 bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
667 return pt.intValue_big(ty, bigint.toConst());
668 },
669 .@"enum" => {
670 const int_ty = ty.intTagType(zcu);
671 const int_val = try Value.readFromPackedMemory(int_ty, pt, buffer, bit_offset, arena);
672 return pt.getCoerced(int_val, ty);
673 },
674 .float => return Value.fromInterned(try pt.intern(.{ .float = .{
675 .ty = ty.toIntern(),
676 .storage = switch (ty.floatBits(target)) {
677 16 => .{ .f16 = @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian)) },
678 32 => .{ .f32 = @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian)) },
679 64 => .{ .f64 = @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian)) },
680 80 => .{ .f80 = @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian)) },
681 128 => .{ .f128 = @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian)) },
682 else => unreachable,
683 },
684 } })),
685 .vector => {
686 const elem_ty = ty.childType(zcu);
687 const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
688
689 var bits: u16 = 0;
690 const elem_bit_size: u16 = @intCast(elem_ty.bitSize(zcu));
691 for (elems, 0..) |_, i| {
692 // On big-endian systems, LLVM reverses the element order of vectors by default
693 const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i;
694 elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
695 bits += elem_bit_size;
696 }
697 return pt.aggregateValue(ty, elems);
698 },
699 .@"struct" => {
700 // Sema is supposed to have emitted a compile error already for Auto layout structs,
701 // and Extern is handled by non-packed readFromMemory.
702 const struct_type = zcu.typeToPackedStruct(ty).?;
703 var bits: u16 = 0;
704 const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
705 for (field_vals, 0..) |*field_val, i| {
706 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
707 const field_bits: u16 = @intCast(field_ty.bitSize(zcu));
708 field_val.* = (try readFromPackedMemory(field_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
709 bits += field_bits;
710 }
711 return pt.aggregateValue(ty, field_vals);
712 },
713 .@"union" => switch (ty.containerLayout(zcu)) {
714 .auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
715 .@"packed" => {
716 const backing_ty = try ty.unionBackingType(pt);
717 const val = (try readFromPackedMemory(backing_ty, pt, buffer, bit_offset, arena)).toIntern();
718 return Value.fromInterned(try pt.internUnion(.{
719 .ty = ty.toIntern(),
720 .tag = .none,
721 .val = val,
722 }));
723 },
724 },
725 .pointer => {
726 assert(!ty.isSlice(zcu)); // No well defined layout.
727 const int_val = try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, arena);
728 return Value.fromInterned(try pt.intern(.{ .ptr = .{
729 .ty = ty.toIntern(),
730 .base_addr = .int,
731 .byte_offset = int_val.toUnsignedInt(zcu),
732 } }));
733 },
734 .optional => {
735 assert(ty.isPtrLikeOptional(zcu));
736 const child_ty = ty.optionalChild(zcu);
737 const child_val = try readFromPackedMemory(child_ty, pt, buffer, bit_offset, arena);
738 return Value.fromInterned(try pt.intern(.{ .opt = .{
739 .ty = ty.toIntern(),
740 .val = switch (child_val.orderAgainstZero(zcu)) {
741 .lt => unreachable,
742 .eq => .none,
743 .gt => child_val.toIntern(),
744 },
745 } }));
746 },
747 else => @panic("TODO implement readFromPackedMemory for more types"),
748 }
749}
750
751/// Asserts that the value is a float or an integer.
752pub fn toFloat(val: Value, comptime T: type, zcu: *const Zcu) T {
753 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
754 .int => |int| switch (int.storage) {
755 .big_int => |big_int| big_int.toFloat(T, .nearest_even)[0],
756 inline .u64, .i64 => |x| {
757 if (T == f80) {
758 @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
759 }
760 return @floatFromInt(x);
761 },
762 .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(zcu).toByteUnits() orelse 0),
763 .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(zcu)),
764 },
765 .float => |float| switch (float.storage) {
766 inline else => |x| @floatCast(x),
767 },
768 else => unreachable,
769 };
770}
771
772pub fn clz(val: Value, ty: Type, zcu: *Zcu) u64 {
773 var bigint_buf: BigIntSpace = undefined;
774 const bigint = val.toBigInt(&bigint_buf, zcu);
775 return bigint.clz(ty.intInfo(zcu).bits);
776}
777
778pub fn ctz(val: Value, ty: Type, zcu: *Zcu) u64 {
779 var bigint_buf: BigIntSpace = undefined;
780 const bigint = val.toBigInt(&bigint_buf, zcu);
781 return bigint.ctz(ty.intInfo(zcu).bits);
782}
783
784pub fn popCount(val: Value, ty: Type, zcu: *Zcu) u64 {
785 var bigint_buf: BigIntSpace = undefined;
786 const bigint = val.toBigInt(&bigint_buf, zcu);
787 return @intCast(bigint.popCount(ty.intInfo(zcu).bits));
788}
789
790/// Asserts the value is an integer and not undefined.
791/// Returns the number of bits required to represent the value in two's complement form.
792pub fn intBitCountTwosComp(self: Value, zcu: *Zcu) usize {
793 var buffer: BigIntSpace = undefined;
794 const big_int = self.toBigInt(&buffer, zcu);
795 return big_int.bitCountTwosComp();
796}
797
798/// Converts an integer or a float to a float. May result in a loss of information.
799/// Caller can find out by equality checking the result against the operand.
800pub fn floatCast(val: Value, dest_ty: Type, pt: Zcu.PerThread) !Value {
801 const zcu = pt.zcu;
802 const target = zcu.getTarget();
803 if (val.isUndef(zcu)) return pt.undefValue(dest_ty);
804 return Value.fromInterned(try pt.intern(.{ .float = .{
805 .ty = dest_ty.toIntern(),
806 .storage = switch (dest_ty.floatBits(target)) {
807 16 => .{ .f16 = val.toFloat(f16, zcu) },
808 32 => .{ .f32 = val.toFloat(f32, zcu) },
809 64 => .{ .f64 = val.toFloat(f64, zcu) },
810 80 => .{ .f80 = val.toFloat(f80, zcu) },
811 128 => .{ .f128 = val.toFloat(f128, zcu) },
812 else => unreachable,
813 },
814 } }));
815}
816
817pub fn orderAgainstZero(lhs: Value, zcu: *Zcu) std.math.Order {
818 return orderAgainstZeroInner(lhs, .normal, zcu, {}) catch unreachable;
819}
820
821pub fn orderAgainstZeroSema(lhs: Value, pt: Zcu.PerThread) !std.math.Order {
822 return try orderAgainstZeroInner(lhs, .sema, pt.zcu, pt.tid);
823}
824
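/// Shared implementation of `orderAgainstZero` and `orderAgainstZeroSema`.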
825pub fn orderAgainstZeroInner(
826 lhs: Value,
827 comptime strat: ResolveStrat,
828 zcu: *Zcu,
829 tid: strat.Tid(),
830) Zcu.SemaError!std.math.Order {
831 return switch (lhs.toIntern()) {
832 .bool_false => .eq,
833 .bool_true => .gt,
834 else => switch (zcu.intern_pool.indexToKey(lhs.toIntern())) {
835 .ptr => |ptr| if (ptr.byte_offset > 0) .gt else switch (ptr.base_addr) {
836 .nav, .comptime_alloc, .comptime_field => .gt,
837 .int => .eq,
838 else => unreachable,
839 },
840 .int => |int| switch (int.storage) {
841 .big_int => |big_int| big_int.orderAgainstScalar(0),
842 inline .u64, .i64 => |x| std.math.order(x, 0),
843 .lazy_align => .gt, // alignment is never 0
844 .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsInner(
845 false,
846 strat.toLazy(),
847 zcu,
848 tid,
849 ) catch |err| switch (err) {
850 error.NeedLazy => unreachable,
851 else => |e| return e,
852 }) .gt else .eq,
853 },
854 .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroInner(strat, zcu, tid),
855 .float => |float| switch (float.storage) {
856 inline else => |x| std.math.order(x, 0),
857 },
858 .err => .gt, // error values cannot be 0
859 else => unreachable,
860 },
861 };
862}
863
864/// Asserts the value is comparable.
865pub fn order(lhs: Value, rhs: Value, zcu: *Zcu) std.math.Order {
866 return orderAdvanced(lhs, rhs, .normal, zcu, {}) catch unreachable;
867}
868
869/// Asserts the value is comparable.
870pub fn orderAdvanced(
871 lhs: Value,
872 rhs: Value,
873 comptime strat: ResolveStrat,
874 zcu: *Zcu,
875 tid: strat.Tid(),
876) !std.math.Order {
877 const lhs_against_zero = try lhs.orderAgainstZeroInner(strat, zcu, tid);
878 const rhs_against_zero = try rhs.orderAgainstZeroInner(strat, zcu, tid);
879 switch (lhs_against_zero) {
880 .lt => if (rhs_against_zero != .lt) return .lt,
881 .eq => return rhs_against_zero.invert(),
882 .gt => {},
883 }
884 switch (rhs_against_zero) {
885 .lt => if (lhs_against_zero != .lt) return .gt,
886 .eq => return lhs_against_zero,
887 .gt => {},
888 }
889
890 if (lhs.isFloat(zcu) or rhs.isFloat(zcu)) {
891 const lhs_f128 = lhs.toFloat(f128, zcu);
892 const rhs_f128 = rhs.toFloat(f128, zcu);
893 return std.math.order(lhs_f128, rhs_f128);
894 }
895
896 var lhs_bigint_space: BigIntSpace = undefined;
897 var rhs_bigint_space: BigIntSpace = undefined;
898 const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, strat, zcu, tid);
899 const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, strat, zcu, tid);
900 return lhs_bigint.order(rhs_bigint);
901}
902
903/// Asserts the value is comparable. Does not take a type parameter because it supports
904/// comparisons between heterogeneous types.
905pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu: *Zcu) bool {
906 return compareHeteroAdvanced(lhs, op, rhs, .normal, zcu, {}) catch unreachable;
907}
908
909pub fn compareHeteroSema(lhs: Value, op: std.math.CompareOperator, rhs: Value, pt: Zcu.PerThread) !bool {
910 return try compareHeteroAdvanced(lhs, op, rhs, .sema, pt.zcu, pt.tid);
911}
912
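/// Shared implementation of `compareHetero` and `compareHeteroSema`. Pointers to `Nav`s are
/// compared by identity rather than by runtime address.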
913pub fn compareHeteroAdvanced(
914 lhs: Value,
915 op: std.math.CompareOperator,
916 rhs: Value,
917 comptime strat: ResolveStrat,
918 zcu: *Zcu,
919 tid: strat.Tid(),
920) !bool {
921 if (lhs.pointerNav(zcu)) |lhs_nav| {
922 if (rhs.pointerNav(zcu)) |rhs_nav| {
923 switch (op) {
924 .eq => return lhs_nav == rhs_nav,
925 .neq => return lhs_nav != rhs_nav,
926 else => {},
927 }
928 } else {
929 switch (op) {
930 .eq => return false,
931 .neq => return true,
932 else => {},
933 }
934 }
935 } else if (rhs.pointerNav(zcu)) |_| {
936 switch (op) {
937 .eq => return false,
938 .neq => return true,
939 else => {},
940 }
941 }
942
943 if (lhs.isNan(zcu) or rhs.isNan(zcu)) return op == .neq;
944 return (try orderAdvanced(lhs, rhs, strat, zcu, tid)).compare(op);
945}
946
947/// Asserts the values are comparable. Both operands have type `ty`.
948/// For vectors, returns true if comparison is true for ALL elements.
949pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, pt: Zcu.PerThread) !bool {
950 const zcu = pt.zcu;
951 if (ty.zigTypeTag(zcu) == .vector) {
952 const scalar_ty = ty.scalarType(zcu);
953 for (0..ty.vectorLen(zcu)) |i| {
954 const lhs_elem = try lhs.elemValue(pt, i);
955 const rhs_elem = try rhs.elemValue(pt, i);
956 if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, zcu)) {
957 return false;
958 }
959 }
960 return true;
961 }
962 return compareScalar(lhs, op, rhs, ty, zcu);
963}
964
965/// Asserts the values are comparable. Both operands have type `ty`.
966pub fn compareScalar(
967 lhs: Value,
968 op: std.math.CompareOperator,
969 rhs: Value,
970 ty: Type,
971 zcu: *Zcu,
972) bool {
973 return switch (op) {
974 .eq => lhs.eql(rhs, ty, zcu),
975 .neq => !lhs.eql(rhs, ty, zcu),
976 else => compareHetero(lhs, op, rhs, zcu),
977 };
978}
979
980/// Asserts the value is comparable.
981/// For vectors, returns true if comparison is true for ALL elements.
982/// Returns `false` if the value or any vector element is undefined.
983///
984/// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)`
985pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, zcu: *Zcu) bool {
986 return compareAllWithZeroAdvancedExtra(lhs, op, .normal, zcu, {}) catch unreachable;
987}
988
989pub fn compareAllWithZeroSema(
990 lhs: Value,
991 op: std.math.CompareOperator,
992 pt: Zcu.PerThread,
993) Zcu.CompileError!bool {
994 return compareAllWithZeroAdvancedExtra(lhs, op, .sema, pt.zcu, pt.tid);
995}
996
997pub fn compareAllWithZeroAdvancedExtra(
998 lhs: Value,
999 op: std.math.CompareOperator,
1000 comptime strat: ResolveStrat,
1001 zcu: *Zcu,
1002 tid: strat.Tid(),
1003) Zcu.CompileError!bool {
1004 if (lhs.isInf(zcu)) {
1005 switch (op) {
1006 .neq => return true,
1007 .eq => return false,
1008 .gt, .gte => return !lhs.isNegativeInf(zcu),
1009 .lt, .lte => return lhs.isNegativeInf(zcu),
1010 }
1011 }
1012
1013 switch (zcu.intern_pool.indexToKey(lhs.toIntern())) {
1014 .float => |float| switch (float.storage) {
1015 inline else => |x| if (std.math.isNan(x)) return op == .neq,
1016 },
1017 .aggregate => |aggregate| return switch (aggregate.storage) {
1018 .bytes => |bytes| for (bytes.toSlice(lhs.typeOf(zcu).arrayLenIncludingSentinel(zcu), &zcu.intern_pool)) |byte| {
1019 if (!std.math.order(byte, 0).compare(op)) break false;
1020 } else true,
1021 .elems => |elems| for (elems) |elem| {
1022 if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, strat, zcu, tid)) break false;
1023 } else true,
1024 .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, strat, zcu, tid),
1025 },
1026 .undef => return false,
1027 else => {},
1028 }
1029 return (try orderAgainstZeroInner(lhs, strat, zcu, tid)).compare(op);
1030}
1031
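/// Asserts that both values have type `ty`. Values are interned, so equality reduces to
/// comparing `InternPool` indices.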
1032pub fn eql(a: Value, b: Value, ty: Type, zcu: *Zcu) bool {
1033 assert(zcu.intern_pool.typeOf(a.toIntern()) == ty.toIntern());
1034 assert(zcu.intern_pool.typeOf(b.toIntern()) == ty.toIntern());
1035 return a.toIntern() == b.toIntern();
1036}
1037
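/// Returns whether this value contains, directly or through any field, payload, or pointer base,
/// a reference to comptime-mutable memory.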
1038pub fn canMutateComptimeVarState(val: Value, zcu: *Zcu) bool {
1039 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1040 .error_union => |error_union| switch (error_union.val) {
1041 .err_name => false,
1042 .payload => |payload| Value.fromInterned(payload).canMutateComptimeVarState(zcu),
1043 },
1044 .ptr => |ptr| switch (ptr.base_addr) {
1045 .nav => false, // The value of a Nav can never reference a comptime alloc.
1046 .int => false,
1047 .comptime_alloc => true, // A comptime alloc is either mutable or references comptime-mutable memory.
1048 .comptime_field => true, // Comptime field pointers are comptime-mutable, albeit only to the "correct" value.
1049 .eu_payload, .opt_payload => |base| Value.fromInterned(base).canMutateComptimeVarState(zcu),
1050 .uav => |uav| Value.fromInterned(uav.val).canMutateComptimeVarState(zcu),
1051 .arr_elem, .field => |base_index| Value.fromInterned(base_index.base).canMutateComptimeVarState(zcu),
1052 },
1053 .slice => |slice| return Value.fromInterned(slice.ptr).canMutateComptimeVarState(zcu),
1054 .opt => |opt| switch (opt.val) {
1055 .none => false,
1056 else => |payload| Value.fromInterned(payload).canMutateComptimeVarState(zcu),
1057 },
1058 .aggregate => |aggregate| for (aggregate.storage.values()) |elem| {
1059 if (Value.fromInterned(elem).canMutateComptimeVarState(zcu)) break true;
1060 } else false,
1061 .un => |un| Value.fromInterned(un.val).canMutateComptimeVarState(zcu),
1062 else => false,
1063 };
1064}
1065
1066/// Gets the `Nav` referenced by this pointer. If the pointer does not point
1067/// to a `Nav`, or if it points to some part of one (like a field or element),
1068/// returns null.
1069pub fn pointerNav(val: Value, zcu: *Zcu) ?InternPool.Nav.Index {
1070 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1071 // TODO: these 3 cases are weird; these aren't pointer values!
1072 .variable => |v| v.owner_nav,
1073 .@"extern" => |e| e.owner_nav,
1074 .func => |func| func.owner_nav,
1075 .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
1076 .nav => |nav| nav,
1077 else => null,
1078 } else null,
1079 else => null,
1080 };
1081}
1082
1083pub const slice_ptr_index = 0;
1084pub const slice_len_index = 1;
1085
1086pub fn slicePtr(val: Value, zcu: *Zcu) Value {
1087 return Value.fromInterned(zcu.intern_pool.slicePtr(val.toIntern()));
1088}
1089
1090/// Gets the `len` field of a slice value as a `u64`.
1091/// Resolves the length using `Sema` if necessary.
1092pub fn sliceLen(val: Value, pt: Zcu.PerThread) !u64 {
1093 return Value.fromInterned(pt.zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(pt);
1094}
1095
1096/// Asserts the value is an aggregate, and returns the element value at the given index.
1097pub fn elemValue(val: Value, pt: Zcu.PerThread, index: usize) Allocator.Error!Value {
1098 const zcu = pt.zcu;
1099 const ip = &zcu.intern_pool;
1100 switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1101 .undef => |ty| {
1102 return Value.fromInterned(try pt.intern(.{ .undef = Type.fromInterned(ty).childType(zcu).toIntern() }));
1103 },
1104 .aggregate => |aggregate| {
1105 const len = ip.aggregateTypeLen(aggregate.ty);
1106 if (index < len) return Value.fromInterned(switch (aggregate.storage) {
1107 .bytes => |bytes| try pt.intern(.{ .int = .{
1108 .ty = .u8_type,
1109 .storage = .{ .u64 = bytes.at(index, ip) },
1110 } }),
1111 .elems => |elems| elems[index],
1112 .repeated_elem => |elem| elem,
1113 });
1114 assert(index == len);
1115 return Type.fromInterned(aggregate.ty).sentinel(zcu).?;
1116 },
1117 else => unreachable,
1118 }
1119}
1120
1121pub fn isLazyAlign(val: Value, zcu: *Zcu) bool {
1122 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1123 .int => |int| int.storage == .lazy_align,
1124 else => false,
1125 };
1126}
1127
1128pub fn isLazySize(val: Value, zcu: *Zcu) bool {
1129 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1130 .int => |int| int.storage == .lazy_size,
1131 else => false,
1132 };
1133}
1134
1135/// Asserts that the provided start/end are in-bounds.
1136pub fn sliceArray(
1137 val: Value,
1138 sema: *Sema,
1139 start: usize,
1140 end: usize,
1141) error{OutOfMemory}!Value {
1142 const pt = sema.pt;
1143 const ip = &pt.zcu.intern_pool;
1144 return Value.fromInterned(try pt.intern(.{
1145 .aggregate = .{
1146 .ty = switch (pt.zcu.intern_pool.indexToKey(pt.zcu.intern_pool.typeOf(val.toIntern()))) {
1147 .array_type => |array_type| try pt.arrayType(.{
1148 .len = @intCast(end - start),
1149 .child = array_type.child,
1150 .sentinel = if (end == array_type.len) array_type.sentinel else .none,
1151 }),
1152 .vector_type => |vector_type| try pt.vectorType(.{
1153 .len = @intCast(end - start),
1154 .child = vector_type.child,
1155 }),
1156 else => unreachable,
1157 }.toIntern(),
1158 .storage = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
1159 .bytes => |bytes| storage: {
1160 try ip.string_bytes.ensureUnusedCapacity(sema.gpa, end - start + 1);
1161 break :storage .{ .bytes = try ip.getOrPutString(
1162 sema.gpa,
1163 bytes.toSlice(end, ip)[start..],
1164 .maybe_embedded_nulls,
1165 ) };
1166 },
1167 // TODO: write something like getCoercedInts to avoid needing to dupe
1168 .elems => |elems| .{ .elems = try sema.arena.dupe(InternPool.Index, elems[start..end]) },
1169 .repeated_elem => |elem| .{ .repeated_elem = elem },
1170 },
1171 },
1172 }));
1173}
1174
1175pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value {
1176 const zcu = pt.zcu;
1177 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1178 .undef => |ty| Value.fromInterned(try pt.intern(.{
1179 .undef = Type.fromInterned(ty).fieldType(index, zcu).toIntern(),
1180 })),
1181 .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) {
1182 .bytes => |bytes| try pt.intern(.{ .int = .{
1183 .ty = .u8_type,
1184 .storage = .{ .u64 = bytes.at(index, &zcu.intern_pool) },
1185 } }),
1186 .elems => |elems| elems[index],
1187 .repeated_elem => |elem| elem,
1188 }),
1189 // TODO assert the tag is correct
1190 .un => |un| Value.fromInterned(un.val),
1191 else => unreachable,
1192 };
1193}
1194
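/// Returns the tag of a union value, or null if the union value carries no tag.
/// `undef` and enum tag values are returned unchanged.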
1195pub fn unionTag(val: Value, zcu: *Zcu) ?Value {
1196 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1197 .undef, .enum_tag => val,
1198 .un => |un| if (un.tag != .none) Value.fromInterned(un.tag) else return null,
1199 else => unreachable,
1200 };
1201}
1202
1203pub fn unionValue(val: Value, zcu: *Zcu) Value {
1204 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1205 .un => |un| Value.fromInterned(un.val),
1206 else => unreachable,
1207 };
1208}
1209
1210pub fn isUndef(val: Value, zcu: *const Zcu) bool {
1211 return zcu.intern_pool.isUndef(val.toIntern());
1212}
1213
1214/// `val` must have a numeric or vector type.
1215/// Returns whether `val` is undefined or contains any undefined elements.
1218pub fn anyScalarIsUndef(val: Value, zcu: *const Zcu) bool {
1219 switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1220 .undef => return true,
1221 .int, .float => return false,
1222 .aggregate => |agg| {
1223 assert(Type.fromInterned(agg.ty).zigTypeTag(zcu) == .vector);
1224 for (agg.storage.values()) |elem_val| {
1225 if (Value.fromInterned(elem_val).isUndef(zcu)) return true;
1226 }
1227 return false;
1228 },
1229 else => unreachable,
1230 }
1231}
1232
1233/// `val` must have a numeric or vector type.
1234/// Returns whether `val` contains any elements equal to zero.
1235/// Asserts that `val` is not `undefined`, nor a vector containing any `undefined` elements.
1236pub fn anyScalarIsZero(val: Value, zcu: *Zcu) bool {
1237 assert(!val.anyScalarIsUndef(zcu));
1238
1239 switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1240 .int, .float => return val.eqlScalarNum(.zero_comptime_int, zcu),
1241 .aggregate => |agg| {
1242 assert(Type.fromInterned(agg.ty).zigTypeTag(zcu) == .vector);
1243 switch (agg.storage) {
1244 .bytes => |str| {
1245 const len = Type.fromInterned(agg.ty).vectorLen(zcu);
1246 const slice = str.toSlice(len, &zcu.intern_pool);
1247 return std.mem.indexOfScalar(u8, slice, 0) != null;
1248 },
1249 .elems => |elems| {
1250 for (elems) |elem| {
1251 if (Value.fromInterned(elem).eqlScalarNum(.zero_comptime_int, zcu)) return true;
1252 }
1253 return false;
1254 },
1255 .repeated_elem => |elem| return Value.fromInterned(elem).eqlScalarNum(.zero_comptime_int, zcu),
1256 }
1257 },
1258 else => unreachable,
1259 }
1260}
1261
1262/// Asserts the value is not undefined and not unreachable.
1263/// C pointers with an integer value of 0 are also considered null.
1264pub fn isNull(val: Value, zcu: *Zcu) bool {
1265 return switch (val.toIntern()) {
1266 .undef => unreachable,
1267 .unreachable_value => unreachable,
1268 .null_value => true,
1269 else => return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1270 .undef => unreachable,
1271 .ptr => |ptr| switch (ptr.base_addr) {
1272 .int => ptr.byte_offset == 0,
1273 else => false,
1274 },
1275 .opt => |opt| opt.val == .none,
1276 else => false,
1277 },
1278 };
1279}
1280
1281/// Valid only for error (union) types. Asserts the value is not undefined and not unreachable.
1282pub fn getErrorName(val: Value, zcu: *const Zcu) InternPool.OptionalNullTerminatedString {
1283 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1284 .err => |err| err.name.toOptional(),
1285 .error_union => |error_union| switch (error_union.val) {
1286 .err_name => |err_name| err_name.toOptional(),
1287 .payload => .none,
1288 },
1289 else => unreachable,
1290 };
1291}
1292
1293pub fn getErrorInt(val: Value, zcu: *Zcu) Zcu.ErrorInt {
1294 return if (getErrorName(val, zcu).unwrap()) |err_name|
1295 zcu.intern_pool.getErrorValueIfExists(err_name).?
1296 else
1297 0;
1298}
1299
1300/// Assumes the type is an error union. Returns true if and only if the value is
1301/// the error union payload, not an error.
1302pub fn errorUnionIsPayload(val: Value, zcu: *const Zcu) bool {
1303 return zcu.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload;
1304}
1305
1306/// Value of the optional, null if optional has no payload.
1307pub fn optionalValue(val: Value, zcu: *const Zcu) ?Value {
1308 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1309 .opt => |opt| switch (opt.val) {
1310 .none => null,
1311 else => |payload| Value.fromInterned(payload),
1312 },
1313 .ptr => val,
1314 else => unreachable,
1315 };
1316}
1317
1318/// Valid for all types. Asserts the value is not undefined.
1319pub fn isFloat(self: Value, zcu: *const Zcu) bool {
1320 return switch (self.toIntern()) {
1321 .undef => unreachable,
1322 else => switch (zcu.intern_pool.indexToKey(self.toIntern())) {
1323 .undef => unreachable,
1324 .float => true,
1325 else => false,
1326 },
1327 };
1328}
1329
1330pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, pt: Zcu.PerThread) !Value {
1331 return floatFromIntAdvanced(val, arena, int_ty, float_ty, pt, .normal) catch |err| switch (err) {
1332 error.OutOfMemory => return error.OutOfMemory,
1333 else => unreachable,
1334 };
1335}
1336
1337pub fn floatFromIntAdvanced(
1338 val: Value,
1339 arena: Allocator,
1340 int_ty: Type,
1341 float_ty: Type,
1342 pt: Zcu.PerThread,
1343 comptime strat: ResolveStrat,
1344) !Value {
1345 const zcu = pt.zcu;
1346 if (int_ty.zigTypeTag(zcu) == .vector) {
1347 const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(zcu));
1348 const scalar_ty = float_ty.scalarType(zcu);
1349 for (result_data, 0..) |*scalar, i| {
1350 const elem_val = try val.elemValue(pt, i);
1351 scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, pt, strat)).toIntern();
1352 }
1353 return pt.aggregateValue(float_ty, result_data);
1354 }
1355 return floatFromIntScalar(val, float_ty, pt, strat);
1356}
1357
1358pub fn floatFromIntScalar(val: Value, float_ty: Type, pt: Zcu.PerThread, comptime strat: ResolveStrat) !Value {
1359 return switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
1360 .undef => try pt.undefValue(float_ty),
1361 .int => |int| switch (int.storage) {
1362 .big_int => |big_int| pt.floatValue(float_ty, big_int.toFloat(f128, .nearest_even)[0]),
1363 inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, pt),
1364 .lazy_align => |ty| floatFromIntInner((try Type.fromInterned(ty).abiAlignmentInner(strat.toLazy(), pt.zcu, pt.tid)).scalar.toByteUnits() orelse 0, float_ty, pt),
1365 .lazy_size => |ty| floatFromIntInner((try Type.fromInterned(ty).abiSizeInner(strat.toLazy(), pt.zcu, pt.tid)).scalar, float_ty, pt),
1366 },
1367 else => unreachable,
1368 };
1369}
1370
1371fn floatFromIntInner(x: anytype, dest_ty: Type, pt: Zcu.PerThread) !Value {
1372 const target = pt.zcu.getTarget();
1373 const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) {
1374 16 => .{ .f16 = @floatFromInt(x) },
1375 32 => .{ .f32 = @floatFromInt(x) },
1376 64 => .{ .f64 = @floatFromInt(x) },
1377 80 => .{ .f80 = @floatFromInt(x) },
1378 128 => .{ .f128 = @floatFromInt(x) },
1379 else => unreachable,
1380 };
1381 return Value.fromInterned(try pt.intern(.{ .float = .{
1382 .ty = dest_ty.toIntern(),
1383 .storage = storage,
1384 } }));
1385}
1386
1387fn calcLimbLenFloat(scalar: anytype) usize {
1388 if (scalar == 0) {
1389 return 1;
1390 }
1391
1392 const w_value = @abs(scalar);
1393 return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).int.bits) + 1;
1394}
1395
1396pub const OverflowArithmeticResult = struct {
1397 overflow_bit: Value,
1398 wrapped_result: Value,
1399};
1400
1401/// Supports both floats and ints; handles undefined.
1402pub fn numberMax(lhs: Value, rhs: Value, zcu: *Zcu) Value {
1403 if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return undef;
1404 if (lhs.isNan(zcu)) return rhs;
1405 if (rhs.isNan(zcu)) return lhs;
1406
1407 return switch (order(lhs, rhs, zcu)) {
1408 .lt => rhs,
1409 .gt, .eq => lhs,
1410 };
1411}
1412
1413/// Supports both floats and ints; handles undefined.
1414pub fn numberMin(lhs: Value, rhs: Value, zcu: *Zcu) Value {
1415 if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return undef;
1416 if (lhs.isNan(zcu)) return rhs;
1417 if (rhs.isNan(zcu)) return lhs;
1418
1419 return switch (order(lhs, rhs, zcu)) {
1420 .lt => lhs,
1421 .gt, .eq => rhs,
1422 };
1423}
1424
1425/// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
1426pub fn isNan(val: Value, zcu: *const Zcu) bool {
1427 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1428 .float => |float| switch (float.storage) {
1429 inline else => |x| std.math.isNan(x),
1430 },
1431 else => false,
1432 };
1433}
1434
1435/// Returns true if the value is a floating point type and is infinite. Returns false otherwise.
1436pub fn isInf(val: Value, zcu: *const Zcu) bool {
1437 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1438 .float => |float| switch (float.storage) {
1439 inline else => |x| std.math.isInf(x),
1440 },
1441 else => false,
1442 };
1443}
1444
1445/// Returns true if the value is a floating point type and is negative infinity. Returns false otherwise.
1446pub fn isNegativeInf(val: Value, zcu: *const Zcu) bool {
1447 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
1448 .float => |float| switch (float.storage) {
1449 inline else => |x| std.math.isNegativeInf(x),
1450 },
1451 else => false,
1452 };
1453}
1454
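/// Computes `@sqrt` of a float value; vectors are handled elementwise via `sqrtScalar`.
/// `sin`, `cos`, `tan`, and `exp` below follow the same pattern.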
1455pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1456 if (float_type.zigTypeTag(pt.zcu) == .vector) {
1457 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(pt.zcu));
1458 const scalar_ty = float_type.scalarType(pt.zcu);
1459 for (result_data, 0..) |*scalar, i| {
1460 const elem_val = try val.elemValue(pt, i);
1461 scalar.* = (try sqrtScalar(elem_val, scalar_ty, pt)).toIntern();
1462 }
1463 return pt.aggregateValue(float_type, result_data);
1464 }
1465 return sqrtScalar(val, float_type, pt);
1466}
1467
1468pub fn sqrtScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1469 const zcu = pt.zcu;
1470 const target = zcu.getTarget();
1471 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1472 16 => .{ .f16 = @sqrt(val.toFloat(f16, zcu)) },
1473 32 => .{ .f32 = @sqrt(val.toFloat(f32, zcu)) },
1474 64 => .{ .f64 = @sqrt(val.toFloat(f64, zcu)) },
1475 80 => .{ .f80 = @sqrt(val.toFloat(f80, zcu)) },
1476 128 => .{ .f128 = @sqrt(val.toFloat(f128, zcu)) },
1477 else => unreachable,
1478 };
1479 return Value.fromInterned(try pt.intern(.{ .float = .{
1480 .ty = float_type.toIntern(),
1481 .storage = storage,
1482 } }));
1483}
1484
1485pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1486 const zcu = pt.zcu;
1487 if (float_type.zigTypeTag(zcu) == .vector) {
1488 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1489 const scalar_ty = float_type.scalarType(zcu);
1490 for (result_data, 0..) |*scalar, i| {
1491 const elem_val = try val.elemValue(pt, i);
1492 scalar.* = (try sinScalar(elem_val, scalar_ty, pt)).toIntern();
1493 }
1494 return pt.aggregateValue(float_type, result_data);
1495 }
1496 return sinScalar(val, float_type, pt);
1497}
1498
1499pub fn sinScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1500 const zcu = pt.zcu;
1501 const target = zcu.getTarget();
1502 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1503 16 => .{ .f16 = @sin(val.toFloat(f16, zcu)) },
1504 32 => .{ .f32 = @sin(val.toFloat(f32, zcu)) },
1505 64 => .{ .f64 = @sin(val.toFloat(f64, zcu)) },
1506 80 => .{ .f80 = @sin(val.toFloat(f80, zcu)) },
1507 128 => .{ .f128 = @sin(val.toFloat(f128, zcu)) },
1508 else => unreachable,
1509 };
1510 return Value.fromInterned(try pt.intern(.{ .float = .{
1511 .ty = float_type.toIntern(),
1512 .storage = storage,
1513 } }));
1514}
1515
1516pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1517 const zcu = pt.zcu;
1518 if (float_type.zigTypeTag(zcu) == .vector) {
1519 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1520 const scalar_ty = float_type.scalarType(zcu);
1521 for (result_data, 0..) |*scalar, i| {
1522 const elem_val = try val.elemValue(pt, i);
1523 scalar.* = (try cosScalar(elem_val, scalar_ty, pt)).toIntern();
1524 }
1525 return pt.aggregateValue(float_type, result_data);
1526 }
1527 return cosScalar(val, float_type, pt);
1528}
1529
1530pub fn cosScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1531 const zcu = pt.zcu;
1532 const target = zcu.getTarget();
1533 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1534 16 => .{ .f16 = @cos(val.toFloat(f16, zcu)) },
1535 32 => .{ .f32 = @cos(val.toFloat(f32, zcu)) },
1536 64 => .{ .f64 = @cos(val.toFloat(f64, zcu)) },
1537 80 => .{ .f80 = @cos(val.toFloat(f80, zcu)) },
1538 128 => .{ .f128 = @cos(val.toFloat(f128, zcu)) },
1539 else => unreachable,
1540 };
1541 return Value.fromInterned(try pt.intern(.{ .float = .{
1542 .ty = float_type.toIntern(),
1543 .storage = storage,
1544 } }));
1545}
1546
1547pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1548 const zcu = pt.zcu;
1549 if (float_type.zigTypeTag(zcu) == .vector) {
1550 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1551 const scalar_ty = float_type.scalarType(zcu);
1552 for (result_data, 0..) |*scalar, i| {
1553 const elem_val = try val.elemValue(pt, i);
1554 scalar.* = (try tanScalar(elem_val, scalar_ty, pt)).toIntern();
1555 }
1556 return pt.aggregateValue(float_type, result_data);
1557 }
1558 return tanScalar(val, float_type, pt);
1559}
1560
1561pub fn tanScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1562 const zcu = pt.zcu;
1563 const target = zcu.getTarget();
1564 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1565 16 => .{ .f16 = @tan(val.toFloat(f16, zcu)) },
1566 32 => .{ .f32 = @tan(val.toFloat(f32, zcu)) },
1567 64 => .{ .f64 = @tan(val.toFloat(f64, zcu)) },
1568 80 => .{ .f80 = @tan(val.toFloat(f80, zcu)) },
1569 128 => .{ .f128 = @tan(val.toFloat(f128, zcu)) },
1570 else => unreachable,
1571 };
1572 return Value.fromInterned(try pt.intern(.{ .float = .{
1573 .ty = float_type.toIntern(),
1574 .storage = storage,
1575 } }));
1576}
1577
1578pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1579 const zcu = pt.zcu;
1580 if (float_type.zigTypeTag(zcu) == .vector) {
1581 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1582 const scalar_ty = float_type.scalarType(zcu);
1583 for (result_data, 0..) |*scalar, i| {
1584 const elem_val = try val.elemValue(pt, i);
1585 scalar.* = (try expScalar(elem_val, scalar_ty, pt)).toIntern();
1586 }
1587 return pt.aggregateValue(float_type, result_data);
1588 }
1589 return expScalar(val, float_type, pt);
1590}
1591
1592pub fn expScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1593 const zcu = pt.zcu;
1594 const target = zcu.getTarget();
1595 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1596 16 => .{ .f16 = @exp(val.toFloat(f16, zcu)) },
1597 32 => .{ .f32 = @exp(val.toFloat(f32, zcu)) },
1598 64 => .{ .f64 = @exp(val.toFloat(f64, zcu)) },
1599 80 => .{ .f80 = @exp(val.toFloat(f80, zcu)) },
1600 128 => .{ .f128 = @exp(val.toFloat(f128, zcu)) },
1601 else => unreachable,
1602 };
1603 return Value.fromInterned(try pt.intern(.{ .float = .{
1604 .ty = float_type.toIntern(),
1605 .storage = storage,
1606 } }));
1607}
1608
1609pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1610 const zcu = pt.zcu;
1611 if (float_type.zigTypeTag(zcu) == .vector) {
1612 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1613 const scalar_ty = float_type.scalarType(zcu);
1614 for (result_data, 0..) |*scalar, i| {
1615 const elem_val = try val.elemValue(pt, i);
1616 scalar.* = (try exp2Scalar(elem_val, scalar_ty, pt)).toIntern();
1617 }
1618 return pt.aggregateValue(float_type, result_data);
1619 }
1620 return exp2Scalar(val, float_type, pt);
1621}
1622
1623pub fn exp2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1624 const zcu = pt.zcu;
1625 const target = zcu.getTarget();
1626 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1627 16 => .{ .f16 = @exp2(val.toFloat(f16, zcu)) },
1628 32 => .{ .f32 = @exp2(val.toFloat(f32, zcu)) },
1629 64 => .{ .f64 = @exp2(val.toFloat(f64, zcu)) },
1630 80 => .{ .f80 = @exp2(val.toFloat(f80, zcu)) },
1631 128 => .{ .f128 = @exp2(val.toFloat(f128, zcu)) },
1632 else => unreachable,
1633 };
1634 return Value.fromInterned(try pt.intern(.{ .float = .{
1635 .ty = float_type.toIntern(),
1636 .storage = storage,
1637 } }));
1638}
1639
1640pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1641 const zcu = pt.zcu;
1642 if (float_type.zigTypeTag(zcu) == .vector) {
1643 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1644 const scalar_ty = float_type.scalarType(zcu);
1645 for (result_data, 0..) |*scalar, i| {
1646 const elem_val = try val.elemValue(pt, i);
1647 scalar.* = (try logScalar(elem_val, scalar_ty, pt)).toIntern();
1648 }
1649 return pt.aggregateValue(float_type, result_data);
1650 }
1651 return logScalar(val, float_type, pt);
1652}
1653
1654pub fn logScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1655 const zcu = pt.zcu;
1656 const target = zcu.getTarget();
1657 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1658 16 => .{ .f16 = @log(val.toFloat(f16, zcu)) },
1659 32 => .{ .f32 = @log(val.toFloat(f32, zcu)) },
1660 64 => .{ .f64 = @log(val.toFloat(f64, zcu)) },
1661 80 => .{ .f80 = @log(val.toFloat(f80, zcu)) },
1662 128 => .{ .f128 = @log(val.toFloat(f128, zcu)) },
1663 else => unreachable,
1664 };
1665 return Value.fromInterned(try pt.intern(.{ .float = .{
1666 .ty = float_type.toIntern(),
1667 .storage = storage,
1668 } }));
1669}
1670
1671pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1672 const zcu = pt.zcu;
1673 if (float_type.zigTypeTag(zcu) == .vector) {
1674 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1675 const scalar_ty = float_type.scalarType(zcu);
1676 for (result_data, 0..) |*scalar, i| {
1677 const elem_val = try val.elemValue(pt, i);
1678 scalar.* = (try log2Scalar(elem_val, scalar_ty, pt)).toIntern();
1679 }
1680 return pt.aggregateValue(float_type, result_data);
1681 }
1682 return log2Scalar(val, float_type, pt);
1683}
1684
1685pub fn log2Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1686 const zcu = pt.zcu;
1687 const target = zcu.getTarget();
1688 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1689 16 => .{ .f16 = @log2(val.toFloat(f16, zcu)) },
1690 32 => .{ .f32 = @log2(val.toFloat(f32, zcu)) },
1691 64 => .{ .f64 = @log2(val.toFloat(f64, zcu)) },
1692 80 => .{ .f80 = @log2(val.toFloat(f80, zcu)) },
1693 128 => .{ .f128 = @log2(val.toFloat(f128, zcu)) },
1694 else => unreachable,
1695 };
1696 return Value.fromInterned(try pt.intern(.{ .float = .{
1697 .ty = float_type.toIntern(),
1698 .storage = storage,
1699 } }));
1700}
1701
1702pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1703 const zcu = pt.zcu;
1704 if (float_type.zigTypeTag(zcu) == .vector) {
1705 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1706 const scalar_ty = float_type.scalarType(zcu);
1707 for (result_data, 0..) |*scalar, i| {
1708 const elem_val = try val.elemValue(pt, i);
1709 scalar.* = (try log10Scalar(elem_val, scalar_ty, pt)).toIntern();
1710 }
1711 return pt.aggregateValue(float_type, result_data);
1712 }
1713 return log10Scalar(val, float_type, pt);
1714}
1715
1716pub fn log10Scalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1717 const zcu = pt.zcu;
1718 const target = zcu.getTarget();
1719 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1720 16 => .{ .f16 = @log10(val.toFloat(f16, zcu)) },
1721 32 => .{ .f32 = @log10(val.toFloat(f32, zcu)) },
1722 64 => .{ .f64 = @log10(val.toFloat(f64, zcu)) },
1723 80 => .{ .f80 = @log10(val.toFloat(f80, zcu)) },
1724 128 => .{ .f128 = @log10(val.toFloat(f128, zcu)) },
1725 else => unreachable,
1726 };
1727 return Value.fromInterned(try pt.intern(.{ .float = .{
1728 .ty = float_type.toIntern(),
1729 .storage = storage,
1730 } }));
1731}
1732
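/// Comptime evaluation of `@abs`. Unlike the float-only helpers above, the operand may be an
/// integer, `comptime_int`, or float (or a vector thereof); see `absScalar` for the per-type
/// handling, including the conversion of fixed-width signed integers to their unsigned type.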
1733pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1734 const zcu = pt.zcu;
1735 if (ty.zigTypeTag(zcu) == .vector) {
1736 const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(zcu));
1737 const scalar_ty = ty.scalarType(zcu);
1738 for (result_data, 0..) |*scalar, i| {
1739 const elem_val = try val.elemValue(pt, i);
1740 scalar.* = (try absScalar(elem_val, scalar_ty, pt, arena)).toIntern();
1741 }
1742 return pt.aggregateValue(ty, result_data);
1743 }
1744 return absScalar(val, ty, pt, arena);
1745}
1746
1747pub fn absScalar(val: Value, ty: Type, pt: Zcu.PerThread, arena: Allocator) Allocator.Error!Value {
1748 const zcu = pt.zcu;
1749 switch (ty.zigTypeTag(zcu)) {
1750 .int => {
1751 var buffer: Value.BigIntSpace = undefined;
1752 var operand_bigint = try val.toBigInt(&buffer, zcu).toManaged(arena);
1753 operand_bigint.abs();
1754
1755 return pt.intValue_big(try ty.toUnsigned(pt), operand_bigint.toConst());
1756 },
1757 .comptime_int => {
1758 var buffer: Value.BigIntSpace = undefined;
1759 var operand_bigint = try val.toBigInt(&buffer, zcu).toManaged(arena);
1760 operand_bigint.abs();
1761
1762 return pt.intValue_big(ty, operand_bigint.toConst());
1763 },
1764 .comptime_float, .float => {
1765 const target = zcu.getTarget();
1766 const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(target)) {
1767 16 => .{ .f16 = @abs(val.toFloat(f16, zcu)) },
1768 32 => .{ .f32 = @abs(val.toFloat(f32, zcu)) },
1769 64 => .{ .f64 = @abs(val.toFloat(f64, zcu)) },
1770 80 => .{ .f80 = @abs(val.toFloat(f80, zcu)) },
1771 128 => .{ .f128 = @abs(val.toFloat(f128, zcu)) },
1772 else => unreachable,
1773 };
1774 return Value.fromInterned(try pt.intern(.{ .float = .{
1775 .ty = ty.toIntern(),
1776 .storage = storage,
1777 } }));
1778 },
1779 else => unreachable,
1780 }
1781}
1782
1783pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1784 const zcu = pt.zcu;
1785 if (float_type.zigTypeTag(zcu) == .vector) {
1786 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1787 const scalar_ty = float_type.scalarType(zcu);
1788 for (result_data, 0..) |*scalar, i| {
1789 const elem_val = try val.elemValue(pt, i);
1790 scalar.* = (try floorScalar(elem_val, scalar_ty, pt)).toIntern();
1791 }
1792 return pt.aggregateValue(float_type, result_data);
1793 }
1794 return floorScalar(val, float_type, pt);
1795}
1796
1797pub fn floorScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1798 const zcu = pt.zcu;
1799 const target = zcu.getTarget();
1800 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1801 16 => .{ .f16 = @floor(val.toFloat(f16, zcu)) },
1802 32 => .{ .f32 = @floor(val.toFloat(f32, zcu)) },
1803 64 => .{ .f64 = @floor(val.toFloat(f64, zcu)) },
1804 80 => .{ .f80 = @floor(val.toFloat(f80, zcu)) },
1805 128 => .{ .f128 = @floor(val.toFloat(f128, zcu)) },
1806 else => unreachable,
1807 };
1808 return Value.fromInterned(try pt.intern(.{ .float = .{
1809 .ty = float_type.toIntern(),
1810 .storage = storage,
1811 } }));
1812}
1813
1814pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1815 const zcu = pt.zcu;
1816 if (float_type.zigTypeTag(zcu) == .vector) {
1817 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1818 const scalar_ty = float_type.scalarType(zcu);
1819 for (result_data, 0..) |*scalar, i| {
1820 const elem_val = try val.elemValue(pt, i);
1821 scalar.* = (try ceilScalar(elem_val, scalar_ty, pt)).toIntern();
1822 }
1823 return pt.aggregateValue(float_type, result_data);
1824 }
1825 return ceilScalar(val, float_type, pt);
1826}
1827
1828pub fn ceilScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1829 const zcu = pt.zcu;
1830 const target = zcu.getTarget();
1831 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1832 16 => .{ .f16 = @ceil(val.toFloat(f16, zcu)) },
1833 32 => .{ .f32 = @ceil(val.toFloat(f32, zcu)) },
1834 64 => .{ .f64 = @ceil(val.toFloat(f64, zcu)) },
1835 80 => .{ .f80 = @ceil(val.toFloat(f80, zcu)) },
1836 128 => .{ .f128 = @ceil(val.toFloat(f128, zcu)) },
1837 else => unreachable,
1838 };
1839 return Value.fromInterned(try pt.intern(.{ .float = .{
1840 .ty = float_type.toIntern(),
1841 .storage = storage,
1842 } }));
1843}
1844
1845pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1846 const zcu = pt.zcu;
1847 if (float_type.zigTypeTag(zcu) == .vector) {
1848 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1849 const scalar_ty = float_type.scalarType(zcu);
1850 for (result_data, 0..) |*scalar, i| {
1851 const elem_val = try val.elemValue(pt, i);
1852 scalar.* = (try roundScalar(elem_val, scalar_ty, pt)).toIntern();
1853 }
1854 return pt.aggregateValue(float_type, result_data);
1855 }
1856 return roundScalar(val, float_type, pt);
1857}
1858
1859pub fn roundScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1860 const zcu = pt.zcu;
1861 const target = zcu.getTarget();
1862 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1863 16 => .{ .f16 = @round(val.toFloat(f16, zcu)) },
1864 32 => .{ .f32 = @round(val.toFloat(f32, zcu)) },
1865 64 => .{ .f64 = @round(val.toFloat(f64, zcu)) },
1866 80 => .{ .f80 = @round(val.toFloat(f80, zcu)) },
1867 128 => .{ .f128 = @round(val.toFloat(f128, zcu)) },
1868 else => unreachable,
1869 };
1870 return Value.fromInterned(try pt.intern(.{ .float = .{
1871 .ty = float_type.toIntern(),
1872 .storage = storage,
1873 } }));
1874}
1875
1876pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
1877 const zcu = pt.zcu;
1878 if (float_type.zigTypeTag(zcu) == .vector) {
1879 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1880 const scalar_ty = float_type.scalarType(zcu);
1881 for (result_data, 0..) |*scalar, i| {
1882 const elem_val = try val.elemValue(pt, i);
1883 scalar.* = (try truncScalar(elem_val, scalar_ty, pt)).toIntern();
1884 }
1885 return pt.aggregateValue(float_type, result_data);
1886 }
1887 return truncScalar(val, float_type, pt);
1888}
1889
1890pub fn truncScalar(val: Value, float_type: Type, pt: Zcu.PerThread) Allocator.Error!Value {
1891 const zcu = pt.zcu;
1892 const target = zcu.getTarget();
1893 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1894 16 => .{ .f16 = @trunc(val.toFloat(f16, zcu)) },
1895 32 => .{ .f32 = @trunc(val.toFloat(f32, zcu)) },
1896 64 => .{ .f64 = @trunc(val.toFloat(f64, zcu)) },
1897 80 => .{ .f80 = @trunc(val.toFloat(f80, zcu)) },
1898 128 => .{ .f128 = @trunc(val.toFloat(f128, zcu)) },
1899 else => unreachable,
1900 };
1901 return Value.fromInterned(try pt.intern(.{ .float = .{
1902 .ty = float_type.toIntern(),
1903 .storage = storage,
1904 } }));
1905}
1906
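/// Comptime evaluation of `@mulAdd` (fused multiply-add: `mulend1 * mulend2 + addend` with a
/// single rounding step). Vector operands are handled elementwise like the operations above.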
1907pub fn mulAdd(
1908 float_type: Type,
1909 mulend1: Value,
1910 mulend2: Value,
1911 addend: Value,
1912 arena: Allocator,
1913 pt: Zcu.PerThread,
1914) !Value {
1915 const zcu = pt.zcu;
1916 if (float_type.zigTypeTag(zcu) == .vector) {
1917 const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(zcu));
1918 const scalar_ty = float_type.scalarType(zcu);
1919 for (result_data, 0..) |*scalar, i| {
1920 const mulend1_elem = try mulend1.elemValue(pt, i);
1921 const mulend2_elem = try mulend2.elemValue(pt, i);
1922 const addend_elem = try addend.elemValue(pt, i);
1923 scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, pt)).toIntern();
1924 }
1925 return pt.aggregateValue(float_type, result_data);
1926 }
1927 return mulAddScalar(float_type, mulend1, mulend2, addend, pt);
1928}
1929
1930pub fn mulAddScalar(
1931 float_type: Type,
1932 mulend1: Value,
1933 mulend2: Value,
1934 addend: Value,
1935 pt: Zcu.PerThread,
1936) Allocator.Error!Value {
1937 const zcu = pt.zcu;
1938 const target = zcu.getTarget();
1939 const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
1940 16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, zcu), mulend2.toFloat(f16, zcu), addend.toFloat(f16, zcu)) },
1941 32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, zcu), mulend2.toFloat(f32, zcu), addend.toFloat(f32, zcu)) },
1942 64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, zcu), mulend2.toFloat(f64, zcu), addend.toFloat(f64, zcu)) },
1943 80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, zcu), mulend2.toFloat(f80, zcu), addend.toFloat(f80, zcu)) },
1944 128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, zcu), mulend2.toFloat(f128, zcu), addend.toFloat(f128, zcu)) },
1945 else => unreachable,
1946 };
1947 return Value.fromInterned(try pt.intern(.{ .float = .{
1948 .ty = float_type.toIntern(),
1949 .storage = storage,
1950 } }));
1951}
1952
/// If the value is represented in memory as a series of bytes that all
/// have the same value, return that byte value, otherwise null.
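/// Backends can use this, for example, to decide whether a large constant can be lowered as a
/// repeated-byte fill rather than element by element.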
1955pub fn hasRepeatedByteRepr(val: Value, pt: Zcu.PerThread) !?u8 {
1956 const zcu = pt.zcu;
1957 const ty = val.typeOf(zcu);
1958 const abi_size = std.math.cast(usize, ty.abiSize(zcu)) orelse return null;
1959 assert(abi_size >= 1);
1960 const byte_buffer = try zcu.gpa.alloc(u8, abi_size);
1961 defer zcu.gpa.free(byte_buffer);
1962
1963 writeToMemory(val, pt, byte_buffer) catch |err| switch (err) {
1964 error.OutOfMemory => return error.OutOfMemory,
1965 error.ReinterpretDeclRef => return null,
        // TODO: The writeToMemory function was originally created for the purpose
        // of comptime pointer casting. However, it is now additionally being used
        // for checking the actual memory layout that will be generated by machine
        // code late in compilation. So, this error handling is too aggressive,
        // causing some false negatives and hence less-than-ideal code generation.
1971 error.IllDefinedMemoryLayout => return null,
1972 error.Unimplemented => return null,
1973 };
1974 const first_byte = byte_buffer[0];
1975 for (byte_buffer[1..]) |byte| {
1976 if (byte != first_byte) return null;
1977 }
1978 return first_byte;
1979}
1980
1981pub fn typeOf(val: Value, zcu: *const Zcu) Type {
1982 return Type.fromInterned(zcu.intern_pool.typeOf(val.toIntern()));
1983}
1984
1985/// For an integer (comptime or fixed-width) `val`, returns the comptime-known bounds of the value.
1986/// If `val` is not undef, the bounds are both `val`.
1987/// If `val` is undef and has a fixed-width type, the bounds are the bounds of the type.
1988/// If `val` is undef and is a `comptime_int`, returns null.
1989pub fn intValueBounds(val: Value, pt: Zcu.PerThread) !?[2]Value {
1990 if (!val.isUndef(pt.zcu)) return .{ val, val };
1991 const ty = pt.zcu.intern_pool.typeOf(val.toIntern());
1992 if (ty == .comptime_int_type) return null;
1993 return .{
1994 try Type.fromInterned(ty).minInt(pt, Type.fromInterned(ty)),
1995 try Type.fromInterned(ty).maxInt(pt, Type.fromInterned(ty)),
1996 };
1997}
1998
1999pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
2000
2001pub const undef: Value = .{ .ip_index = .undef };
2002pub const undef_bool: Value = .{ .ip_index = .undef_bool };
2003pub const undef_usize: Value = .{ .ip_index = .undef_usize };
2004pub const undef_u1: Value = .{ .ip_index = .undef_u1 };
2005pub const zero_comptime_int: Value = .{ .ip_index = .zero };
2006pub const zero_usize: Value = .{ .ip_index = .zero_usize };
2007pub const zero_u1: Value = .{ .ip_index = .zero_u1 };
2008pub const zero_u8: Value = .{ .ip_index = .zero_u8 };
2009pub const one_comptime_int: Value = .{ .ip_index = .one };
2010pub const one_usize: Value = .{ .ip_index = .one_usize };
2011pub const one_u1: Value = .{ .ip_index = .one_u1 };
2012pub const one_u8: Value = .{ .ip_index = .one_u8 };
2013pub const four_u8: Value = .{ .ip_index = .four_u8 };
2014pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one };
2015pub const @"void": Value = .{ .ip_index = .void_value };
2016pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };
2017pub const @"null": Value = .{ .ip_index = .null_value };
2018pub const @"true": Value = .{ .ip_index = .bool_true };
2019pub const @"false": Value = .{ .ip_index = .bool_false };
2020pub const empty_tuple: Value = .{ .ip_index = .empty_tuple };
2021
2022pub fn makeBool(x: bool) Value {
2023 return if (x) .true else .false;
2024}
2025
2026/// `parent_ptr` must be a single-pointer or C pointer to some optional.
2027///
2028/// Returns a pointer to the payload of the optional.
2029///
2030/// May perform type resolution.
2031pub fn ptrOptPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
2032 const zcu = pt.zcu;
2033 const parent_ptr_ty = parent_ptr.typeOf(zcu);
2034 const opt_ty = parent_ptr_ty.childType(zcu);
2035 const ptr_size = parent_ptr_ty.ptrSize(zcu);
2036
2037 assert(ptr_size == .one or ptr_size == .c);
2038 assert(opt_ty.zigTypeTag(zcu) == .optional);
2039
2040 const result_ty = try pt.ptrTypeSema(info: {
2041 var new = parent_ptr_ty.ptrInfo(zcu);
2042 // We can correctly preserve alignment `.none`, since an optional has the same
2043 // natural alignment as its child type.
2044 new.child = opt_ty.childType(zcu).toIntern();
2045 break :info new;
2046 });
2047
2048 if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
2049
2050 if (opt_ty.isPtrLikeOptional(zcu)) {
2051 // Just reinterpret the pointer, since the layout is well-defined
2052 return pt.getCoerced(parent_ptr, result_ty);
2053 }
2054
2055 const base_ptr = try parent_ptr.canonicalizeBasePtr(.one, opt_ty, pt);
2056 return Value.fromInterned(try pt.intern(.{ .ptr = .{
2057 .ty = result_ty.toIntern(),
2058 .base_addr = .{ .opt_payload = base_ptr.toIntern() },
2059 .byte_offset = 0,
2060 } }));
2061}
2062
2063/// `parent_ptr` must be a single-pointer to some error union.
2064/// Returns a pointer to the payload of the error union.
2065/// May perform type resolution.
2066pub fn ptrEuPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
2067 const zcu = pt.zcu;
2068 const parent_ptr_ty = parent_ptr.typeOf(zcu);
2069 const eu_ty = parent_ptr_ty.childType(zcu);
2070
2071 assert(parent_ptr_ty.ptrSize(zcu) == .one);
2072 assert(eu_ty.zigTypeTag(zcu) == .error_union);
2073
2074 const result_ty = try pt.ptrTypeSema(info: {
2075 var new = parent_ptr_ty.ptrInfo(zcu);
2076 // We can correctly preserve alignment `.none`, since an error union has a
2077 // natural alignment greater than or equal to that of its payload type.
2078 new.child = eu_ty.errorUnionPayload(zcu).toIntern();
2079 break :info new;
2080 });
2081
2082 if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
2083
2084 const base_ptr = try parent_ptr.canonicalizeBasePtr(.one, eu_ty, pt);
2085 return Value.fromInterned(try pt.intern(.{ .ptr = .{
2086 .ty = result_ty.toIntern(),
2087 .base_addr = .{ .eu_payload = base_ptr.toIntern() },
2088 .byte_offset = 0,
2089 } }));
2090}
2091
/// `parent_ptr` must be a single-pointer or C pointer to a struct, union, or slice.
2093///
2094/// Returns a pointer to the aggregate field at the specified index.
2095///
2096/// For slices, uses `slice_ptr_index` and `slice_len_index`.
2097///
2098/// May perform type resolution.
2099pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
2100 const zcu = pt.zcu;
2101 const parent_ptr_ty = parent_ptr.typeOf(zcu);
2102 const aggregate_ty = parent_ptr_ty.childType(zcu);
2103
2104 const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
2105 assert(parent_ptr_info.flags.size == .one or parent_ptr_info.flags.size == .c);
2106
    // Exiting this `switch` indicates that the `field` pointer representation should be used.
    // `field_align` may be `.none`, which represents the natural alignment of `field_ty`, but
    // it is not necessarily `.none`.
2109 const field_ty: Type, const field_align: InternPool.Alignment = switch (aggregate_ty.zigTypeTag(zcu)) {
2110 .@"struct" => field: {
2111 const field_ty = aggregate_ty.fieldType(field_idx, zcu);
2112 switch (aggregate_ty.containerLayout(zcu)) {
2113 .auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) },
2114 .@"extern" => {
2115 // Well-defined layout, so just offset the pointer appropriately.
2116 try aggregate_ty.resolveLayout(pt);
2117 const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu);
2118 const field_align = a: {
2119 const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: {
2120 break :pa try aggregate_ty.abiAlignmentSema(pt);
2121 } else parent_ptr_info.flags.alignment;
2122 break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off)));
2123 };
2124 const result_ty = try pt.ptrTypeSema(info: {
2125 var new = parent_ptr_info;
2126 new.child = field_ty.toIntern();
2127 new.flags.alignment = field_align;
2128 break :info new;
2129 });
2130 return parent_ptr.getOffsetPtr(byte_off, result_ty, pt);
2131 },
2132 .@"packed" => {
2133 const packed_offset = aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, pt);
2134 const result_ty = try pt.ptrType(info: {
2135 var new = parent_ptr_info;
2136 new.packed_offset = packed_offset;
2137 new.child = field_ty.toIntern();
2138 if (new.flags.alignment == .none) {
2139 new.flags.alignment = try aggregate_ty.abiAlignmentSema(pt);
2140 }
2141 break :info new;
2142 });
2143 return pt.getCoerced(parent_ptr, result_ty);
2144 },
2145 }
2146 },
2147 .@"union" => field: {
2148 const union_obj = zcu.typeToUnion(aggregate_ty).?;
2149 const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
2150 switch (aggregate_ty.containerLayout(zcu)) {
2151 .auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) },
2152 .@"extern" => {
2153 // Point to the same address.
2154 const result_ty = try pt.ptrTypeSema(info: {
2155 var new = parent_ptr_info;
2156 new.child = field_ty.toIntern();
2157 break :info new;
2158 });
2159 return pt.getCoerced(parent_ptr, result_ty);
2160 },
2161 .@"packed" => {
2162 // If the field has an ABI size matching its bit size, then we can continue to use a
2163 // non-bit pointer if the parent pointer is also a non-bit pointer.
2164 if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar * 8 == try field_ty.bitSizeSema(pt)) {
2165 // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely.
2166 const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) {
2167 .little => 0,
2168 .big => (try aggregate_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar - (try field_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar,
2169 };
2170 const result_ty = try pt.ptrTypeSema(info: {
2171 var new = parent_ptr_info;
2172 new.child = field_ty.toIntern();
2173 new.flags.alignment = InternPool.Alignment.fromLog2Units(
2174 @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentSema(pt)).toByteUnits().?),
2175 );
2176 break :info new;
2177 });
2178 return parent_ptr.getOffsetPtr(byte_offset, result_ty, pt);
2179 } else {
2180 // The result must be a bit-pointer if it is not already.
2181 const result_ty = try pt.ptrTypeSema(info: {
2182 var new = parent_ptr_info;
2183 new.child = field_ty.toIntern();
2184 if (new.packed_offset.host_size == 0) {
2185 new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeSema(pt)) + 7) / 8);
2186 assert(new.packed_offset.bit_offset == 0);
2187 }
2188 break :info new;
2189 });
2190 return pt.getCoerced(parent_ptr, result_ty);
2191 }
2192 },
2193 }
2194 },
2195 .pointer => field_ty: {
2196 assert(aggregate_ty.isSlice(zcu));
2197 break :field_ty switch (field_idx) {
2198 Value.slice_ptr_index => .{ aggregate_ty.slicePtrFieldType(zcu), Type.usize.abiAlignment(zcu) },
2199 Value.slice_len_index => .{ Type.usize, Type.usize.abiAlignment(zcu) },
2200 else => unreachable,
2201 };
2202 },
2203 else => unreachable,
2204 };
2205
2206 const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: {
2207 const ty_align = (try field_ty.abiAlignmentInner(.sema, zcu, pt.tid)).scalar;
2208 const true_field_align = if (field_align == .none) ty_align else field_align;
2209 const new_align = true_field_align.min(parent_ptr_info.flags.alignment);
2210 if (new_align == ty_align) break :a .none;
2211 break :a new_align;
2212 } else field_align;
2213
2214 const result_ty = try pt.ptrTypeSema(info: {
2215 var new = parent_ptr_info;
2216 new.child = field_ty.toIntern();
2217 new.flags.alignment = new_align;
2218 break :info new;
2219 });
2220
2221 if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
2222
2223 const base_ptr = try parent_ptr.canonicalizeBasePtr(.one, aggregate_ty, pt);
2224 return Value.fromInterned(try pt.intern(.{ .ptr = .{
2225 .ty = result_ty.toIntern(),
2226 .base_addr = .{ .field = .{
2227 .base = base_ptr.toIntern(),
2228 .index = field_idx,
2229 } },
2230 .byte_offset = 0,
2231 } }));
2232}
2233
/// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer, C pointer, or slice.
2235/// Returns a pointer to the element at the specified index.
2236/// May perform type resolution.
2237pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, pt: Zcu.PerThread) !Value {
2238 const zcu = pt.zcu;
2239 const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) {
2240 .one, .many, .c => orig_parent_ptr,
2241 .slice => orig_parent_ptr.slicePtr(zcu),
2242 };
2243
2244 const parent_ptr_ty = parent_ptr.typeOf(zcu);
2245 const elem_ty = parent_ptr_ty.childType(zcu);
2246 const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), pt);
2247
2248 if (parent_ptr.isUndef(zcu)) return pt.undefValue(result_ty);
2249
2250 if (result_ty.ptrInfo(zcu).packed_offset.host_size != 0) {
2251 // Since we have a bit-pointer, the pointer address should be unchanged.
2252 assert(elem_ty.zigTypeTag(zcu) == .vector);
2253 return pt.getCoerced(parent_ptr, result_ty);
2254 }
2255
2256 const PtrStrat = union(enum) {
2257 offset: u64,
2258 elem_ptr: Type, // many-ptr elem ty
2259 };
2260
2261 const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) {
2262 .one => switch (elem_ty.zigTypeTag(zcu)) {
2263 .vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeSema(pt), 8) },
2264 .array => strat: {
2265 const arr_elem_ty = elem_ty.childType(zcu);
2266 if (try arr_elem_ty.comptimeOnlySema(pt)) {
2267 break :strat .{ .elem_ptr = arr_elem_ty };
2268 }
2269 break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar };
2270 },
2271 else => unreachable,
2272 },
2273
2274 .many, .c => if (try elem_ty.comptimeOnlySema(pt))
2275 .{ .elem_ptr = elem_ty }
2276 else
2277 .{ .offset = field_idx * (try elem_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar },
2278
2279 .slice => unreachable,
2280 };
2281
2282 switch (strat) {
2283 .offset => |byte_offset| {
2284 return parent_ptr.getOffsetPtr(byte_offset, result_ty, pt);
2285 },
2286 .elem_ptr => |manyptr_elem_ty| if (field_idx == 0) {
2287 return pt.getCoerced(parent_ptr, result_ty);
2288 } else {
2289 const arr_base_ty, const arr_base_len = manyptr_elem_ty.arrayBase(zcu);
2290 const base_idx = arr_base_len * field_idx;
2291 const parent_info = zcu.intern_pool.indexToKey(parent_ptr.toIntern()).ptr;
2292 switch (parent_info.base_addr) {
2293 .arr_elem => |arr_elem| {
2294 if (Value.fromInterned(arr_elem.base).typeOf(zcu).childType(zcu).toIntern() == arr_base_ty.toIntern()) {
2295 // We already have a pointer to an element of an array of this type.
2296 // Just modify the index.
2297 return Value.fromInterned(try pt.intern(.{ .ptr = ptr: {
2298 var new = parent_info;
2299 new.base_addr.arr_elem.index += base_idx;
2300 new.ty = result_ty.toIntern();
2301 break :ptr new;
2302 } }));
2303 }
2304 },
2305 else => {},
2306 }
2307 const base_ptr = try parent_ptr.canonicalizeBasePtr(.many, arr_base_ty, pt);
2308 return Value.fromInterned(try pt.intern(.{ .ptr = .{
2309 .ty = result_ty.toIntern(),
2310 .base_addr = .{ .arr_elem = .{
2311 .base = base_ptr.toIntern(),
2312 .index = base_idx,
2313 } },
2314 .byte_offset = 0,
2315 } }));
2316 },
2317 }
2318}
2319
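/// Coerces `base_ptr` to a canonical pointer type: the requested size and child type, with no
/// explicit alignment, no sentinel, and no `const`/`volatile`/`allowzero` qualifiers. Used by
/// `ptrOptPayload`, `ptrEuPayload`, `ptrField`, and `ptrElem` when constructing base addresses.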
2320fn canonicalizeBasePtr(base_ptr: Value, want_size: std.builtin.Type.Pointer.Size, want_child: Type, pt: Zcu.PerThread) !Value {
2321 const ptr_ty = base_ptr.typeOf(pt.zcu);
2322 const ptr_info = ptr_ty.ptrInfo(pt.zcu);
2323
2324 if (ptr_info.flags.size == want_size and
2325 ptr_info.child == want_child.toIntern() and
2326 !ptr_info.flags.is_const and
2327 !ptr_info.flags.is_volatile and
2328 !ptr_info.flags.is_allowzero and
2329 ptr_info.sentinel == .none and
2330 ptr_info.flags.alignment == .none)
2331 {
2332 // Already canonical!
2333 return base_ptr;
2334 }
2335
2336 const new_ty = try pt.ptrType(.{
2337 .child = want_child.toIntern(),
2338 .sentinel = .none,
2339 .flags = .{
2340 .size = want_size,
2341 .alignment = .none,
2342 .is_const = false,
2343 .is_volatile = false,
2344 .is_allowzero = false,
2345 .address_space = ptr_info.flags.address_space,
2346 },
2347 });
2348 return pt.getCoerced(base_ptr, new_ty);
2349}
2350
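/// Returns `ptr_val` with `byte_off` added to its byte offset and its type changed to `new_ty`.
/// Undefined pointers are returned unchanged.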
2351pub fn getOffsetPtr(ptr_val: Value, byte_off: u64, new_ty: Type, pt: Zcu.PerThread) !Value {
2352 if (ptr_val.isUndef(pt.zcu)) return ptr_val;
2353 var ptr = pt.zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
2354 ptr.ty = new_ty.toIntern();
2355 ptr.byte_offset += byte_off;
2356 return Value.fromInterned(try pt.intern(.{ .ptr = ptr }));
2357}
2358
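/// One step in the derivation of a pointer value, as produced by `pointerDerivation`. Each
/// non-base variant holds a pointer to its parent step, so a derivation forms a chain ending
/// in one of the base variants (`int`, `nav_ptr`, `uav_ptr`, `comptime_alloc_ptr`, or
/// `comptime_field_ptr`).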
2359pub const PointerDeriveStep = union(enum) {
2360 int: struct {
2361 addr: u64,
2362 ptr_ty: Type,
2363 },
2364 nav_ptr: InternPool.Nav.Index,
2365 uav_ptr: InternPool.Key.Ptr.BaseAddr.Uav,
2366 comptime_alloc_ptr: struct {
2367 idx: InternPool.ComptimeAllocIndex,
2368 val: Value,
2369 ptr_ty: Type,
2370 },
2371 comptime_field_ptr: Value,
2372 eu_payload_ptr: struct {
2373 parent: *PointerDeriveStep,
2374 /// This type will never be cast: it is provided for convenience.
2375 result_ptr_ty: Type,
2376 },
2377 opt_payload_ptr: struct {
2378 parent: *PointerDeriveStep,
2379 /// This type will never be cast: it is provided for convenience.
2380 result_ptr_ty: Type,
2381 },
2382 field_ptr: struct {
2383 parent: *PointerDeriveStep,
2384 field_idx: u32,
2385 /// This type will never be cast: it is provided for convenience.
2386 result_ptr_ty: Type,
2387 },
2388 elem_ptr: struct {
2389 parent: *PointerDeriveStep,
2390 elem_idx: u64,
2391 /// This type will never be cast: it is provided for convenience.
2392 result_ptr_ty: Type,
2393 },
2394 offset_and_cast: struct {
2395 parent: *PointerDeriveStep,
2396 byte_offset: u64,
2397 new_ptr_ty: Type,
2398 },
2399
2400 pub fn ptrType(step: PointerDeriveStep, pt: Zcu.PerThread) !Type {
2401 return switch (step) {
2402 .int => |int| int.ptr_ty,
2403 .nav_ptr => |nav| try pt.navPtrType(nav),
2404 .uav_ptr => |uav| Type.fromInterned(uav.orig_ty),
2405 .comptime_alloc_ptr => |info| info.ptr_ty,
2406 .comptime_field_ptr => |val| try pt.singleConstPtrType(val.typeOf(pt.zcu)),
2407 .offset_and_cast => |oac| oac.new_ptr_ty,
2408 inline .eu_payload_ptr, .opt_payload_ptr, .field_ptr, .elem_ptr => |x| x.result_ptr_ty,
2409 };
2410 }
2411};
2412
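/// Convenience wrapper around `pointerDerivationAdvanced` which performs no type resolution
/// and requires no `Sema` instance; see that function for details.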
2413pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread) Allocator.Error!PointerDeriveStep {
2414 return ptr_val.pointerDerivationAdvanced(arena, pt, false, null) catch |err| switch (err) {
2415 error.OutOfMemory => |e| return e,
        error.Canceled => @panic("TODO"), // TODO(mlugg): remove `error.Canceled` from this error set
2417 error.AnalysisFail => unreachable,
2418 };
2419}
2420
2421/// Given a pointer value, get the sequence of steps to derive it, ideally by taking
2422/// only field and element pointers with no casts. This can be used by codegen backends
2423/// which prefer field/elem accesses when lowering constant pointer values.
2424/// It is also used by the Value printing logic for pointers.
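/// For example (illustrative only), a pointer equivalent to `&s.arr[3]` would ideally be
/// derived as a `nav_ptr` step refined by a `field_ptr` step and then an `elem_ptr` step,
/// with `offset_and_cast` emitted only as a fallback when no such refinement is possible.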
2425pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread, comptime resolve_types: bool, opt_sema: ?*Sema) !PointerDeriveStep {
2426 const zcu = pt.zcu;
2427 const ptr = zcu.intern_pool.indexToKey(ptr_val.toIntern()).ptr;
2428 const base_derive: PointerDeriveStep = switch (ptr.base_addr) {
2429 .int => return .{ .int = .{
2430 .addr = ptr.byte_offset,
2431 .ptr_ty = Type.fromInterned(ptr.ty),
2432 } },
2433 .nav => |nav| .{ .nav_ptr = nav },
2434 .uav => |uav| base: {
            // A slight tweak: `orig_ty` here is sometimes not `const`, but it ought to be.
            // TODO: fix this at the sites that intern anon decls!
2437 const const_ty = try pt.ptrType(info: {
2438 var info = Type.fromInterned(uav.orig_ty).ptrInfo(zcu);
2439 info.flags.is_const = true;
2440 break :info info;
2441 });
2442 break :base .{ .uav_ptr = .{
2443 .val = uav.val,
2444 .orig_ty = const_ty.toIntern(),
2445 } };
2446 },
2447 .comptime_alloc => |idx| base: {
2448 const sema = opt_sema.?;
2449 const alloc = sema.getComptimeAlloc(idx);
2450 const val = try alloc.val.intern(pt, sema.arena);
2451 const ty = val.typeOf(zcu);
2452 break :base .{ .comptime_alloc_ptr = .{
2453 .idx = idx,
2454 .val = val,
2455 .ptr_ty = try pt.ptrType(.{
2456 .child = ty.toIntern(),
2457 .flags = .{
2458 .alignment = alloc.alignment,
2459 },
2460 }),
2461 } };
2462 },
2463 .comptime_field => |val| .{ .comptime_field_ptr = Value.fromInterned(val) },
2464 .eu_payload => |eu_ptr| base: {
2465 const base_ptr = Value.fromInterned(eu_ptr);
2466 const base_ptr_ty = base_ptr.typeOf(zcu);
2467 const parent_step = try arena.create(PointerDeriveStep);
2468 parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(eu_ptr), arena, pt, resolve_types, opt_sema);
2469 break :base .{ .eu_payload_ptr = .{
2470 .parent = parent_step,
2471 .result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).errorUnionPayload(zcu)),
2472 } };
2473 },
2474 .opt_payload => |opt_ptr| base: {
2475 const base_ptr = Value.fromInterned(opt_ptr);
2476 const base_ptr_ty = base_ptr.typeOf(zcu);
2477 const parent_step = try arena.create(PointerDeriveStep);
2478 parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(opt_ptr), arena, pt, resolve_types, opt_sema);
2479 break :base .{ .opt_payload_ptr = .{
2480 .parent = parent_step,
2481 .result_ptr_ty = try pt.adjustPtrTypeChild(base_ptr_ty, base_ptr_ty.childType(zcu).optionalChild(zcu)),
2482 } };
2483 },
2484 .field => |field| base: {
2485 const base_ptr = Value.fromInterned(field.base);
2486 const base_ptr_ty = base_ptr.typeOf(zcu);
2487 const agg_ty = base_ptr_ty.childType(zcu);
2488 const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
2489 .@"struct" => .{ agg_ty.fieldType(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
2490 @intCast(field.index),
2491 if (resolve_types) .sema else .normal,
2492 pt.zcu,
2493 if (resolve_types) pt.tid else {},
2494 ) },
2495 .@"union" => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.fieldAlignmentInner(
2496 @intCast(field.index),
2497 if (resolve_types) .sema else .normal,
2498 pt.zcu,
2499 if (resolve_types) pt.tid else {},
2500 ) },
2501 .pointer => .{ switch (field.index) {
2502 Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu),
2503 Value.slice_len_index => Type.usize,
2504 else => unreachable,
2505 }, Type.usize.abiAlignment(zcu) },
2506 else => unreachable,
2507 };
2508 const base_align = base_ptr_ty.ptrAlignment(zcu);
2509 const result_align = field_align.minStrict(base_align);
2510 const result_ty = try pt.ptrType(.{
2511 .child = field_ty.toIntern(),
2512 .flags = flags: {
2513 var flags = base_ptr_ty.ptrInfo(zcu).flags;
2514 if (result_align == field_ty.abiAlignment(zcu)) {
2515 flags.alignment = .none;
2516 } else {
2517 flags.alignment = result_align;
2518 }
2519 break :flags flags;
2520 },
2521 });
2522 const parent_step = try arena.create(PointerDeriveStep);
2523 parent_step.* = try pointerDerivationAdvanced(base_ptr, arena, pt, resolve_types, opt_sema);
2524 break :base .{ .field_ptr = .{
2525 .parent = parent_step,
2526 .field_idx = @intCast(field.index),
2527 .result_ptr_ty = result_ty,
2528 } };
2529 },
2530 .arr_elem => |arr_elem| base: {
2531 const parent_step = try arena.create(PointerDeriveStep);
2532 parent_step.* = try pointerDerivationAdvanced(Value.fromInterned(arr_elem.base), arena, pt, resolve_types, opt_sema);
2533 const parent_ptr_info = (try parent_step.ptrType(pt)).ptrInfo(zcu);
2534 const result_ptr_ty = try pt.ptrType(.{
2535 .child = parent_ptr_info.child,
2536 .flags = flags: {
2537 var flags = parent_ptr_info.flags;
2538 flags.size = .one;
2539 break :flags flags;
2540 },
2541 });
2542 break :base .{ .elem_ptr = .{
2543 .parent = parent_step,
2544 .elem_idx = arr_elem.index,
2545 .result_ptr_ty = result_ptr_ty,
2546 } };
2547 },
2548 };
2549
2550 if (ptr.byte_offset == 0 and ptr.ty == (try base_derive.ptrType(pt)).toIntern()) {
2551 return base_derive;
2552 }
2553
2554 const ptr_ty_info = Type.fromInterned(ptr.ty).ptrInfo(zcu);
2555 const need_child: Type = .fromInterned(ptr_ty_info.child);
2556 if (need_child.comptimeOnly(zcu)) {
2557 // No refinement can happen - this pointer is presumably invalid.
2558 // Just offset it.
2559 const parent = try arena.create(PointerDeriveStep);
2560 parent.* = base_derive;
2561 return .{ .offset_and_cast = .{
2562 .parent = parent,
2563 .byte_offset = ptr.byte_offset,
2564 .new_ptr_ty = Type.fromInterned(ptr.ty),
2565 } };
2566 }
2567 const need_bytes = need_child.abiSize(zcu);
2568
2569 var cur_derive = base_derive;
2570 var cur_offset = ptr.byte_offset;
2571
2572 // Refine through fields and array elements as much as possible.
2573
2574 if (need_bytes > 0) while (true) {
2575 const cur_ty = (try cur_derive.ptrType(pt)).childType(zcu);
2576 if (cur_ty.toIntern() == need_child.toIntern() and cur_offset == 0) {
2577 break;
2578 }
2579 switch (cur_ty.zigTypeTag(zcu)) {
2580 .noreturn,
2581 .type,
2582 .comptime_int,
2583 .comptime_float,
2584 .null,
2585 .undefined,
2586 .enum_literal,
2587 .@"opaque",
2588 .@"fn",
2589 .error_union,
2590 .int,
2591 .float,
2592 .bool,
2593 .void,
2594 .pointer,
2595 .error_set,
2596 .@"anyframe",
2597 .frame,
2598 .@"enum",
2599 .vector,
2600 .@"union",
2601 => break,
2602
2603 .optional => {
2604 ptr_opt: {
2605 if (!cur_ty.isPtrLikeOptional(zcu)) break :ptr_opt;
2606 if (need_child.zigTypeTag(zcu) != .pointer) break :ptr_opt;
2607 switch (need_child.ptrSize(zcu)) {
2608 .one, .many => {},
2609 .slice, .c => break :ptr_opt,
2610 }
2611 const parent = try arena.create(PointerDeriveStep);
2612 parent.* = cur_derive;
2613 cur_derive = .{ .opt_payload_ptr = .{
2614 .parent = parent,
2615 .result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), cur_ty.optionalChild(zcu)),
2616 } };
2617 continue;
2618 }
2619 break;
2620 },
2621
2622 .array => {
2623 const elem_ty = cur_ty.childType(zcu);
2624 const elem_size = elem_ty.abiSize(zcu);
2625 const start_idx = cur_offset / elem_size;
2626 const end_idx = (cur_offset + need_bytes + elem_size - 1) / elem_size;
2627 if (end_idx == start_idx + 1 and ptr_ty_info.flags.size == .one) {
2628 const parent = try arena.create(PointerDeriveStep);
2629 parent.* = cur_derive;
2630 cur_derive = .{ .elem_ptr = .{
2631 .parent = parent,
2632 .elem_idx = start_idx,
2633 .result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), elem_ty),
2634 } };
2635 cur_offset -= start_idx * elem_size;
2636 } else {
2637 // Go into the first element if needed, but don't go any deeper.
2638 if (start_idx > 0) {
2639 const parent = try arena.create(PointerDeriveStep);
2640 parent.* = cur_derive;
2641 cur_derive = .{ .elem_ptr = .{
2642 .parent = parent,
2643 .elem_idx = start_idx,
2644 .result_ptr_ty = try pt.adjustPtrTypeChild(try parent.ptrType(pt), elem_ty),
2645 } };
2646 cur_offset -= start_idx * elem_size;
2647 }
2648 break;
2649 }
2650 },
2651 .@"struct" => switch (cur_ty.containerLayout(zcu)) {
2652 .auto, .@"packed" => break,
2653 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
2654 const field_ty = cur_ty.fieldType(field_idx, zcu);
2655 const start_off = cur_ty.structFieldOffset(field_idx, zcu);
2656 const end_off = start_off + field_ty.abiSize(zcu);
2657 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
2658 const old_ptr_ty = try cur_derive.ptrType(pt);
2659 const parent_align = old_ptr_ty.ptrAlignment(zcu);
2660 const field_align = InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(start_off)));
2661 const parent = try arena.create(PointerDeriveStep);
2662 parent.* = cur_derive;
2663 const new_ptr_ty = try pt.ptrType(.{
2664 .child = field_ty.toIntern(),
2665 .flags = flags: {
2666 var flags = old_ptr_ty.ptrInfo(zcu).flags;
2667 if (field_align == field_ty.abiAlignment(zcu)) {
2668 flags.alignment = .none;
2669 } else {
2670 flags.alignment = field_align;
2671 }
2672 break :flags flags;
2673 },
2674 });
2675 cur_derive = .{ .field_ptr = .{
2676 .parent = parent,
2677 .field_idx = @intCast(field_idx),
2678 .result_ptr_ty = new_ptr_ty,
2679 } };
2680 cur_offset -= start_off;
2681 break;
2682 }
2683 } else break, // pointer spans multiple fields
2684 },
2685 }
2686 };
2687
2688 if (cur_offset == 0) compatible: {
2689 const src_ptr_ty_info = (try cur_derive.ptrType(pt)).ptrInfo(zcu);
2690 // We allow silently doing some "coercible" pointer things.
2691 // In particular, we only give up if cv qualifiers are *removed*.
2692 if (src_ptr_ty_info.flags.is_const and !ptr_ty_info.flags.is_const) break :compatible;
2693 if (src_ptr_ty_info.flags.is_volatile and !ptr_ty_info.flags.is_volatile) break :compatible;
2694 if (src_ptr_ty_info.flags.is_allowzero and !ptr_ty_info.flags.is_allowzero) break :compatible;
2695 // Everything else has to match exactly.
2696 if (src_ptr_ty_info.child != ptr_ty_info.child) break :compatible;
2697 if (src_ptr_ty_info.sentinel != ptr_ty_info.sentinel) break :compatible;
2698 if (src_ptr_ty_info.packed_offset != ptr_ty_info.packed_offset) break :compatible;
2699 if (src_ptr_ty_info.flags.size != ptr_ty_info.flags.size) break :compatible;
2700 if (src_ptr_ty_info.flags.alignment != ptr_ty_info.flags.alignment) break :compatible;
2701 if (src_ptr_ty_info.flags.address_space != ptr_ty_info.flags.address_space) break :compatible;
2702 if (src_ptr_ty_info.flags.vector_index != ptr_ty_info.flags.vector_index) break :compatible;
2703
2704 return cur_derive;
2705 }
2706
2707 const parent = try arena.create(PointerDeriveStep);
2708 parent.* = cur_derive;
2709 return .{ .offset_and_cast = .{
2710 .parent = parent,
2711 .byte_offset = cur_offset,
2712 .new_ptr_ty = Type.fromInterned(ptr.ty),
2713 } };
2714}
2715
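/// Recursively replaces any lazy integer values (`lazy_align`/`lazy_size`) reachable from
/// `val` with their resolved numeric values, returning `val` itself when nothing changed.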
2716pub fn resolveLazy(
2717 val: Value,
2718 arena: Allocator,
2719 pt: Zcu.PerThread,
2720) Zcu.SemaError!Value {
2721 switch (pt.zcu.intern_pool.indexToKey(val.toIntern())) {
2722 .int => |int| switch (int.storage) {
2723 .u64, .i64, .big_int => return val,
2724 .lazy_align, .lazy_size => return pt.intValue(
2725 Type.fromInterned(int.ty),
2726 try val.toUnsignedIntSema(pt),
2727 ),
2728 },
2729 .slice => |slice| {
2730 const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, pt);
2731 const len = try Value.fromInterned(slice.len).resolveLazy(arena, pt);
2732 if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val;
2733 return Value.fromInterned(try pt.intern(.{ .slice = .{
2734 .ty = slice.ty,
2735 .ptr = ptr.toIntern(),
2736 .len = len.toIntern(),
2737 } }));
2738 },
2739 .ptr => |ptr| {
2740 switch (ptr.base_addr) {
2741 .nav, .comptime_alloc, .uav, .int => return val,
2742 .comptime_field => |field_val| {
2743 const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, pt)).toIntern();
2744 return if (resolved_field_val == field_val)
2745 val
2746 else
2747 Value.fromInterned(try pt.intern(.{ .ptr = .{
2748 .ty = ptr.ty,
2749 .base_addr = .{ .comptime_field = resolved_field_val },
2750 .byte_offset = ptr.byte_offset,
2751 } }));
2752 },
2753 .eu_payload, .opt_payload => |base| {
2754 const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, pt)).toIntern();
2755 return if (resolved_base == base)
2756 val
2757 else
2758 Value.fromInterned(try pt.intern(.{ .ptr = .{
2759 .ty = ptr.ty,
2760 .base_addr = switch (ptr.base_addr) {
2761 .eu_payload => .{ .eu_payload = resolved_base },
2762 .opt_payload => .{ .opt_payload = resolved_base },
2763 else => unreachable,
2764 },
2765 .byte_offset = ptr.byte_offset,
2766 } }));
2767 },
2768 .arr_elem, .field => |base_index| {
2769 const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, pt)).toIntern();
2770 return if (resolved_base == base_index.base)
2771 val
2772 else
2773 Value.fromInterned(try pt.intern(.{ .ptr = .{
2774 .ty = ptr.ty,
2775 .base_addr = switch (ptr.base_addr) {
2776 .arr_elem => .{ .arr_elem = .{
2777 .base = resolved_base,
2778 .index = base_index.index,
2779 } },
2780 .field => .{ .field = .{
2781 .base = resolved_base,
2782 .index = base_index.index,
2783 } },
2784 else => unreachable,
2785 },
2786 .byte_offset = ptr.byte_offset,
2787 } }));
2788 },
2789 }
2790 },
2791 .aggregate => |aggregate| switch (aggregate.storage) {
2792 .bytes => return val,
2793 .elems => |elems| {
2794 var resolved_elems: []InternPool.Index = &.{};
2795 for (elems, 0..) |elem, i| {
2796 const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, pt)).toIntern();
2797 if (resolved_elems.len == 0 and resolved_elem != elem) {
2798 resolved_elems = try arena.alloc(InternPool.Index, elems.len);
2799 @memcpy(resolved_elems[0..i], elems[0..i]);
2800 }
2801 if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
2802 }
2803 return if (resolved_elems.len == 0)
2804 val
2805 else
2806 pt.aggregateValue(.fromInterned(aggregate.ty), resolved_elems);
2807 },
2808 .repeated_elem => |elem| {
2809 const resolved_elem = try Value.fromInterned(elem).resolveLazy(arena, pt);
2810 return if (resolved_elem.toIntern() == elem)
2811 val
2812 else
2813 pt.aggregateSplatValue(.fromInterned(aggregate.ty), resolved_elem);
2814 },
2815 },
2816 .un => |un| {
2817 const resolved_tag = if (un.tag == .none)
2818 .none
2819 else
2820 (try Value.fromInterned(un.tag).resolveLazy(arena, pt)).toIntern();
2821 const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, pt)).toIntern();
2822 return if (resolved_tag == un.tag and resolved_val == un.val)
2823 val
2824 else
2825 Value.fromInterned(try pt.internUnion(.{
2826 .ty = un.ty,
2827 .tag = resolved_tag,
2828 .val = resolved_val,
2829 }));
2830 },
2831 .error_union => |eu| switch (eu.val) {
2832 .err_name => return val,
2833 .payload => |payload| {
2834 const resolved_payload = try Value.fromInterned(payload).resolveLazy(arena, pt);
2835 if (resolved_payload.toIntern() == payload) return val;
2836 return .fromInterned(try pt.intern(.{ .error_union = .{
2837 .ty = eu.ty,
2838 .val = .{ .payload = resolved_payload.toIntern() },
2839 } }));
2840 },
2841 },
2842 .opt => |opt| switch (opt.val) {
2843 .none => return val,
2844 else => |payload| {
2845 const resolved_payload = try Value.fromInterned(payload).resolveLazy(arena, pt);
2846 if (resolved_payload.toIntern() == payload) return val;
2847 return .fromInterned(try pt.intern(.{ .opt = .{
2848 .ty = opt.ty,
2849 .val = resolved_payload.toIntern(),
2850 } }));
2851 },
2852 },
2853
2854 else => return val,
2855 }
2856}
2857
2858const InterpretMode = enum {
2859 /// In this mode, types are assumed to match what the compiler was built with in terms of field
2860 /// order, field types, etc. This improves compiler performance. However, it means that certain
2861 /// modifications to `std.builtin` will result in compiler crashes.
2862 direct,
2863 /// In this mode, various details of the type are allowed to differ from what the compiler was built
2864 /// with. Fields are matched by name rather than index; added struct fields are ignored, and removed
2865 /// struct fields use their default value if one exists. This is slower than `.direct`, but permits
2866 /// making certain changes to `std.builtin` (in particular reordering/adding/removing fields), so it
2867 /// is useful when applying breaking changes.
2868 by_name,
2869};
2870const interpret_mode: InterpretMode = @field(InterpretMode, @tagName(build_options.value_interpret_mode));
2871
2872/// Given a `Value` representing a comptime-known value of type `T`, unwrap it into an actual `T` known to the compiler.
2873/// This is useful for accessing `std.builtin` structures received from comptime logic.
2874/// `val` must be fully resolved.
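/// For instance (an illustrative sketch, not a prescribed call site), given a comptime-known
/// `val` of type `std.builtin.AtomicOrder`, `try val.interpret(std.builtin.AtomicOrder, pt)`
/// yields an `AtomicOrder` that compiler code can switch on directly.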
2875pub fn interpret(val: Value, comptime T: type, pt: Zcu.PerThread) error{ OutOfMemory, UndefinedValue, TypeMismatch }!T {
2876 const zcu = pt.zcu;
2877 const ip = &zcu.intern_pool;
2878 const ty = val.typeOf(zcu);
2879 if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;
2880 if (val.isUndef(zcu)) return error.UndefinedValue;
2881
2882 return switch (@typeInfo(T)) {
2883 .type,
2884 .noreturn,
2885 .comptime_float,
2886 .comptime_int,
2887 .undefined,
2888 .null,
2889 .@"fn",
2890 .@"opaque",
2891 .enum_literal,
2892 => comptime unreachable, // comptime-only or otherwise impossible
2893
2894 .pointer,
2895 .array,
2896 .error_union,
2897 .error_set,
2898 .frame,
2899 .@"anyframe",
2900 .vector,
2901 => comptime unreachable, // unsupported
2902
2903 .void => {},
2904
2905 .bool => switch (val.toIntern()) {
2906 .bool_false => false,
2907 .bool_true => true,
2908 else => unreachable,
2909 },
2910
2911 .int => switch (ip.indexToKey(val.toIntern()).int.storage) {
2912 .lazy_align, .lazy_size => unreachable, // `val` is fully resolved
2913 inline .u64, .i64 => |x| std.math.cast(T, x) orelse return error.TypeMismatch,
2914 .big_int => |big| big.toInt(T) catch return error.TypeMismatch,
2915 },
2916
2917 .float => val.toFloat(T, zcu),
2918
2919 .optional => |opt| if (val.optionalValue(zcu)) |unwrapped|
2920 try unwrapped.interpret(opt.child, pt)
2921 else
2922 null,
2923
2924 .@"enum" => switch (interpret_mode) {
2925 .direct => {
2926 const int = val.getUnsignedInt(zcu) orelse return error.TypeMismatch;
2927 return std.enums.fromInt(T, int) orelse error.TypeMismatch;
2928 },
2929 .by_name => {
2930 const field_index = ty.enumTagFieldIndex(val, zcu) orelse return error.TypeMismatch;
2931 const field_name = ty.enumFieldName(field_index, zcu);
2932 return std.meta.stringToEnum(T, field_name.toSlice(ip)) orelse error.TypeMismatch;
2933 },
2934 },
2935
2936 .@"union" => |@"union"| {
2937 // No need to handle `interpret_mode`, because the `.@"enum"` handling already deals with it.
2938 const tag_val = val.unionTag(zcu) orelse return error.TypeMismatch;
2939 const tag = try tag_val.interpret(@"union".tag_type.?, pt);
2940 return switch (tag) {
2941 inline else => |tag_comptime| @unionInit(
2942 T,
2943 @tagName(tag_comptime),
2944 try val.unionValue(zcu).interpret(@FieldType(T, @tagName(tag_comptime)), pt),
2945 ),
2946 };
2947 },
2948
2949 .@"struct" => |@"struct"| switch (interpret_mode) {
2950 .direct => {
2951 if (ty.structFieldCount(zcu) != @"struct".fields.len) return error.TypeMismatch;
2952 var result: T = undefined;
2953 inline for (@"struct".fields, 0..) |field, field_idx| {
2954 const field_val = try val.fieldValue(pt, field_idx);
2955 @field(result, field.name) = try field_val.interpret(field.type, pt);
2956 }
2957 return result;
2958 },
2959 .by_name => {
2960 const struct_obj = zcu.typeToStruct(ty) orelse return error.TypeMismatch;
2961 var result: T = undefined;
2962 inline for (@"struct".fields) |field| {
2963 const field_name_ip = try ip.getOrPutString(zcu.gpa, pt.tid, field.name, .no_embedded_nulls);
2964 @field(result, field.name) = if (struct_obj.nameIndex(ip, field_name_ip)) |field_idx| f: {
2965 const field_val = try val.fieldValue(pt, field_idx);
2966 break :f try field_val.interpret(field.type, pt);
2967 } else (field.defaultValue() orelse return error.TypeMismatch);
2968 }
2969 return result;
2970 },
2971 },
2972 };
2973}
2974
/// Given any `val` and a `Type` corresponding to `@TypeOf(val)`, construct a `Value` representing it which can be used
2976/// within the compilation. This is useful for passing `std.builtin` structures in the compiler back to the compilation.
2977/// This is the inverse of `interpret`.
pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory, TypeMismatch }!Value {
    const T = @TypeOf(val);

    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    if (ty.zigTypeTag(zcu) != @typeInfo(T)) return error.TypeMismatch;

    return switch (@typeInfo(T)) {
        .type,
        .noreturn,
        .comptime_float,
        .comptime_int,
        .undefined,
        .null,
        .@"fn",
        .@"opaque",
        .enum_literal,
        => comptime unreachable, // comptime-only or otherwise impossible

        .pointer,
        .array,
        .error_union,
        .error_set,
        .frame,
        .@"anyframe",
        .vector,
        => comptime unreachable, // unsupported

        .void => .void,

        .bool => if (val) .true else .false,

        .int => try pt.intValue(ty, val),

        .float => try pt.floatValue(ty, val),

        .optional => if (val) |some|
            .fromInterned(try pt.intern(.{ .opt = .{
                .ty = ty.toIntern(),
                .val = (try uninterpret(some, ty.optionalChild(zcu), pt)).toIntern(),
            } }))
        else
            try pt.nullValue(ty),

        .@"enum" => switch (interpret_mode) {
            .direct => try pt.enumValue(ty, (try uninterpret(@intFromEnum(val), ty.intTagType(zcu), pt)).toIntern()),
            .by_name => {
                const field_name_ip = try ip.getOrPutString(zcu.gpa, pt.tid, @tagName(val), .no_embedded_nulls);
                const field_idx = ty.enumFieldIndex(field_name_ip, zcu) orelse return error.TypeMismatch;
                return pt.enumValueFieldIndex(ty, field_idx);
            },
        },

        .@"union" => |@"union"| {
            // No need to handle `interpret_mode`, because the `.@"enum"` handling already deals with it.
            const tag: @"union".tag_type.? = val;
            const tag_val = try uninterpret(tag, ty.unionTagType(zcu).?, pt);
            const field_ty = ty.unionFieldType(tag_val, zcu) orelse return error.TypeMismatch;
            return switch (val) {
                inline else => |payload| try pt.unionValue(
                    ty,
                    tag_val,
                    try uninterpret(payload, field_ty, pt),
                ),
            };
        },

        .@"struct" => |@"struct"| switch (interpret_mode) {
            .direct => {
                if (ty.structFieldCount(zcu) != @"struct".fields.len) return error.TypeMismatch;
                var field_vals: [@"struct".fields.len]InternPool.Index = undefined;
                inline for (&field_vals, @"struct".fields, 0..) |*field_val, field, field_idx| {
                    const field_ty = ty.fieldType(field_idx, zcu);
                    field_val.* = (try uninterpret(@field(val, field.name), field_ty, pt)).toIntern();
                }
                return pt.aggregateValue(ty, &field_vals);
            },
            .by_name => {
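                // Two passes: intern every field that `T` names, then backfill the remaining
                // fields of `ty` from their default inits (a missing default is a mismatch).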
                const struct_obj = zcu.typeToStruct(ty) orelse return error.TypeMismatch;
                const want_fields_len = struct_obj.field_types.len;
                const field_vals = try zcu.gpa.alloc(InternPool.Index, want_fields_len);
                defer zcu.gpa.free(field_vals);
                @memset(field_vals, .none);
                inline for (@"struct".fields) |field| {
                    const field_name_ip = try ip.getOrPutString(zcu.gpa, pt.tid, field.name, .no_embedded_nulls);
                    if (struct_obj.nameIndex(ip, field_name_ip)) |field_idx| {
                        const field_ty = ty.fieldType(field_idx, zcu);
                        field_vals[field_idx] = (try uninterpret(@field(val, field.name), field_ty, pt)).toIntern();
                    }
                }
                for (field_vals, 0..) |*field_val, field_idx| {
                    if (field_val.* == .none) {
                        const default_init = struct_obj.field_inits.get(ip)[field_idx];
                        if (default_init == .none) return error.TypeMismatch;
                        field_val.* = default_init;
                    }
                }
                return pt.aggregateValue(ty, field_vals);
            },
        },
    };
}
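
// A minimal round-trip sketch (names are illustrative): a host-side `std.builtin` value can be
// lowered into the compilation and read back out, assuming `ty` is the matching in-compilation
// type and `pt` the current `Zcu.PerThread`:
//
//     const v = try Value.uninterpret(std.builtin.Type.Pointer.Size.many, ty, pt);
//     const back = try v.interpret(std.builtin.Type.Pointer.Size, pt);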

/// Returns whether `ptr_val_a[0..elem_count]` and `ptr_val_b[0..elem_count]` overlap.
/// `ptr_val_a` and `ptr_val_b` are indexable pointers (not slices) whose element types are in-memory coercible.
pub fn doPointersOverlap(ptr_val_a: Value, ptr_val_b: Value, elem_count: u64, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;

    const a_elem_ty = ptr_val_a.typeOf(zcu).indexablePtrElem(zcu);
    const b_elem_ty = ptr_val_b.typeOf(zcu).indexablePtrElem(zcu);

    const a_ptr = ip.indexToKey(ptr_val_a.toIntern()).ptr;
    const b_ptr = ip.indexToKey(ptr_val_b.toIntern()).ptr;

    // If `a_elem_ty` is not comptime-only, then overlapping pointers have identical
    // `base_addr`, and we just need to look at the byte offset. If it *is* comptime-only,
    // then `base_addr` may be an `arr_elem`, and we'll have to consider the element index.
    if (a_elem_ty.comptimeOnly(zcu)) {
        assert(a_elem_ty.toIntern() == b_elem_ty.toIntern()); // IMC comptime-only types are equivalent

        const a_base_addr: InternPool.Key.Ptr.BaseAddr, const a_idx: u64 = switch (a_ptr.base_addr) {
            else => .{ a_ptr.base_addr, 0 },
            .arr_elem => |arr_elem| a: {
                const base_ptr = Value.fromInterned(arr_elem.base);
                const base_child_ty = base_ptr.typeOf(zcu).childType(zcu);
                if (base_child_ty.toIntern() == a_elem_ty.toIntern()) {
                    // This `arr_elem` is indexing into the element type we want.
                    const base_ptr_info = ip.indexToKey(base_ptr.toIntern()).ptr;
                    if (base_ptr_info.byte_offset != 0) {
                        return false; // this pointer is invalid, just let the access fail
                    }
                    break :a .{ base_ptr_info.base_addr, arr_elem.index };
                }
                break :a .{ a_ptr.base_addr, 0 };
            },
        };
        const b_base_addr: InternPool.Key.Ptr.BaseAddr, const b_idx: u64 = switch (b_ptr.base_addr) {
            else => .{ b_ptr.base_addr, 0 },
            .arr_elem => |arr_elem| b: {
                const base_ptr = Value.fromInterned(arr_elem.base);
                const base_child_ty = base_ptr.typeOf(zcu).childType(zcu);
                if (base_child_ty.toIntern() == b_elem_ty.toIntern()) {
                    // This `arr_elem` is indexing into the element type we want.
                    const base_ptr_info = ip.indexToKey(base_ptr.toIntern()).ptr;
                    if (base_ptr_info.byte_offset != 0) {
                        return false; // this pointer is invalid, just let the access fail
                    }
                    break :b .{ base_ptr_info.base_addr, arr_elem.index };
                }
                break :b .{ b_ptr.base_addr, 0 };
            },
        };
        if (!std.meta.eql(a_base_addr, b_base_addr)) return false;
        const diff = if (a_idx >= b_idx) a_idx - b_idx else b_idx - a_idx;
        return diff < elem_count;
    } else {
        assert(a_elem_ty.abiSize(zcu) == b_elem_ty.abiSize(zcu));

        if (!std.meta.eql(a_ptr.base_addr, b_ptr.base_addr)) return false;

        const bytes_diff = if (a_ptr.byte_offset >= b_ptr.byte_offset)
            a_ptr.byte_offset - b_ptr.byte_offset
        else
            b_ptr.byte_offset - a_ptr.byte_offset;

        const need_bytes_diff = elem_count * a_elem_ty.abiSize(zcu);
        return bytes_diff < need_bytes_diff;
    }
}
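
// Worked example for the non-comptime-only branch (numbers are illustrative): with an element
// ABI size of 4, both pointers sharing a `base_addr`, `a` at byte offset 0 and `b` at byte
// offset 8, and `elem_count == 2`, we get `bytes_diff == 8` and `need_bytes_diff == 8`, so the
// ranges do not overlap; with `elem_count == 3` they would.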

/// `lhs` and `rhs` are both scalar numeric values (int or float).
/// Supports comparisons between heterogeneous types.
/// If `lhs` or `rhs` is undef, returns `false`.
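/// For example (illustrative), an integer value `3` and a float value `3.0` compare as equal
/// here, since the presence of a float operand causes both sides to be compared as `f128`.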
pub fn eqlScalarNum(lhs: Value, rhs: Value, zcu: *Zcu) bool {
    if (lhs.isUndef(zcu)) return false;
    if (rhs.isUndef(zcu)) return false;

    if (lhs.isFloat(zcu) or rhs.isFloat(zcu)) {
        const lhs_f128 = lhs.toFloat(f128, zcu);
        const rhs_f128 = rhs.toFloat(f128, zcu);
        return lhs_f128 == rhs_f128;
    }

    if (lhs.getUnsignedInt(zcu)) |lhs_u64| {
        if (rhs.getUnsignedInt(zcu)) |rhs_u64| {
            return lhs_u64 == rhs_u64;
        }
    }

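    // Neither side is a float and the unsigned fast path did not apply (e.g. a negative or
    // oversized integer); fall back to arbitrary-precision comparison.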
    var lhs_bigint_space: BigIntSpace = undefined;
    var rhs_bigint_space: BigIntSpace = undefined;
    const lhs_bigint = lhs.toBigInt(&lhs_bigint_space, zcu);
    const rhs_bigint = rhs.toBigInt(&rhs_bigint_space, zcu);
    return lhs_bigint.eql(rhs_bigint);
}