Commit 88dbd62bcb
Changed files (19)
lib/std/builtin.zig
@@ -223,6 +223,13 @@ pub const SourceLocation = struct {
pub const TypeId = std.meta.Tag(Type);
pub const TypeInfo = @compileError("deprecated; use Type");
+/// TODO this is a temporary alias because I don't see any handy methods in
+/// Sema for accessing inner declarations.
+pub const PtrSize = Type.Pointer.Size;
+/// TODO this is a temporary alias because I don't see any handy methods in
+/// Sema for accessing inner declarations.
+pub const TmpContainerLayoutAlias = Type.ContainerLayout;
+
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Type = union(enum) {
src/arch/wasm/CodeGen.zig
@@ -11,6 +11,7 @@ const log = std.log.scoped(.codegen);
const codegen = @import("../../codegen.zig");
const Module = @import("../../Module.zig");
+const InternPool = @import("../../InternPool.zig");
const Decl = Module.Decl;
const Type = @import("../../type.zig").Type;
const Value = @import("../../value.zig").Value;
@@ -3044,11 +3045,12 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
}
fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
var val = arg_val;
if (val.castTag(.runtime_value)) |rt| {
val = rt.data;
}
- if (val.isUndefDeep()) return func.emitUndefined(ty);
+ if (val.isUndefDeep(mod)) return func.emitUndefined(ty);
if (val.castTag(.decl_ref)) |decl_ref| {
const decl_index = decl_ref.data;
return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0);
@@ -3057,7 +3059,6 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const decl_index = decl_ref_mut.data.decl_index;
return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0);
}
- const mod = func.bin_file.base.options.module.?;
switch (ty.zigTypeTag(mod)) {
.Void => return WValue{ .none = {} },
.Int => {
@@ -3100,18 +3101,9 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
},
},
.Enum => {
- if (val.castTag(.enum_field_index)) |field_index| {
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
- if (enum_type.values.len != 0) {
- const tag_val = enum_type.values[field_index.data];
- return func.lowerConstant(tag_val.toValue(), enum_type.tag_ty.toType());
- } else {
- return WValue{ .imm32 = field_index.data };
- }
- } else {
- const int_tag_ty = try ty.intTagType(mod);
- return func.lowerConstant(val, int_tag_ty);
- }
+ const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag;
+ const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+ return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType());
},
.ErrorSet => switch (val.tag()) {
.@"error" => {
@@ -3223,37 +3215,42 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// Returns a `Value` as a signed 32 bit value.
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
-fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 {
+fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
const mod = func.bin_file.base.options.module.?;
- switch (ty.zigTypeTag(mod)) {
- .Enum => {
- if (val.castTag(.enum_field_index)) |field_index| {
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
- if (enum_type.values.len != 0) {
- const tag_val = enum_type.values[field_index.data];
- return func.valueAsI32(tag_val.toValue(), enum_type.tag_ty.toType());
- } else {
- return @bitCast(i32, field_index.data);
- }
- } else {
- const int_tag_ty = try ty.intTagType(mod);
- return func.valueAsI32(val, int_tag_ty);
- }
- },
- .Int => switch (ty.intInfo(mod).signedness) {
- .signed => return @truncate(i32, val.toSignedInt(mod)),
- .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))),
+
+ switch (val.ip_index) {
+ .none => {},
+ .bool_true => return 1,
+ .bool_false => return 0,
+ else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int),
+ .int => |int| intStorageAsI32(int.storage),
+ .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int),
+ else => unreachable,
},
+ }
+
+ switch (ty.zigTypeTag(mod)) {
.ErrorSet => {
const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
return @bitCast(i32, kv.value);
},
- .Bool => return @intCast(i32, val.toSignedInt(mod)),
- .Pointer => return @intCast(i32, val.toSignedInt(mod)),
else => unreachable, // Programmer called this function for an illegal type
}
}
+fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index) i32 {
+ return intStorageAsI32(ip.indexToKey(int).int.storage);
+}
+
+fn intStorageAsI32(storage: InternPool.Key.Int.Storage) i32 {
+ return switch (storage) {
+ .i64 => |x| @intCast(i32, x),
+ .u64 => |x| @bitCast(i32, @intCast(u32, x)),
+ .big_int => unreachable,
+ };
+}
+
fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
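(Illustrative sketch, not part of this commit.) The rewritten valueAsI32 above dispatches on the interned key instead of the type tag, and the new intStorageAsI32 helper performs the 64-bit-to-i32 narrowing. A minimal standalone model of that narrowing, with a stand-in Storage union in place of InternPool.Key.Int.Storage and the same 0.11-era cast builtins as the surrounding code:

const std = @import("std");

const Storage = union(enum) { i64: i64, u64: u64 };

fn storageAsI32(storage: Storage) i32 {
    return switch (storage) {
        // Signed values are assumed to already fit in an i32.
        .i64 => |x| @intCast(i32, x),
        // Unsigned values are narrowed to u32 first, then reinterpreted,
        // so e.g. 0xFFFF_FFFF round-trips as -1.
        .u64 => |x| @bitCast(i32, @intCast(u32, x)),
    };
}

test "unsigned storage reinterprets as two's complement" {
    try std.testing.expectEqual(@as(i32, -1), storageAsI32(.{ .u64 = 0xFFFF_FFFF }));
    try std.testing.expectEqual(@as(i32, -5), storageAsI32(.{ .i64 = -5 }));
}
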
@@ -3772,7 +3769,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
for (items, 0..) |ref, i| {
const item_val = (try func.air.value(ref, mod)).?;
- const int_val = try func.valueAsI32(item_val, target_ty);
+ const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
}
@@ -5071,12 +5068,8 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
- const enum_field_index = tag_ty.enumFieldIndex(field_name).?;
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, enum_field_index),
- };
- const tag_val = Value.initPayload(&tag_val_payload.base);
+ const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?;
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
break :blk try func.lowerConstant(tag_val, tag_ty);
};
if (layout.payload_size == 0) {
@@ -6815,7 +6808,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse.
// generate an if-else chain for each tag value as well as constant.
- for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index| {
+ for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
// for each tag name, create an unnamed const,
// and then get a pointer to its value.
@@ -6857,11 +6851,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.local_get));
try leb.writeULEB128(writer, @as(u32, 1));
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
- const tag_value = try func.lowerConstant(Value.initPayload(&tag_val_payload.base), enum_ty);
+ const tag_val = try mod.enumValueFieldIndex(enum_ty, field_index);
+ const tag_value = try func.lowerConstant(tag_val, enum_ty);
switch (tag_value) {
.imm32 => |value| {
src/arch/x86_64/CodeGen.zig
@@ -2029,13 +2029,10 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
exitlude_jump_relocs,
enum_ty.enumFields(mod),
0..,
- ) |*exitlude_jump_reloc, tag_name_ip, index| {
+ ) |*exitlude_jump_reloc, tag_name_ip, index_usize| {
+ const index = @intCast(u32, index_usize);
const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
- var tag_pl = Value.Payload.U32{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, index),
- };
- const tag_val = Value.initPayload(&tag_pl.base);
+ const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val });
try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv);
const skip_reloc = try self.asmJccReloc(undefined, .ne);
@@ -11415,8 +11412,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const field_name = union_obj.fields.keys()[extra.field_index];
const tag_ty = union_obj.tag_ty;
const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
- var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index };
- const tag_val = Value.initPayload(&tag_pl.base);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
const tag_int = tag_int_val.toUnsignedInt(mod);
const tag_off = if (layout.tag_align < layout.payload_align)
src/codegen/c.zig
@@ -748,7 +748,7 @@ pub const DeclGen = struct {
.ReleaseFast, .ReleaseSmall => false,
};
- if (val.isUndefDeep()) {
+ if (val.isUndefDeep(mod)) {
switch (ty.zigTypeTag(mod)) {
.Bool => {
if (safety_on) {
@@ -1183,7 +1183,7 @@ pub const DeclGen = struct {
var index: usize = 0;
while (index < ai.len) : (index += 1) {
const elem_val = try val.elemValue(mod, index);
- const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
try literal.writeChar(elem_val_u8);
}
if (ai.sentinel) |s| {
@@ -1197,7 +1197,7 @@ pub const DeclGen = struct {
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeByte(',');
const elem_val = try val.elemValue(mod, index);
- const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
try writer.print("'\\x{x}'", .{elem_val_u8});
}
if (ai.sentinel) |s| {
@@ -1284,23 +1284,16 @@ pub const DeclGen = struct {
try dg.renderValue(writer, error_ty, error_val, initializer_type);
try writer.writeAll(" }");
},
- .Enum => {
- switch (val.tag()) {
- .enum_field_index => {
- const field_index = val.castTag(.enum_field_index).?.data;
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
- if (enum_type.values.len != 0) {
- const tag_val = enum_type.values[field_index];
- return dg.renderValue(writer, enum_type.tag_ty.toType(), tag_val.toValue(), location);
- } else {
- return writer.print("{d}", .{field_index});
- }
- },
- else => {
- const int_tag_ty = try ty.intTagType(mod);
- return dg.renderValue(writer, int_tag_ty, val, location);
- },
- }
+ .Enum => switch (val.ip_index) {
+ .none => {
+ const int_tag_ty = try ty.intTagType(mod);
+ return dg.renderValue(writer, int_tag_ty, val, location);
+ },
+ else => {
+ const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag;
+ const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+ return dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location);
+ },
},
.Fn => switch (val.tag()) {
.function => {
@@ -2524,13 +2517,10 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try w.writeByte('(');
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
try w.writeAll(") {\n switch (tag) {\n");
- for (enum_ty.enumFields(mod), 0..) |name_ip, index| {
+ for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
+ const index = @intCast(u32, index_usize);
const name = mod.intern_pool.stringToSlice(name_ip);
- var tag_pl: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, index),
- };
- const tag_val = Value.initPayload(&tag_pl.base);
+ const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
const int_val = try tag_val.enumToInt(enum_ty, mod);
@@ -3609,7 +3599,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.typeOf(bin_op.rhs);
- const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false;
+ const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep(mod) else false;
if (val_is_undef) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -4267,7 +4257,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const name = f.air.nullTerminatedString(pl_op.payload);
- const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false;
+ const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep(mod) else false;
if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -6290,7 +6280,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const value = try f.resolveInst(bin_op.rhs);
const elem_ty = f.typeOf(bin_op.rhs);
const elem_abi_size = elem_ty.abiSize(mod);
- const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
+ const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
const writer = f.object.writer();
if (val_is_undef) {
@@ -6907,11 +6897,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
if (layout.tag_size != 0) {
const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
- var tag_pl: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
- const tag_val = Value.initPayload(&tag_pl.base);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
const int_val = try tag_val.enumToInt(tag_ty, mod);
@@ -7438,7 +7424,7 @@ fn formatIntLiteral(
defer allocator.free(undef_limbs);
var int_buf: Value.BigIntSpace = undefined;
- const int = if (data.val.isUndefDeep()) blk: {
+ const int = if (data.val.isUndefDeep(mod)) blk: {
undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
@memset(undef_limbs, undefPattern(BigIntLimb));
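(Illustrative sketch, not part of this commit.) Throughout this change, isUndef and isUndefDeep now take the Module: with the new undef key in src/InternPool.zig below, undef-ness is a property of the interned entry, so the query has to reach the pool. A toy model of that shape, with made-up Key/Pool/Value types standing in for the real ones:

const std = @import("std");

const Key = union(enum) { undef: u32, int: i64 };

const Pool = struct {
    keys: []const Key,

    fn indexToKey(p: Pool, i: u32) Key {
        return p.keys[i];
    }
};

const Value = struct {
    ip_index: u32,

    // The check needs the pool (reached via the Module in the real code).
    fn isUndef(v: Value, pool: Pool) bool {
        return pool.indexToKey(v.ip_index) == .undef;
    }
};

test "undef check goes through the pool" {
    const pool = Pool{ .keys = &.{ .{ .undef = 0 }, .{ .int = 42 } } };
    try std.testing.expect((Value{ .ip_index = 0 }).isUndef(pool));
    try std.testing.expect(!(Value{ .ip_index = 1 }).isUndef(pool));
}
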
src/codegen/llvm.zig
@@ -3233,16 +3233,16 @@ pub const DeclGen = struct {
}
fn lowerValue(dg: *DeclGen, arg_tv: TypedValue) Error!*llvm.Value {
+ const mod = dg.module;
+ const target = mod.getTarget();
var tv = arg_tv;
if (tv.val.castTag(.runtime_value)) |rt| {
tv.val = rt.data;
}
- if (tv.val.isUndef()) {
+ if (tv.val.isUndef(mod)) {
const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.getUndef();
}
- const mod = dg.module;
- const target = mod.getTarget();
switch (tv.ty.zigTypeTag(mod)) {
.Bool => {
const llvm_type = try dg.lowerType(tv.ty);
@@ -8204,7 +8204,7 @@ pub const FuncGen = struct {
const ptr_ty = self.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType(mod);
- const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
+ const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
@@ -8496,7 +8496,7 @@ pub const FuncGen = struct {
const is_volatile = ptr_ty.isVolatilePtr(mod);
if (try self.air.value(bin_op.rhs, mod)) |elem_val| {
- if (elem_val.isUndefDeep()) {
+ if (elem_val.isUndefDeep(mod)) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
// 0xaa or actual undefined for the fill byte.
@@ -8890,15 +8890,12 @@ pub const FuncGen = struct {
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len));
- for (enum_type.names, 0..) |_, field_index| {
+ for (enum_type.names, 0..) |_, field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
const this_tag_int_value = int: {
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
break :int try self.dg.lowerValue(.{
.ty = enum_ty,
- .val = Value.initPayload(&tag_val_payload.base),
+ .val = try mod.enumValueFieldIndex(enum_ty, field_index),
});
};
switch_instr.addCase(this_tag_int_value, named_block);
@@ -8973,7 +8970,8 @@ pub const FuncGen = struct {
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
- for (enum_type.names, 0..) |name_ip, field_index| {
+ for (enum_type.names, 0..) |name_ip, field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
const name = mod.intern_pool.stringToSlice(name_ip);
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_init_llvm_ty = str_init.typeOf();
@@ -8997,16 +8995,10 @@ pub const FuncGen = struct {
slice_global.setAlignment(slice_alignment);
const return_block = self.context.appendBasicBlock(fn_val, "Name");
- const this_tag_int_value = int: {
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
- break :int try self.dg.lowerValue(.{
- .ty = enum_ty,
- .val = Value.initPayload(&tag_val_payload.base),
- });
- };
+ const this_tag_int_value = try self.dg.lowerValue(.{
+ .ty = enum_ty,
+ .val = try mod.enumValueFieldIndex(enum_ty, field_index),
+ });
switch_instr.addCase(this_tag_int_value, return_block);
self.builder.positionBuilderAtEnd(return_block);
@@ -9094,7 +9086,7 @@ pub const FuncGen = struct {
for (values, 0..) |*val, i| {
const elem = try mask.elemValue(mod, i);
- if (elem.isUndef()) {
+ if (elem.isUndef(mod)) {
val.* = llvm_i32.getUndef();
} else {
const int = elem.toSignedInt(mod);
@@ -9419,11 +9411,7 @@ pub const FuncGen = struct {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const union_field_name = union_obj.fields.keys()[extra.field_index];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
- var tag_val_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, enum_field_index),
- };
- const tag_val = Value.initPayload(&tag_val_payload.base);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
break :blk tag_int_val.toUnsignedInt(mod);
};
src/codegen/spirv.zig
@@ -614,7 +614,7 @@ pub const DeclGen = struct {
const dg = self.dg;
const mod = dg.module;
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
const size = ty.abiSize(mod);
return try self.addUndef(size);
}
@@ -882,7 +882,7 @@ pub const DeclGen = struct {
// const target = self.getTarget();
// TODO: Fix the resulting global linking for these paths.
- // if (val.isUndef()) {
+ // if (val.isUndef(mod)) {
// // Special case: the entire value is undefined. In this case, we can just
// // generate an OpVariable with no initializer.
// return try section.emit(self.spv.gpa, .OpVariable, .{
@@ -978,7 +978,7 @@ pub const DeclGen = struct {
log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) });
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
return self.spv.constUndef(result_ty_ref);
}
@@ -2091,7 +2091,7 @@ pub const DeclGen = struct {
var i: usize = 0;
while (i < mask_len) : (i += 1) {
const elem = try mask.elemValue(self.module, i);
- if (elem.isUndef()) {
+ if (elem.isUndef(mod)) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
const int = elem.toSignedInt(mod);
src/link/Coff.zig
@@ -1304,7 +1304,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
const zig_ty = ty.zigTypeTag(mod);
const val = decl.val;
const index: u16 = blk: {
- if (val.isUndefDeep()) {
+ if (val.isUndefDeep(mod)) {
// TODO in release-fast and release-small, we should put undef in .bss
break :blk self.data_section_index.?;
}
src/link/Elf.zig
@@ -2456,7 +2456,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
const zig_ty = ty.zigTypeTag(mod);
const val = decl.val;
const shdr_index: u16 = blk: {
- if (val.isUndefDeep()) {
+ if (val.isUndefDeep(mod)) {
// TODO in release-fast and release-small, we should put undef in .bss
break :blk self.data_section_index.?;
}
src/link/MachO.zig
@@ -2270,7 +2270,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
const single_threaded = self.base.options.single_threaded;
const sect_id: u8 = blk: {
// TODO finish and audit this function
- if (val.isUndefDeep()) {
+ if (val.isUndefDeep(mod)) {
if (mode == .ReleaseFast or mode == .ReleaseSmall) {
@panic("TODO __DATA,__bss");
} else {
src/link/Wasm.zig
@@ -3374,7 +3374,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
try wasm.parseAtom(atom_index, .{ .data = .read_only });
- } else if (variable.init.isUndefDeep()) {
+ } else if (variable.init.isUndefDeep(mod)) {
// for safe build modes, we store the atom in the data segment,
// whereas for unsafe build modes we store it in bss.
const is_initialized = wasm.base.options.optimize_mode == .Debug or
src/Air.zig
@@ -845,6 +845,7 @@ pub const Inst = struct {
pub const Ref = enum(u32) {
u1_type = @enumToInt(InternPool.Index.u1_type),
+ u5_type = @enumToInt(InternPool.Index.u5_type),
u8_type = @enumToInt(InternPool.Index.u8_type),
i8_type = @enumToInt(InternPool.Index.i8_type),
u16_type = @enumToInt(InternPool.Index.u16_type),
@@ -913,6 +914,8 @@ pub const Inst = struct {
zero_u8 = @enumToInt(InternPool.Index.zero_u8),
one = @enumToInt(InternPool.Index.one),
one_usize = @enumToInt(InternPool.Index.one_usize),
+ one_u5 = @enumToInt(InternPool.Index.one_u5),
+ four_u5 = @enumToInt(InternPool.Index.four_u5),
negative_one = @enumToInt(InternPool.Index.negative_one),
calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
src/codegen.zig
@@ -196,7 +196,7 @@ pub fn generateSymbol(
typed_value.val.fmtValue(typed_value.ty, mod),
});
- if (typed_value.val.isUndefDeep()) {
+ if (typed_value.val.isUndefDeep(mod)) {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
return Result.ok;
@@ -1168,7 +1168,7 @@ pub fn genTypedValue(
typed_value.val.fmtValue(typed_value.ty, mod),
});
- if (typed_value.val.isUndef())
+ if (typed_value.val.isUndef(mod))
return GenResult.mcv(.undef);
const target = bin_file.options.target;
@@ -1229,24 +1229,12 @@ pub fn genTypedValue(
}
},
.Enum => {
- if (typed_value.val.castTag(.enum_field_index)) |field_index| {
- const enum_type = mod.intern_pool.indexToKey(typed_value.ty.ip_index).enum_type;
- if (enum_type.values.len != 0) {
- const tag_val = enum_type.values[field_index.data];
- return genTypedValue(bin_file, src_loc, .{
- .ty = enum_type.tag_ty.toType(),
- .val = tag_val.toValue(),
- }, owner_decl_index);
- } else {
- return GenResult.mcv(.{ .immediate = field_index.data });
- }
- } else {
- const int_tag_ty = try typed_value.ty.intTagType(mod);
- return genTypedValue(bin_file, src_loc, .{
- .ty = int_tag_ty,
- .val = typed_value.val,
- }, owner_decl_index);
- }
+ const enum_tag = mod.intern_pool.indexToKey(typed_value.val.ip_index).enum_tag;
+ const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+ return genTypedValue(bin_file, src_loc, .{
+ .ty = int_tag_ty.toType(),
+ .val = enum_tag.int.toValue(),
+ }, owner_decl_index);
},
.ErrorSet => {
switch (typed_value.val.tag()) {
src/InternPool.zig
@@ -144,6 +144,9 @@ pub const Key = union(enum) {
opaque_type: OpaqueType,
enum_type: EnumType,
+ /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
+ /// via `simple_value` and has a named `Index` tag for it.
+ undef: Index,
simple_value: SimpleValue,
extern_func: struct {
ty: Index,
@@ -155,13 +158,12 @@ pub const Key = union(enum) {
lib_name: u32,
},
int: Key.Int,
+ /// A specific enum tag, indicated by the integer tag value.
+ enum_tag: Key.EnumTag,
float: Key.Float,
ptr: Ptr,
opt: Opt,
- enum_tag: struct {
- ty: Index,
- tag: BigIntConst,
- },
+
/// An instance of a struct, array, or vector.
/// Each element/field stored as an `Index`.
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
@@ -284,21 +286,33 @@ pub const Key = union(enum) {
};
/// Look up field index based on field name.
- pub fn nameIndex(self: EnumType, ip: InternPool, name: NullTerminatedString) ?usize {
+ pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names };
- return map.getIndexAdapted(name, adapter);
+ const field_index = map.getIndexAdapted(name, adapter) orelse return null;
+ return @intCast(u32, field_index);
}
/// Look up field index based on tag value.
/// Handles both explicitly-valued and auto-numbered enums.
/// This function returns `null` when `tag_val` does not have the
/// integer tag type of the enum.
- pub fn tagValueIndex(self: EnumType, ip: InternPool, tag_val: Index) ?usize {
+ pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 {
assert(tag_val != .none);
- const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)];
- const adapter: Index.Adapter = .{ .indexes = self.values };
- return map.getIndexAdapted(tag_val, adapter);
+ if (self.values_map.unwrap()) |values_map| {
+ const map = &ip.maps.items[@enumToInt(values_map)];
+ const adapter: Index.Adapter = .{ .indexes = self.values };
+ const field_index = map.getIndexAdapted(tag_val, adapter) orelse return null;
+ return @intCast(u32, field_index);
+ }
+ // Auto-numbered enum. Convert `tag_val` to field index.
+ switch (ip.indexToKey(tag_val).int.storage) {
+ .u64 => |x| {
+ if (x >= self.names.len) return null;
+ return @intCast(u32, x);
+ },
+ .i64, .big_int => return null, // out of range
+ }
}
};
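(Illustrative sketch, not part of this commit.) The tagValueIndex branch added above handles auto-numbered enums by treating the integer tag value itself as the field index. A standalone toy version of just that conversion, with IntStorage standing in for InternPool.Key.Int.Storage (big_int omitted):

const std = @import("std");

const IntStorage = union(enum) { u64: u64, i64: i64 };

fn autoTagValueIndex(field_count: usize, storage: IntStorage) ?u32 {
    return switch (storage) {
        // Field index == tag value, as long as it is in range.
        .u64 => |x| if (x < field_count) @intCast(u32, x) else null,
        // A negative tag can never name an auto-numbered field.
        .i64 => null,
    };
}

test "auto-numbered tag lookup" {
    try std.testing.expectEqual(@as(?u32, 2), autoTagValueIndex(3, .{ .u64 = 2 }));
    try std.testing.expectEqual(@as(?u32, null), autoTagValueIndex(3, .{ .u64 = 3 }));
    try std.testing.expectEqual(@as(?u32, null), autoTagValueIndex(3, .{ .i64 = -1 }));
}
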
@@ -362,6 +376,13 @@ pub const Key = union(enum) {
};
};
+ pub const EnumTag = struct {
+ /// The enum type.
+ ty: Index,
+ /// The integer tag value which has the integer tag type of the enum.
+ int: Index,
+ };
+
pub const Float = struct {
ty: Index,
/// The storage used must match the size of the float type being represented.
@@ -436,6 +457,8 @@ pub const Key = union(enum) {
.struct_type,
.union_type,
.un,
+ .undef,
+ .enum_tag,
=> |info| std.hash.autoHash(hasher, info),
.opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
@@ -471,12 +494,6 @@ pub const Key = union(enum) {
}
},
- .enum_tag => |enum_tag| {
- std.hash.autoHash(hasher, enum_tag.ty);
- std.hash.autoHash(hasher, enum_tag.tag.positive);
- for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb);
- },
-
.aggregate => |aggregate| {
std.hash.autoHash(hasher, aggregate.ty);
for (aggregate.fields) |field| std.hash.autoHash(hasher, field);
@@ -522,6 +539,10 @@ pub const Key = union(enum) {
const b_info = b.simple_value;
return a_info == b_info;
},
+ .undef => |a_info| {
+ const b_info = b.undef;
+ return a_info == b_info;
+ },
.extern_func => |a_info| {
const b_info = b.extern_func;
return std.meta.eql(a_info, b_info);
@@ -542,6 +563,10 @@ pub const Key = union(enum) {
const b_info = b.un;
return std.meta.eql(a_info, b_info);
},
+ .enum_tag => |a_info| {
+ const b_info = b.enum_tag;
+ return std.meta.eql(a_info, b_info);
+ },
.ptr => |a_info| {
const b_info = b.ptr;
@@ -612,13 +637,6 @@ pub const Key = union(enum) {
};
},
- .enum_tag => |a_info| {
- const b_info = b.enum_tag;
- _ = a_info;
- _ = b_info;
- @panic("TODO");
- },
-
.opaque_type => |a_info| {
const b_info = b.opaque_type;
return a_info.decl == b_info.decl;
@@ -636,7 +654,7 @@ pub const Key = union(enum) {
}
pub fn typeOf(key: Key) Index {
- switch (key) {
+ return switch (key) {
.int_type,
.ptr_type,
.array_type,
@@ -648,7 +666,7 @@ pub const Key = union(enum) {
.union_type,
.opaque_type,
.enum_type,
- => return .type_type,
+ => .type_type,
inline .ptr,
.int,
@@ -658,18 +676,20 @@ pub const Key = union(enum) {
.enum_tag,
.aggregate,
.un,
- => |x| return x.ty,
+ => |x| x.ty,
+
+ .undef => |x| x,
.simple_value => |s| switch (s) {
- .undefined => return .undefined_type,
- .void => return .void_type,
- .null => return .null_type,
- .false, .true => return .bool_type,
- .empty_struct => return .empty_struct_type,
- .@"unreachable" => return .noreturn_type,
+ .undefined => .undefined_type,
+ .void => .void_type,
+ .null => .null_type,
+ .false, .true => .bool_type,
+ .empty_struct => .empty_struct_type,
+ .@"unreachable" => .noreturn_type,
.generic_poison => unreachable,
},
- }
+ };
}
};
@@ -693,6 +713,7 @@ pub const Index = enum(u32) {
pub const last_value: Index = .empty_struct;
u1_type,
+ u5_type,
u8_type,
i8_type,
u16_type,
@@ -769,6 +790,10 @@ pub const Index = enum(u32) {
one,
/// `1` (usize)
one_usize,
+ /// `1` (u5)
+ one_u5,
+ /// `4` (u5)
+ four_u5,
/// `-1` (comptime_int)
negative_one,
/// `std.builtin.CallingConvention.C`
@@ -834,6 +859,12 @@ pub const static_keys = [_]Key{
.bits = 1,
} },
+ // u5_type
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 5,
+ } },
+
.{ .int_type = .{
.signedness = .unsigned,
.bits = 8,
@@ -1021,25 +1052,30 @@ pub const static_keys = [_]Key{
.storage = .{ .u64 = 1 },
} },
+ // one_u5
+ .{ .int = .{
+ .ty = .u5_type,
+ .storage = .{ .u64 = 1 },
+ } },
+ // four_u5
+ .{ .int = .{
+ .ty = .u5_type,
+ .storage = .{ .u64 = 4 },
+ } },
+ // negative_one
.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .i64 = -1 },
} },
-
+ // calling_convention_c
.{ .enum_tag = .{
.ty = .calling_convention_type,
- .tag = .{
- .limbs = &.{@enumToInt(std.builtin.CallingConvention.C)},
- .positive = true,
- },
+ .int = .one_u5,
} },
-
+ // calling_convention_inline
.{ .enum_tag = .{
.ty = .calling_convention_type,
- .tag = .{
- .limbs = &.{@enumToInt(std.builtin.CallingConvention.Inline)},
- .positive = true,
- },
+ .int = .four_u5,
} },
.{ .simple_value = .void },
@@ -1118,6 +1154,10 @@ pub const Tag = enum(u8) {
/// `data` is `Module.Union.Index`.
type_union_safety,
+ /// Typed `undefined`.
+ /// `data` is `Index` of the type.
+ /// Untyped `undefined` is stored instead via `simple_value`.
+ undef,
/// A value that can be represented with only an enum tag.
/// data is SimpleValue enum value.
simple_value,
@@ -1132,7 +1172,7 @@ pub const Tag = enum(u8) {
/// already contains the optional type corresponding to this payload.
opt_payload,
/// An optional value that is null.
- /// data is Index of the payload type.
+ /// data is Index of the optional type.
opt_null,
/// Type: u8
/// data is integer value
@@ -1155,18 +1195,18 @@ pub const Tag = enum(u8) {
/// A comptime_int that fits in an i32.
/// data is integer value bitcasted to u32.
int_comptime_int_i32,
+ /// A non-negative integer value that fits in a u32, with an explicitly provided type.
+ /// data is extra index of `IntSmall`.
+ int_small,
/// A positive integer value.
- /// data is a limbs index to Int.
+ /// data is a limbs index to `Int`.
int_positive,
/// A negative integer value.
- /// data is a limbs index to Int.
+ /// data is a limbs index to `Int`.
int_negative,
- /// An enum tag identified by a positive integer value.
- /// data is a limbs index to Int.
- enum_tag_positive,
- /// An enum tag identified by a negative integer value.
- /// data is a limbs index to Int.
- enum_tag_negative,
+ /// An enum tag value.
+ /// data is extra index of `Key.EnumTag`.
+ enum_tag,
/// An f16 value.
/// data is float value bitcasted to u16 and zero-extended.
float_f16,
@@ -1404,6 +1444,11 @@ pub const Int = struct {
limbs_len: u32,
};
+pub const IntSmall = struct {
+ ty: Index,
+ value: u32,
+};
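(Illustrative sketch, not part of this commit.) IntSmall above backs the new int_small tag: integers whose value fits in a u32 get this compact extra-data encoding, while everything else keeps using limbs. The fitting check used later in get() can be sketched standalone (Repr below is a made-up classifier, not a real Tag):

const std = @import("std");

const Repr = enum { int_small, int_limbs };

// Mirrors the `std.math.cast(u32, x)` check: non-negative and <= maxInt(u32).
fn classify(x: i64) Repr {
    if (std.math.cast(u32, x)) |_| return .int_small;
    return .int_limbs;
}

test "small-int classification" {
    try std.testing.expectEqual(Repr.int_small, classify(0));
    try std.testing.expectEqual(Repr.int_small, classify(0xFFFF_FFFF));
    try std.testing.expectEqual(Repr.int_limbs, classify(-1));
    try std.testing.expectEqual(Repr.int_limbs, classify(1 << 40));
}
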
+
/// A f64 value, broken up into 2 u32 parts.
pub const Float64 = struct {
piece0: u32,
@@ -1479,15 +1524,28 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void {
try ip.items.ensureUnusedCapacity(gpa, static_keys.len);
try ip.map.ensureUnusedCapacity(gpa, static_keys.len);
try ip.extra.ensureUnusedCapacity(gpa, static_keys.len);
- try ip.limbs.ensureUnusedCapacity(gpa, 2);
// This inserts all the statically-known values into the intern pool in the
// order expected.
for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable;
- // Sanity check.
- assert(ip.indexToKey(.bool_true).simple_value == .true);
- assert(ip.indexToKey(.bool_false).simple_value == .false);
+ if (std.debug.runtime_safety) {
+ // Sanity check.
+ assert(ip.indexToKey(.bool_true).simple_value == .true);
+ assert(ip.indexToKey(.bool_false).simple_value == .false);
+
+ const cc_inline = ip.indexToKey(.calling_convention_inline).enum_tag.int;
+ const cc_c = ip.indexToKey(.calling_convention_c).enum_tag.int;
+
+ assert(ip.indexToKey(cc_inline).int.storage.u64 ==
+ @enumToInt(std.builtin.CallingConvention.Inline));
+
+ assert(ip.indexToKey(cc_c).int.storage.u64 ==
+ @enumToInt(std.builtin.CallingConvention.C));
+
+ assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits ==
+ @typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits);
+ }
assert(ip.items.len == static_keys.len);
}
@@ -1634,6 +1692,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.type_enum_explicit => indexToKeyEnum(ip, data, .explicit),
.type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive),
+ .undef => .{ .undef = @intToEnum(Index, data) },
.opt_null => .{ .opt = .{
.ty = @intToEnum(Index, data),
.val = .none,
@@ -1687,8 +1746,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
} },
.int_positive => indexToKeyBigInt(ip, data, true),
.int_negative => indexToKeyBigInt(ip, data, false),
- .enum_tag_positive => @panic("TODO"),
- .enum_tag_negative => @panic("TODO"),
+ .int_small => {
+ const info = ip.extraData(IntSmall, data);
+ return .{ .int = .{
+ .ty = info.ty,
+ .storage = .{ .u64 = info.value },
+ } };
+ },
.float_f16 => .{ .float = .{
.ty = .f16_type,
.storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) },
@@ -1734,6 +1798,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
};
},
.union_value => .{ .un = ip.extraData(Key.Union, data) },
+ .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) },
};
}
@@ -1896,6 +1961,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.data = @enumToInt(simple_value),
});
},
+ .undef => |ty| {
+ assert(ty != .none);
+ ip.items.appendAssumeCapacity(.{
+ .tag = .undef,
+ .data = @enumToInt(ty),
+ });
+ },
.struct_type => |struct_type| {
ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{
@@ -2112,10 +2184,32 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
switch (int.storage) {
.big_int => |big_int| {
+ if (big_int.to(u32)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_small,
+ .data = try ip.addExtra(gpa, IntSmall{
+ .ty = int.ty,
+ .value = casted,
+ }),
+ });
+ return @intToEnum(Index, ip.items.len - 1);
+ } else |_| {}
+
const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
try addInt(ip, gpa, int.ty, tag, big_int.limbs);
},
- inline .i64, .u64 => |x| {
+ inline .u64, .i64 => |x| {
+ if (std.math.cast(u32, x)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_small,
+ .data = try ip.addExtra(gpa, IntSmall{
+ .ty = int.ty,
+ .value = casted,
+ }),
+ });
+ return @intToEnum(Index, ip.items.len - 1);
+ }
+
var buf: [2]Limb = undefined;
const big_int = BigIntMutable.init(&buf, x).toConst();
const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
@@ -2124,6 +2218,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
},
+ .enum_tag => |enum_tag| {
+ assert(enum_tag.ty != .none);
+ assert(enum_tag.int != .none);
+
+ ip.items.appendAssumeCapacity(.{
+ .tag = .enum_tag,
+ .data = try ip.addExtra(gpa, enum_tag),
+ });
+ },
+
.float => |float| {
switch (float.ty) {
.f16_type => ip.items.appendAssumeCapacity(.{
@@ -2164,11 +2268,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
},
- .enum_tag => |enum_tag| {
- const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative;
- try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs);
- },
-
.aggregate => |aggregate| {
if (aggregate.fields.len == 0) {
ip.items.appendAssumeCapacity(.{
@@ -2671,44 +2770,59 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index {
/// Given an existing value, returns the same value but with the supplied type.
/// Only some combinations are allowed:
-/// * int to int
+/// * int <=> int
+/// * int <=> enum
pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
switch (ip.indexToKey(val)) {
- .int => |int| {
- // The key cannot be passed directly to `get`, otherwise in the case of
- // big_int storage, the limbs would be invalidated before they are read.
- // Here we pre-reserve the limbs to ensure that the logic in `addInt` will
- // not use an invalidated limbs pointer.
- switch (int.storage) {
- .u64 => |x| return ip.get(gpa, .{ .int = .{
- .ty = new_ty,
- .storage = .{ .u64 = x },
- } }),
- .i64 => |x| return ip.get(gpa, .{ .int = .{
- .ty = new_ty,
- .storage = .{ .i64 = x },
- } }),
-
- .big_int => |big_int| {
- const positive = big_int.positive;
- const limbs = ip.limbsSliceToIndex(big_int.limbs);
- // This line invalidates the limbs slice, but the indexes computed in the
- // previous line are still correct.
- try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len);
- return ip.get(gpa, .{ .int = .{
- .ty = new_ty,
- .storage = .{ .big_int = .{
- .limbs = ip.limbsIndexToSlice(limbs),
- .positive = positive,
- } },
- } });
- },
- }
+ .int => |int| switch (ip.indexToKey(new_ty)) {
+ .enum_type => return ip.get(gpa, .{ .enum_tag = .{
+ .ty = new_ty,
+ .int = val,
+ } }),
+ else => return getCoercedInts(ip, gpa, int, new_ty),
+ },
+ .enum_tag => |enum_tag| {
+ // Assume new_ty is an integer type.
+ return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty);
},
else => unreachable,
}
}
+/// `int` must be the key of a value with an integer type.
+/// Assumes `new_ty` is an integer type.
+pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index {
+ // The key cannot be passed directly to `get`, otherwise in the case of
+ // big_int storage, the limbs would be invalidated before they are read.
+ // Here we pre-reserve the limbs to ensure that the logic in `addInt` will
+ // not use an invalidated limbs pointer.
+ switch (int.storage) {
+ .u64 => |x| return ip.get(gpa, .{ .int = .{
+ .ty = new_ty,
+ .storage = .{ .u64 = x },
+ } }),
+ .i64 => |x| return ip.get(gpa, .{ .int = .{
+ .ty = new_ty,
+ .storage = .{ .i64 = x },
+ } }),
+
+ .big_int => |big_int| {
+ const positive = big_int.positive;
+ const limbs = ip.limbsSliceToIndex(big_int.limbs);
+ // This line invalidates the limbs slice, but the indexes computed in the
+ // previous line are still correct.
+ try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len);
+ return ip.get(gpa, .{ .int = .{
+ .ty = new_ty,
+ .storage = .{ .big_int = .{
+ .limbs = ip.limbsIndexToSlice(limbs),
+ .positive = positive,
+ } },
+ } });
+ },
+ }
+}
+
pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex {
const tags = ip.items.items(.tag);
if (val == .none) return .none;
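(Illustrative sketch, not part of this commit.) The int <=> enum coercions added to getCoerced above amount to wrapping an integer as an enum_tag, or reusing the enum_tag's inner integer; a toy standalone version with a made-up Val union in place of Key:

const std = @import("std");

const Val = union(enum) {
    int: i64,
    enum_tag: struct { int: i64 },
};

// int -> enum wraps the integer as the tag value.
fn coerceToEnum(v: Val) Val {
    return switch (v) {
        .int => |x| .{ .enum_tag = .{ .int = x } },
        .enum_tag => v,
    };
}

// enum -> int unwraps the tag's integer.
fn coerceToInt(v: Val) Val {
    return switch (v) {
        .int => v,
        .enum_tag => |t| .{ .int = t.int },
    };
}

test "int <=> enum round trip" {
    const e = coerceToEnum(.{ .int = 4 });
    try std.testing.expectEqual(@as(i64, 4), coerceToInt(e).int);
}
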
@@ -2805,6 +2919,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.type_union_safety,
=> @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
+ .undef => 0,
.simple_type => 0,
.simple_value => 0,
.ptr_int => @sizeOf(PtrInt),
@@ -2817,15 +2932,15 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.int_usize => 0,
.int_comptime_int_u32 => 0,
.int_comptime_int_i32 => 0,
+ .int_small => @sizeOf(IntSmall),
.int_positive,
.int_negative,
- .enum_tag_positive,
- .enum_tag_negative,
=> b: {
const int = ip.limbData(Int, data);
break :b @sizeOf(Int) + int.limbs_len * 8;
},
+ .enum_tag => @sizeOf(Key.EnumTag),
.float_f16 => 0,
.float_f32 => 0,
@@ -2958,3 +3073,9 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 {
pub fn typeOf(ip: InternPool, index: Index) Index {
return ip.indexToKey(index).typeOf();
}
+
+/// Assumes that the enum's field indexes equal its value tags.
+pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E {
+ const int = ip.indexToKey(i).enum_tag.int;
+ return @intToEnum(E, ip.indexToKey(int).int.storage.u64);
+}
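(Illustrative usage sketch, not part of this commit.) toEnum above reads the enum_tag's integer and converts it back to a host enum with @intToEnum, which is only valid when field order matches the value tags, as its doc comment notes. The calling_convention sanity checks added to init() rely on the same property for std.builtin.CallingConvention:

const std = @import("std");

test "integer tag back to a host enum" {
    // Assumes field indexes equal value tags, as `toEnum` requires.
    const int_val: u64 = @enumToInt(std.builtin.CallingConvention.Inline);
    const cc = @intToEnum(std.builtin.CallingConvention, int_val);
    try std.testing.expectEqual(std.builtin.CallingConvention.Inline, cc);
}
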
src/Module.zig
@@ -6896,6 +6896,43 @@ pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value
return i.toValue();
}
+/// Creates an enum tag value based on the integer tag value.
+pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value {
+ if (std.debug.runtime_safety) {
+ const tag = ty.zigTypeTag(mod);
+ assert(tag == .Enum);
+ }
+ const i = try intern(mod, .{ .enum_tag = .{
+ .ty = ty.ip_index,
+ .int = tag_int,
+ } });
+ return i.toValue();
+}
+
+/// Creates an enum tag value based on the field index according to source code
+/// declaration order.
+pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value {
+ const ip = &mod.intern_pool;
+ const gpa = mod.gpa;
+ const enum_type = ip.indexToKey(ty.ip_index).enum_type;
+
+ if (enum_type.values.len == 0) {
+ // Auto-numbered fields.
+ return (try ip.get(gpa, .{ .enum_tag = .{
+ .ty = ty.ip_index,
+ .int = try ip.get(gpa, .{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = field_index },
+ } }),
+ } })).toValue();
+ }
+
+ return (try ip.get(gpa, .{ .enum_tag = .{
+ .ty = ty.ip_index,
+ .int = enum_type.values[field_index],
+ } })).toValue();
+}
+
pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
if (std.debug.runtime_safety) {
const tag = ty.zigTypeTag(mod);
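(Illustrative sketch, not part of this commit.) enumValueFieldIndex above has two branches: with no explicit values the tag integer is the field index itself, otherwise it is values[field_index]. A minimal standalone model of that choice, with a plain u64 slice standing in for enum_type.values:

const std = @import("std");

// Explicit values win; otherwise the enum is auto-numbered.
fn tagIntForField(values: []const u64, field_index: u32) u64 {
    if (values.len == 0) return field_index;
    return values[field_index];
}

test "field index to tag integer" {
    try std.testing.expectEqual(@as(u64, 2), tagIntForField(&.{}, 2));
    try std.testing.expectEqual(@as(u64, 40), tagIntForField(&.{ 10, 20, 40 }, 2));
}
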
@@ -6967,8 +7004,8 @@ pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
/// `max`. Asserts that neither value is undef.
/// TODO: if #3806 is implemented, this becomes trivial
pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type {
- assert(!min.isUndef());
- assert(!max.isUndef());
+ assert(!min.isUndef(mod));
+ assert(!max.isUndef(mod));
if (std.debug.runtime_safety) {
assert(Value.order(min, max, mod).compare(.lte));
@@ -6990,7 +7027,7 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type {
/// twos-complement integer; otherwise in an unsigned integer.
/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
- assert(!val.isUndef());
+ assert(!val.isUndef(mod));
const key = mod.intern_pool.indexToKey(val.ip_index);
switch (key.int.storage) {
@@ -7193,3 +7230,7 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu
return owner_decl.srcLoc(mod);
}
}
+
+pub fn toEnum(mod: *Module, comptime E: type, val: Value) E {
+ return mod.intern_pool.toEnum(E, val.ip_index);
+}
src/Sema.zig
@@ -1904,8 +1904,9 @@ fn resolveDefinedValue(
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
) CompileError!?Value {
+ const mod = sema.mod;
if (try sema.resolveMaybeUndefVal(air_ref)) |val| {
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
if (block.is_typeof) return null;
return sema.failWithUseOfUndef(block, src);
}
@@ -4333,7 +4334,7 @@ fn validateUnionInit(
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
- const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
if (init_val) |val| {
// Our task is to delete all the `field_ptr` and `store` instructions, and insert
@@ -4832,7 +4833,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const elem_ty = operand_ty.elemType2(mod);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
return sema.fail(block, src, "cannot dereference undefined value", .{});
}
} else if (!(try sema.validateRunTimeType(elem_ty, false))) {
@@ -6194,15 +6195,16 @@ fn lookupInNamespace(
}
fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
+ const mod = sema.mod;
const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null;
- if (func_val.isUndef()) return null;
+ if (func_val.isUndef(mod)) return null;
const owner_decl_index = switch (func_val.tag()) {
.extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl,
.function => func_val.castTag(.function).?.data.owner_decl,
- .decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl,
+ .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl,
else => return null,
};
- return sema.mod.declPtr(owner_decl_index);
+ return mod.declPtr(owner_decl_index);
}
pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
@@ -8106,7 +8108,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
return sema.addConstUndef(Type.err_int);
}
switch (val.tag()) {
@@ -8326,7 +8328,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (int_val.isUndef()) {
+ if (int_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, operand_src);
}
if (!(try sema.enumHasInt(dest_ty, int_val))) {
@@ -11472,7 +11474,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (f != null) continue;
cases_len += 1;
- const item_val = try Value.Tag.enum_field_index.create(sema.arena, @intCast(u32, i));
+ const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i));
const item_ref = try sema.addConstant(operand_ty, item_val);
case_block.inline_case_capture = item_ref;
@@ -12208,7 +12210,7 @@ fn zirShl(
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(sema.typeOf(lhs));
}
// If rhs is 0, return lhs without doing any calculations.
@@ -12255,7 +12257,7 @@ fn zirShl(
}
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
- if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
+ if (lhs_val.isUndef(mod)) return sema.addConstUndef(lhs_ty);
const rhs_val = maybe_rhs_val orelse {
if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
@@ -12389,7 +12391,7 @@ fn zirShr(
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(lhs_ty);
}
// If rhs is 0, return lhs without doing any calculations.
@@ -12434,7 +12436,7 @@ fn zirShr(
});
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(lhs_ty);
}
if (air_tag == .shr_exact) {
@@ -12578,7 +12580,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
return sema.addConstUndef(operand_type);
} else if (operand_type.zigTypeTag(mod) == .Vector) {
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
@@ -13154,7 +13156,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (rhs_scalar_ty.isAnyFloat()) {
// We handle float negation here to ensure negative zero is represented in the bits.
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
- if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty);
+ if (rhs_val.isUndef(mod)) return sema.addConstUndef(rhs_ty);
return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod));
}
try sema.requireRuntimeBlock(block, src, null);
@@ -13297,7 +13299,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
switch (scalar_tag) {
.Int, .ComptimeInt, .ComptimeFloat => {
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
@@ -13312,7 +13314,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13326,7 +13328,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const runtime_src = rs: {
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
@@ -13434,7 +13436,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// If the lhs is undefined, compile error because there is a possible
// value for which the division would result in a remainder.
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -13451,7 +13453,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13611,7 +13613,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
@@ -13626,7 +13628,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13635,7 +13637,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// TODO: if the RHS is one, return the LHS directly
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
@@ -13732,7 +13734,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// value (zero) for which the division would be illegal behavior.
// If the lhs is undefined, result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const scalar_zero = switch (scalar_tag) {
.ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0),
@@ -13747,7 +13749,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13755,7 +13757,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
@@ -13977,7 +13979,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// then emit a compile error saying you have to pick one.
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -13995,7 +13997,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14024,7 +14026,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
// float operands
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14034,7 +14036,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef() or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+ if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
return sema.addConstant(
@@ -14155,12 +14157,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// If the lhs is undefined, result is undefined.
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14179,7 +14181,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
// float operands
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14187,7 +14189,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
@@ -14257,12 +14259,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// If the lhs is undefined, result is undefined.
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14281,7 +14283,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
// float operands
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14289,7 +14291,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
@@ -14372,18 +14374,18 @@ fn zirOverflowArithmetic(
// to the result, even if it is undefined.
// Otherwise, if either of the arguments is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
@@ -14396,12 +14398,12 @@ fn zirOverflowArithmetic(
// If the rhs is zero, then the result is lhs and no overflow occurred.
// Otherwise, if either result is undefined, both results are undefined.
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
@@ -14416,7 +14418,7 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the arguments is undefined, both results are undefined.
const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
} else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
@@ -14426,7 +14428,7 @@ fn zirOverflowArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef()) {
+ if (!rhs_val.isUndef(mod)) {
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
} else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
@@ -14437,7 +14439,7 @@ fn zirOverflowArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
@@ -14451,18 +14453,18 @@ fn zirOverflowArithmetic(
// If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
@@ -14606,12 +14608,12 @@ fn analyzeArithmetic(
// overflow (max_int), causing illegal behavior.
// For floats: either operand being undef makes the result undef.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
return casted_rhs;
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
@@ -14624,7 +14626,7 @@ fn analyzeArithmetic(
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add;
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
@@ -14653,13 +14655,13 @@ fn analyzeArithmetic(
// If either of the operands are zero, the other operand is returned.
// If either of the operands are undefined, the result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
return casted_rhs;
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .addwrap_optimized else .addwrap;
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14678,12 +14680,12 @@ fn analyzeArithmetic(
// If either of the operands are zero, then the other operand is returned.
// If either of the operands are undefined, the result is undefined.
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
return casted_rhs;
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14708,7 +14710,7 @@ fn analyzeArithmetic(
// overflow, causing illegal behavior.
// For floats: either operand being undef makes the result undef.
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
@@ -14721,7 +14723,7 @@ fn analyzeArithmetic(
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub;
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
@@ -14750,7 +14752,7 @@ fn analyzeArithmetic(
// If the RHS is zero, then the other operand is returned, even if it is undefined.
// If either of the operands are undefined, the result is undefined.
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14759,7 +14761,7 @@ fn analyzeArithmetic(
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .subwrap_optimized else .subwrap;
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
@@ -14775,7 +14777,7 @@ fn analyzeArithmetic(
// If the RHS is zero, result is LHS.
// If either of the operands are undefined, result is undefined.
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14783,7 +14785,7 @@ fn analyzeArithmetic(
}
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
@@ -14814,7 +14816,7 @@ fn analyzeArithmetic(
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (lhs_val.isNan(mod)) {
return sema.addConstant(resolved_type, lhs_val);
}
@@ -14844,7 +14846,7 @@ fn analyzeArithmetic(
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul;
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, rhs_src);
} else {
@@ -14874,7 +14876,7 @@ fn analyzeArithmetic(
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
if (is_int) {
return sema.failWithUseOfUndef(block, lhs_src);
} else {
@@ -14908,7 +14910,7 @@ fn analyzeArithmetic(
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
@@ -14922,7 +14924,7 @@ fn analyzeArithmetic(
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mulwrap_optimized else .mulwrap;
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14935,7 +14937,7 @@ fn analyzeArithmetic(
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
return sema.addConstant(
@@ -14956,7 +14958,7 @@ fn analyzeArithmetic(
else => unreachable,
};
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
const zero_val = if (is_vector) b: {
break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
@@ -14969,7 +14971,7 @@ fn analyzeArithmetic(
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14982,7 +14984,7 @@ fn analyzeArithmetic(
return casted_lhs;
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
@@ -15100,7 +15102,7 @@ fn analyzePtrArithmetic(
const runtime_src = rs: {
if (opt_ptr_val) |ptr_val| {
if (opt_off_val) |offset_val| {
- if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty);
+ if (ptr_val.isUndef(mod)) return sema.addConstUndef(new_ptr_ty);
const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod));
if (offset_int == 0) return ptr;
@@ -15363,7 +15365,7 @@ fn zirCmpEq(
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(lhs)) |lval| {
if (try sema.resolveMaybeUndefVal(rhs)) |rval| {
- if (lval.isUndef() or rval.isUndef()) {
+ if (lval.isUndef(mod) or rval.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
// TODO optimisation opportunity: evaluate if mem.eql is faster with the names,
@@ -15425,7 +15427,7 @@ fn analyzeCmpUnionTag(
const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| {
- if (enum_val.isUndef()) return sema.addConstUndef(Type.bool);
+ if (enum_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
const field_ty = union_ty.unionFieldType(enum_val, sema.mod);
if (field_ty.zigTypeTag(mod) == .NoReturn) {
return Air.Inst.Ref.bool_false;
@@ -15527,9 +15529,9 @@ fn cmpSelf(
const resolved_type = sema.typeOf(casted_lhs);
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
- if (lhs_val.isUndef()) return sema.addConstUndef(Type.bool);
+ if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
- if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
+ if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
if (resolved_type.zigTypeTag(mod) == .Vector) {
const result_ty = try mod.vectorType(.{
@@ -15557,7 +15559,7 @@ fn cmpSelf(
// bool eq/neq more efficiently.
if (resolved_type.zigTypeTag(mod) == .Bool) {
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
- if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
+ if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool);
return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(mod), lhs_src);
}
}
@@ -15892,68 +15894,69 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const src = inst_data.src();
const ty = try sema.resolveType(block, src, inst_data.operand);
const type_info_ty = try sema.getBuiltinType("Type");
+ const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
switch (ty.zigTypeTag(mod)) {
.Type => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Type)),
.val = Value.void,
}),
),
.Void => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)),
.val = Value.void,
}),
),
.Bool => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)),
.val = Value.void,
}),
),
.NoReturn => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)),
.val = Value.void,
}),
),
.ComptimeFloat => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)),
.val = Value.void,
}),
),
.ComptimeInt => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)),
.val = Value.void,
}),
),
.Undefined => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)),
.val = Value.void,
}),
),
.Null => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)),
.val = Value.void,
}),
),
.EnumLiteral => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)),
.val = Value.void,
}),
),
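Note (not part of the commit): every `@typeInfo` case above follows one pattern. The union tag is no longer an untyped `enum_field_index` payload but a properly typed enum value, so the tag type of `std.builtin.Type` is resolved once (`type_info_tag_ty`) and each case calls `mod.enumValueFieldIndex`. A sketch of one case, reusing only names visible in the hunk:

const type_info_ty = try sema.getBuiltinType("Type");
const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
// The .Bool case, with the tag carrying its enum type.
return sema.addConstant(
    type_info_ty,
    try Value.Tag.@"union".create(sema.arena, .{
        .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)),
        .val = Value.void,
    }),
);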
@@ -16040,10 +16043,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
Value.null;
+ const callconv_ty = try sema.getBuiltinType("CallingConvention");
+
const field_values = try sema.arena.create([6]Value);
field_values.* = .{
// calling_convention: CallingConvention,
- try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)),
+ try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)),
// alignment: comptime_int,
try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)),
// is_generic: bool,
@@ -16059,26 +16064,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
},
.Int => {
+ const signedness_ty = try sema.getBuiltinType("Signedness");
const info = ty.intInfo(mod);
const field_values = try sema.arena.alloc(Value, 2);
// signedness: Signedness,
- field_values[0] = try Value.Tag.enum_field_index.create(
- sema.arena,
- @enumToInt(info.signedness),
- );
+ field_values[0] = try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness));
// bits: u16,
field_values[1] = try mod.intValue(Type.u16, info.bits);
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16091,7 +16094,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16103,10 +16106,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
try info.pointee_type.lazyAbiAlignment(mod, sema.arena);
+ const addrspace_ty = try sema.getBuiltinType("AddressSpace");
+ const ptr_size_ty = try sema.getBuiltinType("PtrSize");
+
const field_values = try sema.arena.create([8]Value);
field_values.* = .{
// size: Size,
- try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)),
+ try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size)),
// is_const: bool,
Value.makeBool(!info.mutable),
// is_volatile: bool,
@@ -16114,7 +16120,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// alignment: comptime_int,
alignment,
// address_space: AddressSpace
- try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace")),
+ try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")),
// child: type,
try Value.Tag.ty.create(sema.arena, info.pointee_type),
// is_allowzero: bool,
@@ -16126,7 +16132,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Pointer)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16144,7 +16150,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16160,7 +16166,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Vector)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16173,7 +16179,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16263,7 +16269,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorSet)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)),
.val = errors_val,
}),
);
@@ -16278,7 +16284,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16365,7 +16371,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Enum)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16454,13 +16460,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :v try Value.Tag.opt_payload.create(sema.arena, ty_val);
} else Value.null;
+ const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias");
+
const field_values = try sema.arena.create([4]Value);
field_values.* = .{
// layout: ContainerLayout,
- try Value.Tag.enum_field_index.create(
- sema.arena,
- @enumToInt(layout),
- ),
+ try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)),
// tag_type: ?type,
enum_tag_ty_val,
@@ -16473,7 +16478,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Union)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
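Note (not part of the commit): the enum-typed payload fields get the same treatment. `CallingConvention`, `Signedness`, `AddressSpace`, pointer size, and container layout each look up their enum type with `sema.getBuiltinType` before the value is built, which is why `PtrSize` and `TmpContainerLayoutAlias` are resolved by name here. A fragment sketch, assuming `layout` and `field_values` as in the hunk above:

const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias");
// layout: ContainerLayout, now a typed enum value rather than a bare field index
field_values[0] = try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout));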
@@ -16625,13 +16630,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
};
+ const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias");
+
const field_values = try sema.arena.create([5]Value);
field_values.* = .{
// layout: ContainerLayout,
- try Value.Tag.enum_field_index.create(
- sema.arena,
- @enumToInt(layout),
- ),
+ try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)),
// backing_integer: ?type,
backing_integer_val,
// fields: []const StructField,
@@ -16645,7 +16649,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Struct)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16665,7 +16669,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Opaque)),
+ .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)),
.val = try Value.Tag.aggregate.create(sema.arena, field_values),
}),
);
@@ -16912,7 +16916,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- return if (val.isUndef())
+ return if (val.isUndef(mod))
sema.addConstUndef(Type.bool)
else if (val.toBool(mod))
Air.Inst.Ref.bool_false
@@ -17879,7 +17883,7 @@ fn unionInit(
if (try sema.resolveMaybeUndefVal(init)) |init_val| {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
- const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
.tag = tag_val,
.val = init_val,
@@ -17980,7 +17984,7 @@ fn zirStructInit(
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
- const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const init_inst = try sema.resolveInst(item.data.init);
if (try sema.resolveMaybeUndefVal(init_inst)) |val| {
@@ -18614,7 +18618,7 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) return sema.addConstUndef(Type.u1);
+ if (val.isUndef(mod)) return sema.addConstUndef(Type.u1);
if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1));
return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0));
}
@@ -18673,7 +18677,7 @@ fn zirUnaryMath(
.child = scalar_ty.ip_index,
});
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef())
+ if (val.isUndef(mod))
return sema.addConstUndef(result_ty);
const elems = try sema.arena.alloc(Value, vec_len);
@@ -18692,7 +18696,7 @@ fn zirUnaryMath(
},
.ComptimeFloat, .Float => {
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
- if (operand_val.isUndef())
+ if (operand_val.isUndef(mod))
return sema.addConstUndef(operand_ty);
const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod);
return sema.addConstant(operand_ty, result_val);
@@ -18809,7 +18813,7 @@ fn zirReify(
const signedness_val = struct_val[0];
const bits_val = struct_val[1];
- const signedness = signedness_val.toEnum(std.builtin.Signedness);
+ const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
const ty = try mod.intType(signedness, bits);
return sema.addType(ty);
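Note (not part of the commit): `Value.toEnum` likewise moves to the Module, so every `@Type` field that used to call `val.toEnum(T)` now calls `mod.toEnum(T, val)`. Reassembling the `Int` case from the hunk above as one sketch:

// Reading @Type(.{ .Int = ... }) fields through the Module.
const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
const ty = try mod.intType(signedness, bits);
return sema.addType(ty);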
@@ -18874,7 +18878,7 @@ fn zirReify(
break :t elem_ty;
};
- const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size);
+ const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);
var actual_sentinel: ?Value = null;
if (!sentinel_val.isNull(mod)) {
@@ -18927,7 +18931,7 @@ fn zirReify(
.mutable = !is_const_val.toBool(mod),
.@"volatile" = is_volatile_val.toBool(mod),
.@"align" = abi_align,
- .@"addrspace" = address_space_val.toEnum(std.builtin.AddressSpace),
+ .@"addrspace" = mod.toEnum(std.builtin.AddressSpace, address_space_val),
.pointee_type = try elem_ty.copy(sema.arena),
.@"allowzero" = is_allowzero_val.toBool(mod),
.sentinel = actual_sentinel,
@@ -19033,7 +19037,7 @@ fn zirReify(
const is_tuple_val = struct_val[4];
assert(struct_val.len == 5);
- const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
+ const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
// Decls
if (decls_val.sliceLen(mod) > 0) {
@@ -19208,7 +19212,7 @@ fn zirReify(
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified unions must have no decls", .{});
}
- const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
+ const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
@@ -19309,7 +19313,7 @@ fn zirReify(
}
if (explicit_enum_info) |tag_info| {
- const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse {
+ const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) });
errdefer msg.destroy(gpa);
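Note (not part of the commit): only the parameter convention changes here; `nameIndex` now takes the `InternPool` by pointer. A condensed sketch of the lookup, assuming `tag_info` and `field_name_ip` as in the hunk and using a plain `sema.fail` in place of the full error-message construction:

const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse
    return sema.fail(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) });
// enum_index is then used to validate the union field against the explicit tag type.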
@@ -19402,7 +19406,7 @@ fn zirReify(
const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// calling_convention: CallingConvention,
- const cc = struct_val[0].toEnum(std.builtin.CallingConvention);
+ const cc = mod.toEnum(std.builtin.CallingConvention, struct_val[0]);
// alignment: comptime_int,
const alignment_val = struct_val[1];
// is_generic: bool,
@@ -20180,7 +20184,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| {
- if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef()) {
+ if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, operand_src);
}
if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) {
@@ -20315,7 +20319,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
- if (val.isUndef()) return sema.addConstUndef(dest_ty);
+ if (val.isUndef(mod)) return sema.addConstUndef(dest_ty);
if (!is_vector) {
return sema.addConstant(
dest_ty,
@@ -20419,7 +20423,7 @@ fn zirBitCount(
.child = result_scalar_ty.ip_index,
});
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_ty);
+ if (val.isUndef(mod)) return sema.addConstUndef(result_ty);
const elems = try sema.arena.alloc(Value, vec_len);
const scalar_ty = operand_ty.scalarType(mod);
@@ -20439,7 +20443,7 @@ fn zirBitCount(
},
.Int => {
if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_scalar_ty);
+ if (val.isUndef(mod)) return sema.addConstUndef(result_scalar_ty);
try sema.resolveLazyValue(val);
return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod));
} else {
@@ -20476,7 +20480,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) return sema.addConstUndef(operand_ty);
+ if (val.isUndef(mod)) return sema.addConstUndef(operand_ty);
const result_val = try val.byteSwap(operand_ty, mod, sema.arena);
return sema.addConstant(operand_ty, result_val);
} else operand_src;
@@ -20486,7 +20490,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.Vector => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef())
+ if (val.isUndef(mod))
return sema.addConstUndef(operand_ty);
const vec_len = operand_ty.vectorLen(mod);
@@ -20524,7 +20528,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef()) return sema.addConstUndef(operand_ty);
+ if (val.isUndef(mod)) return sema.addConstUndef(operand_ty);
const result_val = try val.bitReverse(operand_ty, mod, sema.arena);
return sema.addConstant(operand_ty, result_val);
} else operand_src;
@@ -20534,7 +20538,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
},
.Vector => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
- if (val.isUndef())
+ if (val.isUndef(mod))
return sema.addConstUndef(operand_ty);
const vec_len = operand_ty.vectorLen(mod);
@@ -21072,7 +21076,7 @@ fn resolveExportOptions(
const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known");
- const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage);
+ const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
@@ -21084,7 +21088,7 @@ fn resolveExportOptions(
const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src);
const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known");
- const visibility = visibility_val.toEnum(std.builtin.SymbolVisibility);
+ const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val);
if (name.len < 1) {
return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
@@ -21112,11 +21116,12 @@ fn resolveBuiltinEnum(
comptime name: []const u8,
reason: []const u8,
) CompileError!@field(std.builtin, name) {
+ const mod = sema.mod;
const ty = try sema.getBuiltinType(name);
const air_ref = try sema.resolveInst(zir_ref);
const coerced = try sema.coerce(block, ty, air_ref, src);
const val = try sema.resolveConstValue(block, src, coerced, reason);
- return val.toEnum(@field(std.builtin, name));
+ return mod.toEnum(@field(std.builtin, name), val);
}
fn resolveAtomicOrder(
@@ -21198,7 +21203,7 @@ fn zirCmpxchg(
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
if (try sema.resolveMaybeUndefVal(expected_value)) |expected_val| {
if (try sema.resolveMaybeUndefVal(new_value)) |new_val| {
- if (expected_val.isUndef() or new_val.isUndef()) {
+ if (expected_val.isUndef(mod) or new_val.isUndef(mod)) {
// TODO: this should probably cause the memory stored at the pointer
// to become undef as well
return sema.addConstUndef(result_ty);
@@ -21248,7 +21253,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
.child = scalar_ty.ip_index,
});
if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
- if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty);
+ if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty);
return sema.addConstant(
vector_ty,
@@ -21300,7 +21305,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
- if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty);
+ if (operand_val.isUndef(mod)) return sema.addConstUndef(scalar_ty);
var accum: Value = try operand_val.elemValue(mod, 0);
var i: u32 = 1;
@@ -21420,7 +21425,7 @@ fn analyzeShuffle(
var i: usize = 0;
while (i < mask_len) : (i += 1) {
const elem = try mask.elemValue(sema.mod, i);
- if (elem.isUndef()) continue;
+ if (elem.isUndef(mod)) continue;
const int = elem.toSignedInt(mod);
var unsigned: u32 = undefined;
var chosen: u32 = undefined;
@@ -21458,7 +21463,7 @@ fn analyzeShuffle(
i = 0;
while (i < mask_len) : (i += 1) {
const mask_elem_val = try mask.elemValue(sema.mod, i);
- if (mask_elem_val.isUndef()) {
+ if (mask_elem_val.isUndef(mod)) {
values[i] = Value.undef;
continue;
}
@@ -21559,13 +21564,13 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
const maybe_b = try sema.resolveMaybeUndefVal(b);
const runtime_src = if (maybe_pred) |pred_val| rs: {
- if (pred_val.isUndef()) return sema.addConstUndef(vec_ty);
+ if (pred_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
if (maybe_a) |a_val| {
- if (a_val.isUndef()) return sema.addConstUndef(vec_ty);
+ if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
if (maybe_b) |b_val| {
- if (b_val.isUndef()) return sema.addConstUndef(vec_ty);
+ if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
const elems = try sema.gpa.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
@@ -21587,16 +21592,16 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
}
} else {
if (maybe_b) |b_val| {
- if (b_val.isUndef()) return sema.addConstUndef(vec_ty);
+ if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
}
break :rs a_src;
}
} else rs: {
if (maybe_a) |a_val| {
- if (a_val.isUndef()) return sema.addConstUndef(vec_ty);
+ if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
}
if (maybe_b) |b_val| {
- if (b_val.isUndef()) return sema.addConstUndef(vec_ty);
+ if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
}
break :rs pred_src;
};
@@ -21803,10 +21808,10 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const runtime_src = if (maybe_mulend1) |mulend1_val| rs: {
if (maybe_mulend2) |mulend2_val| {
- if (mulend2_val.isUndef()) return sema.addConstUndef(ty);
+ if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty);
if (maybe_addend) |addend_val| {
- if (addend_val.isUndef()) return sema.addConstUndef(ty);
+ if (addend_val.isUndef(mod)) return sema.addConstUndef(ty);
const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod);
return sema.addConstant(ty, result_val);
} else {
@@ -21814,16 +21819,16 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
} else {
if (maybe_addend) |addend_val| {
- if (addend_val.isUndef()) return sema.addConstUndef(ty);
+ if (addend_val.isUndef(mod)) return sema.addConstUndef(ty);
}
break :rs mulend2_src;
}
} else rs: {
if (maybe_mulend2) |mulend2_val| {
- if (mulend2_val.isUndef()) return sema.addConstUndef(ty);
+ if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty);
}
if (maybe_addend) |addend_val| {
- if (addend_val.isUndef()) return sema.addConstUndef(ty);
+ if (addend_val.isUndef(mod)) return sema.addConstUndef(ty);
}
break :rs mulend1_src;
};
@@ -21859,7 +21864,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const air_ref = try sema.resolveInst(extra.modifier);
const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src);
const modifier_val = try sema.resolveConstValue(block, modifier_src, modifier_ref, "call modifier must be comptime-known");
- var modifier = modifier_val.toEnum(std.builtin.CallModifier);
+ var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val);
switch (modifier) {
// These can be upgraded to comptime or nosuspend calls.
.auto, .never_tail, .no_async => {
@@ -22111,8 +22116,8 @@ fn analyzeMinMax(
runtime_known.unset(operand_idx);
- if (cur_val.isUndef()) continue; // result is also undef
- if (operand_val.isUndef()) {
+ if (cur_val.isUndef(mod)) continue; // result is also undef
+ if (operand_val.isUndef(mod)) {
cur_minmax = try sema.addConstUndef(simd_op.result_ty);
continue;
}
@@ -22165,7 +22170,7 @@ fn analyzeMinMax(
var cur_max: Value = cur_min;
for (1..len) |idx| {
const elem_val = try val.elemValue(mod, idx);
- if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef
+ if (elem_val.isUndef(mod)) break :blk orig_ty; // can't refine undef
if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val;
if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val;
}
@@ -22177,7 +22182,7 @@ fn analyzeMinMax(
});
} else blk: {
if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
- if (val.isUndef()) break :blk orig_ty; // can't refine undef
+ if (val.isUndef(mod)) break :blk orig_ty; // can't refine undef
break :blk try mod.intFittingRange(val, val);
};
@@ -22205,7 +22210,7 @@ fn analyzeMinMax(
// If the comptime-known part is undef we can avoid emitting actual instructions later
const known_undef = if (cur_minmax) |operand| blk: {
const val = (try sema.resolveMaybeUndefVal(operand)).?;
- break :blk val.isUndef();
+ break :blk val.isUndef(mod);
} else false;
if (cur_minmax == null) {
@@ -22749,7 +22754,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (val.isGenericPoison()) {
break :blk null;
}
- break :blk val.toEnum(std.builtin.AddressSpace);
+ break :blk mod.toEnum(std.builtin.AddressSpace, val);
} else if (extra.data.bits.has_addrspace_ref) blk: {
const addrspace_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
@@ -22759,7 +22764,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
else => |e| return e,
};
- break :blk addrspace_tv.val.toEnum(std.builtin.AddressSpace);
+ break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
} else target_util.defaultAddressSpace(target, .function);
const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: {
@@ -22797,7 +22802,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (val.isGenericPoison()) {
break :blk null;
}
- break :blk val.toEnum(std.builtin.CallingConvention);
+ break :blk mod.toEnum(std.builtin.CallingConvention, val);
} else if (extra.data.bits.has_cc_ref) blk: {
const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
@@ -22807,7 +22812,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
else => |e| return e,
};
- break :blk cc_tv.val.toEnum(std.builtin.CallingConvention);
+ break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val);
} else if (sema.owner_decl.is_exported and has_body)
.C
else
@@ -22994,9 +22999,9 @@ fn resolvePrefetchOptions(
const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known");
return std.builtin.PrefetchOptions{
- .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw),
+ .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
.locality = @intCast(u2, locality_val.toUnsignedInt(mod)),
- .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache),
+ .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
};
}
@@ -23059,7 +23064,7 @@ fn resolveExternOptions(
const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src);
const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known");
- const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage);
+ const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src);
const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known");
@@ -24140,7 +24145,7 @@ fn fieldVal(
const field_index = @intCast(u32, field_index_usize);
return sema.addConstant(
enum_ty,
- try Value.Tag.enum_field_index.create(sema.arena, field_index),
+ try mod.enumValueFieldIndex(enum_ty, field_index),
);
}
}
@@ -24155,8 +24160,8 @@ fn fieldVal(
const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
const field_index = @intCast(u32, field_index_usize);
- const enum_val = try Value.Tag.enum_field_index.create(arena, field_index);
- return sema.addConstant(try child_type.copy(arena), enum_val);
+ const enum_val = try mod.enumValueFieldIndex(child_type, field_index);
+ return sema.addConstant(child_type, enum_val);
},
.Struct, .Opaque => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
@@ -24355,8 +24360,8 @@ fn fieldPtr(
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
- try enum_ty.copy(anon_decl.arena()),
- try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32),
+ enum_ty,
+ try mod.enumValueFieldIndex(enum_ty, field_index_u32),
0, // default alignment
));
}
@@ -24376,8 +24381,8 @@ fn fieldPtr(
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
- try child_type.copy(anon_decl.arena()),
- try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32),
+ child_type,
+ try mod.enumValueFieldIndex(child_type, field_index_u32),
0, // default alignment
));
},
@@ -24850,7 +24855,7 @@ fn structFieldVal(
}
if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| {
- if (struct_val.isUndef()) return sema.addConstUndef(field.ty);
+ if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty);
if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
return sema.addConstant(field.ty, opv);
}
@@ -24922,7 +24927,7 @@ fn tupleFieldValByIndex(
}
if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| {
- if (tuple_val.isUndef()) return sema.addConstUndef(field_ty);
+ if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty);
if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
return sema.addConstant(field_ty, opv);
}
@@ -24983,19 +24988,15 @@ fn unionFieldPtr(
.Auto => if (!initializing) {
const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
break :ct;
- if (union_val.isUndef()) {
+ if (union_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, src);
}
const tag_and_val = union_val.castTag(.@"union").?.data;
- var field_tag_buf: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = enum_field_index,
- };
- const field_tag = Value.initPayload(&field_tag_buf.base);
+ const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
if (!tag_matches) {
const msg = msg: {
- const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data;
+ const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?;
const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod);
const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name });
errdefer msg.destroy(sema.gpa);
@@ -25021,7 +25022,7 @@ fn unionFieldPtr(
if (!initializing and union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
{
- const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
+ const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
@@ -25054,14 +25055,10 @@ fn unionFieldVal(
const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?);
if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| {
- if (union_val.isUndef()) return sema.addConstUndef(field.ty);
+ if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty);
const tag_and_val = union_val.castTag(.@"union").?.data;
- var field_tag_buf: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = enum_field_index,
- };
- const field_tag = Value.initPayload(&field_tag_buf.base);
+ const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
switch (union_obj.layout) {
.Auto => {
@@ -25069,7 +25066,7 @@ fn unionFieldVal(
return sema.addConstant(field.ty, tag_and_val.val);
} else {
const msg = msg: {
- const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data;
+ const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?;
const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod);
const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name });
errdefer msg.destroy(sema.gpa);
@@ -25096,7 +25093,7 @@ fn unionFieldVal(
if (union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
{
- const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
+ const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval);
try sema.panicInactiveUnionField(block, active_tag, wanted_tag);
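Note (not part of the commit): union field access now builds the expected tag with `mod.enumValueFieldIndex`, so it carries the union's tag type and can be compared with `eql`, and it recovers the active field index with `enumTagFieldIndex` instead of peeking at an `enum_field_index` payload. A condensed sketch of the comptime check, assuming `union_obj`, `tag_and_val`, `enum_field_index`, and `field_name` as in the hunks above, with `sema.fail` standing in for the full error-message construction:

const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
if (!tag_matches) {
    const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?;
    const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod);
    return sema.fail(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name });
}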
@@ -25364,7 +25361,7 @@ fn tupleField(
}
if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| {
- if (tuple_val.isUndef()) return sema.addConstUndef(field_ty);
+ if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty);
return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index));
}
@@ -25412,7 +25409,7 @@ fn elemValArray(
}
}
if (maybe_undef_array_val) |array_val| {
- if (array_val.isUndef()) {
+ if (array_val.isUndef(mod)) {
return sema.addConstUndef(elem_ty);
}
if (maybe_index_val) |index_val| {
@@ -25473,7 +25470,7 @@ fn elemPtrArray(
const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);
if (maybe_undef_array_ptr_val) |array_ptr_val| {
- if (array_ptr_val.isUndef()) {
+ if (array_ptr_val.isUndef(mod)) {
return sema.addConstUndef(elem_ptr_ty);
}
if (offset) |index| {
@@ -25580,7 +25577,7 @@ fn elemPtrSlice(
const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);
if (maybe_undef_slice_val) |slice_val| {
- if (slice_val.isUndef()) {
+ if (slice_val.isUndef(mod)) {
return sema.addConstUndef(elem_ptr_ty);
}
const slice_len = slice_val.sliceLen(mod);
@@ -25605,7 +25602,7 @@ fn elemPtrSlice(
if (oob_safety and block.wantSafety()) {
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
- if (!slice_val.isUndef())
+ if (!slice_val.isUndef(mod))
break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod));
break :len try block.addTyOp(.slice_len, Type.usize, slice);
};
@@ -25681,7 +25678,6 @@ fn coerceExtra(
if (dest_ty.eql(inst_ty, mod))
return inst;
- const arena = sema.arena;
const maybe_inst_val = try sema.resolveMaybeUndefVal(inst);
var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
@@ -26175,7 +26171,7 @@ fn coerceExtra(
};
return sema.addConstant(
dest_ty,
- try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)),
+ try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)),
);
},
.Union => blk: {
@@ -27858,8 +27854,9 @@ fn beginComptimePtrMutation(
},
.Union => {
const payload = try arena.create(Value.Payload.Union);
+ const tag_ty = parent.ty.unionTagTypeHypothetical(mod);
payload.* = .{ .data = .{
- .tag = try Value.Tag.enum_field_index.create(arena, field_index),
+ .tag = try mod.enumValueFieldIndex(tag_ty, field_index),
.val = Value.undef,
} };
@@ -27934,11 +27931,10 @@ fn beginComptimePtrMutation(
.@"union" => {
// We need to set the active field of the union.
- const arena = parent.beginArena(sema.mod);
- defer parent.finishArena(sema.mod);
+ const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod);
const payload = &val_ptr.castTag(.@"union").?.data;
- payload.tag = try Value.Tag.enum_field_index.create(arena, field_index);
+ payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index);
return beginComptimePtrMutationInner(
sema,
@@ -28575,7 +28571,7 @@ fn coerceCompatiblePtrs(
const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
if (try sema.resolveMaybeUndefVal(inst)) |val| {
- if (!val.isUndef() and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
+ if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
}
// The comptime Value representation is compatible with both types.
@@ -29426,7 +29422,7 @@ fn analyzeSlicePtr(
const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
const result_ty = slice_ty.slicePtrFieldType(buf, mod);
if (try sema.resolveMaybeUndefVal(slice)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_ty);
+ if (val.isUndef(mod)) return sema.addConstUndef(result_ty);
return sema.addConstant(result_ty, val.slicePtr());
}
try sema.requireRuntimeBlock(block, slice_src, null);
@@ -29439,8 +29435,9 @@ fn analyzeSliceLen(
src: LazySrcLoc,
slice_inst: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
if (try sema.resolveMaybeUndefVal(slice_inst)) |slice_val| {
- if (slice_val.isUndef()) {
+ if (slice_val.isUndef(mod)) {
return sema.addConstUndef(Type.usize);
}
return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod));
@@ -29459,7 +29456,7 @@ fn analyzeIsNull(
const mod = sema.mod;
const result_ty = Type.bool;
if (try sema.resolveMaybeUndefVal(operand)) |opt_val| {
- if (opt_val.isUndef()) {
+ if (opt_val.isUndef(mod)) {
return sema.addConstUndef(result_ty);
}
const is_null = opt_val.isNull(mod);
@@ -29588,7 +29585,7 @@ fn analyzeIsNonErrComptimeOnly(
}
if (maybe_operand_val) |err_union| {
- if (err_union.isUndef()) {
+ if (err_union.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
if (err_union.getError() == null) {
@@ -29768,7 +29765,7 @@ fn analyzeSlice(
} else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
if (try sema.resolveMaybeUndefVal(ptr_or_slice)) |slice_val| {
- if (slice_val.isUndef()) {
+ if (slice_val.isUndef(mod)) {
return sema.fail(block, src, "slice of undefined", .{});
}
const has_sentinel = slice_ty.sentinel(mod) != null;
@@ -29948,7 +29945,7 @@ fn analyzeSlice(
return result;
};
- if (!new_ptr_val.isUndef()) {
+ if (!new_ptr_val.isUndef(mod)) {
return sema.addConstant(return_ty, new_ptr_val);
}
@@ -30069,19 +30066,19 @@ fn cmpNumeric(
if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
// Compare ints: const vs. undefined (or vice versa)
- if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) {
try sema.resolveLazyValue(lhs_val);
if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
- } else if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef()) {
+ } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) {
try sema.resolveLazyValue(rhs_val);
if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
}
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) {
@@ -30097,7 +30094,7 @@ fn cmpNumeric(
return Air.Inst.Ref.bool_false;
}
} else {
- if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) {
+ if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) {
// Compare ints: const vs. var
try sema.resolveLazyValue(lhs_val);
if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| {
@@ -30108,7 +30105,7 @@ fn cmpNumeric(
}
} else {
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
- if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) {
+ if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) {
// Compare ints: var vs. const
try sema.resolveLazyValue(rhs_val);
if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| {
@@ -30177,7 +30174,7 @@ fn cmpNumeric(
var lhs_bits: usize = undefined;
if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
try sema.resolveLazyValue(lhs_val);
- if (lhs_val.isUndef())
+ if (lhs_val.isUndef(mod))
return sema.addConstUndef(Type.bool);
if (lhs_val.isNan(mod)) switch (op) {
.neq => return Air.Inst.Ref.bool_true,
@@ -30236,7 +30233,7 @@ fn cmpNumeric(
var rhs_bits: usize = undefined;
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
try sema.resolveLazyValue(rhs_val);
- if (rhs_val.isUndef())
+ if (rhs_val.isUndef(mod))
return sema.addConstUndef(Type.bool);
if (rhs_val.isNan(mod)) switch (op) {
.neq => return Air.Inst.Ref.bool_true,
@@ -30441,7 +30438,7 @@ fn cmpVector(
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
return sema.addConstUndef(result_ty);
}
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty);
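Taken together, the hunks above are a mechanical API change: `Value.isUndef` now needs the owning `*Module` because undefined values are looked up in the intern pool rather than checked by tag. A minimal caller-side sketch of the new shape (the `operand` and `result_ty` names here are placeholders, not taken from this commit):

    const mod = sema.mod;
    if (try sema.resolveMaybeUndefVal(operand)) |opt_val| {
        // `isUndef` now consults `mod.intern_pool` instead of a payload tag.
        if (opt_val.isUndef(mod)) {
            return sema.addConstUndef(result_ty);
        }
    }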
@@ -30558,11 +30555,12 @@ fn unionToTag(
un: Air.Inst.Ref,
un_src: LazySrcLoc,
) !Air.Inst.Ref {
+ const mod = sema.mod;
if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| {
return sema.addConstant(enum_ty, opv);
}
if (try sema.resolveMaybeUndefVal(un)) |un_val| {
- return sema.addConstant(enum_ty, un_val.unionTag());
+ return sema.addConstant(enum_ty, un_val.unionTag(mod));
}
try sema.requireRuntimeBlock(block, un_src, null);
return block.addTyOp(.get_union_tag, enum_ty, un);
@@ -31718,6 +31716,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()),
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -31845,6 +31844,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
.none => return ty,
.u1_type,
+ .u5_type,
.u8_type,
.i8_type,
.u16_type,
@@ -31904,6 +31904,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
+ .one_u5 => unreachable,
+ .four_u5 => unreachable,
.negative_one => unreachable,
.calling_convention_c => unreachable,
.calling_convention_inline => unreachable,
@@ -32720,7 +32722,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
}
if (explicit_enum_info) |tag_info| {
- const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse {
+ const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{
.index = field_i,
@@ -33186,19 +33188,30 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.opaque_type => null,
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.nonexhaustive => {
- if (enum_type.tag_ty != .comptime_int_type and
- !(try sema.typeHasRuntimeBits(enum_type.tag_ty.toType())))
- {
- return Value.enum_field_0;
- } else {
- return null;
+ if (enum_type.tag_ty == .comptime_int_type) return null;
+
+ if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| {
+ const only = try mod.intern(.{ .enum_tag = .{
+ .ty = ty.ip_index,
+ .int = int_opv.ip_index,
+ } });
+ return only.toValue();
}
+
+ return null;
},
.auto, .explicit => switch (enum_type.names.len) {
0 => return Value.@"unreachable",
1 => {
if (enum_type.values.len == 0) {
- return Value.enum_field_0; // auto-numbered
+ const only = try mod.intern(.{ .enum_tag = .{
+ .ty = ty.ip_index,
+ .int = try mod.intern(.{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = 0 },
+ } }),
+ } });
+ return only.toValue();
} else {
return enum_type.values[0].toValue();
}
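The removed `Value.enum_field_0` shortcut is replaced by interning a real `enum_tag` value. A user-level sketch of the case this branch serves (plain Zig, not compiler API):

    // Sketch: a single-field enum has exactly one possible value. With this
    // change Sema represents it as an interned enum_tag whose payload is an
    // interned integer 0 of the tag type, instead of the old enum_field_0.
    const Only = enum { a };

    fn use(x: Only) void {
        // `x` carries no runtime information; its value is comptime-known to be `.a`.
        _ = x;
    }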
@@ -33208,6 +33221,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
},
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -33397,8 +33411,9 @@ pub fn analyzeAddressSpace(
zir_ref: Zir.Inst.Ref,
ctx: AddressSpaceContext,
) !std.builtin.AddressSpace {
+ const mod = sema.mod;
const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref, "addresspace must be comptime-known");
- const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace);
+ const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
const target = sema.mod.getTarget();
const arch = target.cpu.arch;
@@ -33766,6 +33781,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()),
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -33921,9 +33937,9 @@ fn numberAddWrapScalar(
rhs: Value,
ty: Type,
) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
-
const mod = sema.mod;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+
if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intAdd(lhs, rhs, ty);
}
@@ -33975,9 +33991,9 @@ fn numberSubWrapScalar(
rhs: Value,
ty: Type,
) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
-
const mod = sema.mod;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
+
if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intSub(lhs, rhs, ty);
}
@@ -34222,17 +34238,12 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
const mod = sema.mod;
const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
assert(enum_type.tag_mode != .nonexhaustive);
- if (enum_type.values.len == 0) {
- // auto-numbered
- return sema.intInRange(enum_type.tag_ty.toType(), int, enum_type.names.len);
- }
-
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty);
- return enum_type.tagValueIndex(mod.intern_pool, int_coerced) != null;
+ return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null;
}
fn intAddWithOverflow(
src/type.zig
@@ -126,6 +126,7 @@ pub const Type = struct {
},
// values, not types
+ .undef => unreachable,
.un => unreachable,
.extern_func => unreachable,
.int => unreachable,
@@ -1350,6 +1351,7 @@ pub const Type = struct {
},
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -1600,6 +1602,7 @@ pub const Type = struct {
.enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -1713,6 +1716,7 @@ pub const Type = struct {
},
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -2104,6 +2108,7 @@ pub const Type = struct {
.enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) },
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -2499,6 +2504,7 @@ pub const Type = struct {
.enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) },
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -2736,6 +2742,7 @@ pub const Type = struct {
.enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema),
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -3492,6 +3499,7 @@ pub const Type = struct {
.opaque_type => unreachable,
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -3826,19 +3834,30 @@ pub const Type = struct {
.opaque_type => return null,
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.nonexhaustive => {
- if (enum_type.tag_ty != .comptime_int_type and
- !enum_type.tag_ty.toType().hasRuntimeBits(mod))
- {
- return Value.enum_field_0;
- } else {
- return null;
+ if (enum_type.tag_ty == .comptime_int_type) return null;
+
+ if (try enum_type.tag_ty.toType().onePossibleValue(mod)) |int_opv| {
+ const only = try mod.intern(.{ .enum_tag = .{
+ .ty = ty.ip_index,
+ .int = int_opv.ip_index,
+ } });
+ return only.toValue();
}
+
+ return null;
},
.auto, .explicit => switch (enum_type.names.len) {
0 => return Value.@"unreachable",
1 => {
if (enum_type.values.len == 0) {
- return Value.enum_field_0; // auto-numbered
+ const only = try mod.intern(.{ .enum_tag = .{
+ .ty = ty.ip_index,
+ .int = try mod.intern(.{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = 0 },
+ } }),
+ } });
+ return only.toValue();
} else {
return enum_type.values[0].toValue();
}
@@ -3848,6 +3867,7 @@ pub const Type = struct {
},
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -4006,6 +4026,7 @@ pub const Type = struct {
.enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod),
// values, not types
+ .undef => unreachable,
.un => unreachable,
.simple_value => unreachable,
.extern_func => unreachable,
@@ -4224,36 +4245,22 @@ pub const Type = struct {
return ip.stringToSlice(field_name);
}
- pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?usize {
+ pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.ip_index).enum_type;
// If the string is not interned, then the field certainly is not present.
const field_name_interned = ip.getString(field_name).unwrap() orelse return null;
- return enum_type.nameIndex(ip.*, field_name_interned);
+ return enum_type.nameIndex(ip, field_name_interned);
}
/// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
/// an integer which represents the enum value. Returns the field index in
/// declaration order, or `null` if `enum_tag` does not match any field.
- pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize {
- if (enum_tag.castTag(.enum_field_index)) |payload| {
- return @as(usize, payload.data);
- }
+ pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.ip_index).enum_type;
- const tag_ty = enum_type.tag_ty.toType();
- if (enum_type.values.len == 0) {
- if (enum_tag.compareAllWithZero(.lt, mod)) return null;
- const end_val = mod.intValue(tag_ty, enum_type.names.len) catch |err| switch (err) {
- // TODO: eliminate this failure condition
- error.OutOfMemory => @panic("OOM"),
- };
- if (enum_tag.compareScalar(.gte, end_val, tag_ty, mod)) return null;
- return @intCast(usize, enum_tag.toUnsignedInt(mod));
- } else {
- assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty);
- return enum_type.tagValueIndex(ip.*, enum_tag.ip_index);
- }
+ assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty);
+ return enum_type.tagValueIndex(ip, enum_tag.ip_index);
}
pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields {
src/TypedValue.zig
@@ -197,9 +197,6 @@ pub fn print(
},
.empty_array => return writer.writeAll(".{}"),
.enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}),
- .enum_field_index => {
- return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data, mod)});
- },
.bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
@@ -255,7 +252,7 @@ pub fn print(
const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
};
- if (elem_val.isUndef()) break :str;
+ if (elem_val.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
}
@@ -358,6 +355,20 @@ pub fn print(
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}),
},
+ .enum_tag => |enum_tag| {
+ try writer.writeAll("@intToEnum(");
+ try print(.{
+ .ty = Type.type,
+ .val = enum_tag.ty.toValue(),
+ }, writer, level - 1, mod);
+ try writer.writeAll(", ");
+ try print(.{
+ .ty = mod.intern_pool.typeOf(enum_tag.int).toType(),
+ .val = enum_tag.int.toValue(),
+ }, writer, level - 1, mod);
+ try writer.writeAll(")");
+ return;
+ },
.float => |float| switch (float.storage) {
inline else => |x| return writer.print("{}", .{x}),
},
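The new `enum_tag` arm renders the interned tag as a cast expression built from the enum type and its interned integer payload. A rough sketch of the resulting output, assuming an enum like the one below (how the type name itself prints depends on the surrounding `print` logic):

    // const E = enum(u8) { a = 1 };
    // A TypedValue holding `E.a` now prints roughly as:
    //     @intToEnum(E, 1)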
@@ -414,7 +425,7 @@ fn printAggregate(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
const elem = try val.fieldValue(ty, mod, i);
- if (elem.isUndef()) break :str;
+ if (elem.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str;
}
src/value.zig
@@ -73,8 +73,6 @@ pub const Value = struct {
/// Pointer and length as sub `Value` objects.
slice,
enum_literal,
- /// A specific enum tag, indicated by the field index (declaration order).
- enum_field_index,
@"error",
/// When the type is error union:
/// * If the tag is `.@"error"`, the error union is an error.
@@ -143,8 +141,6 @@ pub const Value = struct {
.str_lit => Payload.StrLit,
.slice => Payload.Slice,
- .enum_field_index => Payload.U32,
-
.ty,
.lazy_align,
.lazy_size,
@@ -397,7 +393,6 @@ pub const Value = struct {
.legacy = .{ .ptr_otherwise = &new_payload.base },
};
},
- .enum_field_index => return self.copyPayloadShallow(arena, Payload.U32),
.@"error" => return self.copyPayloadShallow(arena, Payload.Error),
.aggregate => {
@@ -515,7 +510,6 @@ pub const Value = struct {
},
.empty_array => return out_stream.writeAll(".{}"),
.enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}),
- .enum_field_index => return out_stream.print("(enum field {d})", .{val.castTag(.enum_field_index).?.data}),
.bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
@@ -618,87 +612,58 @@ pub const Value = struct {
};
}
- /// Asserts the type is an enum type.
- pub fn toEnum(val: Value, comptime E: type) E {
+ pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
+ const ip = &mod.intern_pool;
switch (val.ip_index) {
- .calling_convention_c => {
- if (E == std.builtin.CallingConvention) {
- return .C;
+ .none => {
+ const field_index = switch (val.tag()) {
+ .the_only_possible_value => blk: {
+ assert(ty.enumFieldCount(mod) == 1);
+ break :blk 0;
+ },
+ .enum_literal => i: {
+ const name = val.castTag(.enum_literal).?.data;
+ break :i ty.enumFieldIndex(name, mod).?;
+ },
+ else => unreachable,
+ };
+ const enum_type = ip.indexToKey(ty.ip_index).enum_type;
+ if (enum_type.values.len != 0) {
+ return enum_type.values[field_index].toValue();
} else {
- unreachable;
+ // Field index and integer values are the same.
+ return mod.intValue(enum_type.tag_ty.toType(), field_index);
}
},
- .calling_convention_inline => {
- if (E == std.builtin.CallingConvention) {
- return .Inline;
- } else {
- unreachable;
- }
+ else => {
+ const enum_type = ip.indexToKey(ip.typeOf(val.ip_index)).enum_type;
+ const int = try ip.getCoerced(mod.gpa, val.ip_index, enum_type.tag_ty);
+ return int.toValue();
},
- .none => switch (val.tag()) {
- .enum_field_index => {
- const field_index = val.castTag(.enum_field_index).?.data;
- return @intToEnum(E, field_index);
- },
- .the_only_possible_value => {
- const fields = std.meta.fields(E);
- assert(fields.len == 1);
- return @intToEnum(E, fields[0].value);
- },
- else => unreachable,
- },
- else => unreachable,
}
}
- pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
- const field_index = switch (val.tag()) {
- .enum_field_index => val.castTag(.enum_field_index).?.data,
- .the_only_possible_value => blk: {
- assert(ty.enumFieldCount(mod) == 1);
- break :blk 0;
- },
- .enum_literal => i: {
- const name = val.castTag(.enum_literal).?.data;
- break :i ty.enumFieldIndex(name, mod).?;
- },
- // Assume it is already an integer and return it directly.
- else => return val,
- };
+ pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
+ _ = ty; // TODO: remove this parameter now that we use InternPool
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
- if (enum_type.values.len != 0) {
- return enum_type.values[field_index].toValue();
- } else {
- // Field index and integer values are the same.
- return mod.intValue(enum_type.tag_ty.toType(), field_index);
+ if (val.castTag(.enum_literal)) |payload| {
+ return payload.data;
}
- }
-
- pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
- if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(mod), mod);
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+ const ip = &mod.intern_pool;
- const field_index = switch (val.tag()) {
- .enum_field_index => val.castTag(.enum_field_index).?.data,
- .the_only_possible_value => blk: {
- assert(ty.enumFieldCount(mod) == 1);
- break :blk 0;
- },
- .enum_literal => return val.castTag(.enum_literal).?.data,
- else => field_index: {
- if (enum_type.values.len == 0) {
- // auto-numbered enum
- break :field_index @intCast(u32, val.toUnsignedInt(mod));
- }
- const field_index = enum_type.tagValueIndex(mod.intern_pool, val.ip_index).?;
- break :field_index @intCast(u32, field_index);
- },
+ const enum_tag = switch (ip.indexToKey(val.ip_index)) {
+ .un => |un| ip.indexToKey(un.tag).enum_tag,
+ .enum_tag => |x| x,
+ else => unreachable,
+ };
+ const enum_type = ip.indexToKey(enum_tag.ty).enum_type;
+ const field_index = field_index: {
+ const field_index = enum_type.tagValueIndex(ip, val.ip_index).?;
+ break :field_index @intCast(u32, field_index);
};
-
const field_name = enum_type.names[field_index];
- return mod.intern_pool.stringToSlice(field_name);
+ return ip.stringToSlice(field_name);
}
/// Asserts the value is an integer.
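As a sanity check on the rewritten `enumToInt`/`tagName` pair, a small user-level example of the behavior they fold (standard Zig; nothing here is compiler-internal, and the type is illustrative only):

    const std = @import("std");

    const E = enum(u8) { a = 1, b = 4 };

    test "enum tag to int and back" {
        const x: E = .b; // coerces from an enum literal to an interned tag
        // `Value.enumToInt` reduces the tag to its integer payload...
        try std.testing.expectEqual(@as(u8, 4), @enumToInt(x));
        // ...and `Value.tagName` recovers the field name from the intern pool.
        try std.testing.expectEqualStrings("b", @tagName(x));
    }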
@@ -722,10 +687,6 @@ pub const Value = struct {
.the_only_possible_value, // i0, u0
=> BigIntMutable.init(&space.limbs, 0).toConst(),
- .enum_field_index => {
- const index = val.castTag(.enum_field_index).?.data;
- return BigIntMutable.init(&space.limbs, index).toConst();
- },
.runtime_value => {
const sub_val = val.castTag(.runtime_value).?.data;
return sub_val.toBigIntAdvanced(space, mod, opt_sema);
@@ -759,6 +720,7 @@ pub const Value = struct {
},
else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| int.storage.toBigInt(space),
+ .enum_tag => |enum_tag| mod.intern_pool.indexToKey(enum_tag.int).int.storage.toBigInt(space),
else => unreachable,
},
};
@@ -886,7 +848,7 @@ pub const Value = struct {
}!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
const size = @intCast(usize, ty.abiSize(mod));
@memset(buffer[0..size], 0xaa);
return;
@@ -1007,7 +969,7 @@ pub const Value = struct {
) error{ ReinterpretDeclRef, OutOfMemory }!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- if (val.isUndef()) {
+ if (val.isUndef(mod)) {
const bit_size = @intCast(usize, ty.bitSize(mod));
std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
return;
@@ -1087,7 +1049,7 @@ pub const Value = struct {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => unreachable, // Handled in non-packed writeToMemory
.Packed => {
- const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
+ const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod);
const field_type = ty.unionFields(mod).values()[field_index.?].ty;
const field_val = try val.fieldValue(field_type, mod, field_index.?);
@@ -1432,7 +1394,7 @@ pub const Value = struct {
}
pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
- assert(!val.isUndef());
+ assert(!val.isUndef(mod));
switch (val.ip_index) {
.bool_false => return 0,
.bool_true => return 1,
@@ -1450,7 +1412,7 @@ pub const Value = struct {
}
pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
- assert(!val.isUndef());
+ assert(!val.isUndef(mod));
const info = ty.intInfo(mod);
@@ -1468,7 +1430,7 @@ pub const Value = struct {
}
pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
- assert(!val.isUndef());
+ assert(!val.isUndef(mod));
const info = ty.intInfo(mod);
@@ -1578,7 +1540,6 @@ pub const Value = struct {
.variable,
=> .gt,
- .enum_field_index => return std.math.order(lhs.castTag(.enum_field_index).?.data, 0),
.runtime_value => {
// This is needed to correctly handle hashing the value.
// Checks in Sema should prevent direct comparisons from reaching here.
@@ -1633,6 +1594,10 @@ pub const Value = struct {
.big_int => |big_int| big_int.orderAgainstScalar(0),
inline .u64, .i64 => |x| std.math.order(x, 0),
},
+ .enum_tag => |enum_tag| switch (mod.intern_pool.indexToKey(enum_tag.int).int.storage) {
+ .big_int => |big_int| big_int.orderAgainstScalar(0),
+ inline .u64, .i64 => |x| std.math.order(x, 0),
+ },
.float => |float| switch (float.storage) {
inline else => |x| std.math.order(x, 0),
},
@@ -1861,11 +1826,6 @@ pub const Value = struct {
const b_name = b.castTag(.enum_literal).?.data;
return std.mem.eql(u8, a_name, b_name);
},
- .enum_field_index => {
- const a_field_index = a.castTag(.enum_field_index).?.data;
- const b_field_index = b.castTag(.enum_field_index).?.data;
- return a_field_index == b_field_index;
- },
.opt_payload => {
const a_payload = a.castTag(.opt_payload).?.data;
const b_payload = b.castTag(.opt_payload).?.data;
@@ -2064,13 +2024,9 @@ pub const Value = struct {
}
const field_name = tuple.names[0];
const union_obj = mod.typeToUnion(ty).?;
- const field_index = union_obj.fields.getIndex(field_name) orelse return false;
+ const field_index = @intCast(u32, union_obj.fields.getIndex(field_name) orelse return false);
const tag_and_val = b.castTag(.@"union").?.data;
- var field_tag_buf: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @intCast(u32, field_index),
- };
- const field_tag = Value.initPayload(&field_tag_buf.base);
+ const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, field_index);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
if (!tag_matches) return false;
return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema);
@@ -2132,7 +2088,7 @@ pub const Value = struct {
}
const zig_ty_tag = ty.zigTypeTag(mod);
std.hash.autoHash(hasher, zig_ty_tag);
- if (val.isUndef()) return;
+ if (val.isUndef(mod)) return;
// The value is runtime-known and shouldn't affect the hash.
if (val.isRuntimeValue()) return;
@@ -2277,7 +2233,7 @@ pub const Value = struct {
/// This function is used by hash maps and so treats floating-point NaNs as equal
/// to each other, and not equal to other floating-point values.
pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
- if (val.isUndef()) return;
+ if (val.isUndef(mod)) return;
// The value is runtime-known and shouldn't affect the hash.
if (val.isRuntimeValue()) return;
@@ -2726,16 +2682,12 @@ pub const Value = struct {
}
}
- pub fn unionTag(val: Value) Value {
- switch (val.ip_index) {
- .undef => return val,
- .none => switch (val.tag()) {
- .enum_field_index => return val,
- .@"union" => return val.castTag(.@"union").?.data.tag,
- else => unreachable,
- },
+ pub fn unionTag(val: Value, mod: *Module) Value {
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .undef, .enum_tag => val,
+ .un => |un| un.tag.toValue(),
else => unreachable,
- }
+ };
}
/// Returns a pointer to the element value at the index.
@@ -2769,27 +2721,30 @@ pub const Value = struct {
});
}
- pub fn isUndef(val: Value) bool {
- return val.ip_index == .undef;
+ pub fn isUndef(val: Value, mod: *Module) bool {
+ if (val.ip_index == .none) return false;
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .undef => true,
+ .simple_value => |v| v == .undefined,
+ else => false,
+ };
}
/// TODO: check for cases such as array that is not marked undef but all the element
/// values are marked undef, or struct that is not marked undef but all fields are marked
/// undef, etc.
- pub fn isUndefDeep(val: Value) bool {
- return val.isUndef();
+ pub fn isUndefDeep(val: Value, mod: *Module) bool {
+ return val.isUndef(mod);
}
/// Returns true if any value contained in `self` is undefined.
- /// TODO: check for cases such as array that is not marked undef but all the element
- /// values are marked undef, or struct that is not marked undef but all fields are marked
- /// undef, etc.
- pub fn anyUndef(self: Value, mod: *Module) !bool {
- switch (self.ip_index) {
+ pub fn anyUndef(val: Value, mod: *Module) !bool {
+ if (val.ip_index == .none) return false;
+ switch (val.ip_index) {
.undef => return true,
- .none => switch (self.tag()) {
+ .none => switch (val.tag()) {
.slice => {
- const payload = self.castTag(.slice).?;
+ const payload = val.castTag(.slice).?;
const len = payload.data.len.toUnsignedInt(mod);
for (0..len) |i| {
@@ -2799,14 +2754,21 @@ pub const Value = struct {
},
.aggregate => {
- const payload = self.castTag(.aggregate).?;
- for (payload.data) |val| {
- if (try val.anyUndef(mod)) return true;
+ const payload = val.castTag(.aggregate).?;
+ for (payload.data) |field| {
+ if (try field.anyUndef(mod)) return true;
}
},
else => {},
},
- else => {},
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .undef => return true,
+ .simple_value => |v| if (v == .undefined) return true,
+ .aggregate => |aggregate| for (aggregate.fields) |field| {
+ if (try anyUndef(field.toValue(), mod)) return true;
+ },
+ else => {},
+ },
}
return false;
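`anyUndef` now also walks interned aggregates. A user-level sketch of the partially-undefined case it has to detect:

    const std = @import("std");

    test "partially undefined aggregate" {
        // The aggregate itself is not undef, but one element is; `anyUndef`
        // must recurse (through legacy payloads or the intern pool) to see it.
        const pair: [2]u8 = .{ 1, undefined };
        try std.testing.expect(pair[0] == 1);
    }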
@@ -2819,11 +2781,7 @@ pub const Value = struct {
.undef => unreachable,
.unreachable_value => unreachable,
- .null_value,
- .zero,
- .zero_usize,
- .zero_u8,
- => true,
+ .null_value => true,
.none => switch (val.tag()) {
.opt_payload => false,
@@ -2843,6 +2801,7 @@ pub const Value = struct {
.big_int => |big_int| big_int.eqZero(),
inline .u64, .i64 => |x| x == 0,
},
+ .opt => |opt| opt.val == .none,
else => unreachable,
},
};
@@ -3024,8 +2983,8 @@ pub const Value = struct {
arena: Allocator,
mod: *Module,
) !Value {
- assert(!lhs.isUndef());
- assert(!rhs.isUndef());
+ assert(!lhs.isUndef(mod));
+ assert(!rhs.isUndef(mod));
const info = ty.intInfo(mod);
@@ -3071,8 +3030,8 @@ pub const Value = struct {
arena: Allocator,
mod: *Module,
) !Value {
- assert(!lhs.isUndef());
- assert(!rhs.isUndef());
+ assert(!lhs.isUndef(mod));
+ assert(!rhs.isUndef(mod));
const info = ty.intInfo(mod);
@@ -3178,7 +3137,7 @@ pub const Value = struct {
arena: Allocator,
mod: *Module,
) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
if (ty.zigTypeTag(mod) == .ComptimeInt) {
return intMul(lhs, rhs, ty, arena, mod);
@@ -3220,8 +3179,8 @@ pub const Value = struct {
arena: Allocator,
mod: *Module,
) !Value {
- assert(!lhs.isUndef());
- assert(!rhs.isUndef());
+ assert(!lhs.isUndef(mod));
+ assert(!rhs.isUndef(mod));
const info = ty.intInfo(mod);
@@ -3249,7 +3208,7 @@ pub const Value = struct {
/// Supports both floats and ints; handles undefined.
pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value {
- if (lhs.isUndef() or rhs.isUndef()) return undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef;
if (lhs.isNan(mod)) return rhs;
if (rhs.isNan(mod)) return lhs;
@@ -3261,7 +3220,7 @@ pub const Value = struct {
/// Supports both floats and ints; handles undefined.
pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value {
- if (lhs.isUndef() or rhs.isUndef()) return undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef;
if (lhs.isNan(mod)) return rhs;
if (rhs.isNan(mod)) return lhs;
@@ -3286,7 +3245,7 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (val.isUndef()) return Value.undef;
+ if (val.isUndef(mod)) return Value.undef;
const info = ty.intInfo(mod);
@@ -3324,7 +3283,7 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -3358,7 +3317,7 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty);
@@ -3381,7 +3340,7 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -3415,7 +3374,7 @@ pub const Value = struct {
/// operands must be integers; handles undefined.
pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
@@ -4697,11 +4656,6 @@ pub const Value = struct {
pub const Payload = struct {
tag: Tag,
- pub const U32 = struct {
- base: Payload,
- data: u32,
- };
-
pub const Function = struct {
base: Payload,
data: *Module.Fn,
@@ -4885,16 +4839,6 @@ pub const Value = struct {
pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined };
pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined };
- pub const enum_field_0: Value = .{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &enum_field_0_payload.base },
- };
-
- var enum_field_0_payload: Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = 0,
- };
-
pub fn makeBool(x: bool) Value {
return if (x) Value.true else Value.false;
}
src/Zir.zig
@@ -2052,6 +2052,7 @@ pub const Inst = struct {
/// and `[]Ref`.
pub const Ref = enum(u32) {
u1_type = @enumToInt(InternPool.Index.u1_type),
+ u5_type = @enumToInt(InternPool.Index.u5_type),
u8_type = @enumToInt(InternPool.Index.u8_type),
i8_type = @enumToInt(InternPool.Index.i8_type),
u16_type = @enumToInt(InternPool.Index.u16_type),
@@ -2120,6 +2121,8 @@ pub const Inst = struct {
zero_u8 = @enumToInt(InternPool.Index.zero_u8),
one = @enumToInt(InternPool.Index.one),
one_usize = @enumToInt(InternPool.Index.one_usize),
+ one_u5 = @enumToInt(InternPool.Index.one_u5),
+ four_u5 = @enumToInt(InternPool.Index.four_u5),
negative_one = @enumToInt(InternPool.Index.negative_one),
calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
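For context, the leading `Ref` values are kept numerically identical to `InternPool.Index`, which is why each new index added in this commit (`u5_type`, `one_u5`, `four_u5`) must be mirrored here. A hypothetical bridging helper illustrating that invariant (not part of this commit):

    // Sketch: valid only for the mirrored prefix of the two enums; adding an
    // InternPool.Index without the matching Ref entry would break this mapping.
    fn refFromWellKnownIndex(i: InternPool.Index) Zir.Inst.Ref {
        return @intToEnum(Zir.Inst.Ref, @enumToInt(i));
    }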