Commit b483defc5a
Changed files (39)
lib/std/zig/AstGen.zig
@@ -11194,6 +11194,7 @@ fn rvalueInner(
const as_void = @as(u64, @intFromEnum(Zir.Inst.Ref.void_type)) << 32;
const as_comptime_int = @as(u64, @intFromEnum(Zir.Inst.Ref.comptime_int_type)) << 32;
const as_usize = @as(u64, @intFromEnum(Zir.Inst.Ref.usize_type)) << 32;
+ const as_u1 = @as(u64, @intFromEnum(Zir.Inst.Ref.u1_type)) << 32;
const as_u8 = @as(u64, @intFromEnum(Zir.Inst.Ref.u8_type)) << 32;
switch ((@as(u64, @intFromEnum(ty_inst)) << 32) | @as(u64, @intFromEnum(result))) {
as_ty | @intFromEnum(Zir.Inst.Ref.u1_type),
@@ -11237,10 +11238,11 @@ fn rvalueInner(
as_ty | @intFromEnum(Zir.Inst.Ref.null_type),
as_ty | @intFromEnum(Zir.Inst.Ref.undefined_type),
as_ty | @intFromEnum(Zir.Inst.Ref.enum_literal_type),
+ as_ty | @intFromEnum(Zir.Inst.Ref.ptr_usize_type),
+ as_ty | @intFromEnum(Zir.Inst.Ref.ptr_const_comptime_int_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type),
- as_ty | @intFromEnum(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_type),
as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type),
as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type),
@@ -11249,27 +11251,45 @@ fn rvalueInner(
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.negative_one),
+ as_usize | @intFromEnum(Zir.Inst.Ref.undef_usize),
as_usize | @intFromEnum(Zir.Inst.Ref.zero_usize),
as_usize | @intFromEnum(Zir.Inst.Ref.one_usize),
+ as_u1 | @intFromEnum(Zir.Inst.Ref.undef_u1),
+ as_u1 | @intFromEnum(Zir.Inst.Ref.zero_u1),
+ as_u1 | @intFromEnum(Zir.Inst.Ref.one_u1),
as_u8 | @intFromEnum(Zir.Inst.Ref.zero_u8),
as_u8 | @intFromEnum(Zir.Inst.Ref.one_u8),
as_u8 | @intFromEnum(Zir.Inst.Ref.four_u8),
+ as_bool | @intFromEnum(Zir.Inst.Ref.undef_bool),
as_bool | @intFromEnum(Zir.Inst.Ref.bool_true),
as_bool | @intFromEnum(Zir.Inst.Ref.bool_false),
as_void | @intFromEnum(Zir.Inst.Ref.void_value),
=> return result, // type of result is already correct
+ as_bool | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_bool,
+ as_usize | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_usize,
+ as_usize | @intFromEnum(Zir.Inst.Ref.undef_u1) => return .undef_usize,
+ as_u1 | @intFromEnum(Zir.Inst.Ref.undef) => return .undef_u1,
+
as_usize | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_usize,
+ as_u1 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.zero) => return .zero_u8,
as_usize | @intFromEnum(Zir.Inst.Ref.one) => return .one_usize,
+ as_u1 | @intFromEnum(Zir.Inst.Ref.one) => return .one_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.one) => return .one_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero,
+ as_u1 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.zero_usize) => return .zero_u8,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one,
+ as_u1 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u1,
as_u8 | @intFromEnum(Zir.Inst.Ref.one_usize) => return .one_u8,
+ as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero,
+ as_usize | @intFromEnum(Zir.Inst.Ref.zero_u1) => return .zero_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.zero_u8) => return .zero_usize,
+ as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one,
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one,
+ as_usize | @intFromEnum(Zir.Inst.Ref.one_u1) => return .one_usize,
as_usize | @intFromEnum(Zir.Inst.Ref.one_u8) => return .one_usize,
// Need an explicit type coercion instruction.
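
Aside: the fast paths above pack the destination type's Ref into the high 32 bits of a u64 and the value's Ref into the low 32 bits, so every (type, value) pair that already has an interned constant becomes a single switch case. A reduced sketch of the same trick, with hypothetical two- and three-entry enums standing in for Zir.Inst.Ref:

const Ty = enum(u32) { usize_ty, u8_ty };
const Val = enum(u32) { zero, zero_usize, zero_u8 };

fn coerceFast(ty: Ty, val: Val) ?Val {
    const as_usize = @as(u64, @intFromEnum(Ty.usize_ty)) << 32;
    const as_u8 = @as(u64, @intFromEnum(Ty.u8_ty)) << 32;
    // One u64 switch dispatches on the type and the value at once.
    return switch ((@as(u64, @intFromEnum(ty)) << 32) | @intFromEnum(val)) {
        as_usize | @intFromEnum(Val.zero) => .zero_usize,
        as_u8 | @intFromEnum(Val.zero) => .zero_u8,
        as_usize | @intFromEnum(Val.zero_usize),
        as_u8 | @intFromEnum(Val.zero_u8),
        => val, // type of result is already correct
        else => null, // needs an explicit coercion instruction
    };
}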
lib/std/zig/Zir.zig
@@ -2142,7 +2142,7 @@ pub const Inst = struct {
ref_start_index = static_len,
_,
- pub const static_len = 118;
+ pub const static_len = 124;
pub fn toRef(i: Index) Inst.Ref {
return @enumFromInt(@intFromEnum(Index.ref_start_index) + @intFromEnum(i));
@@ -2220,10 +2220,11 @@ pub const Inst = struct {
null_type,
undefined_type,
enum_literal_type,
+ ptr_usize_type,
+ ptr_const_comptime_int_type,
manyptr_u8_type,
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
- single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
vector_8_i8_type,
@@ -2279,11 +2280,16 @@ pub const Inst = struct {
generic_poison_type,
empty_tuple_type,
undef,
+ undef_bool,
+ undef_usize,
+ undef_u1,
zero,
zero_usize,
+ zero_u1,
zero_u8,
one,
one_usize,
+ one_u1,
one_u8,
four_u8,
negative_one,
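
static_len grows from 118 to 124 because the list gains six entries net: two pointer types added, one removed, plus five new typed constants (undef_bool, undef_usize, undef_u1, zero_u1, one_u1). Refs below static_len are the reserved well-known constants; instruction indices map past that window, which is all toRef/toIndex encode. A simplified sketch of that mapping, with plain u32 values standing in for the enums:

const static_len = 124;

// Instruction indices are offset past the reserved constant Refs.
fn toRef(index: u32) u32 {
    return static_len + index;
}

// Refs below static_len name well-known constants, not instructions.
fn toIndex(ref: u32) ?u32 {
    return if (ref >= static_len) ref - static_len else null;
}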
src/Air/Legalize.zig
@@ -4,6 +4,43 @@ air_extra: std.ArrayListUnmanaged(u32),
features: *const Features,
pub const Feature = enum {
+ scalarize_add,
+ scalarize_add_safe,
+ scalarize_add_optimized,
+ scalarize_add_wrap,
+ scalarize_add_sat,
+ scalarize_sub,
+ scalarize_sub_safe,
+ scalarize_sub_optimized,
+ scalarize_sub_wrap,
+ scalarize_sub_sat,
+ scalarize_mul,
+ scalarize_mul_safe,
+ scalarize_mul_optimized,
+ scalarize_mul_wrap,
+ scalarize_mul_sat,
+ scalarize_div_float,
+ scalarize_div_float_optimized,
+ scalarize_div_trunc,
+ scalarize_div_trunc_optimized,
+ scalarize_div_floor,
+ scalarize_div_floor_optimized,
+ scalarize_div_exact,
+ scalarize_div_exact_optimized,
+ scalarize_rem,
+ scalarize_rem_optimized,
+ scalarize_mod,
+ scalarize_mod_optimized,
+ scalarize_max,
+ scalarize_min,
+ scalarize_bit_and,
+ scalarize_bit_or,
+ scalarize_shr,
+ scalarize_shr_exact,
+ scalarize_shl,
+ scalarize_shl_exact,
+ scalarize_shl_sat,
+ scalarize_xor,
scalarize_not,
scalarize_clz,
scalarize_ctz,
@@ -26,41 +63,110 @@ pub const Feature = enum {
scalarize_trunc_float,
scalarize_neg,
scalarize_neg_optimized,
+ scalarize_cmp_vector,
+ scalarize_cmp_vector_optimized,
+ scalarize_fptrunc,
+ scalarize_fpext,
+ scalarize_intcast,
+ scalarize_intcast_safe,
+ scalarize_trunc,
+ scalarize_int_from_float,
+ scalarize_int_from_float_optimized,
+ scalarize_float_from_int,
+ scalarize_mul_add,
/// Legalize (shift lhs, (splat rhs)) -> (shift lhs, rhs)
remove_shift_vector_rhs_splat,
/// Legalize reduce of a one element vector to a bitcast
reduce_one_elem_to_bitcast,
+
+ fn scalarize(tag: Air.Inst.Tag) Feature {
+ return switch (tag) {
+ else => unreachable,
+ .add => .scalarize_add,
+ .add_safe => .scalarize_add_safe,
+ .add_optimized => .scalarize_add_optimized,
+ .add_wrap => .scalarize_add_wrap,
+ .add_sat => .scalarize_add_sat,
+ .sub => .scalarize_sub,
+ .sub_safe => .scalarize_sub_safe,
+ .sub_optimized => .scalarize_sub_optimized,
+ .sub_wrap => .scalarize_sub_wrap,
+ .sub_sat => .scalarize_sub_sat,
+ .mul => .scalarize_mul,
+ .mul_safe => .scalarize_mul_safe,
+ .mul_optimized => .scalarize_mul_optimized,
+ .mul_wrap => .scalarize_mul_wrap,
+ .mul_sat => .scalarize_mul_sat,
+ .div_float => .scalarize_div_float,
+ .div_float_optimized => .scalarize_div_float_optimized,
+ .div_trunc => .scalarize_div_trunc,
+ .div_trunc_optimized => .scalarize_div_trunc_optimized,
+ .div_floor => .scalarize_div_floor,
+ .div_floor_optimized => .scalarize_div_floor_optimized,
+ .div_exact => .scalarize_div_exact,
+ .div_exact_optimized => .scalarize_div_exact_optimized,
+ .rem => .scalarize_rem,
+ .rem_optimized => .scalarize_rem_optimized,
+ .mod => .scalarize_mod,
+ .mod_optimized => .scalarize_mod_optimized,
+ .max => .scalarize_max,
+ .min => .scalarize_min,
+ .bit_and => .scalarize_bit_and,
+ .bit_or => .scalarize_bit_or,
+ .shr => .scalarize_shr,
+ .shr_exact => .scalarize_shr_exact,
+ .shl => .scalarize_shl,
+ .shl_exact => .scalarize_shl_exact,
+ .shl_sat => .scalarize_shl_sat,
+ .xor => .scalarize_xor,
+ .not => .scalarize_not,
+ .clz => .scalarize_clz,
+ .ctz => .scalarize_ctz,
+ .popcount => .scalarize_popcount,
+ .byte_swap => .scalarize_byte_swap,
+ .bit_reverse => .scalarize_bit_reverse,
+ .sqrt => .scalarize_sqrt,
+ .sin => .scalarize_sin,
+ .cos => .scalarize_cos,
+ .tan => .scalarize_tan,
+ .exp => .scalarize_exp,
+ .exp2 => .scalarize_exp2,
+ .log => .scalarize_log,
+ .log2 => .scalarize_log2,
+ .log10 => .scalarize_log10,
+ .abs => .scalarize_abs,
+ .floor => .scalarize_floor,
+ .ceil => .scalarize_ceil,
+ .round => .scalarize_round,
+ .trunc_float => .scalarize_trunc_float,
+ .neg => .scalarize_neg,
+ .neg_optimized => .scalarize_neg_optimized,
+ .cmp_vector => .scalarize_cmp_vector,
+ .cmp_vector_optimized => .scalarize_cmp_vector_optimized,
+ .fptrunc => .scalarize_fptrunc,
+ .fpext => .scalarize_fpext,
+ .intcast => .scalarize_intcast,
+ .intcast_safe => .scalarize_intcast_safe,
+ .trunc => .scalarize_trunc,
+ .int_from_float => .scalarize_int_from_float,
+ .int_from_float_optimized => .scalarize_int_from_float_optimized,
+ .float_from_int => .scalarize_float_from_int,
+ .mul_add => .scalarize_mul_add,
+ };
+ }
};
pub const Features = std.enums.EnumSet(Feature);
pub const Error = std.mem.Allocator.Error;
-pub fn legalize(air: *Air, backend: std.builtin.CompilerBackend, pt: Zcu.PerThread) Error!void {
+pub fn legalize(air: *Air, pt: Zcu.PerThread, features: *const Features) Error!void {
var l: Legalize = .{
.pt = pt,
.air_instructions = air.instructions.toMultiArrayList(),
.air_extra = air.extra,
- .features = &features: switch (backend) {
- .other, .stage1 => unreachable,
- inline .stage2_llvm,
- .stage2_c,
- .stage2_wasm,
- .stage2_arm,
- .stage2_x86_64,
- .stage2_aarch64,
- .stage2_x86,
- .stage2_riscv64,
- .stage2_sparc64,
- .stage2_spirv64,
- .stage2_powerpc,
- => |ct_backend| {
- const Backend = codegen.importBackend(ct_backend) orelse break :features .initEmpty();
- break :features if (@hasDecl(Backend, "legalize_features")) Backend.legalize_features else .initEmpty();
- },
- _ => unreachable,
- },
+ .features = features,
};
if (l.features.bits.eql(.initEmpty())) return;
defer air.* = l.getTmpAir();
@@ -90,11 +196,93 @@ fn extraData(l: *const Legalize, comptime T: type, index: usize) @TypeOf(Air.ext
fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
const zcu = l.pt.zcu;
const ip = &zcu.intern_pool;
- for (body_start..body_start + body_len) |inst_extra_index| {
- const inst: Air.Inst.Index = @enumFromInt(l.air_extra.items[inst_extra_index]);
+ for (0..body_len) |body_index| {
+ const inst: Air.Inst.Index = @enumFromInt(l.air_extra.items[body_start + body_index]);
inst: switch (l.air_instructions.items(.tag)[@intFromEnum(inst)]) {
- else => {},
-
+ .arg,
+ => {},
+ inline .add,
+ .add_safe,
+ .add_optimized,
+ .add_wrap,
+ .add_sat,
+ .sub,
+ .sub_safe,
+ .sub_optimized,
+ .sub_wrap,
+ .sub_sat,
+ .mul,
+ .mul_safe,
+ .mul_optimized,
+ .mul_wrap,
+ .mul_sat,
+ .div_float,
+ .div_float_optimized,
+ .div_trunc,
+ .div_trunc_optimized,
+ .div_floor,
+ .div_floor_optimized,
+ .div_exact,
+ .div_exact_optimized,
+ .rem,
+ .rem_optimized,
+ .mod,
+ .mod_optimized,
+ .max,
+ .min,
+ .bit_and,
+ .bit_or,
+ .xor,
+ => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
+ const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ if (l.typeOf(bin_op.lhs).isVector(zcu)) continue :inst try l.scalarize(inst, .bin_op);
+ },
+ .ptr_add,
+ .ptr_sub,
+ .add_with_overflow,
+ .sub_with_overflow,
+ .mul_with_overflow,
+ .shl_with_overflow,
+ .alloc,
+ => {},
+ .inferred_alloc,
+ .inferred_alloc_comptime,
+ => unreachable,
+ .ret_ptr,
+ .assembly,
+ => {},
+ inline .shr,
+ .shr_exact,
+ .shl,
+ .shl_exact,
+ .shl_sat,
+ => |air_tag| done: {
+ const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op;
+ if (!l.typeOf(bin_op.rhs).isVector(zcu)) break :done;
+ if (l.features.contains(comptime .scalarize(air_tag))) {
+ continue :inst try l.scalarize(inst, .bin_op);
+ } else if (l.features.contains(.remove_shift_vector_rhs_splat)) {
+ if (bin_op.rhs.toInterned()) |rhs_ip_index| switch (ip.indexToKey(rhs_ip_index)) {
+ else => {},
+ .aggregate => |aggregate| switch (aggregate.storage) {
+ else => {},
+ .repeated_elem => |splat| continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
+ .lhs = bin_op.lhs,
+ .rhs = Air.internedToRef(splat),
+ } }),
+ },
+ } else {
+ const rhs_inst = bin_op.rhs.toIndex().?;
+ switch (l.air_instructions.items(.tag)[@intFromEnum(rhs_inst)]) {
+ else => {},
+ .splat => continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
+ .lhs = bin_op.lhs,
+ .rhs = l.air_instructions.items(.data)[@intFromEnum(rhs_inst)].ty_op.operand,
+ } }),
+ }
+ }
+ }
+ },
inline .not,
.clz,
.ctz,
@@ -102,11 +290,38 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
.byte_swap,
.bit_reverse,
.abs,
- => |air_tag| if (l.features.contains(@field(Feature, "scalarize_" ++ @tagName(air_tag)))) done: {
+ .fptrunc,
+ .fpext,
+ .intcast,
+ .intcast_safe,
+ .trunc,
+ .int_from_float,
+ .int_from_float_optimized,
+ .float_from_int,
+ => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const ty_op = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_op;
- if (!ty_op.ty.toType().isVector(zcu)) break :done;
- continue :inst try l.scalarizeUnary(inst, .ty_op, ty_op.operand);
+ if (ty_op.ty.toType().isVector(zcu)) continue :inst try l.scalarize(inst, .ty_op);
},
+ .bitcast,
+ => {},
+ .block,
+ .loop,
+ => {
+ const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = l.extraData(Air.Block, ty_pl.payload);
+ try l.legalizeBody(extra.end, extra.data.body_len);
+ },
+ .repeat,
+ .br,
+ .trap,
+ .breakpoint,
+ .ret_addr,
+ .frame_addr,
+ .call,
+ .call_always_tail,
+ .call_never_tail,
+ .call_never_inline,
+ => {},
inline .sqrt,
.sin,
.cos,
@@ -122,84 +337,39 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
.trunc_float,
.neg,
.neg_optimized,
- => |air_tag| if (l.features.contains(@field(Feature, "scalarize_" ++ @tagName(air_tag)))) done: {
+ => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const un_op = l.air_instructions.items(.data)[@intFromEnum(inst)].un_op;
- if (!l.typeOf(un_op).isVector(zcu)) break :done;
- continue :inst try l.scalarizeUnary(inst, .un_op, un_op);
- },
-
- .shl,
- .shl_exact,
- .shl_sat,
- .shr,
- .shr_exact,
- => |air_tag| if (l.features.contains(.remove_shift_vector_rhs_splat)) done: {
- const bin_op = l.air_instructions.items(.data)[@intFromEnum(inst)].bin_op;
- const ty = l.typeOf(bin_op.rhs);
- if (!ty.isVector(zcu)) break :done;
- if (bin_op.rhs.toInterned()) |rhs_ip_index| switch (ip.indexToKey(rhs_ip_index)) {
- else => {},
- .aggregate => |aggregate| switch (aggregate.storage) {
- else => {},
- .repeated_elem => |splat| continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
- .lhs = bin_op.lhs,
- .rhs = Air.internedToRef(splat),
- } }),
- },
- } else {
- const rhs_inst = bin_op.rhs.toIndex().?;
- switch (l.air_instructions.items(.tag)[@intFromEnum(rhs_inst)]) {
- else => {},
- .splat => continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
- .lhs = bin_op.lhs,
- .rhs = l.air_instructions.items(.data)[@intFromEnum(rhs_inst)].ty_op.operand,
- } }),
- }
- }
+ if (l.typeOf(un_op).isVector(zcu)) continue :inst try l.scalarize(inst, .un_op);
},
-
- .reduce,
- .reduce_optimized,
- => if (l.features.contains(.reduce_one_elem_to_bitcast)) done: {
- const reduce = l.air_instructions.items(.data)[@intFromEnum(inst)].reduce;
- const vector_ty = l.typeOf(reduce.operand);
- switch (vector_ty.vectorLen(zcu)) {
- 0 => unreachable,
- 1 => continue :inst l.replaceInst(inst, .bitcast, .{ .ty_op = .{
- .ty = Air.internedToRef(vector_ty.scalarType(zcu).toIntern()),
- .operand = reduce.operand,
- } }),
- else => break :done,
- }
- },
-
- .@"try", .try_cold => {
- const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
- const extra = l.extraData(Air.Try, pl_op.payload);
- try l.legalizeBody(extra.end, extra.data.body_len);
- },
- .try_ptr, .try_ptr_cold => {
+ .cmp_lt,
+ .cmp_lt_optimized,
+ .cmp_lte,
+ .cmp_lte_optimized,
+ .cmp_eq,
+ .cmp_eq_optimized,
+ .cmp_gte,
+ .cmp_gte_optimized,
+ .cmp_gt,
+ .cmp_gt_optimized,
+ .cmp_neq,
+ .cmp_neq_optimized,
+ => {},
+ inline .cmp_vector,
+ .cmp_vector_optimized,
+ => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = l.extraData(Air.TryPtr, ty_pl.payload);
- try l.legalizeBody(extra.end, extra.data.body_len);
- },
- .block, .loop => {
- const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = l.extraData(Air.Block, ty_pl.payload);
- try l.legalizeBody(extra.end, extra.data.body_len);
- },
- .dbg_inline_block => {
- const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = l.extraData(Air.DbgInlineBlock, ty_pl.payload);
- try l.legalizeBody(extra.end, extra.data.body_len);
+ if (ty_pl.ty.toType().isVector(zcu)) continue :inst try l.scalarize(inst, .ty_pl_vector_cmp);
},
- .cond_br => {
+ .cond_br,
+ => {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.CondBr, pl_op.payload);
try l.legalizeBody(extra.end, extra.data.then_body_len);
try l.legalizeBody(extra.end + extra.data.then_body_len, extra.data.else_body_len);
},
- .switch_br, .loop_switch_br => {
+ .switch_br,
+ .loop_switch_br,
+ => {
const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = l.extraData(Air.SwitchBr, pl_op.payload);
const hint_bag_count = std.math.divCeil(usize, extra.data.cases_len + 1, 10) catch unreachable;
@@ -212,154 +382,408 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
}
try l.legalizeBody(extra_index, extra.data.else_body_len);
},
+ .switch_dispatch,
+ => {},
+ .@"try",
+ .try_cold,
+ => {
+ const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const extra = l.extraData(Air.Try, pl_op.payload);
+ try l.legalizeBody(extra.end, extra.data.body_len);
+ },
+ .try_ptr,
+ .try_ptr_cold,
+ => {
+ const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = l.extraData(Air.TryPtr, ty_pl.payload);
+ try l.legalizeBody(extra.end, extra.data.body_len);
+ },
+ .dbg_stmt,
+ .dbg_empty_stmt,
+ => {},
+ .dbg_inline_block,
+ => {
+ const ty_pl = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = l.extraData(Air.DbgInlineBlock, ty_pl.payload);
+ try l.legalizeBody(extra.end, extra.data.body_len);
+ },
+ .dbg_var_ptr,
+ .dbg_var_val,
+ .dbg_arg_inline,
+ .is_null,
+ .is_non_null,
+ .is_null_ptr,
+ .is_non_null_ptr,
+ .is_err,
+ .is_non_err,
+ .is_err_ptr,
+ .is_non_err_ptr,
+ .bool_and,
+ .bool_or,
+ .load,
+ .ret,
+ .ret_safe,
+ .ret_load,
+ .store,
+ .store_safe,
+ .unreach,
+ => {},
+ .optional_payload,
+ .optional_payload_ptr,
+ .optional_payload_ptr_set,
+ .wrap_optional,
+ .unwrap_errunion_payload,
+ .unwrap_errunion_err,
+ .unwrap_errunion_payload_ptr,
+ .unwrap_errunion_err_ptr,
+ .errunion_payload_ptr_set,
+ .wrap_errunion_payload,
+ .wrap_errunion_err,
+ .struct_field_ptr,
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ .struct_field_val,
+ .set_union_tag,
+ .get_union_tag,
+ .slice,
+ .slice_len,
+ .slice_ptr,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
+ .array_elem_val,
+ .slice_elem_val,
+ .slice_elem_ptr,
+ .ptr_elem_val,
+ .ptr_elem_ptr,
+ .array_to_slice,
+ => {},
+ .reduce,
+ .reduce_optimized,
+ => if (l.features.contains(.reduce_one_elem_to_bitcast)) done: {
+ const reduce = l.air_instructions.items(.data)[@intFromEnum(inst)].reduce;
+ const vector_ty = l.typeOf(reduce.operand);
+ switch (vector_ty.vectorLen(zcu)) {
+ 0 => unreachable,
+ 1 => continue :inst l.replaceInst(inst, .bitcast, .{ .ty_op = .{
+ .ty = Air.internedToRef(vector_ty.scalarType(zcu).toIntern()),
+ .operand = reduce.operand,
+ } }),
+ else => break :done,
+ }
+ },
+ .splat,
+ .shuffle,
+ .select,
+ .memset,
+ .memset_safe,
+ .memcpy,
+ .memmove,
+ .cmpxchg_weak,
+ .cmpxchg_strong,
+ .atomic_load,
+ .atomic_store_unordered,
+ .atomic_store_monotonic,
+ .atomic_store_release,
+ .atomic_store_seq_cst,
+ .atomic_rmw,
+ .is_named_enum_value,
+ .tag_name,
+ .error_name,
+ .error_set_has_value,
+ .aggregate_init,
+ .union_init,
+ .prefetch,
+ => {},
+ inline .mul_add,
+ => |air_tag| if (l.features.contains(comptime .scalarize(air_tag))) {
+ const pl_op = l.air_instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ if (l.typeOf(pl_op.operand).isVector(zcu)) continue :inst try l.scalarize(inst, .pl_op_bin);
+ },
+ .field_parent_ptr,
+ .wasm_memory_size,
+ .wasm_memory_grow,
+ .cmp_lt_errors_len,
+ .err_return_trace,
+ .set_err_return_trace,
+ .addrspace_cast,
+ .save_err_return_trace_index,
+ .vector_store_elem,
+ .tlv_dllimport_ptr,
+ .c_va_arg,
+ .c_va_copy,
+ .c_va_end,
+ .c_va_start,
+ .work_item_id,
+ .work_group_size,
+ .work_group_id,
+ => {},
}
}
}
-const UnaryDataTag = enum { un_op, ty_op };
-inline fn scalarizeUnary(l: *Legalize, inst: Air.Inst.Index, data_tag: UnaryDataTag, un_op: Air.Inst.Ref) Error!Air.Inst.Tag {
- return l.replaceInst(inst, .block, try l.scalarizeUnaryBlockPayload(inst, data_tag, un_op));
+const ScalarizeDataTag = enum { un_op, ty_op, bin_op, ty_pl_vector_cmp, pl_op_bin };
+inline fn scalarize(l: *Legalize, orig_inst: Air.Inst.Index, comptime data_tag: ScalarizeDataTag) Error!Air.Inst.Tag {
+ return l.replaceInst(orig_inst, .block, try l.scalarizeBlockPayload(orig_inst, data_tag));
}
-fn scalarizeUnaryBlockPayload(
- l: *Legalize,
- inst: Air.Inst.Index,
- data_tag: UnaryDataTag,
- un_op: Air.Inst.Ref,
-) Error!Air.Inst.Data {
+fn scalarizeBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, comptime data_tag: ScalarizeDataTag) Error!Air.Inst.Data {
const pt = l.pt;
const zcu = pt.zcu;
const gpa = zcu.gpa;
- const res_ty = l.typeOfIndex(inst);
- try l.air_instructions.ensureUnusedCapacity(gpa, 15);
- const res_alloc_inst = l.addInstAssumeCapacity(.{
- .tag = .alloc,
- .data = .{ .ty = try pt.singleMutPtrType(res_ty) },
- });
- const index_alloc_inst = l.addInstAssumeCapacity(.{
- .tag = .alloc,
- .data = .{ .ty = try pt.singleMutPtrType(.usize) },
- });
- const index_init_inst = l.addInstAssumeCapacity(.{
- .tag = .store,
- .data = .{ .bin_op = .{
- .lhs = index_alloc_inst.toRef(),
- .rhs = try pt.intRef(.usize, 0),
- } },
- });
- const cur_index_inst = l.addInstAssumeCapacity(.{
- .tag = .load,
- .data = .{ .ty_op = .{
- .ty = .usize_type,
- .operand = index_alloc_inst.toRef(),
- } },
- });
- const get_elem_inst = l.addInstAssumeCapacity(.{
- .tag = .array_elem_val,
- .data = .{ .bin_op = .{
- .lhs = un_op,
- .rhs = cur_index_inst.toRef(),
- } },
- });
- const op_elem_inst = l.addInstAssumeCapacity(.{
- .tag = l.air_instructions.items(.tag)[@intFromEnum(inst)],
- .data = switch (data_tag) {
- .un_op => .{ .un_op = get_elem_inst.toRef() },
- .ty_op => .{ .ty_op = .{
- .ty = Air.internedToRef(res_ty.scalarType(zcu).toIntern()),
- .operand = get_elem_inst.toRef(),
+ const orig = l.air_instructions.get(@intFromEnum(orig_inst));
+ const res_ty = l.typeOfIndex(orig_inst);
+ const arity = switch (data_tag) {
+ .un_op, .ty_op => 1,
+ .bin_op, .ty_pl_vector_cmp => 2,
+ .pl_op_bin => 3,
+ };
+ const expected_instructions_len = l.air_instructions.len + (6 + arity + 8);
+ try l.air_instructions.ensureTotalCapacity(gpa, expected_instructions_len);
+
+ var res_block: Block(4) = .empty;
+ {
+ const res_alloc_inst = res_block.add(l.addInstAssumeCapacity(.{
+ .tag = .alloc,
+ .data = .{ .ty = try pt.singleMutPtrType(res_ty) },
+ }));
+ const index_alloc_inst = res_block.add(l.addInstAssumeCapacity(.{
+ .tag = .alloc,
+ .data = .{ .ty = .ptr_usize },
+ }));
+ _ = res_block.add(l.addInstAssumeCapacity(.{
+ .tag = .store,
+ .data = .{ .bin_op = .{
+ .lhs = index_alloc_inst.toRef(),
+ .rhs = .zero_usize,
} },
- },
- });
- const set_elem_inst = l.addInstAssumeCapacity(.{
- .tag = .vector_store_elem,
- .data = .{ .vector_store_elem = .{
- .vector_ptr = res_alloc_inst.toRef(),
- .payload = try l.addExtra(Air.Bin, .{
- .lhs = cur_index_inst.toRef(),
- .rhs = op_elem_inst.toRef(),
- }),
- } },
- });
- const not_done_inst = l.addInstAssumeCapacity(.{
- .tag = .cmp_lt,
- .data = .{ .bin_op = .{
- .lhs = cur_index_inst.toRef(),
- .rhs = try pt.intRef(.usize, res_ty.vectorLen(zcu)),
- } },
- });
- const next_index_inst = l.addInstAssumeCapacity(.{
- .tag = .add,
- .data = .{ .bin_op = .{
- .lhs = cur_index_inst.toRef(),
- .rhs = try pt.intRef(.usize, 1),
- } },
- });
- const set_index_inst = l.addInstAssumeCapacity(.{
- .tag = .store,
- .data = .{ .bin_op = .{
- .lhs = index_alloc_inst.toRef(),
- .rhs = next_index_inst.toRef(),
- } },
- });
- const loop_inst: Air.Inst.Index = @enumFromInt(l.air_instructions.len + 4);
- const repeat_inst = l.addInstAssumeCapacity(.{
- .tag = .repeat,
- .data = .{ .repeat = .{ .loop_inst = loop_inst } },
- });
- const final_res_inst = l.addInstAssumeCapacity(.{
- .tag = .load,
- .data = .{ .ty_op = .{
- .ty = Air.internedToRef(res_ty.toIntern()),
- .operand = res_alloc_inst.toRef(),
- } },
- });
- const br_res_inst = l.addInstAssumeCapacity(.{
- .tag = .br,
- .data = .{ .br = .{
- .block_inst = inst,
- .operand = final_res_inst.toRef(),
- } },
- });
- const done_br_inst = l.addInstAssumeCapacity(.{
- .tag = .cond_br,
- .data = .{ .pl_op = .{
- .operand = not_done_inst.toRef(),
- .payload = try l.addCondBrBodies(&.{
- next_index_inst,
- set_index_inst,
- repeat_inst,
- }, &.{
- final_res_inst,
- br_res_inst,
- }),
- } },
- });
- assert(loop_inst == l.addInstAssumeCapacity(.{
- .tag = .loop,
- .data = .{ .ty_pl = .{
- .ty = .noreturn_type,
- .payload = try l.addBlockBody(&.{
- cur_index_inst,
- get_elem_inst,
- op_elem_inst,
- set_elem_inst,
- not_done_inst,
- done_br_inst,
- }),
- } },
- }));
+ }));
+
+ const loop_inst: Air.Inst.Index = @enumFromInt(l.air_instructions.len + (3 + arity + 7));
+ var loop_block: Block(3 + arity + 2) = .empty;
+ {
+ const cur_index_inst = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .load,
+ .data = .{ .ty_op = .{
+ .ty = .usize_type,
+ .operand = index_alloc_inst.toRef(),
+ } },
+ }));
+ _ = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .vector_store_elem,
+ .data = .{ .vector_store_elem = .{
+ .vector_ptr = res_alloc_inst.toRef(),
+ .payload = try l.addExtra(Air.Bin, .{
+ .lhs = cur_index_inst.toRef(),
+ .rhs = loop_block.add(l.addInstAssumeCapacity(res_elem: switch (data_tag) {
+ .un_op => .{
+ .tag = orig.tag,
+ .data = .{ .un_op = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = orig.data.un_op,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef() },
+ },
+ .ty_op => .{
+ .tag = orig.tag,
+ .data = .{ .ty_op = .{
+ .ty = Air.internedToRef(orig.data.ty_op.ty.toType().scalarType(zcu).toIntern()),
+ .operand = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = orig.data.ty_op.operand,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ } },
+ },
+ .bin_op => .{
+ .tag = orig.tag,
+ .data = .{ .bin_op = .{
+ .lhs = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = orig.data.bin_op.lhs,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ .rhs = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = orig.data.bin_op.rhs,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ } },
+ },
+ .ty_pl_vector_cmp => {
+ const extra = l.extraData(Air.VectorCmp, orig.data.ty_pl.payload).data;
+ break :res_elem .{
+ .tag = switch (orig.tag) {
+ else => unreachable,
+ .cmp_vector => switch (extra.compareOperator()) {
+ .lt => .cmp_lt,
+ .lte => .cmp_lte,
+ .eq => .cmp_eq,
+ .gte => .cmp_gte,
+ .gt => .cmp_gt,
+ .neq => .cmp_neq,
+ },
+ .cmp_vector_optimized => switch (extra.compareOperator()) {
+ .lt => .cmp_lt_optimized,
+ .lte => .cmp_lte_optimized,
+ .eq => .cmp_eq_optimized,
+ .gte => .cmp_gte_optimized,
+ .gt => .cmp_gt_optimized,
+ .neq => .cmp_neq_optimized,
+ },
+ },
+ .data = .{ .bin_op = .{
+ .lhs = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = extra.lhs,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ .rhs = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = extra.rhs,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ } },
+ };
+ },
+ .pl_op_bin => {
+ const extra = l.extraData(Air.Bin, orig.data.pl_op.payload).data;
+ break :res_elem .{
+ .tag = orig.tag,
+ .data = .{ .pl_op = .{
+ .payload = try l.addExtra(Air.Bin, .{
+ .lhs = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = extra.lhs,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ .rhs = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = extra.rhs,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ }),
+ .operand = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .array_elem_val,
+ .data = .{ .bin_op = .{
+ .lhs = orig.data.pl_op.operand,
+ .rhs = cur_index_inst.toRef(),
+ } },
+ })).toRef(),
+ } },
+ };
+ },
+ })).toRef(),
+ }),
+ } },
+ }));
+ const not_done_inst = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .cmp_lt,
+ .data = .{ .bin_op = .{
+ .lhs = cur_index_inst.toRef(),
+ .rhs = try pt.intRef(.usize, res_ty.vectorLen(zcu) - 1),
+ } },
+ }));
+
+ var not_done_block: Block(3) = .empty;
+ {
+ _ = not_done_block.add(l.addInstAssumeCapacity(.{
+ .tag = .store,
+ .data = .{ .bin_op = .{
+ .lhs = index_alloc_inst.toRef(),
+ .rhs = not_done_block.add(l.addInstAssumeCapacity(.{
+ .tag = .add,
+ .data = .{ .bin_op = .{
+ .lhs = cur_index_inst.toRef(),
+ .rhs = .one_usize,
+ } },
+ })).toRef(),
+ } },
+ }));
+ _ = not_done_block.add(l.addInstAssumeCapacity(.{
+ .tag = .repeat,
+ .data = .{ .repeat = .{ .loop_inst = loop_inst } },
+ }));
+ }
+ var done_block: Block(2) = .empty;
+ {
+ _ = done_block.add(l.addInstAssumeCapacity(.{
+ .tag = .br,
+ .data = .{ .br = .{
+ .block_inst = orig_inst,
+ .operand = done_block.add(l.addInstAssumeCapacity(.{
+ .tag = .load,
+ .data = .{ .ty_op = .{
+ .ty = Air.internedToRef(res_ty.toIntern()),
+ .operand = res_alloc_inst.toRef(),
+ } },
+ })).toRef(),
+ } },
+ }));
+ }
+ _ = loop_block.add(l.addInstAssumeCapacity(.{
+ .tag = .cond_br,
+ .data = .{ .pl_op = .{
+ .operand = not_done_inst.toRef(),
+ .payload = try l.addCondBrBodies(not_done_block.body(), done_block.body()),
+ } },
+ }));
+ }
+ assert(loop_inst == res_block.add(l.addInstAssumeCapacity(.{
+ .tag = .loop,
+ .data = .{ .ty_pl = .{
+ .ty = .noreturn_type,
+ .payload = try l.addBlockBody(loop_block.body()),
+ } },
+ })));
+ }
+ assert(l.air_instructions.len == expected_instructions_len);
return .{ .ty_pl = .{
.ty = Air.internedToRef(res_ty.toIntern()),
- .payload = try l.addBlockBody(&.{
- res_alloc_inst,
- index_alloc_inst,
- index_init_inst,
- loop_inst,
- }),
+ .payload = try l.addBlockBody(res_block.body()),
} };
}
+fn Block(comptime capacity: usize) type {
+ return struct {
+ instructions: [capacity]Air.Inst.Index,
+ len: usize,
+
+ const empty: @This() = .{
+ .instructions = undefined,
+ .len = 0,
+ };
+
+ fn add(b: *@This(), inst: Air.Inst.Index) Air.Inst.Index {
+ b.instructions[b.len] = inst;
+ b.len += 1;
+ return inst;
+ }
+
+ fn body(b: *const @This()) []const Air.Inst.Index {
+ assert(b.len == b.instructions.len);
+ return &b.instructions;
+ }
+ };
+}
+
fn addInstAssumeCapacity(l: *Legalize, inst: Air.Inst) Air.Inst.Index {
defer l.air_instructions.appendAssumeCapacity(inst);
return @enumFromInt(l.air_instructions.len);
@@ -414,7 +838,6 @@ inline fn replaceInst(l: *Legalize, inst: Air.Inst.Index, tag: Air.Inst.Tag, dat
const Air = @import("../Air.zig");
const assert = std.debug.assert;
-const codegen = @import("../codegen.zig");
const Legalize = @This();
const std = @import("std");
const Type = @import("../Type.zig");
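
The rewritten scalarize path swaps a single vector instruction for a block: two allocs (result vector and loop index), a zero store, then a loop that loads the index, computes one scalar element, stores it with vector_store_elem, and either increments-and-repeats or breaks out with the loaded result; the fixed-capacity Block helper's body() asserts each sub-body was filled exactly, catching miscounted instruction budgets. In source terms the emitted AIR corresponds roughly to the following sketch (the shape of the lowering for a vector add, not the literal output):

fn scalarizedAdd(comptime n: usize, a: @Vector(n, u32), b: @Vector(n, u32)) @Vector(n, u32) {
    var res: @Vector(n, u32) = undefined; // result alloc
    var i: usize = 0; // index alloc + store of zero_usize
    while (true) {
        const elem = a[i] + b[i]; // array_elem_val per operand, then the scalar op
        res[i] = elem; // vector_store_elem
        if (i < n - 1) { // cmp_lt against vectorLen - 1
            i += 1; // add one_usize, stored back to the index
            continue; // repeat, targeting the loop header
        }
        break; // done branch: load res, br out of the block
    }
    return res;
}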
src/arch/x86_64/CodeGen.zig
@@ -32,15 +32,65 @@ const FrameIndex = bits.FrameIndex;
const InnerError = codegen.CodeGenError || error{OutOfRegisters};
-pub const legalize_features: Air.Legalize.Features = .init(.{
- .scalarize_ctz = true,
- .scalarize_popcount = true,
- .scalarize_byte_swap = true,
- .scalarize_bit_reverse = true,
-
- .remove_shift_vector_rhs_splat = false,
- .reduce_one_elem_to_bitcast = true,
-});
+pub inline fn legalizeFeatures(target: *const std.Target) *const Air.Legalize.Features {
+ @setEvalBranchQuota(1_200);
+ return switch (target.ofmt == .coff) {
+ inline false, true => |use_old| comptime &.init(.{
+ .scalarize_add = use_old,
+ .scalarize_add_sat = use_old,
+ .scalarize_sub = use_old,
+ .scalarize_sub_sat = use_old,
+ .scalarize_mul = use_old,
+ .scalarize_mul_wrap = use_old,
+ .scalarize_mul_sat = true,
+ .scalarize_div_float = use_old,
+ .scalarize_div_float_optimized = use_old,
+ .scalarize_div_trunc = use_old,
+ .scalarize_div_trunc_optimized = use_old,
+ .scalarize_div_floor = use_old,
+ .scalarize_div_floor_optimized = use_old,
+ .scalarize_div_exact = use_old,
+ .scalarize_div_exact_optimized = use_old,
+ .scalarize_max = use_old,
+ .scalarize_min = use_old,
+ .scalarize_shr = true,
+ .scalarize_shr_exact = true,
+ .scalarize_shl = true,
+ .scalarize_shl_exact = true,
+ .scalarize_shl_sat = true,
+ .scalarize_not = use_old,
+ .scalarize_clz = use_old,
+ .scalarize_ctz = true,
+ .scalarize_popcount = true,
+ .scalarize_byte_swap = true,
+ .scalarize_bit_reverse = true,
+ .scalarize_sin = use_old,
+ .scalarize_cos = use_old,
+ .scalarize_tan = use_old,
+ .scalarize_exp = use_old,
+ .scalarize_exp2 = use_old,
+ .scalarize_log = use_old,
+ .scalarize_log2 = use_old,
+ .scalarize_log10 = use_old,
+ .scalarize_abs = use_old,
+ .scalarize_floor = use_old,
+ .scalarize_ceil = use_old,
+ .scalarize_trunc_float = use_old,
+ .scalarize_cmp_vector = true,
+ .scalarize_cmp_vector_optimized = true,
+ .scalarize_fptrunc = use_old,
+ .scalarize_fpext = use_old,
+ .scalarize_intcast = use_old,
+ .scalarize_int_from_float = use_old,
+ .scalarize_int_from_float_optimized = use_old,
+ .scalarize_float_from_int = use_old,
+ .scalarize_mul_add = use_old,
+
+ .remove_shift_vector_rhs_splat = false,
+ .reduce_one_elem_to_bitcast = true,
+ }),
+ };
+}
/// Set this to `false` to uncover Sema OPV bugs.
/// https://github.com/ziglang/zig/issues/22419
@@ -5719,7 +5769,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.extra_temps = .{
.{ .type = .i64, .kind = .{ .rc = .general_purpose } },
- .{ .type = .i64, .kind = .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } } },
+ .{ .type = .i64, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
@@ -92473,7 +92523,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
- .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
+ .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .ui(63), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
@@ -92505,7 +92555,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
- .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
+ .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
@@ -92539,7 +92589,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp1p, .mem(.dst0), ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
- .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ },
+ .{ ._, ._, .mov, .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ },
.{ ._, ._l, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._r, .sa, .tmp0q, .uia(64, .dst0, .sub_bit_size_rem_64), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
@@ -92600,7 +92650,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ },
- .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -16), .tmp2q, ._ },
+ .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -16), .tmp2q, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp2q, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
} },
@@ -92632,7 +92682,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp2d, .sa(.dst0, .add_bit_size_rem_64), ._, ._ },
- .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_size, -8), .tmp2q, ._ },
+ .{ ._, ._, .bzhi, .tmp2q, .memad(.src0q, .add_dst0_size, -8), .tmp2q, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp2q, ._, ._ },
} },
}, .{
@@ -92663,7 +92713,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-2, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ },
- .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -16), ._, ._ },
+ .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -16), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -16), .tmp0q, ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .si(0), ._, ._ },
} },
@@ -92695,7 +92745,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .mov, .tmp2d, .sia(-1, .dst0, .add_size_div_8), ._, ._ },
.{ ._, .@"rep _sq", .mov, ._, ._, ._, ._ },
.{ ._, ._, .mov, .tmp0q, .ua(.dst0, .add_umax), ._, ._ },
- .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_size, -8), ._, ._ },
+ .{ ._, ._, .@"and", .tmp0q, .memad(.src0q, .add_dst0_size, -8), ._, ._ },
.{ ._, ._, .mov, .memad(.dst0q, .add_size, -8), .tmp0q, ._, ._ },
} },
}, .{
@@ -165537,9 +165587,7 @@ fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void {
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
(try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(),
- } ++ [1]InternPool.Index{
- (try pt.intValue(.u8, 0)).toIntern(),
- } ** 15) },
+ } ++ [1]InternPool.Index{.zero_u8} ** 15) },
} })));
const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address());
const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
@@ -178776,10 +178824,10 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
const mask_ty = try pt.vectorType(.{ .len = vec_len, .child = mask_elem_ty.toIntern() });
var mask_elems_buf: [32]InternPool.Index = undefined;
const mask_elems = mask_elems_buf[0..vec_len];
- for (mask_elems, 0..) |*elem, bit| elem.* = try pt.intern(.{ .int = .{
- .ty = mask_elem_ty.toIntern(),
- .storage = .{ .u64 = @as(u64, 1) << @intCast(bit) },
- } });
+ for (mask_elems, 0..) |*elem, bit| elem.* = (try pt.intValue(
+ mask_elem_ty,
+ @as(u8, 1) << @truncate(bit),
+ )).toIntern();
const mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = mask_elems },
@@ -179647,16 +179695,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
var lhs_mask_elems: [16]InternPool.Index = undefined;
for (lhs_mask_elems[0..max_abi_size], 0..) |*lhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
- lhs_mask_elem.* = try pt.intern(.{ .int = .{
- .ty = .u8_type,
- .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
- const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
- if (mask_elem < 0) break :elem 0b1_00_00000;
- const mask_elem_index: u31 = @intCast(mask_elem);
- const byte_off: u32 = @intCast(byte_index % elem_abi_size);
- break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
- } },
- } });
+ lhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
+ const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
+ if (mask_elem < 0) break :elem 0b1_00_00000;
+ const mask_elem_index: u31 = @intCast(mask_elem);
+ const byte_off: u32 = @intCast(byte_index % elem_abi_size);
+ break :elem mask_elem_index * elem_abi_size + byte_off;
+ })).toIntern();
}
const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const lhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -179681,16 +179726,13 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
var rhs_mask_elems: [16]InternPool.Index = undefined;
for (rhs_mask_elems[0..max_abi_size], 0..) |*rhs_mask_elem, byte_index| {
const elem_index = byte_index / elem_abi_size;
- rhs_mask_elem.* = try pt.intern(.{ .int = .{
- .ty = .u8_type,
- .storage = .{ .u64 = if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
- const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
- if (mask_elem >= 0) break :elem 0b1_00_00000;
- const mask_elem_index: u31 = @intCast(~mask_elem);
- const byte_off: u32 = @intCast(byte_index % elem_abi_size);
- break :elem @intCast(mask_elem_index * elem_abi_size + byte_off);
- } },
- } });
+ rhs_mask_elem.* = (try pt.intValue(.u8, if (elem_index >= mask_elems.len) 0b1_00_00000 else elem: {
+ const mask_elem = mask_elems[elem_index] orelse break :elem 0b1_00_00000;
+ if (mask_elem >= 0) break :elem 0b1_00_00000;
+ const mask_elem_index: u31 = @intCast(~mask_elem);
+ const byte_off: u32 = @intCast(byte_index % elem_abi_size);
+ break :elem mask_elem_index * elem_abi_size + byte_off;
+ })).toIntern();
}
const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
const rhs_mask_mcv = try self.genTypedValue(.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -188160,6 +188202,7 @@ const Select = struct {
ptr_bit_size,
size,
src0_size,
+ dst0_size,
delta_size,
delta_elem_size,
unaligned_size,
@@ -188203,6 +188246,7 @@ const Select = struct {
const sub_src0_size: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_8_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"8" };
+ const add_dst0_size: Adjust = .{ .sign = .pos, .lhs = .dst0_size, .op = .mul, .rhs = .@"1" };
const add_delta_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_size, .op = .div, .rhs = .@"8" };
const add_delta_elem_size: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .mul, .rhs = .@"1" };
const add_delta_elem_size_div_8: Adjust = .{ .sign = .pos, .lhs = .delta_elem_size, .op = .div, .rhs = .@"8" };
@@ -188998,6 +189042,7 @@ const Select = struct {
.ptr_bit_size => s.cg.target.ptrBitWidth(),
.size => @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.src0_size => @intCast(Select.Operand.Ref.src0.typeOf(s).abiSize(s.cg.pt.zcu)),
+ .dst0_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).abiSize(s.cg.pt.zcu)),
.delta_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu))) -
@as(SignedImm, @intCast(op.flags.index.ref.typeOf(s).abiSize(s.cg.pt.zcu)))),
.delta_elem_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))) -
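
legalizeFeatures replaces the old legalize_features constant so the feature set can vary with the target; switching on the bool target.ofmt == .coff with inline prongs makes use_old comptime-known in each prong, so both EnumSets are materialized at compile time and the function merely returns a pointer to the matching constant. A reduced sketch of that pattern with a hypothetical two-feature set:

const std = @import("std");

const Feature = enum { scalarize_add, scalarize_mul_sat };
const Features = std.enums.EnumSet(Feature);

fn legalizeFeatures(use_old: bool) *const Features {
    return switch (use_old) {
        // `inline` prongs duplicate the body with `flag` comptime-known,
        // so each set is a comptime constant whose address is static.
        inline false, true => |flag| comptime &Features.init(.{
            .scalarize_add = flag,
            .scalarize_mul_sat = true,
        }),
    };
}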
src/codegen/c/Type.zig
@@ -1408,6 +1408,15 @@ pub const Pool = struct {
.bits = pt.zcu.errorSetBits(),
}, mod, kind),
+ .ptr_usize_type,
+ => return pool.getPointer(allocator, .{
+ .elem_ctype = .usize,
+ }),
+ .ptr_const_comptime_int_type,
+ => return pool.getPointer(allocator, .{
+ .elem_ctype = .void,
+ .@"const" = true,
+ }),
.manyptr_u8_type,
=> return pool.getPointer(allocator, .{
.elem_ctype = .u8,
@@ -1418,11 +1427,6 @@ pub const Pool = struct {
.elem_ctype = .u8,
.@"const" = true,
}),
- .single_const_pointer_to_comptime_int_type,
- => return pool.getPointer(allocator, .{
- .elem_ctype = .void,
- .@"const" = true,
- }),
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
=> {
@@ -2157,11 +2161,16 @@ pub const Pool = struct {
},
.undef,
+ .undef_bool,
+ .undef_usize,
+ .undef_u1,
.zero,
.zero_usize,
+ .zero_u1,
.zero_u8,
.one,
.one_usize,
+ .one_u1,
.one_u8,
.four_u8,
.negative_one,
@@ -2172,7 +2181,7 @@ pub const Pool = struct {
.bool_false,
.empty_tuple,
.none,
- => unreachable,
+ => unreachable, // values, not types
_ => |ip_index| switch (ip.indexToKey(ip_index)) {
.int_type => |int_info| return pool.fromIntInfo(allocator, int_info, mod, kind),
src/codegen/c.zig
@@ -1591,7 +1591,7 @@ pub const DeclGen = struct {
try writer.writeAll("((");
try dg.renderCType(writer, ctype);
return writer.print("){x})", .{
- try dg.fmtIntLiteral(try pt.undefValue(.usize), .Other),
+ try dg.fmtIntLiteral(.undef_usize, .Other),
});
},
.slice => {
@@ -1605,7 +1605,7 @@ pub const DeclGen = struct {
const ptr_ty = ty.slicePtrFieldType(zcu);
try dg.renderType(writer, ptr_ty);
return writer.print("){x}, {0x}}}", .{
- try dg.fmtIntLiteral(try dg.pt.undefValue(.usize), .Other),
+ try dg.fmtIntLiteral(.undef_usize, .Other),
});
},
},
@@ -6376,7 +6376,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
if (operand_child_ctype.info(ctype_pool) == .array) {
try writer.writeByte('&');
try f.writeCValueDeref(writer, operand);
- try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))});
+ try writer.print("[{}]", .{try f.fmtIntLiteral(.zero_usize)});
} else try f.writeCValue(writer, operand, .Other);
}
try a.end(f, writer);
@@ -6907,7 +6907,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("for (");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Other);
+ try f.object.dg.renderValue(writer, .zero_usize, .Other);
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
@@ -8311,11 +8311,11 @@ const Vectorize = struct {
try writer.writeAll("for (");
try f.writeCValue(writer, local, .Other);
- try writer.print(" = {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))});
+ try writer.print(" = {d}; ", .{try f.fmtIntLiteral(.zero_usize)});
try f.writeCValue(writer, local, .Other);
try writer.print(" < {d}; ", .{try f.fmtIntLiteral(try pt.intValue(.usize, ty.vectorLen(zcu)))});
try f.writeCValue(writer, local, .Other);
- try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(try pt.intValue(.usize, 1))});
+ try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(.one_usize)});
f.object.indent_writer.pushIndent();
break :index .{ .index = local };
src/codegen/llvm.zig
@@ -3081,10 +3081,11 @@ pub const Object = struct {
.undefined_type,
.enum_literal_type,
=> unreachable,
+ .ptr_usize_type,
+ .ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
- .single_const_pointer_to_comptime_int_type,
=> .ptr,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
@@ -3098,11 +3099,16 @@ pub const Object = struct {
=> unreachable,
// values, not types
.undef,
+ .undef_bool,
+ .undef_usize,
+ .undef_u1,
.zero,
.zero_usize,
+ .zero_u1,
.zero_u8,
.one,
.one_usize,
+ .one_u1,
.one_u8,
.four_u8,
.negative_one,
src/Sema/arith.zig
@@ -168,7 +168,7 @@ fn addWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
- .overflow_bit = try pt.undefValue(.u1),
+ .overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intAddWithOverflow(sema, lhs, rhs, ty);
@@ -229,7 +229,7 @@ fn subWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
- .overflow_bit = try pt.undefValue(.u1),
+ .overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intSubWithOverflow(sema, lhs, rhs, ty);
@@ -290,7 +290,7 @@ fn mulWithOverflowScalar(
else => unreachable,
}
if (lhs.isUndef(zcu) or rhs.isUndef(zcu)) return .{
- .overflow_bit = try pt.undefValue(.u1),
+ .overflow_bit = .undef_u1,
.wrapped_result = try pt.undefValue(ty),
};
return intMulWithOverflow(sema, lhs, rhs, ty);
@@ -1043,7 +1043,7 @@ fn comptimeIntAdd(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intAddWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
- .overflow_bit = try sema.pt.intValue(.u1, 0),
+ .overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntAdd(sema, lhs, rhs),
},
else => return intAddWithOverflowInner(sema, lhs, rhs, ty),
@@ -1125,7 +1125,7 @@ fn comptimeIntSub(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intSubWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
- .overflow_bit = try sema.pt.intValue(.u1, 0),
+ .overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntSub(sema, lhs, rhs),
},
else => return intSubWithOverflowInner(sema, lhs, rhs, ty),
@@ -1211,7 +1211,7 @@ fn comptimeIntMul(sema: *Sema, lhs: Value, rhs: Value) !Value {
fn intMulWithOverflow(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value.OverflowArithmeticResult {
switch (ty.toIntern()) {
.comptime_int_type => return .{
- .overflow_bit = try sema.pt.intValue(.u1, 0),
+ .overflow_bit = .zero_u1,
.wrapped_result = try comptimeIntMul(sema, lhs, rhs),
},
else => return intMulWithOverflowInner(sema, lhs, rhs, ty),
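
The new u1 constants slot directly into these overflow results: the overflow bit is a u1, comptime_int arithmetic can never overflow (hence zero_u1), and an undef operand propagates undef into both fields. At the language level this is the pair that @addWithOverflow and friends return, for example:

const std = @import("std");

test "wrapped result plus u1 overflow bit" {
    const r = @addWithOverflow(@as(u8, 200), @as(u8, 100));
    try std.testing.expectEqual(@as(u8, 44), r[0]); // 300 wrapped to 8 bits
    try std.testing.expectEqual(@as(u1, 1), r[1]); // the u1 overflow bit
}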
src/Zcu/PerThread.zig
@@ -1741,8 +1741,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *A
return;
}
- const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
- try air.legalize(backend, pt);
+ try air.legalize(pt, @import("../codegen.zig").legalizeFeatures(pt, nav_index));
var liveness = try Air.Liveness.analyze(gpa, air.*, ip);
defer liveness.deinit(gpa);
@@ -3022,7 +3021,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
// is unused so it just has to be a no-op.
sema.air_instructions.set(@intFromEnum(ptr_inst), .{
.tag = .alloc,
- .data = .{ .ty = Type.single_const_pointer_to_comptime_int },
+ .data = .{ .ty = .ptr_const_comptime_int },
});
}
src/Air.zig
@@ -1011,10 +1011,11 @@ pub const Inst = struct {
null_type = @intFromEnum(InternPool.Index.null_type),
undefined_type = @intFromEnum(InternPool.Index.undefined_type),
enum_literal_type = @intFromEnum(InternPool.Index.enum_literal_type),
+ ptr_usize_type = @intFromEnum(InternPool.Index.ptr_usize_type),
+ ptr_const_comptime_int_type = @intFromEnum(InternPool.Index.ptr_const_comptime_int_type),
manyptr_u8_type = @intFromEnum(InternPool.Index.manyptr_u8_type),
manyptr_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_u8_type),
manyptr_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.manyptr_const_u8_sentinel_0_type),
- single_const_pointer_to_comptime_int_type = @intFromEnum(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
vector_8_i8_type = @intFromEnum(InternPool.Index.vector_8_i8_type),
@@ -1070,11 +1071,16 @@ pub const Inst = struct {
generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
empty_tuple_type = @intFromEnum(InternPool.Index.empty_tuple_type),
undef = @intFromEnum(InternPool.Index.undef),
+ undef_bool = @intFromEnum(InternPool.Index.undef_bool),
+ undef_usize = @intFromEnum(InternPool.Index.undef_usize),
+ undef_u1 = @intFromEnum(InternPool.Index.undef_u1),
zero = @intFromEnum(InternPool.Index.zero),
zero_usize = @intFromEnum(InternPool.Index.zero_usize),
+ zero_u1 = @intFromEnum(InternPool.Index.zero_u1),
zero_u8 = @intFromEnum(InternPool.Index.zero_u8),
one = @intFromEnum(InternPool.Index.one),
one_usize = @intFromEnum(InternPool.Index.one_usize),
+ one_u1 = @intFromEnum(InternPool.Index.one_u1),
one_u8 = @intFromEnum(InternPool.Index.one_u8),
four_u8 = @intFromEnum(InternPool.Index.four_u8),
negative_one = @intFromEnum(InternPool.Index.negative_one),
@@ -1121,7 +1127,7 @@ pub const Inst = struct {
}
pub fn toType(ref: Ref) Type {
- return Type.fromInterned(ref.toInterned().?);
+ return .fromInterned(ref.toInterned().?);
}
};
@@ -1393,7 +1399,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
if (inst.toInterned()) |ip_index| {
- return Type.fromInterned(ip.typeOf(ip_index));
+ return .fromInterned(ip.typeOf(ip_index));
} else {
return air.typeOfIndex(inst.toIndex().?, ip);
}
@@ -1483,7 +1489,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.is_non_err_ptr,
.is_named_enum_value,
.error_set_has_value,
- => return Type.bool,
+ => return .bool,
.alloc,
.ret_ptr,
@@ -1574,7 +1580,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.ret_load,
.unreach,
.trap,
- => return Type.noreturn,
+ => return .noreturn,
.breakpoint,
.dbg_stmt,
@@ -1597,22 +1603,22 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.set_err_return_trace,
.vector_store_elem,
.c_va_end,
- => return Type.void,
+ => return .void,
.slice_len,
.ret_addr,
.frame_addr,
.save_err_return_trace_index,
- => return Type.usize,
+ => return .usize,
- .wasm_memory_grow => return Type.isize,
- .wasm_memory_size => return Type.usize,
+ .wasm_memory_grow => return .isize,
+ .wasm_memory_size => return .usize,
- .tag_name, .error_name => return Type.slice_const_u8_sentinel_0,
+ .tag_name, .error_name => return .slice_const_u8_sentinel_0,
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
- return Type.fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
+ return .fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
@@ -1630,7 +1636,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.reduce, .reduce_optimized => {
const operand_ty = air.typeOf(datas[@intFromEnum(inst)].reduce.operand, ip);
- return Type.fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
+ return .fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
},
.mul_add => return air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip),
@@ -1641,7 +1647,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.@"try", .try_cold => {
const err_union_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
- return Type.fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
+ return .fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
},
.tlv_dllimport_ptr => return .fromInterned(datas[@intFromEnum(inst)].ty_nav.ty),
@@ -1649,7 +1655,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.work_item_id,
.work_group_size,
.work_group_id,
- => return Type.u32,
+ => return .u32,
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
@@ -1696,7 +1702,7 @@ pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref {
/// Returns `null` if runtime-known.
pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value {
if (inst.toInterned()) |ip_index| {
- return Value.fromInterned(ip_index);
+ return .fromInterned(ip_index);
}
const index = inst.toIndex().?;
return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt);
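
The Type.fromInterned(...) to .fromInterned(...) edits rely on decl literals: when the result type is already known (here from the return type), .name resolves to a declaration on that type, so the type name can be dropped. A minimal sketch of the mechanism, with a stand-in Type:

const Type = struct {
    ip_index: u32,
    pub fn fromInterned(i: u32) Type {
        return .{ .ip_index = i };
    }
};

fn typeOfThing(i: u32) Type {
    // Decl literal: the return type is `Type`, so `.fromInterned`
    // resolves to `Type.fromInterned`.
    return .fromInterned(i);
}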
src/codegen.zig
@@ -32,8 +32,9 @@ fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Featu
return @field(dev.Feature, @tagName(backend)["stage2_".len..] ++ "_backend");
}
-pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type {
+fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
return switch (backend) {
+ .other, .stage1 => unreachable,
.stage2_aarch64 => @import("arch/aarch64/CodeGen.zig"),
.stage2_arm => @import("arch/arm/CodeGen.zig"),
.stage2_c => @import("codegen/c.zig"),
@@ -42,11 +43,35 @@ pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type {
.stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"),
.stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"),
.stage2_spirv64 => @import("codegen/spirv.zig"),
- .stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
- else => null,
+ .stage2_wasm => @import("arch/wasm/CodeGen.zig"),
+ .stage2_x86, .stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
+ _ => unreachable,
};
}
+pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) *const Air.Legalize.Features {
+ const zcu = pt.zcu;
+ const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
+ switch (target_util.zigBackend(target.*, zcu.comp.config.use_llvm)) {
+ else => unreachable,
+ inline .stage2_llvm,
+ .stage2_c,
+ .stage2_wasm,
+ .stage2_arm,
+ .stage2_x86_64,
+ .stage2_aarch64,
+ .stage2_x86,
+ .stage2_riscv64,
+ .stage2_sparc64,
+ .stage2_spirv64,
+ .stage2_powerpc,
+ => |backend| {
+ const Backend = importBackend(backend);
+ return if (@hasDecl(Backend, "legalizeFeatures")) Backend.legalizeFeatures(target) else &.initEmpty();
+ },
+ }
+}
+
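
The new `legalizeFeatures` relies on a comptime dispatch idiom: an `inline` switch prong turns the runtime backend tag into a comptime-known value, so each instantiation imports a different module and can probe it with `@hasDecl`. A stripped-down sketch of the idiom; `Backend`, `AlphaImpl`, and `BetaImpl` are illustrative stand-ins, not the compiler's backends:

const std = @import("std");

const Backend = enum { alpha, beta };

const AlphaImpl = struct {
    pub fn legalizeFeatures() u32 {
        return 0b101;
    }
};
const BetaImpl = struct {}; // no `legalizeFeatures` decl

fn importBackend(comptime backend: Backend) type {
    return switch (backend) {
        .alpha => AlphaImpl,
        .beta => BetaImpl,
    };
}

fn legalizeFeatures(backend: Backend) u32 {
    switch (backend) {
        inline else => |b| {
            // `b` is comptime-known inside each instantiation of this
            // prong, so the `@hasDecl` branch resolves at compile time.
            const Impl = importBackend(b);
            return if (@hasDecl(Impl, "legalizeFeatures")) Impl.legalizeFeatures() else 0;
        },
    }
}

pub fn main() void {
    std.debug.print("{b} {b}\n", .{ legalizeFeatures(.alpha), legalizeFeatures(.beta) });
}
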
pub fn generateFunction(
lf: *link.File,
pt: Zcu.PerThread,
@@ -60,7 +85,7 @@ pub fn generateFunction(
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
- switch (target_util.zigBackend(target, false)) {
+ switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_aarch64,
.stage2_arm,
@@ -70,7 +95,7 @@ pub fn generateFunction(
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
- return importBackend(backend).?.generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
+ return importBackend(backend).generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
},
}
}
@@ -88,14 +113,14 @@ pub fn generateLazyFunction(
zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
else
zcu.getTarget();
- switch (target_util.zigBackend(target, false)) {
+ switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_powerpc,
.stage2_riscv64,
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
- return importBackend(backend).?.generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
+ return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},
}
}
src/InternPool.zig
@@ -4579,10 +4579,11 @@ pub const Index = enum(u32) {
undefined_type,
enum_literal_type,
+ ptr_usize_type,
+ ptr_const_comptime_int_type,
manyptr_u8_type,
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
- single_const_pointer_to_comptime_int_type,
slice_const_u8_type,
slice_const_u8_sentinel_0_type,
@@ -4649,19 +4650,29 @@ pub const Index = enum(u32) {
/// `undefined` (untyped)
undef,
+ /// `@as(bool, undefined)`
+ undef_bool,
+ /// `@as(usize, undefined)`
+ undef_usize,
+ /// `@as(u1, undefined)`
+ undef_u1,
/// `0` (comptime_int)
zero,
- /// `0` (usize)
+ /// `@as(usize, 0)`
zero_usize,
- /// `0` (u8)
+ /// `@as(u1, 0)`
+ zero_u1,
+ /// `@as(u8, 0)`
zero_u8,
/// `1` (comptime_int)
one,
- /// `1` (usize)
+ /// `@as(usize, 1)`
one_usize,
- /// `1` (u8)
+ /// `@as(u1, 1)`
+ one_u1,
+ /// `@as(u8, 1)`
one_u8,
- /// `4` (u8)
+ /// `@as(u8, 4)`
four_u8,
/// `-1` (comptime_int)
negative_one,
@@ -5074,6 +5085,20 @@ pub const static_keys: [static_len]Key = .{
.{ .simple_type = .undefined },
.{ .simple_type = .enum_literal },
+ // *usize
+ .{ .ptr_type = .{
+ .child = .usize_type,
+ .flags = .{},
+ } },
+
+ // *const comptime_int
+ .{ .ptr_type = .{
+ .child = .comptime_int_type,
+ .flags = .{
+ .is_const = true,
+ },
+ } },
+
// [*]u8
.{ .ptr_type = .{
.child = .u8_type,
@@ -5101,15 +5126,6 @@ pub const static_keys: [static_len]Key = .{
},
} },
- // *const comptime_int
- .{ .ptr_type = .{
- .child = .comptime_int_type,
- .flags = .{
- .size = .one,
- .is_const = true,
- },
- } },
-
// []const u8
.{ .ptr_type = .{
.child = .u8_type,
@@ -5245,6 +5261,9 @@ pub const static_keys: [static_len]Key = .{
} },
.{ .simple_value = .undefined },
+ .{ .undef = .bool_type },
+ .{ .undef = .usize_type },
+ .{ .undef = .u1_type },
.{ .int = .{
.ty = .comptime_int_type,
@@ -5256,6 +5275,11 @@ pub const static_keys: [static_len]Key = .{
.storage = .{ .u64 = 0 },
} },
+ .{ .int = .{
+ .ty = .u1_type,
+ .storage = .{ .u64 = 0 },
+ } },
+
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 0 },
@@ -5271,17 +5295,21 @@ pub const static_keys: [static_len]Key = .{
.storage = .{ .u64 = 1 },
} },
- // one_u8
+ .{ .int = .{
+ .ty = .u1_type,
+ .storage = .{ .u64 = 1 },
+ } },
+
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 1 },
} },
- // four_u8
+
.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = 4 },
} },
- // negative_one
+
.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .i64 = -1 },
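
Every tag added to `Index` above (`undef_bool`, `undef_usize`, `undef_u1`, `zero_u1`, `one_u1`) needs a `static_keys` entry at the same position, because the pool is seeded by interning these keys in declaration order. A toy mirror of that lockstep invariant, with hypothetical names:

const std = @import("std");

const Index = enum(u32) { undef_u1, zero_u1, one_u1 };
const Key = union(enum) { undef, int: u64 };

// The n-th key must describe the n-th `Index` tag.
const static_keys = [_]Key{
    .undef, // .undef_u1
    .{ .int = 0 }, // .zero_u1
    .{ .int = 1 }, // .one_u1
};

comptime {
    std.debug.assert(static_keys.len == @typeInfo(Index).@"enum".fields.len);
}

pub fn main() void {
    std.debug.print("{d} static keys\n", .{static_keys.len});
}
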
@@ -10482,7 +10510,7 @@ pub fn getCoerced(
.base_addr = .int,
.byte_offset = 0,
} }),
- .len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
+ .len = .undef_usize,
} }),
};
},
@@ -10601,7 +10629,7 @@ pub fn getCoerced(
.base_addr = .int,
.byte_offset = 0,
} }),
- .len = try ip.get(gpa, tid, .{ .undef = .usize_type }),
+ .len = .undef_usize,
} }),
},
else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty),
@@ -11847,10 +11875,11 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.null_type,
.undefined_type,
.enum_literal_type,
+ .ptr_usize_type,
+ .ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
- .single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.vector_8_i8_type,
@@ -11909,12 +11938,13 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.undef => .undefined_type,
.zero, .one, .negative_one => .comptime_int_type,
- .zero_usize, .one_usize => .usize_type,
+ .undef_usize, .zero_usize, .one_usize => .usize_type,
+ .undef_u1, .zero_u1, .one_u1 => .u1_type,
.zero_u8, .one_u8, .four_u8 => .u8_type,
.void_value => .void_type,
.unreachable_value => .noreturn_type,
.null_value => .null_type,
- .bool_true, .bool_false => .bool_type,
+ .undef_bool, .bool_true, .bool_false => .bool_type,
.empty_tuple => .empty_tuple_type,
// This optimization on tags is needed so that indexToKey can call
@@ -12186,10 +12216,11 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.undefined_type => .undefined,
.enum_literal_type => .enum_literal,
+ .ptr_usize_type,
+ .ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
- .single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
=> .pointer,
@@ -12251,11 +12282,16 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
// values, not types
.undef => unreachable,
+ .undef_bool => unreachable,
+ .undef_usize => unreachable,
+ .undef_u1 => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
+ .zero_u1 => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
+ .one_u1 => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,
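
The `typeOf` and `zigTypeTag` switches above stay exhaustive over the new tags: well-known value indices map straight to their type indices without materializing a key, while value tags remain `unreachable` in the type-tag switch. A reduced sketch of that fast path's shape (the enum here is a toy, not the real `Index`):

const std = @import("std");

const Index = enum {
    bool_type,
    u1_type,
    undef_bool,
    bool_true,
    bool_false,
    zero_u1,
    one_u1,

    fn typeOf(index: Index) Index {
        return switch (index) {
            .undef_bool, .bool_true, .bool_false => .bool_type,
            .zero_u1, .one_u1 => .u1_type,
            // Types themselves are resolved along a different path.
            .bool_type, .u1_type => unreachable,
        };
    }
};

pub fn main() void {
    std.debug.print("{s}\n", .{@tagName(Index.typeOf(.undef_bool))});
}
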
src/mutable_value.zig
@@ -260,7 +260,7 @@ pub const MutableValue = union(enum) {
const ptr = try arena.create(MutableValue);
const len = try arena.create(MutableValue);
ptr.* = .{ .interned = try pt.intern(.{ .undef = ip.slicePtrType(ty_ip) }) };
- len.* = .{ .interned = try pt.intern(.{ .undef = .usize_type }) };
+ len.* = .{ .interned = .undef_usize };
mv.* = .{ .slice = .{
.ty = ty_ip,
.ptr = ptr,
@@ -464,7 +464,7 @@ pub const MutableValue = union(enum) {
return switch (field_idx) {
Value.slice_ptr_index => .{ .interned = Value.fromInterned(ip_index).slicePtr(pt.zcu).toIntern() },
Value.slice_len_index => .{ .interned = switch (pt.zcu.intern_pool.indexToKey(ip_index)) {
- .undef => try pt.intern(.{ .undef = .usize_type }),
+ .undef => .undef_usize,
.slice => |s| s.len,
else => unreachable,
} },
src/Sema.zig
@@ -1881,7 +1881,7 @@ fn analyzeBodyInner(
extra.data.else_body_len,
);
const uncasted_cond = try sema.resolveInst(extra.data.condition);
- const cond = try sema.coerce(block, Type.bool, uncasted_cond, cond_src);
+ const cond = try sema.coerce(block, .bool, uncasted_cond, cond_src);
const cond_val = try sema.resolveConstDefinedValue(
block,
cond_src,
@@ -2012,7 +2012,7 @@ fn resolveConstBool(
reason: ComptimeReason,
) !bool {
const air_inst = try sema.resolveInst(zir_ref);
- const wanted_type = Type.bool;
+ const wanted_type: Type = .bool;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
return val.toBool();
@@ -2037,7 +2037,7 @@ pub fn toConstString(
reason: ComptimeReason,
) ![]u8 {
const pt = sema.pt;
- const coerced_inst = try sema.coerce(block, Type.slice_const_u8, air_inst, src);
+ const coerced_inst = try sema.coerce(block, .slice_const_u8, air_inst, src);
const slice_val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
const arr_val = try sema.derefSliceAsArray(block, src, slice_val, reason);
return arr_val.toAllocatedBytes(arr_val.typeOf(pt.zcu), sema.arena, pt);
@@ -2051,7 +2051,7 @@ pub fn resolveConstStringIntern(
reason: ComptimeReason,
) !InternPool.NullTerminatedString {
const air_inst = try sema.resolveInst(zir_ref);
- const wanted_type = Type.slice_const_u8;
+ const wanted_type: Type = .slice_const_u8;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, reason);
return sema.sliceToIpString(block, src, val, reason);
@@ -2180,7 +2180,7 @@ fn analyzeAsType(
src: LazySrcLoc,
air_inst: Air.Inst.Ref,
) !Type {
- const wanted_type = Type.type;
+ const wanted_type: Type = .type;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, .{ .simple = .type });
return val.toType();
@@ -2641,7 +2641,7 @@ fn reparentOwnedErrorMsg(
msg.msg = msg_str;
}
-const align_ty = Type.u29;
+const align_ty: Type = .u29;
pub fn analyzeAsAlign(
sema: *Sema,
@@ -2819,7 +2819,7 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const parent_ty = Type.fromInterned(zcu.namespacePtr(block.namespace).owner_type);
+ const parent_ty: Type = .fromInterned(zcu.namespacePtr(block.namespace).owner_type);
const parent_captures: InternPool.CaptureValue.Slice = parent_ty.getCaptures(zcu);
const captures = try sema.arena.alloc(InternPool.CaptureValue, captures_len);
@@ -3777,7 +3777,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const alloc = try sema.resolveInst(inst_data.operand);
const alloc_ty = sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(zcu);
- const elem_ty = Type.fromInterned(ptr_info.child);
+ const elem_ty: Type = .fromInterned(ptr_info.child);
// If the alloc was created in a comptime scope, we already created a comptime alloc for it.
// However, if the final constructed value does not reference comptime-mutable memory, we wish
@@ -3848,7 +3848,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(zcu);
- const elem_ty = Type.fromInterned(ptr_info.child);
+ const elem_ty: Type = .fromInterned(ptr_info.child);
const alloc_inst = alloc.toIndex() orelse return null;
const comptime_info = sema.maybe_comptime_allocs.fetchRemove(alloc_inst) orelse return null;
@@ -4024,9 +4024,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
// As this is a union field, we must store to the pointer now to set the tag.
        // If the payload is OPV (has only one possible value), there will not be a payload store, so we store that value.
// Otherwise, there will be a payload store to process later, so undef will suffice.
- const payload_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
+ const payload_ty: Type = .fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty);
- const tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), idx);
+ const tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), idx);
const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val);
try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
}
@@ -4050,7 +4050,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const air_ptr_inst = store_inst.data.bin_op.lhs.toIndex().?;
const store_val = (try sema.resolveValue(store_inst.data.bin_op.rhs)).?;
const new_ptr = ptr_mapping.get(air_ptr_inst).?;
- try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(zcu.intern_pool.typeOf(store_val.toIntern())));
+ try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(new_ptr), store_val, .fromInterned(zcu.intern_pool.typeOf(store_val.toIntern())));
},
else => unreachable,
}
@@ -4284,7 +4284,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
else => unreachable,
};
if (zcu.intern_pool.isFuncBody(val)) {
- const ty = Type.fromInterned(zcu.intern_pool.typeOf(val));
+ const ty: Type = .fromInterned(zcu.intern_pool.typeOf(val));
if (try ty.fnHasRuntimeBitsSema(pt)) {
try sema.addReferenceEntry(block, src, AnalUnit.wrap(.{ .func = val }));
try zcu.ensureFuncBodyAnalysisQueued(val);
@@ -4447,14 +4447,14 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const range_end = try sema.resolveInst(zir_arg_pair[1]);
break :l try sema.analyzeArithmetic(block, .sub, range_end, range_start, arg_src, arg_src, arg_src, true);
};
- const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src);
+ const arg_len = try sema.coerce(block, .usize, arg_len_uncoerced, arg_src);
if (len == .none) {
len = arg_len;
len_idx = i;
}
if (try sema.resolveDefinedValue(block, src, arg_len)) |arg_val| {
if (len_val) |v| {
- if (!(try sema.valuesEqual(arg_val, v, Type.usize))) {
+ if (!(try sema.valuesEqual(arg_val, v, .usize))) {
const msg = msg: {
const msg = try sema.errMsg(src, "non-matching for loop lengths", .{});
errdefer msg.destroy(gpa);
@@ -5343,7 +5343,7 @@ fn zirValidatePtrArrayInit(
// sentinel-terminated array, the sentinel will not have been populated by
// any ZIR instructions at comptime; we need to do that here.
if (array_ty.sentinel(zcu)) |sentinel_val| {
- const array_len_ref = try pt.intRef(Type.usize, array_len);
+ const array_len_ref = try pt.intRef(.usize, array_len);
const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
const sentinel = Air.internedToRef(sentinel_val.toIntern());
try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
@@ -5828,7 +5828,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
defer tracy.end();
const int = sema.code.instructions.items(.data)[@intFromEnum(inst)].int;
- return sema.pt.intRef(Type.comptime_int, int);
+ return sema.pt.intRef(.comptime_int, int);
}
fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5846,7 +5846,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const limbs = try sema.arena.alloc(std.math.big.Limb, int.len);
@memcpy(mem.sliceAsBytes(limbs), limb_bytes);
- return Air.internedToRef((try sema.pt.intValue_big(Type.comptime_int, .{
+ return Air.internedToRef((try sema.pt.intValue_big(.comptime_int, .{
.limbs = limbs,
.positive = true,
})).toIntern());
@@ -5856,7 +5856,7 @@ fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
_ = block;
const number = sema.code.instructions.items(.data)[@intFromEnum(inst)].float;
return Air.internedToRef((try sema.pt.floatValue(
- Type.comptime_float,
+ .comptime_float,
number,
)).toIntern());
}
@@ -5866,7 +5866,7 @@ fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
const number = extra.get();
- return Air.internedToRef((try sema.pt.floatValue(Type.comptime_float, number)).toIntern());
+ return Air.internedToRef((try sema.pt.floatValue(.comptime_float, number)).toIntern());
}
fn zirCompileError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -6641,7 +6641,7 @@ pub fn analyzeExport(
};
const exported_nav = ip.getNav(exported_nav_index);
- const export_ty = Type.fromInterned(exported_nav.typeOf(ip));
+ const export_ty: Type = .fromInterned(exported_nav.typeOf(ip));
if (!try sema.validateExternType(export_ty, .other)) {
return sema.failWithOwnedErrorMsg(block, msg: {
@@ -7005,7 +7005,7 @@ fn lookupInNamespace(
for (usingnamespaces.items) |sub_ns_nav| {
try sema.ensureNavResolved(block, src, sub_ns_nav, .fully);
- const sub_ns_ty = Type.fromInterned(ip.getNav(sub_ns_nav).status.fully_resolved.val);
+ const sub_ns_ty: Type = .fromInterned(ip.getNav(sub_ns_nav).status.fully_resolved.val);
const sub_ns = zcu.namespacePtr(sub_ns_ty.getNamespaceIndex(zcu));
try checked_namespaces.put(gpa, sub_ns, {});
}
@@ -7081,7 +7081,7 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
const gpa = sema.gpa;
if (block.isComptime() or block.is_typeof) {
- const index_val = try pt.intValue_u64(Type.usize, sema.comptime_err_ret_trace.items.len);
+ const index_val = try pt.intValue_u64(.usize, sema.comptime_err_ret_trace.items.len);
return Air.internedToRef(index_val.toIntern());
}
@@ -7326,13 +7326,13 @@ fn checkCallArgumentCount(
) !Type {
const pt = sema.pt;
const zcu = pt.zcu;
- const func_ty = func_ty: {
+ const func_ty: Type = func_ty: {
switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => break :func_ty callee_ty,
.pointer => {
const ptr_info = callee_ty.ptrInfo(zcu);
if (ptr_info.flags.size == .one and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") {
- break :func_ty Type.fromInterned(ptr_info.child);
+ break :func_ty .fromInterned(ptr_info.child);
}
},
.optional => {
@@ -7405,13 +7405,13 @@ fn callBuiltin(
const pt = sema.pt;
const zcu = pt.zcu;
const callee_ty = sema.typeOf(builtin_fn);
- const func_ty = func_ty: {
+ const func_ty: Type = func_ty: {
switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => break :func_ty callee_ty,
.pointer => {
const ptr_info = callee_ty.ptrInfo(zcu);
if (ptr_info.flags.size == .one and Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .@"fn") {
- break :func_ty Type.fromInterned(ptr_info.child);
+ break :func_ty .fromInterned(ptr_info.child);
}
},
else => {},
@@ -7568,7 +7568,7 @@ const CallArgsInfo = union(enum) {
}
}
// Give the arg its result type
- const provide_param_ty = if (maybe_param_ty) |t| t else Type.generic_poison;
+ const provide_param_ty: Type = maybe_param_ty orelse .generic_poison;
sema.inst_map.putAssumeCapacity(zir_call.call_inst, Air.internedToRef(provide_param_ty.toIntern()));
// Resolve the arg!
const uncoerced_arg = try sema.resolveInlineBody(block, arg_body, zir_call.call_inst);
@@ -8353,7 +8353,7 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ
@tagName(backend), @tagName(target.cpu.arch),
});
}
- const owner_func_ty = Type.fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty);
+ const owner_func_ty: Type = .fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty);
if (owner_func_ty.toIntern() != func_ty.toIntern()) {
return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
func_ty.fmt(pt), owner_func_ty.fmt(pt),
@@ -8452,7 +8452,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const len_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const elem_type_src = block.builtinCallArgSrc(inst_data.src_node, 1);
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
- const len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, Type.u32, .{ .simple = .vector_length }));
+ const len: u32 = @intCast(try sema.resolveInt(block, len_src, extra.lhs, .u32, .{ .simple = .vector_length }));
const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs);
try sema.checkVectorElemType(block, elem_type_src, elem_type);
const vector_type = try sema.pt.vectorType(.{
@@ -8470,7 +8470,7 @@ fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node });
const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node });
- const len = try sema.resolveInt(block, len_src, extra.lhs, Type.usize, .{ .simple = .array_length });
+ const len = try sema.resolveInt(block, len_src, extra.lhs, .usize, .{ .simple = .array_length });
const elem_type = try sema.resolveType(block, elem_src, extra.rhs);
try sema.validateArrayElemType(block, elem_type, elem_src);
const array_ty = try sema.pt.arrayType(.{
@@ -8490,7 +8490,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
const len_src = block.src(.{ .node_offset_array_type_len = inst_data.src_node });
const sentinel_src = block.src(.{ .node_offset_array_type_sentinel = inst_data.src_node });
const elem_src = block.src(.{ .node_offset_array_type_elem = inst_data.src_node });
- const len = try sema.resolveInt(block, len_src, extra.len, Type.usize, .{ .simple = .array_length });
+ const len = try sema.resolveInt(block, len_src, extra.len, .usize, .{ .simple = .array_length });
const elem_type = try sema.resolveType(block, elem_src, extra.elem_type);
try sema.validateArrayElemType(block, elem_type, elem_src);
const uncasted_sentinel = try sema.resolveInst(extra.sentinel);
@@ -8599,7 +8599,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const src = block.nodeOffset(extra.node);
const operand_src = block.builtinCallArgSrc(extra.node, 0);
const uncasted_operand = try sema.resolveInst(extra.operand);
- const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src);
+ const operand = try sema.coerce(block, .anyerror, uncasted_operand, operand_src);
const err_int_ty = try pt.errorIntType();
if (try sema.resolveValue(operand)) |val| {
@@ -9309,7 +9309,7 @@ fn zirFunc(
const ret_ty: Type = if (extra.data.ret_ty.is_generic)
.generic_poison
else switch (extra.data.ret_ty.body_len) {
- 0 => Type.void,
+ 0 => .void,
1 => blk: {
const ret_ty_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
@@ -9319,7 +9319,7 @@ fn zirFunc(
const ret_ty_body = sema.code.bodySlice(extra_index, extra.data.ret_ty.body_len);
extra_index += ret_ty_body.len;
- const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, .{ .simple = .function_ret_ty });
+ const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, .type, .{ .simple = .function_ret_ty });
break :blk ret_ty_val.toType();
},
};
@@ -9649,7 +9649,7 @@ fn funcCommon(
var comptime_bits: u32 = 0;
for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
- const param_ty = Type.fromInterned(param_ty_ip);
+ const param_ty: Type = .fromInterned(param_ty_ip);
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
@@ -9870,7 +9870,7 @@ fn finishFunc(
const return_type: Type = if (opt_func_index == .none or ret_poison)
bare_return_type
else
- Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));
+ .fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));
if (!return_type.isValidReturnType(zcu)) {
const opaque_str = if (return_type.zigTypeTag(zcu) == .@"opaque") "opaque " else "";
@@ -10130,14 +10130,14 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (try sema.resolveValue(operand)) |operand_val| ct: {
if (!is_vector) {
if (operand_val.isUndef(zcu)) {
- return Air.internedToRef((try pt.undefValue(Type.usize)).toIntern());
+ return .undef_usize;
}
const addr = try operand_val.getUnsignedIntSema(pt) orelse {
// Wasn't an integer pointer. This is a runtime operation.
break :ct;
};
return Air.internedToRef((try pt.intValue(
- Type.usize,
+ .usize,
addr,
)).toIntern());
}
@@ -10145,7 +10145,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
for (new_elems, 0..) |*new_elem, i| {
const ptr_val = try operand_val.elemValue(pt, i);
if (ptr_val.isUndef(zcu)) {
- new_elem.* = (try pt.undefValue(Type.usize)).toIntern();
+ new_elem.* = .undef_usize;
continue;
}
const addr = try ptr_val.getUnsignedIntSema(pt) orelse {
@@ -10153,7 +10153,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
break :ct;
};
new_elem.* = (try pt.intValue(
- Type.usize,
+ .usize,
addr,
)).toIntern();
}
@@ -10170,7 +10170,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addBitCast(.usize, old_elem);
}
@@ -10646,7 +10646,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const vec_len = operand_ty.vectorLen(zcu);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, vec_len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addTyOp(.fptrunc, dest_scalar_ty, old_elem);
}
@@ -10675,7 +10675,7 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
- const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src);
+ const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src);
return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
}
@@ -10685,7 +10685,7 @@ fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].elem_val_imm;
const array = try sema.resolveInst(inst_data.operand);
- const elem_index = try sema.pt.intRef(Type.usize, inst_data.idx);
+ const elem_index = try sema.pt.intRef(.usize, inst_data.idx);
return sema.elemVal(block, LazySrcLoc.unneeded, array, elem_index, LazySrcLoc.unneeded, false);
}
@@ -10728,7 +10728,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
- const elem_index = try sema.coerce(block, Type.usize, uncoerced_elem_index, elem_index_src);
+ const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src);
return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src, false, true);
}
@@ -10742,7 +10742,7 @@ fn zirArrayInitElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
const src = block.nodeOffset(inst_data.src_node);
const extra = sema.code.extraData(Zir.Inst.ElemPtrImm, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.ptr);
- const elem_index = try pt.intRef(Type.usize, extra.index);
+ const elem_index = try pt.intRef(.usize, extra.index);
const array_ty = sema.typeOf(array_ptr).childType(zcu);
switch (array_ty.zigTypeTag(zcu)) {
.array, .vector => {},
@@ -11104,7 +11104,7 @@ const SwitchProngAnalysis = struct {
if (operand_ty.zigTypeTag(zcu) == .@"union") {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?);
const union_obj = zcu.typeToUnion(operand_ty).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
if (capture_byref) {
const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
@@ -11154,7 +11154,7 @@ const SwitchProngAnalysis = struct {
const first_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable;
const first_field_index: u32 = zcu.unionTagFieldIndex(union_obj, first_item_val).?;
- const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_field_index]);
+ const first_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_field_index]);
const field_indices = try sema.arena.alloc(u32, case_vals.len);
for (case_vals, field_indices) |item, *field_idx| {
@@ -11165,7 +11165,7 @@ const SwitchProngAnalysis = struct {
// Fast path: if all the operands are the same type already, we don't need to hit
        // PTR (peer type resolution)! This will also allow us to emit simpler code.
const same_types = for (field_indices[1..]) |field_idx| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (!field_ty.eql(first_field_ty, zcu)) break false;
} else true;
@@ -11173,7 +11173,7 @@ const SwitchProngAnalysis = struct {
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (dummy_captures, field_indices) |*dummy, field_idx| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
dummy.* = try pt.undefRef(field_ty);
}
@@ -11208,7 +11208,7 @@ const SwitchProngAnalysis = struct {
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (field_indices, dummy_captures) |field_idx, *dummy| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const field_ptr_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
@@ -11271,7 +11271,7 @@ const SwitchProngAnalysis = struct {
// If we can, try to avoid that using in-memory coercions.
const first_non_imc = in_mem: {
for (field_indices, 0..) |field_idx, i| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) {
break :in_mem i;
}
@@ -11294,7 +11294,7 @@ const SwitchProngAnalysis = struct {
{
const next = first_non_imc + 1;
for (field_indices[next..], next..) |field_idx, i| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) {
in_mem_coercible.unset(i);
}
@@ -11341,7 +11341,7 @@ const SwitchProngAnalysis = struct {
};
const field_idx = field_indices[idx];
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, field_idx, field_ty);
const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
_ = try coerce_block.addBr(capture_block_inst, coerced);
@@ -11365,7 +11365,7 @@ const SwitchProngAnalysis = struct {
const first_imc_item_idx = in_mem_coercible.findFirstSet().?;
const first_imc_field_idx = field_indices[first_imc_item_idx];
- const first_imc_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
+ const first_imc_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, first_imc_field_idx, first_imc_field_ty);
const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
_ = try coerce_block.addBr(capture_block_inst, coerced);
@@ -13165,7 +13165,7 @@ fn analyzeSwitchRuntimeBlock(
for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = zcu.typeToUnion(maybe_union_ty).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[index]);
if (field_ty.zigTypeTag(zcu) != .noreturn) break true;
} else false
else
@@ -13490,7 +13490,7 @@ const RangeSetUnhandledIterator = struct {
inline .u64, .i64 => |val_int| {
const next_int = @addWithOverflow(val_int, 1);
if (next_int[1] == 0)
- return (try it.pt.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern();
+ return (try it.pt.intValue(.fromInterned(int.ty), next_int[0])).toIntern();
},
.big_int => {},
.lazy_align, .lazy_size => unreachable,
@@ -13506,7 +13506,7 @@ const RangeSetUnhandledIterator = struct {
);
result_bigint.addScalar(val_bigint, 1);
- return (try it.pt.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern();
+ return (try it.pt.intValue_big(.fromInterned(int.ty), result_bigint.toConst())).toIntern();
}
fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
@@ -13636,7 +13636,7 @@ fn validateErrSetSwitch(
.{},
);
}
- return Type.anyerror;
+ return .anyerror;
},
else => |err_set_ty_index| else_validation: {
const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
@@ -13839,7 +13839,7 @@ fn validateSwitchItemBool(
item_ref: Zir.Inst.Ref,
item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const item = try sema.resolveSwitchItemVal(block, item_ref, Type.bool, item_src);
+ const item = try sema.resolveSwitchItemVal(block, item_ref, .bool, item_src);
if (Value.fromInterned(item.val).toBool()) {
true_count.* += 1;
} else {
@@ -14224,7 +14224,7 @@ fn zirShl(
return lhs;
}
if (air_tag != .shl_sat and scalar_ty.zigTypeTag(zcu) != .comptime_int) {
- const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
+ const bit_value = try pt.intValue(.comptime_int, scalar_ty.intInfo(zcu).bits);
if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
@@ -14351,8 +14351,7 @@ fn zirShl(
try block.addReduce(ov_bit, .Or)
else
ov_bit;
- const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
- const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
+ const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, .zero_u1);
try sema.addSafetyCheck(block, src, no_ov, .shl_overflow);
return sema.tupleFieldValByIndex(block, op_ov, 0, op_ov_tuple_ty);
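
The check being simplified here is the shift-overflow safety check: Sema emits a shift-with-overflow, ORs the overflow bits for vectors, and compares against zero, now via the interned `.zero_u1` rather than a freshly built `u1` constant. The scalar check, restated in userland terms (`checkedShl` is illustrative):

const std = @import("std");

fn checkedShl(x: u8, amt: u3) !u8 {
    const res = @shlWithOverflow(x, amt);
    if (res[1] != 0) return error.Overflow; // res[1] is the u1 overflow bit
    return res[0];
}

pub fn main() void {
    std.debug.print("{d}\n", .{checkedShl(3, 2) catch 0}); // 12
    std.debug.print("{d}\n", .{checkedShl(255, 1) catch 0}); // overflows, prints 0
}
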
@@ -14406,7 +14405,7 @@ fn zirShr(
return lhs;
}
if (scalar_ty.zigTypeTag(zcu) != .comptime_int) {
- const bit_value = try pt.intValue(Type.comptime_int, scalar_ty.intInfo(zcu).bits);
+ const bit_value = try pt.intValue(.comptime_int, scalar_ty.intInfo(zcu).bits);
if (rhs_ty.zigTypeTag(zcu) == .vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen(zcu)) : (i += 1) {
@@ -14689,7 +14688,7 @@ fn analyzeTupleCat(
try sema.tupleFieldValByIndex(block, rhs, i, rhs_ty);
}
- return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(tuple_ty), element_refs);
}
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -14716,7 +14715,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_src = block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
- if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
+ if (lhs_is_tuple) break :lhs_info undefined;
return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(pt)});
};
const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
@@ -14892,7 +14891,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// lhs_dest_slice = dest[0..lhs.len]
const slice_ty_ref = Air.internedToRef(slice_ty.toIntern());
- const lhs_len_ref = try pt.intRef(Type.usize, lhs_len);
+ const lhs_len_ref = try pt.intRef(.usize, lhs_len);
const lhs_dest_slice = try block.addInst(.{
.tag = .slice,
.data = .{ .ty_pl = .{
@@ -14907,7 +14906,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
_ = try block.addBinOp(.memcpy, lhs_dest_slice, lhs);
// rhs_dest_slice = dest[lhs.len..][0..rhs.len]
- const rhs_len_ref = try pt.intRef(Type.usize, rhs_len);
+ const rhs_len_ref = try pt.intRef(.usize, rhs_len);
const rhs_dest_offset = try block.addInst(.{
.tag = .ptr_add,
.data = .{ .ty_pl = .{
@@ -14932,7 +14931,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
_ = try block.addBinOp(.memcpy, rhs_dest_slice, rhs);
if (res_sent_val) |sent_val| {
- const elem_index = try pt.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
@@ -14943,7 +14942,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var elem_i: u32 = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
- const elem_index = try pt.intRef(Type.usize, elem_i);
+ const elem_index = try pt.intRef(.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const operand_src = block.src(.{ .array_cat_lhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14954,8 +14953,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
- const elem_index = try pt.intRef(Type.usize, elem_i);
- const rhs_index = try pt.intRef(Type.usize, rhs_elem_i);
+ const elem_index = try pt.intRef(.usize, elem_i);
+ const rhs_index = try pt.intRef(.usize, rhs_elem_i);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const operand_src = block.src(.{ .array_cat_rhs = .{
.array_cat_offset = inst_data.src_node,
@@ -14965,7 +14964,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.storePtr2(block, src, elem_ptr, src, init, operand_src, .store);
}
if (res_sent_val) |sent_val| {
- const elem_index = try pt.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(mutable_alloc, elem_index, elem_ptr_ty);
const init = Air.internedToRef((try pt.getCoerced(sent_val, lhs_info.elem_type)).toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
@@ -14978,7 +14977,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
{
var elem_i: u32 = 0;
while (elem_i < lhs_len) : (elem_i += 1) {
- const index = try pt.intRef(Type.usize, elem_i);
+ const index = try pt.intRef(.usize, elem_i);
const operand_src = block.src(.{ .array_cat_lhs = .{
.array_cat_offset = inst_data.src_node,
.elem_index = elem_i,
@@ -14988,7 +14987,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
while (elem_i < result_len) : (elem_i += 1) {
const rhs_elem_i = elem_i - lhs_len;
- const index = try pt.intRef(Type.usize, rhs_elem_i);
+ const index = try pt.intRef(.usize, rhs_elem_i);
const operand_src = block.src(.{ .array_cat_rhs = .{
.array_cat_offset = inst_data.src_node,
.elem_index = @intCast(rhs_elem_i),
@@ -15012,8 +15011,8 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
switch (ptr_info.flags.size) {
.slice => {
const val = try sema.resolveConstDefinedValue(block, src, operand, .{ .simple = .slice_cat_operand });
- return Type.ArrayInfo{
- .elem_type = Type.fromInterned(ptr_info.child),
+ return .{
+ .elem_type = .fromInterned(ptr_info.child),
.sentinel = switch (ptr_info.sentinel) {
.none => null,
else => Value.fromInterned(ptr_info.sentinel),
@@ -15113,7 +15112,7 @@ fn analyzeTupleMul(
@memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
}
- return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(tuple_ty), element_refs);
}
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -15166,7 +15165,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (lhs_ty.isTuple(zcu)) {
// In `**` rhs must be comptime-known, but lhs can be runtime-known
- const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .simple = .array_mul_factor });
+ const factor = try sema.resolveInt(block, rhs_src, extra.rhs, .usize, .{ .simple = .array_mul_factor });
const factor_casted = try sema.usizeCast(block, rhs_src, factor);
return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted);
}
@@ -15188,7 +15187,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
// In `**` rhs must be comptime-known, but lhs can be runtime-known
- const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, .{ .simple = .array_mul_factor });
+ const factor = try sema.resolveInt(block, rhs_src, extra.rhs, .usize, .{ .simple = .array_mul_factor });
const result_len_u64 = std.math.mul(u64, lhs_info.len, factor) catch
return sema.fail(block, rhs_src, "operation results in overflow", .{});
@@ -15246,7 +15245,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// to get the same elem values.
const lhs_vals = try sema.arena.alloc(Air.Inst.Ref, lhs_len);
for (lhs_vals, 0..) |*lhs_val, idx| {
- const idx_ref = try pt.intRef(Type.usize, idx);
+ const idx_ref = try pt.intRef(.usize, idx);
lhs_val.* = try sema.elemVal(block, lhs_src, lhs, idx_ref, src, false);
}
@@ -15267,14 +15266,14 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var elem_i: usize = 0;
while (elem_i < result_len) {
for (lhs_vals) |lhs_val| {
- const elem_index = try pt.intRef(Type.usize, elem_i);
+ const elem_index = try pt.intRef(.usize, elem_i);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
try sema.storePtr2(block, src, elem_ptr, src, lhs_val, lhs_src, .store);
elem_i += 1;
}
}
if (lhs_info.sentinel) |sent_val| {
- const elem_index = try pt.intRef(Type.usize, result_len);
+ const elem_index = try pt.intRef(.usize, result_len);
const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
const init = Air.internedToRef(sent_val.toIntern());
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
@@ -16132,14 +16131,13 @@ fn zirOverflowArithmetic(
const maybe_rhs_val = try sema.resolveValue(rhs);
const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
- const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).tuple_type.types.get(ip)[1]);
+ const overflow_ty: Type = .fromInterned(ip.indexToKey(tuple_ty.toIntern()).tuple_type.types.get(ip)[1]);
var result: struct {
inst: Air.Inst.Ref = .none,
wrapped: Value = Value.@"unreachable",
overflow_bit: Value,
} = result: {
- const zero_bit = try pt.intValue(Type.u1, 0);
switch (zir_tag) {
.add_with_overflow => {
// If either of the arguments is zero, `false` is returned and the other is stored
@@ -16147,12 +16145,12 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the argument is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -16173,7 +16171,7 @@ fn zirOverflowArithmetic(
if (rhs_val.isUndef(zcu)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(zcu)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
@@ -16192,9 +16190,9 @@ fn zirOverflowArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(zcu)) {
if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
} else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
}
}
}
@@ -16202,9 +16200,9 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(zcu)) {
if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
} else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
}
@@ -16226,12 +16224,12 @@ fn zirOverflowArithmetic(
        // Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(zcu) and (try lhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(zcu) and (try rhs_val.compareAllWithZeroSema(.eq, pt))) {
- break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -16309,10 +16307,10 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const ov_ty = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
+ const ov_ty: Type = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
.len = ty.vectorLen(zcu),
.child = .u1_type,
- }) else Type.u1;
+ }) else .u1;
const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
const values = [2]InternPool.Index{ .none, .none };
@@ -16320,7 +16318,7 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
.types = &types,
.values = &values,
});
- return Type.fromInterned(tuple_ty);
+ return .fromInterned(tuple_ty);
}
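
`overflowArithmeticTupleType` builds the result type of the `@*WithOverflow` builtins: the operand type paired with a `u1`, vectorized when the operand is a vector. Observable from userland:

const std = @import("std");

pub fn main() void {
    // Scalar: a tuple of { u8, u1 }, the wrapped result plus the overflow bit.
    const scalar = @addWithOverflow(@as(u8, 200), @as(u8, 100));
    std.debug.print("{d} {d}\n", .{ scalar[0], scalar[1] }); // 44 1

    // Vector: a tuple of { @Vector(2, u8), @Vector(2, u1) }.
    const vec = @addWithOverflow(
        @as(@Vector(2, u8), .{ 200, 1 }),
        @as(@Vector(2, u8), .{ 100, 1 }),
    );
    const wrapped: [2]u8 = vec[0];
    const bits: [2]u1 = vec[1];
    std.debug.print("{any} {any}\n", .{ wrapped, bits }); // { 44, 2 } { 1, 0 }
}
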
fn analyzeArithmetic(
@@ -16380,7 +16378,7 @@ fn analyzeArithmetic(
const address = std.math.sub(u64, lhs_ptr.byte_offset, rhs_ptr.byte_offset) catch
return sema.fail(block, src, "operation results in overflow", .{});
const result = address / elem_size;
- return try pt.intRef(Type.usize, result);
+ return try pt.intRef(.usize, result);
} else {
break :runtime_src lhs_src;
}
@@ -16395,7 +16393,7 @@ fn analyzeArithmetic(
const lhs_int = try block.addBitCast(.usize, lhs);
const rhs_int = try block.addBitCast(.usize, rhs);
const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int);
- return try block.addBinOp(.div_exact, address, try pt.intRef(Type.usize, elem_size));
+ return try block.addBinOp(.div_exact, address, try pt.intRef(.usize, elem_size));
}
} else {
switch (lhs_ty.ptrSize(zcu)) {
@@ -16527,8 +16525,7 @@ fn analyzeArithmetic(
try block.addReduce(ov_bit, .Or)
else
ov_bit;
- const zero_ov = Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
- const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);
+ const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, .zero_u1);
try sema.addSafetyCheck(block, src, no_ov, .integer_overflow);
return sema.tupleFieldValByIndex(block, op_ov, 0, op_ov_tuple_ty);
@@ -16550,7 +16547,7 @@ fn analyzePtrArithmetic(
) CompileError!Air.Inst.Ref {
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
- const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
+ const offset = try sema.coerce(block, .usize, uncasted_offset, offset_src);
const pt = sema.pt;
const zcu = pt.zcu;
const opt_ptr_val = try sema.resolveValue(ptr);
@@ -16736,8 +16733,8 @@ fn zirAsm(
const uncasted_arg = try sema.resolveInst(input.data.operand);
const uncasted_arg_ty = sema.typeOf(uncasted_arg);
switch (uncasted_arg_ty.zigTypeTag(zcu)) {
- .comptime_int => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src),
- .comptime_float => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
+ .comptime_int => arg.* = try sema.coerce(block, .usize, uncasted_arg, src),
+ .comptime_float => arg.* = try sema.coerce(block, .f64, uncasted_arg, src),
else => {
arg.* = uncasted_arg;
},
@@ -16860,9 +16857,7 @@ fn zirCmpEq(
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveValue(lhs)) |lval| {
if (try sema.resolveValue(rhs)) |rval| {
- if (lval.isUndef(zcu) or rval.isUndef(zcu)) {
- return pt.undefRef(Type.bool);
- }
+ if (lval.isUndef(zcu) or rval.isUndef(zcu)) return .undef_bool;
const lkey = zcu.intern_pool.indexToKey(lval.toIntern());
const rkey = zcu.intern_pool.indexToKey(rval.toIntern());
return if ((lkey.err.name == rkey.err.name) == (op == .eq))
@@ -16916,7 +16911,7 @@ fn analyzeCmpUnionTag(
const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src);
if (try sema.resolveValue(coerced_tag)) |enum_val| {
- if (enum_val.isUndef(zcu)) return pt.undefRef(Type.bool);
+ if (enum_val.isUndef(zcu)) return .undef_bool;
const field_ty = union_ty.unionFieldType(enum_val, zcu).?;
if (field_ty.zigTypeTag(zcu) == .noreturn) {
return .bool_false;
@@ -17027,8 +17022,8 @@ fn cmpSelf(
const maybe_lhs_val = try sema.resolveValue(casted_lhs);
const maybe_rhs_val = try sema.resolveValue(casted_rhs);
- if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
- if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
+ if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
+ if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
const runtime_src: LazySrcLoc = src: {
if (maybe_lhs_val) |lhs_val| {
@@ -17083,7 +17078,7 @@ fn runtimeBoolCmp(
) CompileError!Air.Inst.Ref {
if ((op == .neq) == rhs) {
try sema.requireRuntimeBlock(block, src, runtime_src);
- return block.addTyOp(.not, Type.bool, lhs);
+ return block.addTyOp(.not, .bool, lhs);
} else {
return lhs;
}
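
`runtimeBoolCmp` above encodes a small fold: comparing a runtime `bool` against a comptime-known `bool` yields either the operand itself or a single `not`. In source terms:

const std = @import("std");

fn cmpEqTrue(x: bool) bool {
    return x == true; // folds to `x` itself
}

fn cmpNeqTrue(x: bool) bool {
    return x != true; // folds to a single `not` of `x`
}

pub fn main() void {
    std.debug.print("{} {}\n", .{ cmpEqTrue(true), cmpNeqTrue(true) });
}
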
@@ -17107,7 +17102,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.comptime_float,
.comptime_int,
.void,
- => return pt.intRef(Type.comptime_int, 0),
+ => return .zero,
.bool,
.int,
@@ -17148,7 +17143,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.comptime_float,
.comptime_int,
.void,
- => return pt.intRef(Type.comptime_int, 0),
+ => return .zero,
.bool,
.int,
@@ -17167,7 +17162,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
=> {},
}
const bit_size = try operand_ty.bitSizeSema(pt);
- return pt.intRef(Type.comptime_int, bit_size);
+ return pt.intRef(.comptime_int, bit_size);
}
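
Both `zirSizeOf` and `zirBitSizeOf` now return the interned `.zero` for comptime-only and zero-bit types instead of constructing a fresh `comptime_int` zero each time. The invariant they implement is checkable from userland:

const std = @import("std");

comptime {
    std.debug.assert(@sizeOf(void) == 0);
    std.debug.assert(@bitSizeOf(void) == 0);
    std.debug.assert(@sizeOf(comptime_int) == 0); // comptime-only types have no runtime size
}

pub fn main() void {
    std.debug.print("zero-size asserts passed at comptime\n", .{});
}
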
fn zirThis(
@@ -17285,7 +17280,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
assert(block.is_typeof);
// We need a dummy runtime instruction with the correct type.
- return block.addTy(.alloc, Type.fromInterned(capture_ty));
+ return block.addTy(.alloc, .fromInterned(capture_ty));
}
fn zirRetAddr(
@@ -17293,10 +17288,11 @@ fn zirRetAddr(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ _ = sema;
_ = extended;
if (block.isComptime()) {
// TODO: we could give a meaningful lazy value here. #14938
- return sema.pt.intRef(Type.usize, 0);
+ return .zero_usize;
} else {
return block.addNoOp(.ret_addr);
}
@@ -17349,7 +17345,7 @@ fn zirBuiltinSrc(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, func_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, func_name_len)).toIntern(),
} });
};
@@ -17375,7 +17371,7 @@ fn zirBuiltinSrc(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, module_name.len)).toIntern(),
+ .len = (try pt.intValue(.usize, module_name.len)).toIntern(),
} });
};
@@ -17401,7 +17397,7 @@ fn zirBuiltinSrc(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, file_name.len)).toIntern(),
+ .len = (try pt.intValue(.usize, file_name.len)).toIntern(),
} });
};
@@ -17414,9 +17410,9 @@ fn zirBuiltinSrc(
// fn_name: [:0]const u8,
func_name_val,
// line: u32,
- (try pt.intValue(Type.u32, extra.line + 1)).toIntern(),
+ (try pt.intValue(.u32, extra.line + 1)).toIntern(),
// column: u32,
- (try pt.intValue(Type.u32, extra.column + 1)).toIntern(),
+ (try pt.intValue(.u32, extra.column + 1)).toIntern(),
};
return Air.internedToRef((try pt.intern(.{ .aggregate = .{
.ty = src_loc_ty.toIntern(),
@@ -17511,7 +17507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, param_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, param_vals.len)).toIntern(),
} });
};
@@ -17564,7 +17560,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// signedness: Signedness,
(try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(),
// bits: u16,
- (try pt.intValue(Type.u16, info.bits)).toIntern(),
+ (try pt.intValue(.u16, info.bits)).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -17580,7 +17576,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_vals = .{
// bits: u16,
- (try pt.intValue(Type.u16, ty.bitSize(zcu))).toIntern(),
+ (try pt.intValue(.u16, ty.bitSize(zcu))).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -17594,7 +17590,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.pointer => {
const info = ty.ptrInfo(zcu);
const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
- try pt.intValue(Type.comptime_int, alignment)
+ try pt.intValue(.comptime_int, alignment)
else
try Type.fromInterned(info.child).lazyAbiAlignment(pt);
@@ -17638,7 +17634,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const info = ty.arrayInfo(zcu);
const field_values = .{
// len: comptime_int,
- (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
+ (try pt.intValue(.comptime_int, info.len)).toIntern(),
// child: type,
info.elem_type.toIntern(),
// sentinel: ?*const anyopaque,
@@ -17659,7 +17655,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const info = ty.arrayInfo(zcu);
const field_values = .{
// len: comptime_int,
- (try pt.intValue(Type.comptime_int, info.len)).toIntern(),
+ (try pt.intValue(.comptime_int, info.len)).toIntern(),
// child: type,
info.elem_type.toIntern(),
};
@@ -17723,7 +17719,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, error_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, error_name_len)).toIntern(),
} });
};
@@ -17770,7 +17766,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, vals.len)).toIntern(),
} });
} else .none;
const errors_val = try pt.intern(.{ .opt = .{
@@ -17819,7 +17815,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.comptime_int_type,
)
else
- (try pt.intValue(Type.comptime_int, tag_index)).toIntern();
+ (try pt.intValue(.comptime_int, tag_index)).toIntern();
// TODO: write something like getCoercedInts to avoid needing to dupe
const name_val = v: {
@@ -17844,7 +17840,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, tag_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, tag_name_len)).toIntern(),
} });
};
@@ -17887,7 +17883,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, enum_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, enum_field_vals.len)).toIntern(),
} });
};
@@ -17949,7 +17945,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, field_name_len)).toIntern(),
} });
};
@@ -17965,7 +17961,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// type: type,
field_ty,
// alignment: comptime_int,
- (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try pt.intern(.{ .aggregate = .{
.ty = union_field_ty.toIntern(),
@@ -18000,7 +17996,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, union_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, union_field_vals.len)).toIntern(),
} });
};
@@ -18070,7 +18066,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, field_name_len)).toIntern(),
} });
};
@@ -18089,7 +18085,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime).toIntern(),
// alignment: comptime_int,
- (try pt.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(zcu).toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(.comptime_int, Type.fromInterned(field_ty).abiAlignment(zcu).toByteUnits() orelse 0)).toIntern(),
};
struct_field_val.* = try pt.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -18111,7 +18107,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
const field_name_len = field_name.length(ip);
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
const field_init = struct_type.fieldInit(ip, field_index);
const field_is_comptime = struct_type.fieldIsComptime(ip, field_index);
const name_val = v: {
@@ -18134,7 +18130,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, field_name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, field_name_len)).toIntern(),
} });
};
@@ -18159,7 +18155,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(field_is_comptime).toIntern(),
// alignment: comptime_int,
- (try pt.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
+ (try pt.intValue(.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try pt.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -18195,7 +18191,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, struct_field_vals.len)).toIntern(),
+ .len = (try pt.intValue(.usize, struct_field_vals.len)).toIntern(),
} });
};
@@ -18304,7 +18300,7 @@ fn typeInfoDecls(
} },
.byte_offset = 0,
} }),
- .len = (try pt.intValue(Type.usize, decl_vals.items.len)).toIntern(),
+ .len = (try pt.intValue(.usize, decl_vals.items.len)).toIntern(),
} });
}
@@ -18354,7 +18350,7 @@ fn typeInfoNamespaceDecls(
.byte_offset = 0,
},
}),
- .len = (try pt.intValue(Type.usize, name_len)).toIntern(),
+ .len = (try pt.intValue(.usize, name_len)).toIntern(),
},
});
};
@@ -18373,7 +18369,7 @@ fn typeInfoNamespaceDecls(
continue;
}
try sema.ensureNavResolved(block, src, nav, .fully);
- const namespace_ty = Type.fromInterned(ip.getNav(nav).status.fully_resolved.val);
+ const namespace_ty: Type = .fromInterned(ip.getNav(nav).status.fully_resolved.val);
try sema.typeInfoNamespaceDecls(block, src, namespace_ty.getNamespaceIndex(zcu).toOptional(), declaration_ty, decl_vals, seen_namespaces);
}
}
@@ -18424,7 +18420,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
const pt = sema.pt;
const zcu = pt.zcu;
switch (operand.zigTypeTag(zcu)) {
- .comptime_int => return Type.comptime_int,
+ .comptime_int => return .comptime_int,
.int => {
const bits = operand.bitSize(zcu);
const count = if (bits == 0)
@@ -18512,14 +18508,12 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_src = block.src(.{ .node_offset_un_op = inst_data.src_node });
const uncasted_operand = try sema.resolveInst(inst_data.operand);
- const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src);
+ const operand = try sema.coerce(block, .bool, uncasted_operand, operand_src);
if (try sema.resolveValue(operand)) |val| {
- return if (val.isUndef(zcu))
- pt.undefRef(Type.bool)
- else if (val.toBool()) .bool_false else .bool_true;
+ return if (val.isUndef(zcu)) .undef_bool else if (val.toBool()) .bool_false else .bool_true;
}
try sema.requireRuntimeBlock(block, src, null);
- return block.addTyOp(.not, Type.bool, operand);
+ return block.addTyOp(.not, .bool, operand);
}
fn zirBoolBr(
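The zirBoolNot hunk folds three returns into one expression and swaps pt.undefRef(Type.bool) for the pre-interned .undef_bool ref, so the comptime fold needs no interning call. A hedged sketch of the well-known-ref idea with a hypothetical Ref enum, not the compiler's Air.Inst.Ref:

    const std = @import("std");

    // Frequently produced constants get fixed tags, so constant folding
    // can return them without an interning call or error path.
    const Ref = enum(u32) {
        bool_true,
        bool_false,
        undef_bool,
        _, // all other values are runtime-assigned indexes

        fn fromBool(b: bool) Ref {
            return if (b) .bool_true else .bool_false;
        }
    };

    test "fold boolean not without interning" {
        const operand_is_undef = false;
        const operand_value = true;
        const result: Ref = if (operand_is_undef)
            .undef_bool
        else
            Ref.fromBool(!operand_value);
        try std.testing.expectEqual(Ref.bool_false, result);
    }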
@@ -18544,7 +18538,7 @@ fn zirBoolBr(
const lhs_src = parent_block.src(.{ .node_offset_bin_lhs = inst_data.src_node });
const rhs_src = parent_block.src(.{ .node_offset_bin_rhs = inst_data.src_node });
- const lhs = try sema.coerce(parent_block, Type.bool, uncoerced_lhs, lhs_src);
+ const lhs = try sema.coerce(parent_block, .bool, uncoerced_lhs, lhs_src);
if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| {
if (is_bool_or and lhs_val.toBool()) {
@@ -18559,7 +18553,7 @@ fn zirBoolBr(
if (sema.typeOf(rhs_result).isNoReturn(zcu)) {
return rhs_result;
}
- return sema.coerce(parent_block, Type.bool, rhs_result, rhs_src);
+ return sema.coerce(parent_block, .bool, rhs_result, rhs_src);
}
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
@@ -18596,7 +18590,7 @@ fn zirBoolBr(
const rhs_result = try sema.resolveInlineBody(rhs_block, body, inst);
const rhs_noret = sema.typeOf(rhs_result).isNoReturn(zcu);
const coerced_rhs_result = if (!rhs_noret) rhs: {
- const coerced_result = try sema.coerce(rhs_block, Type.bool, rhs_result, rhs_src);
+ const coerced_result = try sema.coerce(rhs_block, .bool, rhs_result, rhs_src);
_ = try rhs_block.addBr(block_inst, coerced_result);
break :rhs coerced_result;
} else rhs_result;
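When the lhs of `and`/`or` is comptime-known, zirBoolBr resolves the operator without emitting the runtime block, though the rhs body is still analyzed and coerced to bool. A small demonstration of the observable behavior, not the Sema internals:

    const std = @import("std");

    fn rhs() bool {
        return false;
    }

    test "short-circuit operators fold on a comptime-known lhs" {
        // `true or ...` and `false and ...` resolve without evaluating rhs().
        comptime std.debug.assert(true or rhs());
        comptime std.debug.assert(!(false and rhs()));
    }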
@@ -18797,7 +18791,7 @@ fn zirCondbr(
const else_body = sema.code.bodySlice(extra.end + then_body.len, extra.data.else_body_len);
const uncasted_cond = try sema.resolveInst(extra.data.condition);
- const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src);
+ const cond = try sema.coerce(parent_block, .bool, uncasted_cond, cond_src);
if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| {
const body = if (cond_val.toBool()) then_body else else_body;
@@ -19502,7 +19496,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const abi_align: Alignment = if (inst_data.flags.has_align) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
- const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src);
+ const coerced = try sema.coerce(block, .u32, try sema.resolveInst(ref), align_src);
const val = try sema.resolveConstDefinedValue(block, align_src, coerced, .{ .simple = .@"align" });
// Check if this happens to be the lazy alignment of our element type, in
// which case we can make this 0 without resolving it.
@@ -19526,14 +19520,14 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
- const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, Type.u16, .{ .simple = .type });
+ const bit_offset = try sema.resolveInt(block, bitoffset_src, ref, .u16, .{ .simple = .type });
break :blk @intCast(bit_offset);
} else 0;
const host_size: u16 = if (inst_data.flags.has_bit_range) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
- const host_size = try sema.resolveInt(block, hostsize_src, ref, Type.u16, .{ .simple = .type });
+ const host_size = try sema.resolveInt(block, hostsize_src, ref, .u16, .{ .simple = .type });
break :blk @intCast(host_size);
} else 0;
@@ -19767,7 +19761,7 @@ fn unionInit(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
- const field_ty = Type.fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
const init = try sema.coerce(block, field_ty, uncasted_init, init_src);
_ = union_ty_src;
return unionInitFromEnumTag(sema, block, init_src, union_ty, field_index, init);
@@ -19902,7 +19896,7 @@ fn zirStructInit(
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
const tag_ty = resolved_ty.unionTagTypeHypothetical(zcu);
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
- const field_ty = Type.fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
if (field_ty.zigTypeTag(zcu) == .noreturn) {
return sema.failWithOwnedErrorMsg(block, msg: {
@@ -19990,7 +19984,7 @@ fn finishStructInit(
.init_node_offset = init_src.offset.node_offset.x,
.elem_index = @intCast(i),
} });
- const field_ty = Type.fromInterned(tuple.types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(tuple.types.get(ip)[i]);
field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src);
continue;
}
@@ -20018,7 +20012,7 @@ fn finishStructInit(
.init_node_offset = init_src.offset.node_offset.x,
.elem_index = @intCast(i),
} });
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src);
continue;
}
@@ -20183,7 +20177,7 @@ fn structInitAnon(
const msg = try sema.errMsg(field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, Type.fromInterned(field_ty.*));
+ try sema.addDeclaredHereNote(msg, .fromInterned(field_ty.*));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -20317,7 +20311,7 @@ fn structInitAnon(
element_refs[i] = try sema.resolveInst(item.data.init);
}
- return block.addAggregateInit(Type.fromInterned(struct_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(struct_ty), element_refs);
}
fn zirArrayInit(
@@ -20441,7 +20435,7 @@ fn zirArrayInit(
});
const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
- const index = try pt.intRef(Type.usize, i);
+ const index = try pt.intRef(.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
@@ -20455,7 +20449,7 @@ fn zirArrayInit(
const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
for (resolved_args, 0..) |arg, i| {
- const index = try pt.intRef(Type.usize, i);
+ const index = try pt.intRef(.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(base_ptr, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
@@ -20504,7 +20498,7 @@ fn arrayInitAnon(
const msg = try sema.errMsg(operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
- try sema.addDeclaredHereNote(msg, Type.fromInterned(types[i]));
+ try sema.addDeclaredHereNote(msg, .fromInterned(types[i]));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -20561,7 +20555,7 @@ fn arrayInitAnon(
element_refs[i] = try sema.resolveInst(operand);
}
- return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
+ return block.addAggregateInit(.fromInterned(tuple_ty), element_refs);
}
fn addConstantMaybeRef(sema: *Sema, val: InternPool.Index, is_ref: bool) !Air.Inst.Ref {
@@ -20632,7 +20626,7 @@ fn fieldType(
.optional => {
// Struct/array init through optional requires the child type to not be a pointer.
// If the child of .optional is a pointer it'll error on the next loop.
- cur_ty = Type.fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type);
+ cur_ty = .fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type);
continue;
},
.error_union => {
@@ -20710,21 +20704,18 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const dest_ty: Type = if (is_vector) try pt.vectorType(.{ .child = .u1_type, .len = len }) else .u1;
if (try sema.resolveValue(operand)) |val| {
if (!is_vector) {
- if (val.isUndef(zcu)) return pt.undefRef(Type.u1);
- if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern());
- return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
+ return if (val.isUndef(zcu)) .undef_u1 else if (val.toBool()) .one_u1 else .zero_u1;
}
if (val.isUndef(zcu)) return pt.undefRef(dest_ty);
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
const old_elem = try val.elemValue(pt, i);
- const new_val = if (old_elem.isUndef(zcu))
- try pt.undefValue(Type.u1)
+ new_elem.* = if (old_elem.isUndef(zcu))
+ .undef_u1
else if (old_elem.toBool())
- try pt.intValue(Type.u1, 1)
+ .one_u1
else
- try pt.intValue(Type.u1, 0);
- new_elem.* = new_val.toIntern();
+ .zero_u1;
}
return Air.internedToRef(try pt.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
@@ -20736,7 +20727,7 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addBitCast(.u1, old_elem);
}
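Both zirIntFromBool paths now use the dedicated u1 refs: scalars fold straight to .undef_u1 / .one_u1 / .zero_u1, and comptime-known vector elements do the same per lane. The user-visible semantics, as a quick check:

    const std = @import("std");

    test "@intFromBool maps bools (and vector lanes) to u1" {
        try std.testing.expectEqual(@as(u1, 1), @intFromBool(true));
        const v: @Vector(4, bool) = .{ true, false, true, true };
        const bits: @Vector(4, u1) = @intFromBool(v);
        try std.testing.expectEqual(@as(u1, 1), bits[0]);
        try std.testing.expectEqual(@as(u1, 0), bits[1]);
    }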
@@ -20747,7 +20738,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const uncoerced_operand = try sema.resolveInst(inst_data.operand);
- const operand = try sema.coerce(block, Type.anyerror, uncoerced_operand, operand_src);
+ const operand = try sema.coerce(block, .anyerror, uncoerced_operand, operand_src);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
const err_name = sema.pt.zcu.intern_pool.indexToKey(val.toIntern()).err.name;
@@ -20993,12 +20984,12 @@ fn zirReify(
.float => {
const float = try sema.interpretBuiltinType(block, operand_src, .fromInterned(union_val.val), std.builtin.Type.Float);
- const ty = switch (float.bits) {
- 16 => Type.f16,
- 32 => Type.f32,
- 64 => Type.f64,
- 80 => Type.f80,
- 128 => Type.f128,
+ const ty: Type = switch (float.bits) {
+ 16 => .f16,
+ 32 => .f32,
+ 64 => .f64,
+ 80 => .f80,
+ 128 => .f128,
else => return sema.fail(block, src, "{}-bit float unsupported", .{float.bits}),
};
return Air.internedToRef(ty.toIntern());
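Giving the switch an explicit result type (`const ty: Type = switch ...`) is what lets every arm shrink to a decl literal. The same shape works in user code whenever all arms share a known result type; an illustrative bits-to-type table:

    const std = @import("std");

    fn FloatFromBits(comptime bits: u16) type {
        return switch (bits) {
            16 => f16,
            32 => f32,
            64 => f64,
            80 => f80,
            128 => f128,
            else => @compileError("unsupported float bit width"),
        };
    }

    test "float lookup by bit width" {
        try std.testing.expectEqual(f64, FloatFromBits(64));
        try std.testing.expectEqual(f80, FloatFromBits(80));
    }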
@@ -21038,7 +21029,7 @@ fn zirReify(
try ip.getOrPutString(gpa, pt.tid, "sentinel_ptr", .no_embedded_nulls),
).?);
- if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
+ if (!try sema.intFitsInType(alignment_val, .u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
@@ -21174,7 +21165,7 @@ fn zirReify(
},
.error_set => {
const payload_val = Value.fromInterned(union_val.val).optionalValue(zcu) orelse
- return Air.internedToRef(Type.anyerror.toIntern());
+ return .anyerror_type;
const names_val = try sema.derefSliceAsArray(block, src, payload_val, .{ .simple = .error_set_contents });
@@ -21776,7 +21767,7 @@ fn reifyUnion(
errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error
for (field_types) |field_ty_ip| {
- const field_ty = Type.fromInterned(field_ty_ip);
+ const field_ty: Type = .fromInterned(field_ty_ip);
if (field_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
@@ -22060,7 +22051,7 @@ fn reifyStruct(
}
if (any_aligned_fields) {
- if (!try sema.intFitsInType(field_alignment_val, Type.u32, null)) {
+ if (!try sema.intFitsInType(field_alignment_val, .u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
@@ -22149,7 +22140,7 @@ fn reifyStruct(
if (layout == .@"packed") {
var fields_bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |field_idx| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_idx]);
field_ty.resolveLayout(pt) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -22325,7 +22316,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
if (block.wantSafety()) {
const len = dest_ty.vectorLen(zcu);
for (0..len) |i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref);
const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try pt.floatValue(operand_scalar_ty, 0.0)).toIntern()));
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
@@ -22358,7 +22349,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const len = dest_ty.vectorLen(zcu);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
if (block.wantSafety()) {
@@ -22408,7 +22399,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const len = operand_ty.vectorLen(zcu);
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
new_elem.* = try block.addTyOp(.float_from_int, dest_scalar_ty, old_elem);
}
@@ -22431,10 +22422,10 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, uncoerced_operand_ty, src, operand_src);
const is_vector = dest_ty.zigTypeTag(zcu) == .vector;
- const operand_ty = if (is_vector) operand_ty: {
+ const operand_ty: Type = if (is_vector) operand_ty: {
const len = dest_ty.vectorLen(zcu);
break :operand_ty try pt.vectorType(.{ .child = .usize_type, .len = len });
- } else Type.usize;
+ } else .usize;
const operand_coerced = try sema.coerce(block, operand_ty, operand_res, operand_src);
@@ -22495,7 +22486,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_mask = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue(
- Type.usize,
+ .usize,
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
align_bytes_minus_1 & mask
else
@@ -22516,7 +22507,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const len = dest_ty.vectorLen(zcu);
if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) {
for (0..len) |i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const elem_coerced = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
if (!ptr_ty.isAllowzeroPtr(zcu)) {
const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize);
@@ -22525,7 +22516,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_mask = Air.internedToRef((try pt.intValue(
- Type.usize,
+ .usize,
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
align_bytes_minus_1 & mask
else
@@ -22540,7 +22531,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
- const idx_ref = try pt.intRef(Type.usize, i);
+ const idx_ref = try pt.intRef(.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand_coerced, idx_ref);
new_elem.* = try block.addBitCast(ptr_ty, old_elem);
}
@@ -22918,12 +22909,12 @@ fn ptrCastFull(
}
check_child: {
- const src_child = if (dest_info.flags.size == .slice and src_info.flags.size == .one) blk: {
+ const src_child: Type = if (dest_info.flags.size == .slice and src_info.flags.size == .one) blk: {
// *[n]T -> []T
break :blk Type.fromInterned(src_info.child).childType(zcu);
- } else Type.fromInterned(src_info.child);
+ } else .fromInterned(src_info.child);
- const dest_child = Type.fromInterned(dest_info.child);
+ const dest_child: Type = .fromInterned(dest_info.child);
const imc_res = try sema.coerceInMemoryAllowed(
block,
@@ -22956,7 +22947,7 @@ fn ptrCastFull(
}
if (is_array_ptr_to_slice) {
        // *[n]T -> []T
- const arr_ty = Type.fromInterned(src_info.child);
+ const arr_ty: Type = .fromInterned(src_info.child);
if (arr_ty.sentinel(zcu)) |src_sentinel| {
const coerced_sent = try zcu.intern_pool.getCoerced(sema.gpa, pt.tid, src_sentinel.toIntern(), dest_info.child);
if (dest_info.sentinel == coerced_sent) break :check_sent;
@@ -23158,7 +23149,7 @@ fn ptrCastFull(
if (dest_info.flags.size == .slice) {
// Because the operand is comptime-known and not `null`, the slice length has already been computed:
const len: Value = switch (dest_slice_len.?) {
- .undef => try pt.undefValue(.usize),
+ .undef => .undef_usize,
.constant => |n| try pt.intValue(.usize, n),
.equal_runtime_src_slice => unreachable,
.change_runtime_src_slice => unreachable,
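Here a comptime-known @ptrCast to a slice either knows the resulting length exactly or knows it is undefined, and undef now flows through as the pre-interned .undef_usize instead of a fresh pt.undefValue(.usize) call. A loose model of that split, using a hypothetical SliceLen type rather than the compiler's:

    const std = @import("std");

    const SliceLen = union(enum) {
        undef,
        constant: u64,
    };

    fn foldLen(len: SliceLen) ?u64 {
        return switch (len) {
            .undef => null, // stands in for "undefined usize" downstream
            .constant => |n| n,
        };
    }

    test "undef lengths propagate instead of erroring" {
        try std.testing.expectEqual(@as(?u64, 12), foldLen(.{ .constant = 12 }));
        try std.testing.expectEqual(@as(?u64, null), foldLen(.undef));
    }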
@@ -23267,7 +23258,7 @@ fn ptrCastFull(
if (need_align_check) {
assert(operand_ptr_int != .none);
const align_mask = try pt.intRef(.usize, mask: {
- const target_ptr_mask: u64 = Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu) orelse ~@as(u64, 0);
+ const target_ptr_mask = Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu) orelse ~@as(u64, 0);
break :mask (dest_align.toByteUnits().? - 1) & target_ptr_mask;
});
const ptr_masked = try block.addBinOp(.bit_and, operand_ptr_int, align_mask);
@@ -23288,7 +23279,7 @@ fn ptrCastFull(
assert(need_operand_ptr);
const result_len: Air.Inst.Ref = switch (dest_slice_len.?) {
- .undef => try pt.undefRef(.usize),
+ .undef => .undef_usize,
.constant => |n| try pt.intRef(.usize, n),
.equal_runtime_src_slice => len: {
assert(need_operand_len);
@@ -23658,13 +23649,13 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
- return sema.pt.intRef(Type.comptime_int, offset);
+ return sema.pt.intRef(.comptime_int, offset);
}
fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const offset = try sema.bitOffsetOf(block, inst);
// TODO reminder to make this a compile error for packed structs
- return sema.pt.intRef(Type.comptime_int, offset / 8);
+ return sema.pt.intRef(.comptime_int, offset / 8);
}
fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
@@ -23705,7 +23696,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
if (i == field_index) {
return bit_sum;
}
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
bit_sum += field_ty.bitSize(zcu);
} else unreachable;
},
@@ -24620,10 +24611,10 @@ fn analyzeShuffle(
const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
for (@intCast(0)..@intCast(min_len)) |i| {
- expand_mask_values[i] = (try pt.intValue(Type.comptime_int, i)).toIntern();
+ expand_mask_values[i] = (try pt.intValue(.comptime_int, i)).toIntern();
}
for (@intCast(min_len)..@intCast(max_len)) |i| {
- expand_mask_values[i] = (try pt.intValue(Type.comptime_int, -1)).toIntern();
+ expand_mask_values[i] = .negative_one;
}
const expand_mask = try pt.intern(.{ .aggregate = .{
.ty = (try pt.vectorType(.{ .len = @intCast(max_len), .child = .comptime_int_type })).toIntern(),
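In the expand mask, indexes past min_len become -1, which @shuffle reads as an index into the second operand; folding that constant to the pre-interned .negative_one avoids an intValue call per lane. What negative mask entries mean at the language level:

    const std = @import("std");

    test "negative shuffle mask entries select from the second operand" {
        const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
        const b: @Vector(4, u8) = .{ 9, 9, 9, 9 };
        // Mask entry i >= 0 picks a[i]; entry -1 - i (i.e. ~i) picks b[i].
        const r = @shuffle(u8, a, b, @Vector(4, i32){ 0, 1, -1, -2 });
        try std.testing.expectEqual(@as(u8, 9), r[2]);
        try std.testing.expectEqual(@as(u8, 9), r[3]);
    }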
@@ -25087,7 +25078,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
if (parent_ptr_info.flags.size != .one) {
return sema.fail(block, inst_src, "expected single pointer type, found '{}'", .{parent_ptr_ty.fmt(pt)});
}
- const parent_ty = Type.fromInterned(parent_ptr_info.child);
+ const parent_ty: Type = .fromInterned(parent_ptr_info.child);
switch (parent_ty.zigTypeTag(zcu)) {
.@"struct", .@"union" => {},
else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(pt)}),
@@ -25741,7 +25732,7 @@ fn zirMemcpy(
if (try sema.resolveDefinedValue(block, dest_src, dest_len)) |dest_len_val| {
len_val = dest_len_val;
if (try sema.resolveDefinedValue(block, src_src, src_len)) |src_len_val| {
- if (!(try sema.valuesEqual(dest_len_val, src_len_val, Type.usize))) {
+ if (!(try sema.valuesEqual(dest_len_val, src_len_val, .usize))) {
const msg = msg: {
const msg = try sema.errMsg(src, "non-matching copy lengths", .{});
errdefer msg.destroy(sema.gpa);
@@ -25952,7 +25943,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const dest_elem_ty: Type = dest_elem_ty: {
const ptr_info = dest_ptr_ty.ptrInfo(zcu);
switch (ptr_info.flags.size) {
- .slice => break :dest_elem_ty Type.fromInterned(ptr_info.child),
+ .slice => break :dest_elem_ty .fromInterned(ptr_info.child),
.one => {
if (Type.fromInterned(ptr_info.child).zigTypeTag(zcu) == .array) {
break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(zcu);
@@ -26118,7 +26109,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
extra_index += body.len;
if (extra.data.bits.ret_ty_is_generic) break :blk .generic_poison;
- const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, .{ .simple = .function_ret_ty });
+ const val = try sema.resolveGenericBody(block, ret_src, body, inst, .type, .{ .simple = .function_ret_ty });
const ty = val.toType();
break :blk ty;
} else if (extra.data.bits.has_ret_ty_ref) blk: {
@@ -26129,7 +26120,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const ret_ty_air_ref = try sema.resolveInst(ret_ty_ref);
const ret_ty_val = try sema.resolveConstDefinedValue(block, ret_src, ret_ty_air_ref, .{ .simple = .function_ret_ty });
break :blk ret_ty_val.toType();
- } else Type.void;
+ } else .void;
const noalias_bits: u32 = if (extra.data.bits.has_any_noalias) blk: {
const x = sema.code.extra[extra_index];
@@ -26223,7 +26214,7 @@ fn zirWasmMemorySize(
return sema.fail(block, builtin_src, "builtin @wasmMemorySize is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
- const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.operand, Type.u32, .{ .simple = .wasm_memory_index }));
+ const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.operand, .u32, .{ .simple = .wasm_memory_index }));
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
.tag = .wasm_memory_size,
@@ -26248,8 +26239,8 @@ fn zirWasmMemoryGrow(
return sema.fail(block, builtin_src, "builtin @wasmMemoryGrow is available when targeting WebAssembly; targeted CPU architecture is {s}", .{@tagName(target.cpu.arch)});
}
- const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, Type.u32, .{ .simple = .wasm_memory_index }));
- const delta = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.rhs), delta_src);
+ const index: u32 = @intCast(try sema.resolveInt(block, index_src, extra.lhs, .u32, .{ .simple = .wasm_memory_index }));
+ const delta = try sema.coerce(block, .usize, try sema.resolveInst(extra.rhs), delta_src);
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
@@ -26484,7 +26475,7 @@ fn zirWorkItem(
},
}
- const dimension: u32 = @intCast(try sema.resolveInt(block, dimension_src, extra.operand, Type.u32, .{ .simple = .work_group_dim_index }));
+ const dimension: u32 = @intCast(try sema.resolveInt(block, dimension_src, extra.operand, .u32, .{ .simple = .work_group_dim_index }));
try sema.requireRuntimeBlock(block, builtin_src, null);
return block.addInst(.{
@@ -26552,7 +26543,7 @@ fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const inline_tag_val = try pt.enumValue(
callconv_tag_ty,
(try pt.intValue(
- Type.u8,
+ .u8,
@intFromEnum(std.builtin.CallingConvention.@"inline"),
)).toIntern(),
);
@@ -26760,7 +26751,7 @@ fn explainWhyTypeIsComptimeInner(
if (zcu.typeToStruct(ty)) |struct_type| {
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
const field_src: LazySrcLoc = .{
.base_node_inst = struct_type.zir_index,
.offset = .{ .container_field_type = @intCast(i) },
@@ -26780,7 +26771,7 @@ fn explainWhyTypeIsComptimeInner(
if (zcu.typeToUnion(ty)) |union_obj| {
for (0..union_obj.field_types.len) |i| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[i]);
const field_src: LazySrcLoc = .{
.base_node_inst = union_obj.zir_index,
.offset = .{ .container_field_type = @intCast(i) },
@@ -27171,7 +27162,7 @@ fn addSafetyCheckUnwrapError(
defer fail_block.instructions.deinit(gpa);
- const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand);
+ const err = try fail_block.addTyOp(unwrap_err_tag, .anyerror, operand);
try safetyPanicUnwrapError(sema, &fail_block, src, err);
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
@@ -27344,7 +27335,7 @@ fn fieldVal(
switch (inner_ty.zigTypeTag(zcu)) {
.array => {
if (field_name.eqlSlice("len", ip)) {
- return Air.internedToRef((try pt.intValue(Type.usize, inner_ty.arrayLen(zcu))).toIntern());
+ return Air.internedToRef((try pt.intValue(.usize, inner_ty.arrayLen(zcu))).toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(zcu);
const result_ty = try pt.ptrTypeSema(.{
@@ -27527,7 +27518,7 @@ fn fieldPtr(
switch (inner_ty.zigTypeTag(zcu)) {
.array => {
if (field_name.eqlSlice("len", ip)) {
- const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(zcu));
+ const int_val = try pt.intValue(.usize, inner_ty.arrayLen(zcu));
return uavRef(sema, int_val.toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(zcu);
@@ -27769,12 +27760,12 @@ fn fieldCallBind(
if (zcu.typeToStruct(concrete_ty)) |struct_type| {
const field_index = struct_type.nameIndex(ip, field_name) orelse
break :find_field;
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
} else if (concrete_ty.isTuple(zcu)) {
if (field_name.eqlSlice("len", ip)) {
- return .{ .direct = try pt.intRef(Type.usize, concrete_ty.structFieldCount(zcu)) };
+ return .{ .direct = try pt.intRef(.usize, concrete_ty.structFieldCount(zcu)) };
}
if (field_name.toUnsigned(ip)) |field_index| {
if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field;
@@ -27817,7 +27808,7 @@ fn fieldCallBind(
if (zcu.typeToFunc(decl_type)) |func_type| f: {
if (func_type.param_types.len == 0) break :f;
- const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]);
+ const first_param_type: Type = .fromInterned(func_type.param_types.get(ip)[0]);
if (first_param_type.isGenericPoison() or
(first_param_type.zigTypeTag(zcu) == .pointer and
(first_param_type.ptrSize(zcu) == .one or
@@ -28003,7 +27994,7 @@ fn structFieldPtr(
if (struct_ty.isTuple(zcu)) {
if (field_name.eqlSlice("len", ip)) {
- const len_inst = try pt.intRef(Type.usize, struct_ty.structFieldCount(zcu));
+ const len_inst = try pt.intRef(.usize, struct_ty.structFieldCount(zcu));
return sema.analyzeRef(block, src, len_inst);
}
const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
@@ -28134,7 +28125,7 @@ fn structFieldVal(
return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
}
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
if (try sema.typeHasOnePossibleValue(field_ty)) |field_val|
return Air.internedToRef(field_val.toIntern());
@@ -28167,7 +28158,7 @@ fn tupleFieldVal(
const pt = sema.pt;
const zcu = pt.zcu;
if (field_name.eqlSlice("len", &zcu.intern_pool)) {
- return pt.intRef(Type.usize, tuple_ty.structFieldCount(zcu));
+ return pt.intRef(.usize, tuple_ty.structFieldCount(zcu));
}
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src);
return sema.tupleFieldValByIndex(block, tuple_byval, field_index, tuple_ty);
@@ -28220,7 +28211,7 @@ fn tupleFieldValByIndex(
return switch (zcu.intern_pool.indexToKey(tuple_val.toIntern())) {
.undef => pt.undefRef(field_ty),
.aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) {
- .bytes => |bytes| try pt.intValue(Type.u8, bytes.at(field_index, &zcu.intern_pool)),
+ .bytes => |bytes| try pt.intValue(.u8, bytes.at(field_index, &zcu.intern_pool)),
.elems => |elems| Value.fromInterned(elems[field_index]),
.repeated_elem => |elem| Value.fromInterned(elem),
}.toIntern()),
@@ -28253,7 +28244,7 @@ fn unionFieldPtr(
try union_ty.resolveFields(pt);
const union_obj = zcu.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
@@ -28295,8 +28286,8 @@ fn unionFieldPtr(
break :ct;
}
// Store to the union to initialize the tag.
- const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
- const payload_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const payload_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
const new_union_val = try pt.unionValue(union_ty, field_tag, try pt.undefValue(payload_ty));
try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty);
} else {
@@ -28306,7 +28297,7 @@ fn unionFieldPtr(
return sema.failWithUseOfUndef(block, src);
}
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
if (!tag_matches) {
const msg = msg: {
@@ -28332,11 +28323,11 @@ fn unionFieldPtr(
if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
{
- const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const wanted_tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
- const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val);
+ const active_tag = try block.addTyOp(.get_union_tag, .fromInterned(union_obj.enum_tag_ty), union_val);
try sema.addSafetyCheckInactiveUnionField(block, src, active_tag, wanted_tag);
}
if (field_ty.zigTypeTag(zcu) == .noreturn) {
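In safety-checked builds, taking a pointer to a field of an auto-layout tagged union loads the union, reads its tag with get_union_tag, and compares it against the wanted tag. The behavior this guards, from the user's side:

    const std = @import("std");

    const U = union(enum) { a: u32, b: f32 };

    test "field access is checked against the active tag" {
        var u: U = .{ .a = 5 };
        u = .{ .b = 1.5 }; // re-initialization also stores the new tag
        // Reading u.a here would trip the inactive-field safety panic
        // that the check above emits.
        try std.testing.expectEqual(@as(f32, 1.5), u.b);
    }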
@@ -28363,14 +28354,14 @@ fn unionFieldVal(
try union_ty.resolveFields(pt);
const union_obj = zcu.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, zcu).?);
if (try sema.resolveValue(union_byval)) |union_val| {
if (union_val.isUndef(zcu)) return pt.undefRef(field_ty);
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const field_tag = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
switch (union_obj.flagsUnordered(ip).layout) {
.auto => {
@@ -28408,9 +28399,9 @@ fn unionFieldVal(
if (union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and
union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1)
{
- const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
+ const wanted_tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
- const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval);
+ const active_tag = try block.addTyOp(.get_union_tag, .fromInterned(union_obj.enum_tag_ty), union_byval);
try sema.addSafetyCheckInactiveUnionField(block, src, active_tag, wanted_tag);
}
if (field_ty.zigTypeTag(zcu) == .noreturn) {
@@ -28540,7 +28531,7 @@ fn elemVal(
// TODO in case of a vector of pointers, we need to detect whether the element
// index is a scalar or vector instead of unconditionally casting to usize.
- const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
+ const elem_index = try sema.coerce(block, .usize, elem_index_uncasted, elem_index_src);
switch (indexable_ty.zigTypeTag(zcu)) {
.pointer => switch (indexable_ty.ptrSize(zcu)) {
@@ -28795,7 +28786,7 @@ fn elemValArray(
if (oob_safety and block.wantSafety()) {
// Runtime check is only needed if unable to comptime check.
if (maybe_index_val == null) {
- const len_inst = try pt.intRef(Type.usize, array_len);
+ const len_inst = try pt.intRef(.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent != null) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
}
@@ -28860,7 +28851,7 @@ fn elemPtrArray(
// Runtime check is only needed if unable to comptime check.
if (oob_safety and block.wantSafety() and offset == null) {
- const len_inst = try pt.intRef(Type.usize, array_len);
+ const len_inst = try pt.intRef(.usize, array_len);
const cmp_op: Air.Inst.Tag = if (array_sent) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
}
@@ -28917,9 +28908,9 @@ fn elemValSlice(
if (oob_safety and block.wantSafety()) {
const len_inst = if (maybe_slice_val) |slice_val|
- try pt.intRef(Type.usize, try slice_val.sliceLen(pt))
+ try pt.intRef(.usize, try slice_val.sliceLen(pt))
else
- try block.addTyOp(.slice_len, Type.usize, slice);
+ try block.addTyOp(.slice_len, .usize, slice);
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
}
@@ -28976,8 +28967,8 @@ fn elemPtrSlice(
const len_inst = len: {
if (maybe_undef_slice_val) |slice_val|
if (!slice_val.isUndef(zcu))
- break :len try pt.intRef(Type.usize, try slice_val.sliceLen(pt));
- break :len try block.addTyOp(.slice_len, Type.usize, slice);
+ break :len try pt.intRef(.usize, try slice_val.sliceLen(pt));
+ break :len try block.addTyOp(.slice_len, .usize, slice);
};
const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
try sema.addSafetyCheckIndexOob(block, src, elem_index, len_inst, cmp_op);
@@ -29142,7 +29133,7 @@ fn coerceExtra(
if (!inst_ty.isSinglePointer(zcu)) break :single_item;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const ptr_elem_ty = inst_ty.childType(zcu);
- const array_ty = Type.fromInterned(dest_info.child);
+ const array_ty: Type = .fromInterned(dest_info.child);
if (array_ty.zigTypeTag(zcu) != .array) break :single_item;
const array_elem_ty = array_ty.childType(zcu);
if (array_ty.arrayLen(zcu) != 1) break :single_item;
@@ -29164,7 +29155,7 @@ fn coerceExtra(
const array_elem_type = array_ty.childType(zcu);
const dest_is_mut = !dest_info.flags.is_const;
- const dst_elem_type = Type.fromInterned(dest_info.child);
+ const dst_elem_type: Type = .fromInterned(dest_info.child);
const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val);
switch (elem_res) {
.ok => {},
@@ -29225,7 +29216,7 @@ fn coerceExtra(
// could be null.
const src_elem_ty = inst_ty.childType(zcu);
const dest_is_mut = !dest_info.flags.is_const;
- const dst_elem_type = Type.fromInterned(dest_info.child);
+ const dst_elem_type: Type = .fromInterned(dest_info.child);
switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src, maybe_inst_val)) {
.ok => {},
else => break :src_c_ptr,
@@ -29265,16 +29256,16 @@ fn coerceExtra(
.byte_offset = 0,
} })),
.comptime_int => {
- const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
+ const addr = sema.coerceExtra(block, .usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => break :pointer,
else => |e| return e,
};
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
.int => {
- const ptr_size_ty = switch (inst_ty.intInfo(zcu).signedness) {
- .signed => Type.isize,
- .unsigned => Type.usize,
+ const ptr_size_ty: Type = switch (inst_ty.intInfo(zcu).signedness) {
+ .signed => .isize,
+ .unsigned => .usize,
};
const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
@@ -29291,8 +29282,8 @@ fn coerceExtra(
const inst_info = inst_ty.ptrInfo(zcu);
switch (try sema.coerceInMemoryAllowed(
block,
- Type.fromInterned(dest_info.child),
- Type.fromInterned(inst_info.child),
+ .fromInterned(dest_info.child),
+ .fromInterned(inst_info.child),
!dest_info.flags.is_const,
target,
dest_ty_src,
@@ -29305,7 +29296,7 @@ fn coerceExtra(
if (inst_info.flags.size == .slice) {
assert(dest_info.sentinel == .none);
if (inst_info.sentinel == .none or
- inst_info.sentinel != (try pt.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
+ inst_info.sentinel != (try pt.intValue(.fromInterned(inst_info.child), 0)).toIntern())
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -29364,8 +29355,8 @@ fn coerceExtra(
switch (try sema.coerceInMemoryAllowed(
block,
- Type.fromInterned(dest_info.child),
- Type.fromInterned(inst_info.child),
+ .fromInterned(dest_info.child),
+ .fromInterned(inst_info.child),
!dest_info.flags.is_const,
target,
dest_ty_src,
@@ -29378,7 +29369,7 @@ fn coerceExtra(
if (dest_info.sentinel == .none or inst_info.sentinel == .none or
Air.internedToRef(dest_info.sentinel) !=
- try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), Type.fromInterned(dest_info.child)))
+ try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), .fromInterned(dest_info.child)))
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -30658,8 +30649,8 @@ fn coerceInMemoryAllowedPtrs(
} };
}
- const dest_child = Type.fromInterned(dest_info.child);
- const src_child = Type.fromInterned(src_info.child);
+ const dest_child: Type = .fromInterned(dest_info.child);
+ const src_child: Type = .fromInterned(src_info.child);
const child = try sema.coerceInMemoryAllowed(
block,
dest_child,
@@ -30731,7 +30722,7 @@ fn coerceInMemoryAllowedPtrs(
.none => Value.@"unreachable",
else => Value.fromInterned(dest_info.sentinel),
},
- .ty = Type.fromInterned(dest_info.child),
+ .ty = .fromInterned(dest_info.child),
} };
}
@@ -30794,8 +30785,8 @@ fn coerceVarArgParam(
const inst_bits = uncasted_ty.floatBits(target);
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {
- 32 => break :float try sema.coerce(block, Type.f32, inst, inst_src),
- 64 => break :float try sema.coerce(block, Type.f64, inst, inst_src),
+ 32 => break :float try sema.coerce(block, .f32, inst, inst_src),
+ 64 => break :float try sema.coerce(block, .f64, inst, inst_src),
else => unreachable,
}
},
@@ -30807,22 +30798,22 @@ fn coerceVarArgParam(
.signed => .int,
.unsigned => .uint,
})) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
- .signed => Type.c_int,
- .unsigned => Type.c_uint,
+ .signed => .c_int,
+ .unsigned => .c_uint,
}, inst, inst_src);
if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) {
.signed => .long,
.unsigned => .ulong,
})) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
- .signed => Type.c_long,
- .unsigned => Type.c_ulong,
+ .signed => .c_long,
+ .unsigned => .c_ulong,
}, inst, inst_src);
if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) {
.signed => .longlong,
.unsigned => .ulonglong,
})) break :int try sema.coerce(block, switch (uncasted_info.signedness) {
- .signed => Type.c_longlong,
- .unsigned => Type.c_ulonglong,
+ .signed => .c_longlong,
+ .unsigned => .c_ulonglong,
}, inst, inst_src);
break :int inst;
} else inst,
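coerceVarArgParam promotes an integer vararg to the first of c_int/c_uint, c_long/c_ulong, c_longlong/c_ulonglong wide enough for it, picked by signedness, and the decl-literal form lines each pair up with its cTypeBitSize query. A rough sketch of the unsigned half of that ladder, assuming only Zig's C ABI types:

    const std = @import("std");

    fn CVarArgUint(comptime bits: u16) type {
        if (bits <= @bitSizeOf(c_uint)) return c_uint;
        if (bits <= @bitSizeOf(c_ulong)) return c_ulong;
        if (bits <= @bitSizeOf(c_ulonglong)) return c_ulonglong;
        @compileError("integer too wide for C varargs promotion");
    }

    test "unsigned varargs promotion ladder" {
        try std.testing.expectEqual(c_uint, CVarArgUint(8));
        try std.testing.expect(@bitSizeOf(CVarArgUint(64)) >= 64);
    }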
@@ -30889,7 +30880,7 @@ fn storePtr2(
while (i < field_count) : (i += 1) {
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
- const elem_index = try pt.intRef(Type.usize, i);
+ const elem_index = try pt.intRef(.usize, i);
const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
@@ -31216,7 +31207,7 @@ fn coerceArrayPtrToSlice(
const slice_val = try pt.intern(.{ .slice = .{
.ty = dest_ty.toIntern(),
.ptr = slice_ptr.toIntern(),
- .len = (try pt.intValue(Type.usize, array_ty.arrayLen(zcu))).toIntern(),
+ .len = (try pt.intValue(.usize, array_ty.arrayLen(zcu))).toIntern(),
} });
return Air.internedToRef(slice_val);
}
@@ -31358,7 +31349,7 @@ fn coerceEnumToUnion(
};
const union_obj = zcu.typeToUnion(union_ty).?;
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
try field_ty.resolveFields(pt);
if (field_ty.zigTypeTag(zcu) == .noreturn) {
const msg = msg: {
@@ -31448,7 +31439,7 @@ fn coerceEnumToUnion(
for (0..union_obj.field_types.len) |field_index| {
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!(try field_ty.hasRuntimeBitsSema(pt))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
field_name.fmt(ip),
@@ -31536,7 +31527,7 @@ fn coerceArrayLike(
var runtime_src: ?LazySrcLoc = null;
for (element_vals, element_refs, 0..) |*val, *ref, i| {
- const index_ref = Air.internedToRef((try pt.intValue(Type.usize, i)).toIntern());
+ const index_ref = Air.internedToRef((try pt.intValue(.usize, i)).toIntern());
const src = inst_src; // TODO better source location
const elem_src = inst_src; // TODO better source location
const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true);
@@ -31668,7 +31659,7 @@ fn coerceTupleToArrayPtrs(
const zcu = pt.zcu;
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
const ptr_info = ptr_array_ty.ptrInfo(zcu);
- const array_ty = Type.fromInterned(ptr_info.child);
+ const array_ty: Type = .fromInterned(ptr_info.child);
const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
if (ptr_info.flags.alignment != .none) {
return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{});
@@ -31721,14 +31712,14 @@ fn coerceTupleToTuple(
const field_index: u32 = @intCast(field_index_usize);
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
- const coerced = try sema.coerce(block, Type.fromInterned(field_ty), elem_ref, field_src);
+ const coerced = try sema.coerce(block, .fromInterned(field_ty), elem_ref, field_src);
field_refs[field_index] = coerced;
if (default_val != .none) {
const init_val = (try sema.resolveValue(coerced)) orelse {
return sema.failWithNeededComptime(block, field_src, .{ .simple = .stored_to_comptime_field });
};
- if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), pt.zcu)) {
+ if (!init_val.eql(Value.fromInterned(default_val), .fromInterned(field_ty), pt.zcu)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
@@ -31885,7 +31876,7 @@ pub fn ensureNavResolved(sema: *Sema, block: *Block, src: LazySrcLoc, nav_index:
fn optRefValue(sema: *Sema, opt_val: ?Value) !Value {
const pt = sema.pt;
- const ptr_anyopaque_ty = try pt.singleConstPtrType(Type.anyopaque);
+ const ptr_anyopaque_ty = try pt.singleConstPtrType(.anyopaque);
return Value.fromInterned(try pt.intern(.{ .opt = .{
.ty = (try pt.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
.val = if (opt_val) |val| (try pt.getCoerced(
@@ -32140,12 +32131,12 @@ fn analyzeSliceLen(
const zcu = pt.zcu;
if (try sema.resolveValue(slice_inst)) |slice_val| {
if (slice_val.isUndef(zcu)) {
- return pt.undefRef(Type.usize);
+ return .undef_usize;
}
- return pt.intRef(Type.usize, try slice_val.sliceLen(pt));
+ return pt.intRef(.usize, try slice_val.sliceLen(pt));
}
try sema.requireRuntimeBlock(block, src, null);
- return block.addTyOp(.slice_len, Type.usize, slice_inst);
+ return block.addTyOp(.slice_len, .usize, slice_inst);
}
fn analyzeIsNull(
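analyzeSliceLen now returns the pre-interned .undef_usize for an undef slice, and otherwise folds the length or falls back to a runtime slice_len op. The fold is visible in user code:

    const std = @import("std");

    test "slice len folds when the slice is comptime-known" {
        const s: []const u8 = "abc";
        comptime std.debug.assert(s.len == 3); // no runtime slice_len needed
        var buf: [8]u8 = undefined;
        var n: usize = 2;
        n += 1;
        const rt: []u8 = buf[0..n]; // runtime length: lowers to slice_len
        rt[0] = 0;
        try std.testing.expectEqual(@as(usize, 3), rt.len);
    }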
@@ -32156,7 +32147,7 @@ fn analyzeIsNull(
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
- const result_ty = Type.bool;
+ const result_ty: Type = .bool;
if (try sema.resolveValue(operand)) |opt_val| {
if (opt_val.isUndef(zcu)) {
return pt.undefRef(result_ty);
@@ -32224,7 +32215,7 @@ fn analyzeIsNonErrComptimeOnly(
else => {},
}
} else if (operand == .undef) {
- return pt.undefRef(Type.bool);
+ return .undef_bool;
} else if (@intFromEnum(operand) < InternPool.static_len) {
// None of the ref tags can be errors.
return .bool_true;
@@ -32308,14 +32299,7 @@ fn analyzeIsNonErrComptimeOnly(
}
if (maybe_operand_val) |err_union| {
- if (err_union.isUndef(zcu)) {
- return pt.undefRef(Type.bool);
- }
- if (err_union.getErrorName(zcu) == .none) {
- return .bool_true;
- } else {
- return .bool_false;
- }
+ return if (err_union.isUndef(zcu)) .undef_bool else if (err_union.getErrorName(zcu) == .none) .bool_true else .bool_false;
}
return .none;
}
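The collapsed expression keeps the same three outcomes: undef folds to .undef_bool, a .none error name means the union holds a payload (.bool_true), and anything else is an error (.bool_false). Observable at comptime:

    const std = @import("std");

    test "is-non-error folds for comptime-known error unions" {
        const ok: error{Oops}!u8 = 7;
        const bad: error{Oops}!u8 = error.Oops;
        comptime std.debug.assert(if (ok) |_| true else |_| false);
        comptime std.debug.assert(if (bad) |_| false else |_| true);
    }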
@@ -32412,8 +32396,8 @@ fn analyzeSlice(
);
const bounds_error_message = "slice of single-item pointer must have bounds [0..0], [0..1], or [1..1]";
- if (try sema.compareScalar(start_value, .neq, end_value, Type.comptime_int)) {
- if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, Type.comptime_int)) {
+ if (try sema.compareScalar(start_value, .neq, end_value, .comptime_int)) {
+ if (try sema.compareScalar(start_value, .neq, Value.zero_comptime_int, .comptime_int)) {
const msg = msg: {
const msg = try sema.errMsg(start_src, bounds_error_message, .{});
errdefer msg.destroy(sema.gpa);
@@ -32429,7 +32413,7 @@ fn analyzeSlice(
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
- } else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, Type.comptime_int)) {
+ } else if (try sema.compareScalar(end_value, .neq, Value.one_comptime_int, .comptime_int)) {
const msg = msg: {
const msg = try sema.errMsg(end_src, bounds_error_message, .{});
errdefer msg.destroy(sema.gpa);
@@ -32447,7 +32431,7 @@ fn analyzeSlice(
return sema.failWithOwnedErrorMsg(block, msg);
}
} else {
- if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, Type.comptime_int)) {
+ if (try sema.compareScalar(end_value, .gt, Value.one_comptime_int, .comptime_int)) {
return sema.fail(
block,
end_src,
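The bounds checks above enforce that slicing a single-item pointer can only produce zero or one element. What that permits:

    const std = @import("std");

    test "slicing a single-item pointer is bounded to one element" {
        var x: u32 = 42;
        const s = (&x)[0..1]; // ok: *u32 gives a *[1]u32
        s[0] += 1;
        try std.testing.expectEqual(@as(u32, 43), x);
        try std.testing.expectEqual(@as(usize, 1), s.len);
        // (&x)[0..2] would be the compile error reported above.
    }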
@@ -32512,7 +32496,7 @@ fn analyzeSlice(
break :ptr try sema.coerceCompatiblePtrs(block, try pt.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src);
} else ptr_or_slice;
- const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
+ const start = try sema.coerce(block, .usize, uncasted_start, start_src);
const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
const new_ptr_ty = sema.typeOf(new_ptr);
@@ -32523,20 +32507,20 @@ fn analyzeSlice(
var end_is_len = uncasted_end_opt == .none;
const end = e: {
if (array_ty.zigTypeTag(zcu) == .array) {
- const len_val = try pt.intValue(Type.usize, array_ty.arrayLen(zcu));
+ const len_val = try pt.intValue(.usize, array_ty.arrayLen(zcu));
if (!end_is_len) {
const end = if (by_length) end: {
- const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
- break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
- } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ break :end try sema.coerce(block, .usize, uncasted_end, end_src);
+ } else try sema.coerce(block, .usize, uncasted_end_opt, end_src);
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
const len_s_val = try pt.intValue(
- Type.usize,
+ .usize,
array_ty.arrayLenIncludingSentinel(zcu),
);
- if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
+ if (!(try sema.compareAll(end_val, .lte, len_s_val, .usize))) {
const sentinel_label: []const u8 = if (array_ty.sentinel(zcu) != null)
" +1 (sentinel)"
else
@@ -32557,7 +32541,7 @@ fn analyzeSlice(
// end_is_len is only true if we are NOT using the sentinel
// length. For sentinel-length, we don't want the type to
// contain the sentinel.
- if (end_val.eql(len_val, Type.usize, zcu)) {
+ if (end_val.eql(len_val, .usize, zcu)) {
end_is_len = true;
}
}
@@ -32568,10 +32552,10 @@ fn analyzeSlice(
} else if (slice_ty.isSlice(zcu)) {
if (!end_is_len) {
const end = if (by_length) end: {
- const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
- break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
- } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ break :end try sema.coerce(block, .usize, uncasted_end, end_src);
+ } else try sema.coerce(block, .usize, uncasted_end_opt, end_src);
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
if (try sema.resolveValue(ptr_or_slice)) |slice_val| {
if (slice_val.isUndef(zcu)) {
@@ -32580,8 +32564,8 @@ fn analyzeSlice(
const has_sentinel = slice_ty.sentinel(zcu) != null;
const slice_len = try slice_val.sliceLen(pt);
const len_plus_sent = slice_len + @intFromBool(has_sentinel);
- const slice_len_val_with_sentinel = try pt.intValue(Type.usize, len_plus_sent);
- if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
+ const slice_len_val_with_sentinel = try pt.intValue(.usize, len_plus_sent);
+ if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, .usize))) {
const sentinel_label: []const u8 = if (has_sentinel)
" +1 (sentinel)"
else
@@ -32602,8 +32586,8 @@ fn analyzeSlice(
// If the slice has a sentinel, we consider end_is_len
// to be true only if it equals the length WITHOUT the
// sentinel, so we don't add a sentinel type.
- const slice_len_val = try pt.intValue(Type.usize, slice_len);
- if (end_val.eql(slice_len_val, Type.usize, zcu)) {
+ const slice_len_val = try pt.intValue(.usize, slice_len);
+ if (end_val.eql(slice_len_val, .usize, zcu)) {
end_is_len = true;
}
}
@@ -32614,10 +32598,10 @@ fn analyzeSlice(
}
if (!end_is_len) {
if (by_length) {
- const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ const len = try sema.coerce(block, .usize, uncasted_end_opt, end_src);
const uncasted_end = try sema.analyzeArithmetic(block, .add, start, len, src, start_src, end_src, false);
- break :e try sema.coerce(block, Type.usize, uncasted_end, end_src);
- } else break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ break :e try sema.coerce(block, .usize, uncasted_end, end_src);
+ } else break :e try sema.coerce(block, .usize, uncasted_end_opt, end_src);
}
return sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src);
};
@@ -32645,7 +32629,7 @@ fn analyzeSlice(
// requirement: start <= end
if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| {
if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| {
- if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) {
+ if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, .usize))) {
return sema.fail(
block,
start_src,
@@ -32715,7 +32699,7 @@ fn analyzeSlice(
try sema.addSafetyCheckCall(block, src, ok, .@"panic.startGreaterThanEnd", &.{ start, end });
}
const new_len = if (by_length)
- try sema.coerce(block, Type.usize, uncasted_end_opt, end_src)
+ try sema.coerce(block, .usize, uncasted_end_opt, end_src)
else
try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
@@ -32753,9 +32737,9 @@ fn analyzeSlice(
bounds_check: {
const actual_len = if (array_ty.zigTypeTag(zcu) == .array)
- try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
+ try pt.intRef(.usize, array_ty.arrayLenIncludingSentinel(zcu))
else if (slice_ty.isSlice(zcu)) l: {
- const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
+ const slice_len_inst = try block.addTyOp(.slice_len, .usize, ptr_or_slice);
break :l if (slice_ty.sentinel(zcu) == null)
slice_len_inst
else
@@ -32811,15 +32795,15 @@ fn analyzeSlice(
// requirement: end <= len
const opt_len_inst = if (array_ty.zigTypeTag(zcu) == .array)
- try pt.intRef(Type.usize, array_ty.arrayLenIncludingSentinel(zcu))
+ try pt.intRef(.usize, array_ty.arrayLenIncludingSentinel(zcu))
else if (slice_ty.isSlice(zcu)) blk: {
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
// we don't need to add one for sentinels because the
// underlying value data includes the sentinel
- break :blk try pt.intRef(Type.usize, try slice_val.sliceLen(pt));
+ break :blk try pt.intRef(.usize, try slice_val.sliceLen(pt));
}
- const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
+ const slice_len_inst = try block.addTyOp(.slice_len, .usize, ptr_or_slice);
if (slice_ty.sentinel(zcu) == null) break :blk slice_len_inst;
// we have to add one because slice lengths don't include the sentinel
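
Aside (illustrative, not from the commit): the +1 is needed because a sentinel-terminated slice's `len` excludes the sentinel even though the element at index `len` is addressable:

test "len excludes the sentinel" {
    const s: [:0]const u8 = "abc";
    try @import("std").testing.expectEqual(@as(usize, 3), s.len);
    try @import("std").testing.expectEqual(@as(u8, 0), s[3]); // the sentinel
}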
@@ -32935,8 +32919,8 @@ fn cmpNumeric(
}
// Any other comparison depends on both values, so the result is undef if either is undef.
- if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
- if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool);
+ if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
+ if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return .undef_bool;
const runtime_src: LazySrcLoc = if (maybe_lhs_val) |lhs_val| rs: {
if (maybe_rhs_val) |rhs_val| {
@@ -33646,7 +33630,7 @@ fn resolvePeerTypes(
candidate_srcs: PeerTypeCandidateSrc,
) !Type {
switch (instructions.len) {
- 0 => return Type.noreturn,
+ 0 => return .noreturn,
1 => return sema.typeOf(instructions[0]),
else => {},
}
@@ -33780,12 +33764,12 @@ fn resolvePeerTypesInner(
.nullable => {
for (peer_tys, 0..) |opt_ty, i| {
const ty = opt_ty orelse continue;
- if (!ty.eql(Type.null, zcu)) return .{ .conflict = .{
+ if (!ty.eql(.null, zcu)) return .{ .conflict = .{
.peer_idx_a = strat_reason,
.peer_idx_b = i,
} };
}
- return .{ .success = Type.null };
+ return .{ .success = .null };
},
.optional => {
@@ -34006,7 +33990,7 @@ fn resolvePeerTypesInner(
};
// Try peer -> cur, then cur -> peer
- ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) orelse {
+ ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) orelse {
return .{ .conflict = .{
.peer_idx_a = first_idx,
.peer_idx_b = i,
@@ -34153,8 +34137,8 @@ fn resolvePeerTypesInner(
};
// We abstract array handling slightly so that tuple pointers can work like array pointers
- const peer_pointee_array = sema.typeIsArrayLike(Type.fromInterned(peer_info.child));
- const cur_pointee_array = sema.typeIsArrayLike(Type.fromInterned(ptr_info.child));
+ const peer_pointee_array = sema.typeIsArrayLike(.fromInterned(peer_info.child));
+ const cur_pointee_array = sema.typeIsArrayLike(.fromInterned(ptr_info.child));
// This switch is just responsible for deciding the size and pointee (not including
// the single-pointer array sentinel).
@@ -34162,7 +34146,7 @@ fn resolvePeerTypesInner(
switch (peer_info.flags.size) {
.one => switch (ptr_info.flags.size) {
.one => {
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34204,7 +34188,7 @@ fn resolvePeerTypesInner(
.many => {
// Only works for *[n]T + [*]T -> [*]T
const arr = peer_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34217,7 +34201,7 @@ fn resolvePeerTypesInner(
.slice => {
// Only works for *[n]T + []T -> []T
const arr = peer_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34233,7 +34217,7 @@ fn resolvePeerTypesInner(
.one => {
// Only works for [*]T + *[n]T -> [*]T
const arr = cur_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, .fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .many;
ptr_info.child = pointee.toIntern();
break :good;
@@ -34247,7 +34231,7 @@ fn resolvePeerTypesInner(
return generic_err;
},
.many => {
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34262,7 +34246,7 @@ fn resolvePeerTypesInner(
} };
}
// Okay, then this works for [*]T + "[]T" -> [*]T
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .many;
ptr_info.child = pointee.toIntern();
break :good;
@@ -34275,7 +34259,7 @@ fn resolvePeerTypesInner(
.one => {
// Only works for []T + *[n]T -> []T
const arr = cur_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, .fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .slice;
ptr_info.child = pointee.toIntern();
break :good;
@@ -34293,7 +34277,7 @@ fn resolvePeerTypesInner(
return generic_err;
},
.slice => {
- if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, .fromInterned(ptr_info.child), .fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34479,7 +34463,7 @@ fn resolvePeerTypesInner(
} },
}
}
- return .{ .success = Type.comptime_int };
+ return .{ .success = .comptime_int };
},
.comptime_float => {
@@ -34493,7 +34477,7 @@ fn resolvePeerTypesInner(
} },
}
}
- return .{ .success = Type.comptime_float };
+ return .{ .success = .comptime_float };
},
.fixed_int => {
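
Aside (illustrative, not from the commit): the comptime_int and comptime_float successes above are observable through multi-operand `@TypeOf`, which performs peer type resolution:

comptime {
    const assert = @import("std").debug.assert;
    assert(@TypeOf(1, 2) == comptime_int);
    assert(@TypeOf(1, 2.5) == comptime_float); // int peer coerces to float
    assert(@TypeOf(@as(f16, 0), @as(f32, 0)) == f32); // widest float wins
}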
@@ -34601,11 +34585,11 @@ fn resolvePeerTypesInner(
// Recreate the type so we eliminate any c_longdouble
const bits = @max(cur_ty.floatBits(target), ty.floatBits(target));
opt_cur_ty = switch (bits) {
- 16 => Type.f16,
- 32 => Type.f32,
- 64 => Type.f64,
- 80 => Type.f80,
- 128 => Type.f128,
+ 16 => .f16,
+ 32 => .f32,
+ 64 => .f64,
+ 80 => .f80,
+ 128 => .f128,
else => unreachable,
};
} else {
@@ -34716,7 +34700,7 @@ fn resolvePeerTypesInner(
break;
};
const uncoerced_field = Air.internedToRef(uncoerced_field_val.toIntern());
- const coerced_inst = sema.coerceExtra(block, Type.fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
+ const coerced_inst = sema.coerceExtra(block, .fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
// It's possible for PTR (peer type resolution) to give false positives. Just give up on making this a comptime field; we'll get an error later anyway
error.NotCoercible => {
comptime_val = null;
@@ -34729,7 +34713,7 @@ fn resolvePeerTypesInner(
comptime_val = coerced_val;
continue;
};
- if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), zcu)) {
+ if (!coerced_val.eql(existing, .fromInterned(field_ty.*), zcu)) {
comptime_val = null;
break;
}
@@ -34743,7 +34727,7 @@ fn resolvePeerTypesInner(
.values = field_vals,
});
- return .{ .success = Type.fromInterned(final_ty) };
+ return .{ .success = .fromInterned(final_ty) };
},
.exact => {
@@ -34813,7 +34797,7 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
const field_count = ty.structFieldCount(zcu);
if (field_count == 0) return .{
.len = 0,
- .elem_ty = Type.noreturn,
+ .elem_ty = .noreturn,
};
if (!ty.isTuple(zcu)) return null;
const elem_ty = ty.fieldType(0, zcu);
@@ -34902,7 +34886,7 @@ pub fn resolveStructAlignment(
var alignment: Alignment = .@"1";
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt))
continue;
const field_align = try field_ty.structFieldAlignmentSema(
@@ -34953,7 +34937,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
var big_align: Alignment = .@"1";
for (aligns, sizes, 0..) |*field_align, *field_size, i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) {
struct_type.offsets.get(ip)[i] = 0;
field_size.* = 0;
@@ -35001,7 +34985,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
const runtime_order = struct_type.runtime_order.get(ip);
for (runtime_order, 0..) |*ro, i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt)) {
ro.* = .omitted;
} else {
@@ -35095,7 +35079,7 @@ fn backingIntType(
const fields_bit_sum = blk: {
var accumulator: u64 = 0;
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
accumulator += try field_ty.bitSizeSema(pt);
}
break :blk accumulator;
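
Aside (illustrative, not from the commit): `fields_bit_sum` is what decides a packed struct's backing integer:

test "backing integer is the sum of field bit sizes" {
    const P = packed struct { a: u3, b: u5, c: u8 };
    try @import("std").testing.expectEqual(u16, @typeInfo(P).@"struct".backing_integer.?);
}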
@@ -35234,7 +35218,7 @@ pub fn resolveUnionAlignment(
var max_align: Alignment = .@"1";
for (0..union_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try field_ty.hasRuntimeBitsSema(pt))) continue;
const explicit_align = union_type.fieldAlign(ip, field_index);
@@ -35282,7 +35266,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
var max_size: u64 = 0;
var max_align: Alignment = .@"1";
for (0..union_type.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_type.field_types.get(ip)[field_index]);
if (try field_ty.comptimeOnlySema(pt) or field_ty.zigTypeTag(pt.zcu) == .noreturn) continue; // TODO: should this affect alignment?
@@ -35307,7 +35291,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void {
const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and
try Type.fromInterned(union_type.enum_tag_ty).hasRuntimeBitsSema(pt);
const size, const alignment, const padding = if (has_runtime_tag) layout: {
- const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty);
+ const enum_tag_type: Type = .fromInterned(union_type.enum_tag_ty);
const tag_align = try enum_tag_type.abiAlignmentSema(pt);
const tag_size = try enum_tag_type.abiSizeSema(pt);
@@ -35392,7 +35376,7 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void {
// See also similar code for unions.
for (0..struct_type.field_types.len) |i| {
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
try field_ty.resolveFully(pt);
}
}
@@ -35421,7 +35405,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void {
union_obj.setStatus(ip, .fully_resolved_wip);
for (0..union_obj.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
try field_ty.resolveFully(pt);
}
union_obj.setStatus(ip, .fully_resolved);
@@ -35553,7 +35537,7 @@ fn resolveInferredErrorSet(
// set. However, in the case of comptime/inline function calls with
// inferred error sets, each call gets an adhoc InferredErrorSet object, which
// has no corresponding function body.
- const ies_func_info = zcu.typeToFunc(Type.fromInterned(func.ty)).?;
+ const ies_func_info = zcu.typeToFunc(.fromInterned(func.ty)).?;
// if the ies is declared by an inline function with a generic return type, the return_type should be generic_poison,
// because an inline function does not create a new declaration and the ies has already been filled in by analyzeCall,
// so we can simply skip this case here.
@@ -36008,7 +35992,7 @@ fn structFieldInits(
// In init bodies, the zir index of the struct itself is used
// to refer to the current field type.
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_i]);
const type_ref = Air.internedToRef(field_ty.toIntern());
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, &.{zir_index});
sema.inst_map.putAssumeCapacity(zir_index, type_ref);
@@ -36135,7 +36119,7 @@ fn unionFields(
}
if (fields_len > 0) {
- const field_count_val = try pt.intValue(Type.comptime_int, fields_len - 1);
+ const field_count_val = try pt.intValue(.comptime_int, fields_len - 1);
if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) {
const msg = msg: {
const msg = try sema.errMsg(tag_ty_src, "specified integer tag type cannot represent every field", .{});
@@ -36288,9 +36272,9 @@ fn unionFields(
}
const field_ty: Type = if (!has_type)
- Type.void
+ .void
else if (field_type_ref == .none)
- Type.noreturn
+ .noreturn
else
try sema.resolveType(&block_scope, type_src, field_type_ref);
@@ -36388,11 +36372,11 @@ fn unionFields(
for (tag_info.names.get(ip), 0..) |field_name, field_index| {
if (explicit_tags_seen[field_index]) continue;
- try sema.addFieldErrNote(Type.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{
+ try sema.addFieldErrNote(.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{
field_name.fmt(ip),
});
}
- try sema.addDeclaredHereNote(msg, Type.fromInterned(tag_ty));
+ try sema.addDeclaredHereNote(msg, .fromInterned(tag_ty));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
@@ -36530,10 +36514,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.comptime_int_type,
.comptime_float_type,
.enum_literal_type,
+ .ptr_usize_type,
+ .ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
- .single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.vector_8_i8_type,
@@ -36595,11 +36580,16 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.empty_tuple_type => Value.empty_tuple,
// values, not types
.undef,
+ .undef_bool,
+ .undef_usize,
+ .undef_u1,
.zero,
.zero_usize,
+ .zero_u1,
.zero_u8,
.one,
.one_usize,
+ .one_u1,
.one_u8,
.four_u8,
.negative_one,
@@ -36705,7 +36695,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.storage = .{ .elems = &.{} },
} }));
- if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| {
+ if (try sema.typeHasOnePossibleValue(.fromInterned(seq_type.child))) |opv| {
return Value.fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
@@ -36740,7 +36730,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
field_val.* = struct_type.field_inits.get(ip)[i];
continue;
}
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[i]);
if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| {
field_val.* = field_opv.toIntern();
} else return null;
@@ -36773,13 +36763,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
try ty.resolveLayout(pt);
const union_obj = ip.loadUnionType(ty.toIntern());
- const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse
+ const tag_val = (try sema.typeHasOnePossibleValue(.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse
return null;
if (union_obj.field_types.len == 0) {
const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
- const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
+ const only_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[0]);
const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse
return null;
const only = try pt.internUnion(.{
@@ -36796,7 +36786,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
- if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
+ if (try sema.typeHasOnePossibleValue(.fromInterned(enum_type.tag_ty))) |int_opv| {
const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
@@ -36814,7 +36804,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
1 => try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = if (enum_type.values.len == 0)
- (try pt.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
+ (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
try ip.getCoercedInts(
zcu.gpa,
@@ -37041,7 +37031,7 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
if (ptr_type.flags.is_allowzero) return null;
// optionals of zero-sized types behave like bools, not pointers
- const payload_ty = Type.fromInterned(opt_child);
+ const payload_ty: Type = .fromInterned(opt_child);
if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
return null;
}
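
Aside (illustrative, not from the commit): the early `return null` above is why an optional of a zero-sized payload is represented as a flag, while optionals of ordinary pointers reuse the null address:

test "optional of zero-sized type is flag-like" {
    try @import("std").testing.expectEqual(@as(usize, 1), @sizeOf(?void));
    try @import("std").testing.expectEqual(@sizeOf(usize), @sizeOf(?*u32));
}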
@@ -37175,7 +37165,7 @@ fn intFromFloatScalar(
var big_int = try float128IntPartToBigInt(sema.arena, float);
defer big_int.deinit();
- const cti_result = try pt.intValue_big(Type.comptime_int, big_int.toConst());
+ const cti_result = try pt.intValue_big(.comptime_int, big_int.toConst());
if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
@@ -37278,8 +37268,8 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
assert(enum_type.tag_mode != .nonexhaustive);
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
- if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false;
- const int_coerced = try pt.getCoerced(int, Type.fromInterned(enum_type.tag_ty));
+ if (!(try sema.intFitsInType(int, .fromInterned(enum_type.tag_ty), null))) return false;
+ const int_coerced = try pt.getCoerced(int, .fromInterned(enum_type.tag_ty));
return enum_type.tagValueIndex(&zcu.intern_pool, int_coerced.toIntern()) != null;
}
@@ -37359,7 +37349,7 @@ fn compareVector(
const lhs_elem = try lhs.elemValue(pt, i);
const rhs_elem = try rhs.elemValue(pt, i);
if (lhs_elem.isUndef(zcu) or rhs_elem.isUndef(zcu)) {
- scalar.* = try pt.intern(.{ .undef = .bool_type });
+ scalar.* = .undef_bool;
} else {
const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(zcu));
scalar.* = Value.makeBool(res_bool).toIntern();
@@ -37826,7 +37816,7 @@ pub fn resolveDeclaredEnum(
.owner = .wrap(.{ .type = wip_ty.index }),
.func_index = .none,
.func_is_naked = false,
- .fn_ret_ty = Type.void,
+ .fn_ret_ty = .void,
.fn_ret_ty_ies = null,
.comptime_err_ret_trace = &comptime_err_ret_trace,
};
@@ -37999,7 +37989,7 @@ fn resolveDeclaredEnumInner(
break :overflow false;
} else overflow: {
assert(wip_ty.nextField(ip, field_name, .none) == null);
- last_tag_val = try pt.intValue(Type.comptime_int, field_i);
+ last_tag_val = try pt.intValue(.comptime_int, field_i);
if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
break :overflow false;
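
Aside (illustrative, not from the commit): the overflow branch above covers auto-incremented tags that continue past an explicit value:

test "auto-incremented enum tags must fit the tag type" {
    const E = enum(u8) { a = 250, b, c }; // b = 251, c = 252: still fits in u8
    try @import("std").testing.expectEqual(@as(u8, 252), @intFromEnum(E.c));
}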
src/Type.zig
@@ -2641,10 +2641,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
if (enum_type.values.len == 0) {
const only = try pt.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
- .int = try pt.intern(.{ .int = .{
- .ty = enum_type.tag_ty,
- .storage = .{ .u64 = 0 },
- } }),
+ .int = (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern(),
} });
return Value.fromInterned(only);
} else {
@@ -3676,10 +3673,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.null_type,
.undefined_type,
.enum_literal_type,
+ .ptr_usize_type,
+ .ptr_const_comptime_int_type,
.manyptr_u8_type,
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
- .single_const_pointer_to_comptime_int_type,
.slice_const_u8_type,
.slice_const_u8_sentinel_0_type,
.optional_noreturn_type,
@@ -3691,9 +3689,11 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.undef => unreachable,
.zero => unreachable,
.zero_usize => unreachable,
+ .zero_u1 => unreachable,
.zero_u8 => unreachable,
.one => unreachable,
.one_usize => unreachable,
+ .one_u1 => unreachable,
.one_u8 => unreachable,
.four_u8 => unreachable,
.negative_one => unreachable,
@@ -4100,10 +4100,11 @@ pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type };
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type };
+pub const ptr_usize: Type = .{ .ip_index = .ptr_usize_type };
+pub const ptr_const_comptime_int: Type = .{ .ip_index = .ptr_const_comptime_int_type };
pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type };
pub const manyptr_const_u8: Type = .{ .ip_index = .manyptr_const_u8_type };
pub const manyptr_const_u8_sentinel_0: Type = .{ .ip_index = .manyptr_const_u8_sentinel_0_type };
-pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type };
pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
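
Aside (illustrative, not from the commit): `*const comptime_int` keeps a pre-interned index because it is the type produced by taking the address of an integer literal:

comptime {
    @import("std").debug.assert(@TypeOf(&0) == *const comptime_int);
}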
src/Value.zig
@@ -2895,19 +2895,25 @@ pub fn intValueBounds(val: Value, pt: Zcu.PerThread) !?[2]Value {
pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
+pub const undef: Value = .{ .ip_index = .undef };
+pub const undef_bool: Value = .{ .ip_index = .undef_bool };
+pub const undef_usize: Value = .{ .ip_index = .undef_usize };
+pub const undef_u1: Value = .{ .ip_index = .undef_u1 };
+pub const zero_comptime_int: Value = .{ .ip_index = .zero };
pub const zero_usize: Value = .{ .ip_index = .zero_usize };
+pub const zero_u1: Value = .{ .ip_index = .zero_u1 };
pub const zero_u8: Value = .{ .ip_index = .zero_u8 };
-pub const zero_comptime_int: Value = .{ .ip_index = .zero };
pub const one_comptime_int: Value = .{ .ip_index = .one };
+pub const one_usize: Value = .{ .ip_index = .one_usize };
+pub const one_u1: Value = .{ .ip_index = .one_u1 };
+pub const one_u8: Value = .{ .ip_index = .one_u8 };
+pub const four_u8: Value = .{ .ip_index = .four_u8 };
pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one };
-pub const undef: Value = .{ .ip_index = .undef };
pub const @"void": Value = .{ .ip_index = .void_value };
+pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };
pub const @"null": Value = .{ .ip_index = .null_value };
-pub const @"false": Value = .{ .ip_index = .bool_false };
pub const @"true": Value = .{ .ip_index = .bool_true };
-pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };
-
-pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type };
+pub const @"false": Value = .{ .ip_index = .bool_false };
pub const empty_tuple: Value = .{ .ip_index = .empty_tuple };
pub fn makeBool(x: bool) Value {
test/behavior/x86_64/binary.zig
@@ -1,5 +1,7 @@
const AddOneBit = math.AddOneBit;
+const AsSignedness = math.AsSignedness;
const cast = math.cast;
+const ChangeScalar = math.ChangeScalar;
const checkExpected = math.checkExpected;
const Compare = math.Compare;
const DoubleBits = math.DoubleBits;
@@ -13,6 +15,7 @@ const math = @import("math.zig");
const nan = math.nan;
const Scalar = math.Scalar;
const sign = math.sign;
+const splat = math.splat;
const Sse = math.Sse;
const tmin = math.tmin;
@@ -5141,6 +5144,7 @@ inline fn mulSat(comptime Type: type, lhs: Type, rhs: Type) Type {
test mulSat {
const test_mul_sat = binary(mulSat, .{});
try test_mul_sat.testInts();
+ try test_mul_sat.testIntVectors();
}
inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) {
@@ -5265,9 +5269,9 @@ test mulWithOverflow {
}
inline fn shlWithOverflow(comptime Type: type, lhs: Type, rhs: Type) struct { Type, u1 } {
- const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
+ const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
- return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs);
+ return @shlWithOverflow(lhs, if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs);
}
test shlWithOverflow {
const test_shl_with_overflow = binary(shlWithOverflow, .{});
@@ -5280,7 +5284,9 @@ inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) {
test equal {
const test_equal = binary(equal, .{});
try test_equal.testInts();
+ try test_equal.testIntVectors();
try test_equal.testFloats();
+ try test_equal.testFloatVectors();
}
inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs) {
@@ -5289,7 +5295,9 @@ inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs
test notEqual {
const test_not_equal = binary(notEqual, .{});
try test_not_equal.testInts();
+ try test_not_equal.testIntVectors();
try test_not_equal.testFloats();
+ try test_not_equal.testFloatVectors();
}
inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs) {
@@ -5298,7 +5306,9 @@ inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs)
test lessThan {
const test_less_than = binary(lessThan, .{});
try test_less_than.testInts();
+ try test_less_than.testIntVectors();
try test_less_than.testFloats();
+ try test_less_than.testFloatVectors();
}
inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs <= rhs) {
@@ -5307,7 +5317,9 @@ inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs
test lessThanOrEqual {
const test_less_than_or_equal = binary(lessThanOrEqual, .{});
try test_less_than_or_equal.testInts();
+ try test_less_than_or_equal.testIntVectors();
try test_less_than_or_equal.testFloats();
+ try test_less_than_or_equal.testFloatVectors();
}
inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > rhs) {
@@ -5316,7 +5328,9 @@ inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > r
test greaterThan {
const test_greater_than = binary(greaterThan, .{});
try test_greater_than.testInts();
+ try test_greater_than.testIntVectors();
try test_greater_than.testFloats();
+ try test_greater_than.testFloatVectors();
}
inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs >= rhs) {
@@ -5325,7 +5339,9 @@ inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(
test greaterThanOrEqual {
const test_greater_than_or_equal = binary(greaterThanOrEqual, .{});
try test_greater_than_or_equal.testInts();
+ try test_greater_than_or_equal.testIntVectors();
try test_greater_than_or_equal.testFloats();
+ try test_greater_than_or_equal.testFloatVectors();
}
inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs & rhs) {
@@ -5347,54 +5363,57 @@ test bitOr {
}
inline fn shr(comptime Type: type, lhs: Type, rhs: Type) Type {
- const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
+ const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
- return lhs >> if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
+ return lhs >> if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
}
test shr {
const test_shr = binary(shr, .{});
try test_shr.testInts();
+ try test_shr.testIntVectors();
}
inline fn shrExact(comptime Type: type, lhs: Type, rhs: Type) Type {
- const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
+ const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
- const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
+ const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
return @shrExact(lhs >> final_rhs << final_rhs, final_rhs);
}
test shrExact {
const test_shr_exact = binary(shrExact, .{});
try test_shr_exact.testInts();
+ try test_shr_exact.testIntVectors();
}
inline fn shl(comptime Type: type, lhs: Type, rhs: Type) Type {
- const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
+ const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
- return lhs << if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
+ return lhs << if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
}
test shl {
const test_shl = binary(shl, .{});
try test_shl.testInts();
+ try test_shl.testIntVectors();
}
inline fn shlExactUnsafe(comptime Type: type, lhs: Type, rhs: Type) Type {
@setRuntimeSafety(false);
- const bit_cast_rhs: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(Type) } }) = @bitCast(rhs);
+ const bit_cast_rhs: AsSignedness(Type, .unsigned) = @bitCast(rhs);
const truncate_rhs: Log2Int(Type) = @truncate(bit_cast_rhs);
- const final_rhs = if (comptime cast(Log2Int(Type), @bitSizeOf(Type))) |bits| truncate_rhs % bits else truncate_rhs;
+ const final_rhs = if (comptime cast(Log2Int(Scalar(Type)), @bitSizeOf(Scalar(Type)))) |bits| truncate_rhs % splat(Log2Int(Type), bits) else truncate_rhs;
return @shlExact(lhs << final_rhs >> final_rhs, final_rhs);
}
test shlExactUnsafe {
const test_shl_exact_unsafe = binary(shlExactUnsafe, .{});
try test_shl_exact_unsafe.testInts();
+ try test_shl_exact_unsafe.testIntVectors();
}
inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
// workaround https://github.com/ziglang/zig/issues/23034
if (@inComptime()) {
// workaround https://github.com/ziglang/zig/issues/23139
- //return lhs <<| @min(@abs(rhs), imax(u64));
- return lhs <<| @min(@abs(rhs), @as(u64, imax(u64)));
+ return lhs <<| @min(@abs(rhs), splat(ChangeScalar(Type, u64), imax(u64)));
}
// workaround https://github.com/ziglang/zig/issues/23033
@setRuntimeSafety(false);
@@ -5403,6 +5422,7 @@ inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
test shlSat {
const test_shl_sat = binary(shlSat, .{});
try test_shl_sat.testInts();
+ try test_shl_sat.testIntVectors();
}
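
Aside (illustrative, not from the commit): the `@min` cap is needed because `<<|` accepts an arbitrarily large rhs and saturates the result:

comptime {
    @import("std").debug.assert((@as(u8, 1) <<| 200) == 255); // saturates at maxInt(u8)
}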
inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs ^ rhs) {
test/behavior/x86_64/math.zig
@@ -8,8 +8,6 @@ pub const fmin = math.floatMin;
pub const imax = math.maxInt;
pub const imin = math.minInt;
pub const inf = math.inf;
-pub const Log2Int = math.Log2Int;
-pub const Log2IntCeil = math.Log2IntCeil;
pub const nan = math.nan;
pub const next = math.nextAfter;
pub const tmin = math.floatTrueMin;
@@ -30,38 +28,44 @@ pub fn Scalar(comptime Type: type) type {
.vector => |info| info.child,
};
}
+pub fn ChangeScalar(comptime Type: type, comptime NewScalar: type) type {
+ return switch (@typeInfo(Type)) {
+ else => NewScalar,
+ .vector => |vector| @Vector(vector.len, NewScalar),
+ };
+}
+pub fn AsSignedness(comptime Type: type, comptime signedness: std.builtin.Signedness) type {
+ return ChangeScalar(Type, @Type(.{ .int = .{
+ .signedness = signedness,
+ .bits = @typeInfo(Scalar(Type)).int.bits,
+ } }));
+}
pub fn AddOneBit(comptime Type: type) type {
- const ResultScalar = switch (@typeInfo(Scalar(Type))) {
+ return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = 1 + int.bits } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
- };
- return switch (@typeInfo(Type)) {
- else => ResultScalar,
- .vector => |vector| @Vector(vector.len, ResultScalar),
- };
+ });
}
pub fn DoubleBits(comptime Type: type) type {
- const ResultScalar = switch (@typeInfo(Scalar(Type))) {
+ return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = int.bits * 2 } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
- };
- return switch (@typeInfo(Type)) {
- else => ResultScalar,
- .vector => |vector| @Vector(vector.len, ResultScalar),
- };
+ });
}
pub fn RoundBitsUp(comptime Type: type, comptime multiple: u16) type {
- const ResultScalar = switch (@typeInfo(Scalar(Type))) {
+ return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = std.mem.alignForward(u16, int.bits, multiple) } }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
- };
- return switch (@typeInfo(Type)) {
- else => ResultScalar,
- .vector => |vector| @Vector(vector.len, ResultScalar),
- };
+ });
+}
+pub fn Log2Int(comptime Type: type) type {
+ return ChangeScalar(Type, math.Log2Int(Scalar(Type)));
+}
+pub fn Log2IntCeil(comptime Type: type) type {
+ return ChangeScalar(Type, math.Log2IntCeil(Scalar(Type)));
}
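
Aside (illustrative, not from the commit, assuming the definitions above): each helper maps a scalar transform over vectors and leaves scalars untouched:

comptime {
    const assert = @import("std").debug.assert;
    assert(AsSignedness(i32, .unsigned) == u32);
    assert(AsSignedness(@Vector(4, i32), .unsigned) == @Vector(4, u32));
    assert(ChangeScalar(@Vector(2, f64), bool) == @Vector(2, bool));
    assert(Log2Int(@Vector(4, u32)) == @Vector(4, u5));
}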
// inline to avoid a runtime `@splat`
pub inline fn splat(comptime Type: type, scalar: Scalar(Type)) Type {
@@ -78,18 +82,12 @@ inline fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
else => @compileError(@typeName(@TypeOf(cond))),
};
}
-pub fn sign(rhs: anytype) switch (@typeInfo(@TypeOf(rhs))) {
- else => bool,
- .vector => |vector| @Vector(vector.len, bool),
-} {
+pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) {
const ScalarInt = @Type(.{ .int = .{
.signedness = .unsigned,
.bits = @bitSizeOf(Scalar(@TypeOf(rhs))),
} });
- const VectorInt = switch (@typeInfo(@TypeOf(rhs))) {
- else => ScalarInt,
- .vector => |vector| @Vector(vector.len, ScalarInt),
- };
+ const VectorInt = ChangeScalar(@TypeOf(rhs), ScalarInt);
return @as(VectorInt, @bitCast(rhs)) & splat(VectorInt, @as(ScalarInt, 1) << @bitSizeOf(ScalarInt) - 1) != splat(VectorInt, 0);
}
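
Aside (illustrative, not from the commit): with `ChangeScalar`, `sign` returns `bool` for scalars and `@Vector(n, bool)` for vectors, true wherever the sign bit is set:

comptime {
    const assert = @import("std").debug.assert;
    assert(sign(@as(i8, -1)));
    assert(!sign(@as(i8, 1)));
    assert(sign(@as(f32, -0.0))); // sign bit is set even for negative zero
}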
fn boolAnd(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
test/behavior/abs.zig
@@ -96,7 +96,6 @@ test "@abs big int <= 128 bits" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime testAbsSignedBigInt();
try testAbsSignedBigInt();
@@ -211,7 +210,6 @@ fn testAbsFloats(comptime T: type) !void {
test "@abs int vectors" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/basic.zig
@@ -837,7 +837,6 @@ test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@export(&var_to_export, .{ .name = "opaque_extern_var" });
test/behavior/bitcast.zig
@@ -384,7 +384,6 @@ test "comptime bitcast with fields following f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const FloatT = extern struct { f: f80, x: u128 align(16) };
const x: FloatT = .{ .f = 0.5, .x = 123 };
test/behavior/bitreverse.zig
@@ -12,7 +12,6 @@ test "@bitReverse" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testBitReverse();
@@ -128,7 +127,6 @@ test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector8();
try vector8();
@@ -149,7 +147,6 @@ test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector16();
try vector16();
@@ -170,7 +167,6 @@ test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector24();
try vector24();
test/behavior/byteswap.zig
@@ -39,7 +39,6 @@ test "@byteSwap integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const ByteSwapIntTest = struct {
fn run() !void {
@@ -100,7 +99,6 @@ test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector8();
try vector8();
@@ -121,7 +119,6 @@ test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector16();
try vector16();
@@ -142,7 +139,6 @@ test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime vector24();
try vector24();
test/behavior/cast.zig
@@ -617,7 +617,6 @@ test "@intCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
@@ -2520,7 +2519,6 @@ test "@ptrFromInt on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
@@ -2592,7 +2590,6 @@ test "@intFromFloat on vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -2693,7 +2690,6 @@ test "@intCast vector of signed integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
test/behavior/extern.zig
@@ -5,7 +5,6 @@ const expect = std.testing.expect;
test "anyopaque extern symbol" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const a = @extern(*anyopaque, .{ .name = "a_mystery_symbol" });
test/behavior/floatop.zig
@@ -135,7 +135,6 @@ test "cmp f32" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCmp(f32);
try comptime testCmp(f32);
@@ -144,7 +143,6 @@ test "cmp f32" {
test "cmp f64" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
try testCmp(f64);
@@ -400,7 +398,6 @@ test "@sqrt f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
try testSqrt(f32);
test/behavior/fn.zig
@@ -429,7 +429,6 @@ test "implicit cast function to function ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S1 = struct {
export fn someFunctionThatReturnsAValue() c_int {
test/behavior/math.zig
@@ -85,7 +85,6 @@ test "@clz big ints" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testClzBigInts();
@@ -103,7 +102,6 @@ fn testOneClz(comptime T: type, x: T) u32 {
test "@clz vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -173,7 +171,6 @@ fn testOneCtz(comptime T: type, x: T) u32 {
}
test "@ctz 128-bit integers" {
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -198,7 +195,6 @@ test "@ctz vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCtzVectors();
try comptime testCtzVectors();
@@ -1694,9 +1690,6 @@ test "vector comparison" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and
- !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .avx2)) return error.SkipZigTest;
-
const S = struct {
fn doTheTest() !void {
var a: @Vector(6, i32) = [_]i32{ 1, 3, -1, 5, 7, 9 };
@@ -1785,7 +1778,6 @@ test "mod lazy values" {
test "@clz works on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1807,7 +1799,6 @@ test "runtime comparison to NaN is comptime-known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@@ -1838,7 +1829,6 @@ test "runtime int comparison to inf is comptime-known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
test/behavior/maximum_minimum.zig
@@ -34,7 +34,6 @@ test "@max on vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -90,7 +89,6 @@ test "@min for vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -260,7 +258,6 @@ test "@min/@max notices bounds from vector types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var x: @Vector(2, u16) = .{ 30, 67 };
var y: @Vector(2, u32) = .{ 20, 500 };
@@ -303,7 +300,6 @@ test "@min/@max notices bounds from vector types when element of comptime-known
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var x: @Vector(2, u32) = .{ 1_000_000, 12345 };
_ = &x;
@@ -375,7 +371,6 @@ test "@min/@max with runtime vectors of signed and unsigned integers of same siz
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn min(a: @Vector(2, i32), b: @Vector(2, u32)) @Vector(2, i32) {
test/behavior/popcount.zig
@@ -82,7 +82,6 @@ test "@popCount vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try comptime testPopCountVectors();
try testPopCountVectors();
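The same guard removal applies to @popCount, which operates elementwise on integer vectors and returns a vector of an unsigned type just wide enough to hold the count (u4 for u8 lanes). A minimal sketch (illustrative only, not part of this commit):

    const std = @import("std");

    test "@popCount vector sketch" {
        var v: @Vector(2, u8) = .{ 0b1011, 0xFF };
        _ = &v;
        const counts = @popCount(v); // @Vector(2, u4): .{ 3, 8 }
        try std.testing.expect(@reduce(.And, counts == @Vector(2, u4){ 3, 8 }));
    }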
test/behavior/select.zig
@@ -70,7 +70,7 @@ fn selectArrays() !void {
test "@select compare result" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
const S = struct {
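Here the x86_64 skip is broadened rather than removed: @select, which merges two vectors lanewise under a bool-vector predicate, now skips on the self-hosted x86_64 backend for all object formats. A minimal sketch of the builtin (illustrative only, not part of this commit):

    const std = @import("std");

    test "@select sketch" {
        var a: @Vector(4, i32) = .{ 1, 50, 3, 70 };
        var b: @Vector(4, i32) = .{ 40, 30, 20, 10 };
        _ = .{ &a, &b };
        const picked = @select(i32, a > b, a, b); // lanewise max: .{ 40, 50, 20, 70 }
        try std.testing.expect(@reduce(.And, picked == @Vector(4, i32){ 40, 50, 20, 70 }));
    }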
test/behavior/union.zig
@@ -282,6 +282,7 @@ test "cast union to tag type of union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCastUnionToTag();
try comptime testCastUnionToTag();
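This union.zig hunk goes the other way, adding an object-format guard to "cast union to tag type of union". For reference, a tagged union coerces directly to its tag enum; a minimal sketch of what such a test exercises (illustrative only, not the commit's testCastUnionToTag):

    const std = @import("std");

    const Tag = enum { a, b };
    const U = union(Tag) { a: u8, b: void };

    test "cast union to tag sketch" {
        var u: U = .{ .a = 1 };
        _ = &u; // force a runtime value
        try std.testing.expect(@as(Tag, u) == .a);
    }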
test/behavior/vector.zig
@@ -31,7 +31,6 @@ test "vector wrap operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -652,7 +651,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -934,7 +932,6 @@ test "saturating add" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -969,7 +966,6 @@ test "saturating subtraction" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -989,7 +985,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1018,12 +1013,12 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -1469,7 +1464,6 @@ test "compare vectors with different element types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
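The saturating-arithmetic hunks above adjust skips for the +|, -|, *|, and <<| operators, which clamp at the operand type's bounds instead of wrapping and apply elementwise to vectors. A minimal sketch (illustrative only, not part of this commit):

    const std = @import("std");

    test "saturating vector add sketch" {
        var a: @Vector(4, u8) = @splat(250);
        _ = &a;
        const sum = a +| @as(@Vector(4, u8), @splat(10)); // each lane clamps at 255
        try std.testing.expect(@reduce(.And, sum == @as(@Vector(4, u8), @splat(255))));
    }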
test/cases/compile_errors/@import_zon_bad_type.zig
@@ -117,9 +117,9 @@ export fn testMutablePointer() void {
// tmp.zig:37:38: note: imported here
// neg_inf.zon:1:1: error: expected type '?u8'
// tmp.zig:57:28: note: imported here
-// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_518'
+// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_522'
// tmp.zig:62:39: note: imported here
-// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_520'
+// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_524'
// tmp.zig:67:44: note: imported here
-// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_523'
+// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_527'
// tmp.zig:72:50: note: imported here
test/cases/compile_errors/anytype_param_requires_comptime.zig
@@ -15,6 +15,6 @@ pub export fn entry() void {
// error
//
// :7:25: error: unable to resolve comptime value
-// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_492.C' must be comptime-known
+// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_496.C' must be comptime-known
// :4:16: note: struct requires comptime because of this field
// :4:16: note: types are not available at runtime
test/cases/compile_errors/bogus_method_call_on_slice.zig
@@ -16,5 +16,5 @@ pub export fn entry2() void {
//
// :3:6: error: no field or member function named 'copy' in '[]const u8'
// :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
-// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_496'
+// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_500'
// :12:6: note: struct declared here
test/cases/compile_errors/coerce_anon_struct.zig
@@ -6,6 +6,6 @@ export fn foo() void {
// error
//
-// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_485'
+// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_489'
// :3:16: note: struct declared here
// :1:11: note: struct declared here
test/cases/compile_errors/redundant_try.zig
@@ -44,9 +44,9 @@ comptime {
//
// :5:23: error: expected error union type, found 'comptime_int'
// :10:23: error: expected error union type, found '@TypeOf(.{})'
-// :15:23: error: expected error union type, found 'tmp.test2__struct_522'
+// :15:23: error: expected error union type, found 'tmp.test2__struct_526'
// :15:23: note: struct declared here
-// :20:27: error: expected error union type, found 'tmp.test3__struct_524'
+// :20:27: error: expected error union type, found 'tmp.test3__struct_528'
// :20:27: note: struct declared here
// :25:23: error: expected error union type, found 'struct { comptime *const [5:0]u8 = "hello" }'
// :31:13: error: expected error union type, found 'u32'
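Across these compile_errors cases, only the numeric suffixes of compiler-generated type names change, each shifting by the same amount (e.g. __struct_522 -> __struct_526): the suffixes come from internal indices, so inserting new well-known values ahead of them renumbers every later anonymous type. A minimal sketch (illustrative only) of code that produces such a generated name, modeled on the coerce_anon_struct case above:

    const T = struct { x: u32 };

    export fn foo() void {
        const anon = .{ .x = @as(u32, 123) }; // inferred type is named like 'tmp.foo__struct_NNN'
        const t: T = anon; // error: expected type 'tmp.T', found 'tmp.foo__struct_NNN'
        _ = t;
    }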
tools/lldb_pretty_printers.py
@@ -601,7 +601,8 @@ type_tag_handlers = {
'fn_void_no_args': lambda payload: 'fn() void',
'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.naked) noreturn',
'fn_ccc_void_no_args': lambda payload: 'fn() callconv(.c) void',
- 'single_const_pointer_to_comptime_int': lambda payload: '*const comptime_int',
+ 'ptr_usize': lambda payload: '*usize',
+ 'ptr_const_comptime_int': lambda payload: '*const comptime_int',
'manyptr_u8': lambda payload: '[*]u8',
'manyptr_const_u8': lambda payload: '[*]const u8',
'manyptr_const_u8_sentinel_0': lambda payload: '[*:0]const u8',
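The pretty printer tracks the tag rename visible above: single_const_pointer_to_comptime_int is gone, and the two new tags ptr_usize and ptr_const_comptime_int render as '*usize' and '*const comptime_int'. A minimal sketch (illustrative only, not part of this commit) of values carrying those two types:

    const std = @import("std");

    test "well-known pointer types sketch" {
        var n: usize = 0;
        const p: *usize = &n; // printed as '*usize'
        p.* = 1;
        try std.testing.expect(n == 1);
        comptime {
            const q: *const comptime_int = &42; // printed as '*const comptime_int'
            std.debug.assert(q.* == 42);
        }
    }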