Commit 9d422bff18
Changed files (8)
src/AstGen.zig
src/Sema.zig
src/arch/aarch64/CodeGen.zig
src/arch/arm/CodeGen.zig
src/arch/sparc64/CodeGen.zig
src/arch/wasm/CodeGen.zig
src/codegen/c/type.zig
src/type.zig
src/arch/aarch64/CodeGen.zig
@@ -2577,7 +2577,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
});
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
},
@@ -2720,7 +2720,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else if (int_info.bits <= 64) {
@@ -2860,7 +2860,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else return self.fail("TODO implement mul_with_overflow for integers > u64/i64", .{});
@@ -2993,7 +2993,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
});
try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg });
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else {
@@ -3780,7 +3780,7 @@ fn genInlineMemset(
const val_reg = switch (val) {
.register => |r| r,
- else => try self.copyToTmpRegister(Type.initTag(.u8), val),
+ else => try self.copyToTmpRegister(Type.u8, val),
};
const val_reg_lock = self.register_manager.lockReg(val_reg);
defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4330,7 +4330,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
- try self.genSetReg(Type.initTag(.u64), .x30, .{
+ try self.genSetReg(Type.u64, .x30, .{
.linker_load = .{
.type = .got,
.sym_index = sym_index,
@@ -4339,7 +4339,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
- try self.genSetReg(Type.initTag(.u64), .x30, .{
+ try self.genSetReg(Type.u64, .x30, .{
.linker_load = .{
.type = .got,
.sym_index = sym_index,
@@ -4379,7 +4379,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name);
- try self.genSetReg(Type.initTag(.u64), .x30, .{
+ try self.genSetReg(Type.u64, .x30, .{
.linker_load = .{
.type = .import,
.sym_index = sym_index,
@@ -4536,7 +4536,7 @@ fn cmp(
var opt_buffer: Type.Payload.ElemType = undefined;
const payload_ty = lhs_ty.optionalChild(&opt_buffer);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- break :blk Type.initTag(.u1);
+ break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
} else {
@@ -4546,9 +4546,9 @@ fn cmp(
.Float => return self.fail("TODO ARM cmp floats", .{}),
.Enum => lhs_ty.intTagType(),
.Int => lhs_ty,
- .Bool => Type.initTag(.u1),
+ .Bool => Type.u1,
.Pointer => Type.usize,
- .ErrorSet => Type.initTag(.u16),
+ .ErrorSet => Type.u16,
else => unreachable,
};
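The aarch64 hunks are a mechanical substitution: every `Type.initTag(.uN)` constructor call becomes a pre-declared constant backed by an InternPool index. A minimal sketch of why that works, assuming a simplified `InternPool.Index` and `Type`; the real constant declarations appear in the src/type.zig hunks at the end of this commit:

```zig
const std = @import("std");

const InternPool = struct {
    // A few pre-assigned indices; the real pool has many more.
    pub const Index = enum(u32) { none, u1_type, u8_type, u16_type, _ };
};

const Type = struct {
    ip_index: InternPool.Index,

    // `@"u1"` declares an identifier literally named `u1`,
    // so call sites can write `Type.u1` with no constructor.
    pub const @"u1": Type = .{ .ip_index = .u1_type };
    pub const @"u8": Type = .{ .ip_index = .u8_type };
};

test "interned constants compare by index" {
    try std.testing.expect(Type.u1.ip_index == .u1_type);
    try std.testing.expect(Type.u1.ip_index != Type.u8.ip_index);
}
```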
src/arch/arm/CodeGen.zig
@@ -1637,7 +1637,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
});
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else if (int_info.bits == 32) {
@@ -1750,7 +1750,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
});
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else if (int_info.bits <= 32) {
@@ -1848,7 +1848,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
});
// strb rdlo, [...]
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .register = rdlo });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .register = rdlo });
break :result MCValue{ .stack_offset = stack_offset };
} else {
@@ -1983,7 +1983,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
});
try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg });
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+ try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
break :result MCValue{ .stack_offset = stack_offset };
} else {
@@ -4086,7 +4086,7 @@ fn genInlineMemset(
const val_reg = switch (val) {
.register => |r| r,
- else => try self.copyToTmpRegister(Type.initTag(.u8), val),
+ else => try self.copyToTmpRegister(Type.u8, val),
};
const val_reg_lock = self.register_manager.lockReg(val_reg);
defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4485,7 +4485,7 @@ fn cmp(
var opt_buffer: Type.Payload.ElemType = undefined;
const payload_ty = lhs_ty.optionalChild(&opt_buffer);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- break :blk Type.initTag(.u1);
+ break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
} else {
@@ -4495,9 +4495,9 @@ fn cmp(
.Float => return self.fail("TODO ARM cmp floats", .{}),
.Enum => lhs_ty.intTagType(),
.Int => lhs_ty,
- .Bool => Type.initTag(.u1),
+ .Bool => Type.u1,
.Pointer => Type.usize,
- .ErrorSet => Type.initTag(.u16),
+ .ErrorSet => Type.u16,
else => unreachable,
};
@@ -5367,7 +5367,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
1, 4 => {
const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
- } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
+ } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@@ -5390,7 +5390,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
2 => {
const offset = if (stack_offset <= math.maxInt(u8)) blk: {
break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
- } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }));
+ } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));
_ = try self.addInst(.{
.tag = .strh,
@@ -5769,7 +5769,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
1, 4 => {
const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
break :blk Instruction.Offset.imm(imm);
- } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
+ } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@@ -5789,7 +5789,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
2 => {
const offset = if (stack_offset <= math.maxInt(u8)) blk: {
break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
- } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }));
+ } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));
_ = try self.addInst(.{
.tag = .strh,
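The `genSetStack`/`genSetStackArgument` context above picks between an immediate offset and a temporary register depending on whether the offset fits the instruction's immediate field; `Type.u32` is only the type used for the spilled offset. A hedged sketch of that choice, with illustrative names (`Offset.fromStackOffset` is not the backend's API):

```zig
const std = @import("std");
const math = std.math;

const Offset = union(enum) {
    imm: u12, // fits the ARM load/store immediate field
    reg: u5, // scratch register number

    fn fromStackOffset(stack_offset: u32, scratch: u5) Offset {
        if (math.cast(u12, stack_offset)) |imm| {
            return .{ .imm = imm };
        }
        // Too large for the 12-bit field: assume the caller has already
        // materialized the offset in `scratch` (copyToTmpRegister in the
        // real backend).
        return .{ .reg = scratch };
    }
};

test "small offsets stay immediate, large ones go through a register" {
    try std.testing.expect(Offset.fromStackOffset(8, 0) == .imm);
    try std.testing.expect(Offset.fromStackOffset(1 << 20, 0) == .reg);
}
```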
src/arch/sparc64/CodeGen.zig
@@ -1436,14 +1436,14 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.Vector => unreachable, // Handled by cmp_vector.
.Enum => lhs_ty.intTagType(),
.Int => lhs_ty,
- .Bool => Type.initTag(.u1),
+ .Bool => Type.u1,
.Pointer => Type.usize,
- .ErrorSet => Type.initTag(.u16),
+ .ErrorSet => Type.u16,
.Optional => blk: {
var opt_buffer: Type.Payload.ElemType = undefined;
const payload_ty = lhs_ty.optionalChild(&opt_buffer);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- break :blk Type.initTag(.u1);
+ break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
} else {
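For non-integer operands, `airCmp` compares through an integer stand-in type, and this commit only changes how those stand-ins are spelled (`Type.u1`, `Type.u16` instead of `initTag`). A small model of the width mapping, with invented names:

```zig
const std = @import("std");

const CmpClass = enum { Bool, Pointer, ErrorSet };

/// Bit width of the integer a value of this class is compared as.
fn cmpBits(class: CmpClass, ptr_bits: u16) u16 {
    return switch (class) {
        .Bool => 1, // compared as Type.u1
        .Pointer => ptr_bits, // compared as Type.usize
        .ErrorSet => 16, // compared as Type.u16
    };
}

test "comparison widths for non-integer operands" {
    try std.testing.expectEqual(@as(u16, 1), cmpBits(.Bool, 64));
    try std.testing.expectEqual(@as(u16, 64), cmpBits(.Pointer, 64));
    try std.testing.expectEqual(@as(u16, 16), cmpBits(.ErrorSet, 64));
}
```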
src/arch/wasm/CodeGen.zig
@@ -4272,7 +4272,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = result: {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const non_null_bit = try func.allocStack(Type.initTag(.u1));
+ const non_null_bit = try func.allocStack(Type.u1);
try func.emitWValue(non_null_bit);
try func.addImm32(1);
try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
@@ -5195,7 +5195,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
// We store the final result in here that will be validated
// if the optional is truly equal.
- var result = try func.ensureAllocLocal(Type.initTag(.i32));
+ var result = try func.ensureAllocLocal(Type.i32);
defer result.free(func);
try func.startBlock(.block, wasm.block_empty);
@@ -5658,7 +5658,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(mod));
- try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+ try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
@@ -5717,13 +5717,13 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
break :blk WValue{ .stack = {} };
};
- var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+ var overflow_local = try overflow_bit.toLocal(func, Type.u1);
defer overflow_local.free(func);
const result_ptr = try func.allocStack(result_ty);
try func.store(result_ptr, high_op_res, Type.u64, 0);
try func.store(result_ptr, tmp_op, Type.u64, 8);
- try func.store(result_ptr, overflow_local, Type.initTag(.u1), 16);
+ try func.store(result_ptr, overflow_local, Type.u1, 16);
return result_ptr;
}
@@ -5774,13 +5774,13 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const shr = try func.binOp(result, rhs_final, lhs_ty, .shr);
break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
};
- var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+ var overflow_local = try overflow_bit.toLocal(func, Type.u1);
defer overflow_local.free(func);
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(mod));
- try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+ try func.store(result_ptr, overflow_local, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
@@ -5800,7 +5800,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// We store the bit if it's overflowed or not in this. As it's zero-initialized
// we only need to update it if an overflow (or underflow) occurred.
- var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
+ var overflow_bit = try func.ensureAllocLocal(Type.u1);
defer overflow_bit.free(func);
const int_info = lhs_ty.intInfo(mod);
@@ -5955,7 +5955,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.typeOfIndex(inst));
try func.store(result_ptr, bin_op_local, lhs_ty, 0);
const offset = @intCast(u32, lhs_ty.abiSize(mod));
- try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+ try func.store(result_ptr, overflow_bit, Type.u1, offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
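All of the wasm overflow hunks write the same layout: the wrapped arithmetic result at offset 0 and the `u1` overflow flag at `lhs_ty.abiSize(mod)`. A sketch under that assumption; `OverflowLayout` is a made-up model, not CodeGen's representation:

```zig
const std = @import("std");

const OverflowLayout = struct {
    result_offset: u32 = 0,
    overflow_offset: u32,

    fn forAbiSize(abi_size: u32) OverflowLayout {
        // The u1 flag is stored immediately after the result.
        return .{ .overflow_offset = abi_size };
    }
};

test "overflow bit sits right after the result" {
    const layout = OverflowLayout.forAbiSize(16); // e.g. a 128-bit result
    try std.testing.expectEqual(@as(u32, 0), layout.result_offset);
    try std.testing.expectEqual(@as(u32, 16), layout.overflow_offset);
}
```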
src/codegen/c/type.zig
@@ -1471,7 +1471,7 @@ pub const CType = extern union {
else
info.pointee_type;
- if (if (info.size == .C and pointee_ty.tag() == .u8)
+ if (if (info.size == .C and pointee_ty.ip_index == .u8_type)
Tag.char.toIndex()
else
try lookup.typeToIndex(pointee_ty, .forward)) |child_idx|
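The C backend special-cases C-size pointers whose pointee is exactly `u8` so they lower to `char`; the hunk above only swaps the legacy `tag()` test for an `ip_index` test. A minimal model of the check, with illustrative names:

```zig
const std = @import("std");

const Size = enum { One, Many, Slice, C };
const IpIndex = enum { none, u8_type, i8_type };

fn cPointeeName(size: Size, pointee: IpIndex) []const u8 {
    // Matches the rewritten condition: only [*c] pointers to u8 get `char`.
    if (size == .C and pointee == .u8_type) return "char";
    return "uint8_t"; // placeholder for the general lowering path
}

test "C pointers to u8 lower to char" {
    try std.testing.expectEqualStrings("char", cPointeeName(.C, .u8_type));
    try std.testing.expectEqualStrings("uint8_t", cPointeeName(.Many, .u8_type));
}
```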
src/AstGen.zig
@@ -10271,6 +10271,8 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.i32_type),
as_ty | @enumToInt(Zir.Inst.Ref.u64_type),
as_ty | @enumToInt(Zir.Inst.Ref.i64_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.u128_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.i128_type),
as_ty | @enumToInt(Zir.Inst.Ref.usize_type),
as_ty | @enumToInt(Zir.Inst.Ref.isize_type),
as_ty | @enumToInt(Zir.Inst.Ref.c_char_type),
@@ -10296,11 +10298,30 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.comptime_int_type),
as_ty | @enumToInt(Zir.Inst.Ref.comptime_float_type),
as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.anyframe_type),
as_ty | @enumToInt(Zir.Inst.Ref.null_type),
as_ty | @enumToInt(Zir.Inst.Ref.undefined_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.atomic_order_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.atomic_rmw_op_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.calling_convention_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.address_space_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.float_mode_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.reduce_op_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.call_modifier_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.prefetch_options_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.export_options_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.extern_options_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.type_info_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.manyptr_u8_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type),
as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type),
- as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_sentinel_0_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type),
as_comptime_int | @enumToInt(Zir.Inst.Ref.zero),
as_comptime_int | @enumToInt(Zir.Inst.Ref.one),
as_bool | @enumToInt(Zir.Inst.Ref.bool_true),
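The `as_ty | @enumToInt(...)` cases work by packing a coercion kind and a well-known `Ref` into one integer so a single switch covers every (kind, type) pair; this hunk extends the table to more interned type refs. A sketch of the packing idea only; the constant `4 << 32` and the enum values are invented, not Zir's real encoding:

```zig
const std = @import("std");

const Ref = enum(u32) { u8_type, u128_type, anyframe_type };

// Coercion kind in the upper half of the key (illustrative value).
const as_ty: u64 = 4 << 32;

fn key(ref: Ref) u64 {
    return as_ty | @enumToInt(ref);
}

test "each well-known type gets a distinct switch key" {
    try std.testing.expect(key(.u128_type) != key(.anyframe_type));
    try std.testing.expectEqual(as_ty | 1, key(.u128_type));
}
```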
src/Sema.zig
@@ -31478,19 +31478,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
};
return switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
@@ -32971,19 +32958,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
};
switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.error_set_single,
.error_set,
.error_set_merged,
@@ -33175,19 +33149,6 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
}
switch (ty.tag()) {
- .u1 => return .u1_type,
- .u8 => return .u8_type,
- .i8 => return .i8_type,
- .u16 => return .u16_type,
- .u29 => return .u29_type,
- .i16 => return .i16_type,
- .u32 => return .u32_type,
- .i32 => return .i32_type,
- .u64 => return .u64_type,
- .i64 => return .i64_type,
- .u128 => return .u128_type,
- .i128 => return .i128_type,
-
.manyptr_u8 => return .manyptr_u8_type,
.manyptr_const_u8 => return .manyptr_const_u8_type,
.single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type,
@@ -33617,19 +33578,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
}
return switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
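These Sema deletions are safe because fixed-width integer types now carry a non-`.none` `ip_index` and are resolved before the legacy-tag switch ever runs, making the removed cases unreachable. A hedged model of that control flow, with all names simplified:

```zig
const std = @import("std");

const IpIndex = enum { none, u1_type, u8_type };
const LegacyTag = enum { error_set, @"struct" };

const Ty = struct {
    ip_index: IpIndex = .none,
    legacy_tag: LegacyTag = .error_set,
};

fn isInt(ty: Ty) bool {
    // Interned path first; the legacy switch below no longer needs
    // integer cases at all.
    switch (ty.ip_index) {
        .u1_type, .u8_type => return true,
        .none => {},
    }
    return switch (ty.legacy_tag) {
        .error_set, .@"struct" => false,
    };
}

test "interned ints never reach the legacy switch" {
    try std.testing.expect(isInt(.{ .ip_index = .u8_type }));
    try std.testing.expect(!isInt(.{}));
}
```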
src/type.zig
@@ -107,20 +107,6 @@ pub const Type = struct {
}
}
switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
- => return .Int,
-
.error_set,
.error_set_single,
.error_set_inferred,
@@ -589,26 +575,6 @@ pub const Type = struct {
if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true;
switch (a.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
- => {
- if (b.zigTypeTag(mod) != .Int) return false;
- if (b.isNamedInt()) return false;
- const info_a = a.intInfo(mod);
- const info_b = b.intInfo(mod);
- return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits;
- },
-
.error_set_inferred => {
// Inferred error sets are only equal if both are inferred
// and they share the same pointer.
@@ -926,26 +892,6 @@ pub const Type = struct {
return;
}
switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
- => {
- // Arbitrary sized integers.
- std.hash.autoHash(hasher, std.builtin.TypeId.Int);
- const info = ty.intInfo(mod);
- std.hash.autoHash(hasher, info.signedness);
- std.hash.autoHash(hasher, info.bits);
- },
-
.error_set,
.error_set_single,
.error_set_merged,
@@ -1183,18 +1129,6 @@ pub const Type = struct {
.legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
};
} else switch (self.legacy.ptr_otherwise.tag) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.const_slice_u8_sentinel_0,
@@ -1435,20 +1369,6 @@ pub const Type = struct {
while (true) {
const t = ty.tag();
switch (t) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
- => return writer.writeAll(@tagName(t)),
-
.empty_struct, .empty_struct_literal => return writer.writeAll("struct {}"),
.@"struct" => {
@@ -1775,20 +1695,6 @@ pub const Type = struct {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
- => try writer.writeAll(@tagName(t)),
-
.empty_struct_literal => try writer.writeAll("@TypeOf(.{})"),
.empty_struct => {
@@ -2057,16 +1963,6 @@ pub const Type = struct {
pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
if (self.ip_index != .none) return self.ip_index.toValue();
switch (self.tag()) {
- .u1 => return Value{ .ip_index = .u1_type, .legacy = undefined },
- .u8 => return Value{ .ip_index = .u8_type, .legacy = undefined },
- .i8 => return Value{ .ip_index = .i8_type, .legacy = undefined },
- .u16 => return Value{ .ip_index = .u16_type, .legacy = undefined },
- .u29 => return Value{ .ip_index = .u29_type, .legacy = undefined },
- .i16 => return Value{ .ip_index = .i16_type, .legacy = undefined },
- .u32 => return Value{ .ip_index = .u32_type, .legacy = undefined },
- .i32 => return Value{ .ip_index = .i32_type, .legacy = undefined },
- .u64 => return Value{ .ip_index = .u64_type, .legacy = undefined },
- .i64 => return Value{ .ip_index = .i64_type, .legacy = undefined },
.single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined },
.const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined },
.const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined },
@@ -2162,19 +2058,6 @@ pub const Type = struct {
.enum_tag => unreachable, // it's a value, not a type
};
switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.const_slice_u8,
.const_slice_u8_sentinel_0,
.array_u8_sentinel_0,
@@ -2404,19 +2287,6 @@ pub const Type = struct {
.enum_tag => unreachable, // it's a value, not a type
};
return switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
@@ -2752,10 +2622,6 @@ pub const Type = struct {
else => null,
};
switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
-
.array_u8_sentinel_0,
.array_u8,
.@"opaque",
@@ -2806,12 +2672,6 @@ pub const Type = struct {
return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) };
},
- .i16, .u16 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(16, target) },
- .u29 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(29, target) },
- .i32, .u32 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(32, target) },
- .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) },
- .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) },
-
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = ty.optionalChild(&buf);
@@ -3208,11 +3068,6 @@ pub const Type = struct {
return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true);
},
- .u1,
- .u8,
- .i8,
- => return AbiSizeAdvanced{ .scalar = 1 },
-
.array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data },
.array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 },
.array => {
@@ -3293,12 +3148,6 @@ pub const Type = struct {
.error_set_single,
=> return AbiSizeAdvanced{ .scalar = 2 },
- .i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) },
- .u29 => return AbiSizeAdvanced{ .scalar = intAbiSize(29, target) },
- .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) },
- .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) },
- .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) },
-
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = ty.optionalChild(&buf);
@@ -3497,14 +3346,6 @@ pub const Type = struct {
.inferred_alloc_mut => unreachable,
.@"opaque" => unreachable,
- .u1 => return 1,
- .u8, .i8 => return 8,
- .i16, .u16 => return 16,
- .u29 => return 29,
- .i32, .u32 => return 32,
- .i64, .u64 => return 64,
- .u128, .i128 => return 128,
-
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
if (struct_obj.layout != .Packed) {
@@ -4398,47 +4239,25 @@ pub const Type = struct {
/// Returns true if and only if the type is a fixed-width, signed integer.
pub fn isSignedInt(ty: Type, mod: *const Module) bool {
- if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .int_type => |int_type| return int_type.signedness == .signed,
- .simple_type => |s| return switch (s) {
- .c_char, .isize, .c_short, .c_int, .c_long, .c_longlong => true,
+ return switch (ty.ip_index) {
+ .c_char_type, .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true,
+ .none => false,
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| int_type.signedness == .signed,
else => false,
},
- else => return false,
- };
- return switch (ty.tag()) {
- .i8,
- .i16,
- .i32,
- .i64,
- .i128,
- => true,
-
- else => false,
};
}
/// Returns true if and only if the type is a fixed-width, unsigned integer.
pub fn isUnsignedInt(ty: Type, mod: *const Module) bool {
- if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .int_type => |int_type| return int_type.signedness == .unsigned,
- .simple_type => |s| return switch (s) {
- .usize, .c_ushort, .c_uint, .c_ulong, .c_ulonglong => true,
+ return switch (ty.ip_index) {
+ .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true,
+ .none => false,
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| int_type.signedness == .unsigned,
else => false,
},
- else => return false,
- };
- return switch (ty.tag()) {
- .u1,
- .u8,
- .u16,
- .u29,
- .u32,
- .u64,
- .u128,
- => true,
-
- else => false,
};
}
@@ -4459,19 +4278,6 @@ pub const Type = struct {
while (true) switch (ty.ip_index) {
.none => switch (ty.tag()) {
- .u1 => return .{ .signedness = .unsigned, .bits = 1 },
- .u8 => return .{ .signedness = .unsigned, .bits = 8 },
- .i8 => return .{ .signedness = .signed, .bits = 8 },
- .u16 => return .{ .signedness = .unsigned, .bits = 16 },
- .i16 => return .{ .signedness = .signed, .bits = 16 },
- .u29 => return .{ .signedness = .unsigned, .bits = 29 },
- .u32 => return .{ .signedness = .unsigned, .bits = 32 },
- .i32 => return .{ .signedness = .signed, .bits = 32 },
- .u64 => return .{ .signedness = .unsigned, .bits = 64 },
- .i64 => return .{ .signedness = .signed, .bits = 64 },
- .u128 => return .{ .signedness = .unsigned, .bits = 128 },
- .i128 => return .{ .signedness = .signed, .bits = 128 },
-
.enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
.enum_simple => {
@@ -4664,50 +4470,34 @@ pub const Type = struct {
}
pub fn isNumeric(ty: Type, mod: *const Module) bool {
- if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .int_type => true,
- .simple_type => |s| return switch (s) {
- .f16,
- .f32,
- .f64,
- .f80,
- .f128,
- .c_longdouble,
- .comptime_int,
- .comptime_float,
- .usize,
- .isize,
- .c_char,
- .c_short,
- .c_ushort,
- .c_int,
- .c_uint,
- .c_long,
- .c_ulong,
- .c_longlong,
- .c_ulonglong,
- => true,
+ return switch (ty.ip_index) {
+ .f16_type,
+ .f32_type,
+ .f64_type,
+ .f80_type,
+ .f128_type,
+ .c_longdouble_type,
+ .comptime_int_type,
+ .comptime_float_type,
+ .usize_type,
+ .isize_type,
+ .c_char_type,
+ .c_short_type,
+ .c_ushort_type,
+ .c_int_type,
+ .c_uint_type,
+ .c_long_type,
+ .c_ulong_type,
+ .c_longlong_type,
+ .c_ulonglong_type,
+ => true,
+
+ .none => false,
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => true,
else => false,
},
- else => false,
- };
- return switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
- => true,
-
- else => false,
};
}
@@ -4785,19 +4575,6 @@ pub const Type = struct {
};
while (true) switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.error_union,
.error_set_single,
.error_set,
@@ -4995,19 +4772,6 @@ pub const Type = struct {
};
return switch (ty.tag()) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
@@ -5764,19 +5528,6 @@ pub const Type = struct {
/// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`.
pub const Tag = enum(usize) {
// The first section of this enum are tags that require no payload.
- u1,
- u8,
- i8,
- u16,
- i16,
- u29,
- u32,
- i32,
- u64,
- i64,
- u128,
- i128,
-
manyptr_u8,
manyptr_const_u8,
manyptr_const_u8_sentinel_0,
@@ -5839,19 +5590,6 @@ pub const Type = struct {
pub fn Type(comptime t: Tag) type {
return switch (t) {
- .u1,
- .u8,
- .i8,
- .u16,
- .i16,
- .u29,
- .u32,
- .i32,
- .u64,
- .i64,
- .u128,
- .i128,
-
.single_const_pointer_to_comptime_int,
.anyerror_void_error_union,
.const_slice_u8,
@@ -6203,19 +5941,19 @@ pub const Type = struct {
};
};
- pub const @"u1" = initTag(.u1);
- pub const @"u8" = initTag(.u8);
- pub const @"u16" = initTag(.u16);
- pub const @"u29" = initTag(.u29);
- pub const @"u32" = initTag(.u32);
- pub const @"u64" = initTag(.u64);
- pub const @"u128" = initTag(.u128);
-
- pub const @"i8" = initTag(.i8);
- pub const @"i16" = initTag(.i16);
- pub const @"i32" = initTag(.i32);
- pub const @"i64" = initTag(.i64);
- pub const @"i128" = initTag(.i128);
+ pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined };
+ pub const @"u8": Type = .{ .ip_index = .u8_type, .legacy = undefined };
+ pub const @"u16": Type = .{ .ip_index = .u16_type, .legacy = undefined };
+ pub const @"u29": Type = .{ .ip_index = .u29_type, .legacy = undefined };
+ pub const @"u32": Type = .{ .ip_index = .u32_type, .legacy = undefined };
+ pub const @"u64": Type = .{ .ip_index = .u64_type, .legacy = undefined };
+ pub const @"u128": Type = .{ .ip_index = .u128_type, .legacy = undefined };
+
+ pub const @"i8": Type = .{ .ip_index = .i8_type, .legacy = undefined };
+ pub const @"i16": Type = .{ .ip_index = .i16_type, .legacy = undefined };
+ pub const @"i32": Type = .{ .ip_index = .i32_type, .legacy = undefined };
+ pub const @"i64": Type = .{ .ip_index = .i64_type, .legacy = undefined };
+ pub const @"i128": Type = .{ .ip_index = .i128_type, .legacy = undefined };
pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined };
pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined };