Commit 5e636643d2
Changed files (25)
src
arch
aarch64
arm
riscv64
sparc64
wasm
x86_64
codegen
Liveness
src/arch/aarch64/CodeGen.zig
@@ -1030,7 +1030,7 @@ fn allocMem(
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const mod = self.bin_file.options.module.?;
- const elem_ty = self.typeOfIndex(inst).elemType();
+ const elem_ty = self.typeOfIndex(inst).childType(mod);
if (!elem_ty.hasRuntimeBits(mod)) {
// return the stack offset 0. Stack offset 0 will be where all
@@ -1140,17 +1140,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const result: MCValue = switch (self.ret_mcv) {
.none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
.stack_offset => blk: {
// self.ret_mcv is an address to where this function
// should store its result into
const ret_ty = self.fn_type.fnReturnType();
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
// addr_reg will contain the address of where to store the
// result into
@@ -2406,9 +2403,9 @@ fn ptrArithmetic(
assert(rhs_ty.eql(Type.usize, mod));
const ptr_ty = lhs_ty;
- const elem_ty = switch (ptr_ty.ptrSize()) {
- .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
- else => ptr_ty.childType(),
+ const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+ .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+ else => ptr_ty.childType(mod),
};
const elem_size = elem_ty.abiSize(mod);
@@ -3024,8 +3021,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
const mod = self.bin_file.options.module.?;
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&opt_buf);
+ const payload_ty = optional_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
if (optional_ty.isPtrLikeOptional(mod)) {
// TODO should we reuse the operand here?
@@ -3459,7 +3455,7 @@ fn ptrElemVal(
maybe_inst: ?Air.Inst.Index,
) !MCValue {
const mod = self.bin_file.options.module.?;
- const elem_ty = ptr_ty.childType();
+ const elem_ty = ptr_ty.childType(mod);
const elem_size = @intCast(u32, elem_ty.abiSize(mod));
// TODO optimize for elem_sizes of 1, 2, 4, 8
@@ -3617,7 +3613,7 @@ fn reuseOperand(
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const mod = self.bin_file.options.module.?;
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
switch (ptr) {
@@ -3773,7 +3769,7 @@ fn genInlineMemset(
) !void {
const dst_reg = switch (dst) {
.register => |r| r,
- else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+ else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
};
const dst_reg_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4096,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
- const struct_ty = ptr_ty.childType();
+ const struct_ty = ptr_ty.childType(mod);
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.ptr_stack_offset => |off| {
@@ -4173,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const field_ptr = try self.resolveInst(extra.field_ptr);
- const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+ const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
switch (field_ptr) {
.ptr_stack_offset => |off| {
@@ -4254,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
- .Pointer => ty.childType(),
+ .Pointer => ty.childType(mod),
else => unreachable,
};
@@ -4280,11 +4276,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
try self.register_manager.getReg(ret_ptr_reg, null);
try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset });
@@ -4453,11 +4445,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
//
// self.ret_mcv is an address to where this function
// should store its result into
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
},
else => unreachable,
@@ -4533,8 +4521,7 @@ fn cmp(
const mod = self.bin_file.options.module.?;
const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Optional => blk: {
- var opt_buffer: Type.Payload.ElemType = undefined;
- const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+ const payload_ty = lhs_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -4850,8 +4837,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
const mod = self.bin_file.options.module.?;
const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = operand_ty.optionalChild(&buf);
+ const payload_ty = operand_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
break :blk .{ .ty = operand_ty, .bind = operand_bind };
@@ -4947,11 +4933,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -4973,11 +4960,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -4999,11 +4987,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -5025,11 +5014,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -5511,11 +5501,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5833,11 +5819,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
} else {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5957,12 +5939,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
- const array_ty = ptr_ty.childType();
- const array_len = @intCast(u32, array_ty.arrayLen());
+ const array_ty = ptr_ty.childType(mod);
+ const array_len = @intCast(u32, array_ty.arrayLen(mod));
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
@@ -6079,8 +6062,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const vector_ty = self.typeOfIndex(inst);
- const len = vector_ty.vectorLen();
+ const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
src/arch/arm/CodeGen.zig
@@ -1010,7 +1010,7 @@ fn allocMem(
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const mod = self.bin_file.options.module.?;
- const elem_ty = self.typeOfIndex(inst).elemType();
+ const elem_ty = self.typeOfIndex(inst).childType(mod);
if (!elem_ty.hasRuntimeBits(mod)) {
// As this stack item will never be dereferenced at runtime,
@@ -1117,17 +1117,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
}
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const result: MCValue = switch (self.ret_mcv) {
.none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
.stack_offset => blk: {
// self.ret_mcv is an address to where this function
// should store its result into
const ret_ty = self.fn_type.fnReturnType();
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
// addr_reg will contain the address of where to store the
// result into
@@ -2372,8 +2369,8 @@ fn ptrElemVal(
ptr_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- const elem_ty = ptr_ty.childType();
const mod = self.bin_file.options.module.?;
+ const elem_ty = ptr_ty.childType(mod);
const elem_size = @intCast(u32, elem_ty.abiSize(mod));
switch (elem_size) {
@@ -2474,7 +2471,8 @@ fn arrayElemVal(
array_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- const elem_ty = array_ty.childType();
+ const mod = self.bin_file.options.module.?;
+ const elem_ty = array_ty.childType(mod);
const mcv = try array_bind.resolveToMcv(self);
switch (mcv) {
@@ -2508,11 +2506,7 @@ fn arrayElemVal(
const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv };
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = elem_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(elem_ty);
return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst);
},
@@ -2659,8 +2653,8 @@ fn reuseOperand(
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
- const elem_ty = ptr_ty.elemType();
const mod = self.bin_file.options.module.?;
+ const elem_ty = ptr_ty.childType(mod);
const elem_size = @intCast(u32, elem_ty.abiSize(mod));
switch (ptr) {
@@ -2888,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
- const struct_ty = ptr_ty.childType();
+ const struct_ty = ptr_ty.childType(mod);
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.ptr_stack_offset => |off| {
@@ -3004,7 +2998,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const field_ptr = try self.resolveInst(extra.field_ptr);
- const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+ const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
if (struct_ty.zigTypeTag(mod) == .Union) {
return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
@@ -3898,9 +3892,9 @@ fn ptrArithmetic(
assert(rhs_ty.eql(Type.usize, mod));
const ptr_ty = lhs_ty;
- const elem_ty = switch (ptr_ty.ptrSize()) {
- .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
- else => ptr_ty.childType(),
+ const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+ .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+ else => ptr_ty.childType(mod),
};
const elem_size = @intCast(u32, elem_ty.abiSize(mod));
@@ -4079,7 +4073,7 @@ fn genInlineMemset(
) !void {
const dst_reg = switch (dst) {
.register => |r| r,
- else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+ else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
};
const dst_reg_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4229,7 +4223,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
- .Pointer => ty.childType(),
+ .Pointer => ty.childType(mod),
else => unreachable,
};
@@ -4259,11 +4253,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
try self.register_manager.getReg(.r0, null);
try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
@@ -4401,11 +4391,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
//
// self.ret_mcv is an address to where this function
// should store its result into
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
},
else => unreachable, // invalid return result
@@ -4482,8 +4468,7 @@ fn cmp(
const mod = self.bin_file.options.module.?;
const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Optional => blk: {
- var opt_buffer: Type.Payload.ElemType = undefined;
- const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+ const payload_ty = lhs_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -4837,11 +4822,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -4863,11 +4849,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -4924,11 +4911,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -4950,11 +4938,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_ptr = try self.resolveInst(un_op);
const ptr_ty = self.typeOf(un_op);
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const operand = try self.allocRegOrMem(elem_ty, true, null);
try self.load(operand, operand_ptr, ptr_ty);
@@ -5455,11 +5444,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5816,11 +5801,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
} else {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ty);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5908,12 +5889,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
- const array_ty = ptr_ty.childType();
- const array_len = @intCast(u32, array_ty.arrayLen());
+ const array_ty = ptr_ty.childType(mod);
+ const array_len = @intCast(u32, array_ty.arrayLen(mod));
const stack_offset = try self.allocMem(8, 8, inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -6026,8 +6008,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const vector_ty = self.typeOfIndex(inst);
- const len = vector_ty.vectorLen();
+ const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
src/arch/riscv64/CodeGen.zig
@@ -807,7 +807,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const mod = self.bin_file.options.module.?;
- const elem_ty = self.typeOfIndex(inst).elemType();
+ const elem_ty = self.typeOfIndex(inst).childType(mod);
const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
@@ -1099,9 +1099,9 @@ fn binOp(
switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
const ptr_ty = lhs_ty;
- const elem_ty = switch (ptr_ty.ptrSize()) {
- .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
- else => ptr_ty.childType(),
+ const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+ .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+ else => ptr_ty.childType(mod),
};
const elem_size = elem_ty.abiSize(mod);
@@ -1502,7 +1502,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
- const elem_ty = ptr_ty.elemType();
+ const mod = self.bin_file.options.module.?;
+ const elem_ty = ptr_ty.childType(mod);
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -2496,8 +2497,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const vector_ty = self.typeOfIndex(inst);
- const len = vector_ty.vectorLen();
+ const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
src/arch/sparc64/CodeGen.zig
@@ -838,8 +838,9 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const vector_ty = self.typeOfIndex(inst);
- const len = vector_ty.vectorLen();
+ const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
@@ -871,12 +872,13 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
- const array_ty = ptr_ty.childType();
- const array_len = @intCast(u32, array_ty.arrayLen());
+ const array_ty = ptr_ty.childType(mod);
+ const array_len = @intCast(u32, array_ty.arrayLen(mod));
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
@@ -1300,7 +1302,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const mod = self.bin_file.options.module.?;
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
- .Pointer => ty.childType(),
+ .Pointer => ty.childType(mod),
else => unreachable,
};
@@ -1440,8 +1442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.Pointer => Type.usize,
.ErrorSet => Type.u16,
.Optional => blk: {
- var opt_buffer: Type.Payload.ElemType = undefined;
- const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+ const payload_ty = lhs_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :blk Type.u1;
} else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -2447,6 +2448,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -2456,8 +2458,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const index_mcv = try self.resolveInst(bin_op.rhs);
const slice_ty = self.typeOf(bin_op.lhs);
- const elem_ty = slice_ty.childType();
- const mod = self.bin_file.options.module.?;
+ const elem_ty = slice_ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -2797,7 +2798,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const mod = self.bin_file.options.module.?;
- const elem_ty = self.typeOfIndex(inst).elemType();
+ const elem_ty = self.typeOfIndex(inst).childType(mod);
if (!elem_ty.hasRuntimeBits(mod)) {
// As this stack item will never be dereferenced at runtime,
@@ -3001,9 +3002,9 @@ fn binOp(
switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
const ptr_ty = lhs_ty;
- const elem_ty = switch (ptr_ty.ptrSize()) {
- .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
- else => ptr_ty.childType(),
+ const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+ .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+ else => ptr_ty.childType(mod),
};
const elem_size = elem_ty.abiSize(mod);
@@ -3019,7 +3020,7 @@ fn binOp(
// multiplying it with elem_size
const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
- const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
+ const addr = try self.binOp(tag, lhs, offset, Type.manyptr_u8, Type.usize, null);
return addr;
}
},
@@ -4042,11 +4043,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ty);
const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
@@ -4269,7 +4266,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const mod = self.bin_file.options.module.?;
- const elem_ty = ptr_ty.elemType();
+ const elem_ty = ptr_ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
switch (ptr) {
@@ -4729,7 +4726,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
- const struct_ty = ptr_ty.childType();
+ const struct_ty = ptr_ty.childType(mod);
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.ptr_stack_offset => |off| {
src/arch/wasm/CodeGen.zig
@@ -1542,7 +1542,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
const mod = func.bin_file.base.options.module.?;
const ptr_ty = func.typeOfIndex(inst);
- const pointee_ty = ptr_ty.childType();
+ const pointee_ty = ptr_ty.childType(mod);
if (func.initial_stack_value == .none) {
try func.initializeStack();
@@ -1766,8 +1766,7 @@ fn isByRef(ty: Type, mod: *const Module) bool {
},
.Optional => {
if (ty.isPtrLikeOptional(mod)) return false;
- var buf: Type.Payload.ElemType = undefined;
- const pl_type = ty.optionalChild(&buf);
+ const pl_type = ty.optionalChild(mod);
if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
return pl_type.hasRuntimeBitsIgnoreComptime(mod);
},
@@ -2139,7 +2138,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
- const child_type = func.typeOfIndex(inst).childType();
+ const child_type = func.typeOfIndex(inst).childType(mod);
var result = result: {
if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
@@ -2161,7 +2160,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
- const ret_ty = func.typeOf(un_op).childType();
+ const ret_ty = func.typeOf(un_op).childType(mod);
const fn_info = func.decl.ty.fnInfo();
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -2188,7 +2187,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const mod = func.bin_file.base.options.module.?;
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
- .Pointer => ty.childType(),
+ .Pointer => ty.childType(mod),
else => unreachable,
};
const ret_ty = fn_ty.fnReturnType();
@@ -2301,8 +2300,8 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
const ptr_ty = func.typeOf(bin_op.lhs);
- const ptr_info = ptr_ty.ptrInfo().data;
- const ty = ptr_ty.childType();
+ const ptr_info = ptr_ty.ptrInfo(mod);
+ const ty = ptr_ty.childType(mod);
if (ptr_info.host_size == 0) {
try func.store(lhs, rhs, ty, 0);
@@ -2360,8 +2359,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
if (ty.isPtrLikeOptional(mod)) {
return func.store(lhs, rhs, Type.usize, 0);
}
- var buf: Type.Payload.ElemType = undefined;
- const pl_ty = ty.optionalChild(&buf);
+ const pl_ty = ty.optionalChild(mod);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.store(lhs, rhs, Type.u8, 0);
}
@@ -2454,7 +2452,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const ty = func.air.getRefType(ty_op.ty);
const ptr_ty = func.typeOf(ty_op.operand);
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_info = ptr_ty.ptrInfo(mod);
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
@@ -2971,7 +2969,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
break :blk field_offset;
},
},
- .Pointer => switch (parent_ty.ptrSize()) {
+ .Pointer => switch (parent_ty.ptrSize(mod)) {
.Slice => switch (field_ptr.field_index) {
0 => 0,
1 => func.ptrSize(),
@@ -3001,11 +2999,7 @@ fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.In
const mod = func.bin_file.base.options.module.?;
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = decl.ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(decl.ty);
return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset);
}
@@ -3145,8 +3139,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
},
.Optional => if (ty.optionalReprIsPayload(mod)) {
- var buf: Type.Payload.ElemType = undefined;
- const pl_ty = ty.optionalChild(&buf);
+ const pl_ty = ty.optionalChild(mod);
if (val.castTag(.opt_payload)) |payload| {
return func.lowerConstant(payload.data, pl_ty);
} else if (val.isNull(mod)) {
@@ -3217,8 +3210,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
else => unreachable,
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const pl_ty = ty.optionalChild(&buf);
+ const pl_ty = ty.optionalChild(mod);
if (ty.optionalReprIsPayload(mod)) {
return func.emitUndefined(pl_ty);
}
@@ -3403,8 +3395,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
assert(!(lhs != .stack and rhs == .stack));
const mod = func.bin_file.base.options.module.?;
if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
+ const payload_ty = ty.optionalChild(mod);
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking against non-null for
@@ -3609,19 +3600,21 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
}
fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.StructField, ty_pl.payload);
const struct_ptr = try func.resolveInst(extra.data.struct_operand);
- const struct_ty = func.typeOf(extra.data.struct_operand).childType();
+ const struct_ty = func.typeOf(extra.data.struct_operand).childType(mod);
const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index);
func.finishAir(inst, result, &.{extra.data.struct_operand});
}
fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const struct_ptr = try func.resolveInst(ty_op.operand);
- const struct_ty = func.typeOf(ty_op.operand).childType();
+ const struct_ty = func.typeOf(ty_op.operand).childType(mod);
const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index);
func.finishAir(inst, result, &.{ty_op.operand});
@@ -3640,7 +3633,7 @@ fn structFieldPtr(
const offset = switch (struct_ty.containerLayout()) {
.Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => offset: {
- if (result_ty.ptrInfo().data.host_size != 0) {
+ if (result_ty.ptrInfo(mod).host_size != 0) {
break :offset @as(u32, 0);
}
break :offset struct_ty.packedStructFieldByteOffset(index, mod);
@@ -3981,7 +3974,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
+ const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
const payload_ty = err_ty.errorUnionPayload();
const result = result: {
@@ -4009,7 +4002,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.typeOf(ty_op.operand);
- const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
+ const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
const payload_ty = err_ty.errorUnionPayload();
const result = result: {
@@ -4156,11 +4149,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
}
fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
const op_ty = func.typeOf(un_op);
- const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
+ const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty;
const is_null = try func.isNull(operand, optional_ty, opcode);
const result = try is_null.toLocal(func, optional_ty);
func.finishAir(inst, result, &.{un_op});
@@ -4171,8 +4165,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
try func.emitWValue(operand);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
+ const payload_ty = optional_ty.optionalChild(mod);
if (!optional_ty.optionalReprIsPayload(mod)) {
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
@@ -4221,14 +4214,13 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.typeOf(ty_op.operand).childType();
+ const opt_ty = func.typeOf(ty_op.operand).childType(mod);
- const mod = func.bin_file.base.options.module.?;
const result = result: {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
+ const payload_ty = opt_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
@@ -4242,9 +4234,8 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const opt_ty = func.typeOf(ty_op.operand).childType();
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
+ const opt_ty = func.typeOf(ty_op.operand).childType(mod);
+ const payload_ty = opt_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
@@ -4325,13 +4316,13 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const slice_ty = func.typeOf(bin_op.lhs);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = slice_ty.childType();
- const mod = func.bin_file.base.options.module.?;
+ const elem_ty = slice_ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
// load pointer onto stack
@@ -4355,11 +4346,11 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
- const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const mod = func.bin_file.base.options.module.?;
+ const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod);
const elem_size = elem_ty.abiSize(mod);
const slice = try func.resolveInst(bin_op.lhs);
@@ -4436,7 +4427,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
- const array_ty = func.typeOf(ty_op.operand).childType();
+ const array_ty = func.typeOf(ty_op.operand).childType(mod);
const slice_ty = func.air.getRefType(ty_op.ty);
// create a slice on the stack
@@ -4448,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
// store the length of the array in the slice
- const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) };
+ const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) };
try func.store(slice_local, len, Type.usize, func.ptrSize());
func.finishAir(inst, slice_local, &.{ty_op.operand});
@@ -4470,13 +4461,13 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = func.typeOf(bin_op.lhs);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = ptr_ty.childType();
- const mod = func.bin_file.base.options.module.?;
+ const elem_ty = ptr_ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
// load pointer onto the stack
@@ -4507,12 +4498,12 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = func.typeOf(bin_op.lhs);
- const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const mod = func.bin_file.base.options.module.?;
+ const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod);
const elem_size = elem_ty.abiSize(mod);
const ptr = try func.resolveInst(bin_op.lhs);
@@ -4544,9 +4535,9 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const ptr = try func.resolveInst(bin_op.lhs);
const offset = try func.resolveInst(bin_op.rhs);
const ptr_ty = func.typeOf(bin_op.lhs);
- const pointee_ty = switch (ptr_ty.ptrSize()) {
- .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
- else => ptr_ty.childType(),
+ const pointee_ty = switch (ptr_ty.ptrSize(mod)) {
+ .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+ else => ptr_ty.childType(mod),
};
const valtype = typeToValtype(Type.usize, mod);
@@ -4565,6 +4556,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -4575,16 +4567,16 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
const ptr = try func.resolveInst(bin_op.lhs);
const ptr_ty = func.typeOf(bin_op.lhs);
const value = try func.resolveInst(bin_op.rhs);
- const len = switch (ptr_ty.ptrSize()) {
+ const len = switch (ptr_ty.ptrSize(mod)) {
.Slice => try func.sliceLen(ptr),
- .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }),
+ .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }),
.C, .Many => unreachable,
};
- const elem_ty = if (ptr_ty.ptrSize() == .One)
- ptr_ty.childType().childType()
+ const elem_ty = if (ptr_ty.ptrSize(mod) == .One)
+ ptr_ty.childType(mod).childType(mod)
else
- ptr_ty.childType();
+ ptr_ty.childType(mod);
const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
try func.memset(elem_ty, dst_ptr, len, value);
@@ -4686,13 +4678,13 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
}
fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const array_ty = func.typeOf(bin_op.lhs);
const array = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
- const elem_ty = array_ty.childType();
- const mod = func.bin_file.base.options.module.?;
+ const elem_ty = array_ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
if (isByRef(array_ty, mod)) {
@@ -4810,7 +4802,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.typeOfIndex(inst);
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
switch (operand) {
@@ -4859,7 +4851,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
const elem_size = elem_ty.bitSize(mod);
- const vector_len = @intCast(usize, ty.vectorLen());
+ const vector_len = @intCast(usize, ty.vectorLen(mod));
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
@@ -4895,7 +4887,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mask = func.air.values[extra.mask];
const mask_len = extra.mask_len;
- const child_ty = inst_ty.childType();
+ const child_ty = inst_ty.childType(mod);
const elem_size = child_ty.abiSize(mod);
// TODO: One of them could be by ref; handle in loop
@@ -4959,16 +4951,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const result_ty = func.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen());
+ const len = @intCast(usize, result_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
const result: WValue = result_value: {
switch (result_ty.zigTypeTag(mod)) {
.Array => {
const result = try func.allocStack(result_ty);
- const elem_ty = result_ty.childType();
+ const elem_ty = result_ty.childType(mod);
const elem_size = @intCast(u32, elem_ty.abiSize(mod));
- const sentinel = if (result_ty.sentinel()) |sent| blk: {
+ const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
break :blk try func.lowerConstant(sent, elem_ty);
} else null;
@@ -5190,8 +5182,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
const mod = func.bin_file.base.options.module.?;
assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
assert(op == .eq or op == .neq);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = operand_ty.optionalChild(&buf);
+ const payload_ty = operand_ty.optionalChild(mod);
// We store the final result in here that will be validated
// if the optional is truly equal.
@@ -5268,7 +5259,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
- const un_ty = func.typeOf(bin_op.lhs).childType();
+ const un_ty = func.typeOf(bin_op.lhs).childType(mod);
const tag_ty = func.typeOf(bin_op.rhs);
const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -5398,7 +5389,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
- const err_set_ty = func.typeOf(ty_op.operand).childType();
+ const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
const payload_ty = err_set_ty.errorUnionPayload();
const operand = try func.resolveInst(ty_op.operand);
@@ -5426,7 +5417,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try func.resolveInst(extra.field_ptr);
- const parent_ty = func.air.getRefType(ty_pl.ty).childType();
+ const parent_ty = func.air.getRefType(ty_pl.ty).childType(mod);
const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
const result = if (field_offset != 0) result: {
@@ -5455,10 +5446,10 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const dst = try func.resolveInst(bin_op.lhs);
const dst_ty = func.typeOf(bin_op.lhs);
- const ptr_elem_ty = dst_ty.childType();
+ const ptr_elem_ty = dst_ty.childType(mod);
const src = try func.resolveInst(bin_op.rhs);
const src_ty = func.typeOf(bin_op.rhs);
- const len = switch (dst_ty.ptrSize()) {
+ const len = switch (dst_ty.ptrSize(mod)) {
.Slice => blk: {
const slice_len = try func.sliceLen(dst);
if (ptr_elem_ty.abiSize(mod) != 1) {
@@ -5470,7 +5461,7 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :blk slice_len;
},
.One => @as(WValue, .{
- .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)),
+ .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)),
}),
.C, .Many => unreachable,
};
@@ -5551,7 +5542,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// As the names are global and the slice elements are constant, we do not have
// to make a copy of the ptr+value but can point towards them directly.
const error_table_symbol = try func.bin_file.getErrorTableSymbol();
- const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const name_ty = Type.const_slice_u8_sentinel_0;
const mod = func.bin_file.base.options.module.?;
const abi_size = name_ty.abiSize(mod);
@@ -5857,7 +5848,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addLabel(.local_set, overflow_bit.local.value);
break :blk try func.wrapOperand(bin_op, lhs_ty);
} else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: {
- const new_ty = Type.initTag(.u128);
+ const new_ty = Type.u128;
var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
defer lhs_upcast.free(func);
var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
@@ -5878,7 +5869,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = try func.callIntrinsic(
"__multi3",
&[_]Type{Type.i64} ** 4,
- Type.initTag(.i128),
+ Type.i128,
&.{ lhs, lhs_shifted, rhs, rhs_shifted },
);
const res = try func.allocLocal(lhs_ty);
@@ -5902,19 +5893,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mul1 = try func.callIntrinsic(
"__multi3",
&[_]Type{Type.i64} ** 4,
- Type.initTag(.i128),
+ Type.i128,
&.{ lhs_lsb, zero, rhs_msb, zero },
);
const mul2 = try func.callIntrinsic(
"__multi3",
&[_]Type{Type.i64} ** 4,
- Type.initTag(.i128),
+ Type.i128,
&.{ rhs_lsb, zero, lhs_msb, zero },
);
const mul3 = try func.callIntrinsic(
"__multi3",
&[_]Type{Type.i64} ** 4,
- Type.initTag(.i128),
+ Type.i128,
&.{ lhs_msb, zero, rhs_msb, zero },
);
@@ -5942,7 +5933,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or");
try func.addLabel(.local_set, overflow_bit.local.value);
- const tmp_result = try func.allocStack(Type.initTag(.u128));
+ const tmp_result = try func.allocStack(Type.u128);
try func.emitWValue(tmp_result);
const mul3_msb = try func.load(mul3, Type.u64, 0);
try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset());
@@ -6191,11 +6182,12 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try func.resolveInst(extra.data.ptr);
const body = func.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = func.typeOf(extra.data.ptr).childType();
+ const err_union_ty = func.typeOf(extra.data.ptr).childType(mod);
const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
func.finishAir(inst, result, &.{extra.data.ptr});
}
@@ -6845,11 +6837,11 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| {
// for each tag name, create an unnamed const,
// and then get a pointer to its value.
- var name_ty_payload: Type.Payload.Len = .{
- .base = .{ .tag = .array_u8_sentinel_0 },
- .data = @intCast(u64, tag_name.len),
- };
- const name_ty = Type.initPayload(&name_ty_payload.base);
+ const name_ty = try mod.arrayType(.{
+ .len = tag_name.len,
+ .child = .u8_type,
+ .sentinel = .zero_u8,
+ });
const string_bytes = &mod.string_literal_bytes;
try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len);
const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{
@@ -6972,7 +6964,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// finish function body
try writer.writeByte(std.wasm.opcode(.end));
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_ty = Type.const_slice_u8_sentinel_0;
const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
}
@@ -7068,7 +7060,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr_ty = func.typeOf(extra.ptr);
- const ty = ptr_ty.childType();
+ const ty = ptr_ty.childType(mod);
const result_ty = func.typeOfIndex(inst);
const ptr_operand = try func.resolveInst(extra.ptr);
@@ -7355,7 +7347,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ptr = try func.resolveInst(bin_op.lhs);
const operand = try func.resolveInst(bin_op.rhs);
const ptr_ty = func.typeOf(bin_op.lhs);
- const ty = ptr_ty.childType();
+ const ty = ptr_ty.childType(mod);
if (func.useAtomicFeature()) {
const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
src/arch/x86_64/abi.zig
@@ -76,7 +76,7 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class {
};
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag(mod)) {
- .Pointer => switch (ty.ptrSize()) {
+ .Pointer => switch (ty.ptrSize(mod)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@@ -158,8 +158,8 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class {
else => unreachable,
},
.Vector => {
- const elem_ty = ty.childType();
- const bits = elem_ty.bitSize(mod) * ty.arrayLen();
+ const elem_ty = ty.childType(mod);
+ const bits = elem_ty.bitSize(mod) * ty.arrayLen(mod);
if (bits <= 64) return .{
.sse, .none, .none, .none,
.none, .none, .none, .none,
src/arch/x86_64/CodeGen.zig
@@ -2259,7 +2259,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
const mod = self.bin_file.options.module.?;
const ptr_ty = self.typeOfIndex(inst);
- const val_ty = ptr_ty.childType();
+ const val_ty = ptr_ty.childType(mod);
return self.allocFrameIndex(FrameAlloc.init(.{
.size = math.cast(u32, val_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
@@ -2289,8 +2289,8 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
80 => break :need_mem,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
- .Float => switch (ty.childType().floatBits(self.target.*)) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16,
80 => break :need_mem,
else => unreachable,
@@ -2727,12 +2727,12 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
if (dst_ty.zigTypeTag(mod) == .Vector) {
- assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen());
- const dst_info = dst_ty.childType().intInfo(mod);
- const src_info = src_ty.childType().intInfo(mod);
+ assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod));
+ const dst_info = dst_ty.childType(mod).intInfo(mod);
+ const src_info = src_ty.childType(mod).intInfo(mod);
const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) {
8 => switch (src_info.bits) {
- 16 => switch (dst_ty.vectorLen()) {
+ 16 => switch (dst_ty.vectorLen(mod)) {
1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw },
9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null,
else => null,
@@ -2740,7 +2740,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
else => null,
},
16 => switch (src_info.bits) {
- 32 => switch (dst_ty.vectorLen()) {
+ 32 => switch (dst_ty.vectorLen(mod)) {
1...4 => if (self.hasFeature(.avx))
.{ .vp_w, .ackusd }
else if (self.hasFeature(.sse4_1))
@@ -2769,14 +2769,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
};
const splat_val = Value.initPayload(&splat_pl.base);
- var full_pl = Type.Payload.Array{
- .base = .{ .tag = .vector },
- .data = .{
- .len = @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits),
- .elem_type = src_ty.childType(),
- },
- };
- const full_ty = Type.initPayload(&full_pl.base);
+ const full_ty = try mod.vectorType(.{
+ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
+ .child = src_ty.childType(mod).ip_index,
+ });
const full_abi_size = @intCast(u32, full_ty.abiSize(mod));
const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val });
@@ -3587,7 +3583,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const result = result: {
const dst_ty = self.typeOfIndex(inst);
const src_ty = self.typeOf(ty_op.operand);
- const opt_ty = src_ty.childType();
+ const opt_ty = src_ty.childType(mod);
const src_mcv = try self.resolveInst(ty_op.operand);
if (opt_ty.optionalReprIsPayload(mod)) {
@@ -3607,7 +3603,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
else
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
- const pl_ty = dst_ty.childType();
+ const pl_ty = dst_ty.childType(mod);
const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 });
break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
@@ -3737,7 +3733,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const eu_ty = src_ty.childType();
+ const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload();
const err_ty = eu_ty.errorUnionSet();
const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
@@ -3777,7 +3773,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const eu_ty = src_ty.childType();
+ const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload();
const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
@@ -3803,7 +3799,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
defer self.register_manager.unlockReg(src_lock);
- const eu_ty = src_ty.childType();
+ const eu_ty = src_ty.childType(mod);
const pl_ty = eu_ty.errorUnionPayload();
const err_ty = eu_ty.errorUnionSet();
const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
@@ -4057,7 +4053,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
};
defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_ty = slice_ty.childType();
+ const elem_ty = slice_ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
@@ -4116,7 +4112,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (array_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_ty = array_ty.childType();
+ const elem_ty = array_ty.childType(mod);
const elem_abi_size = elem_ty.abiSize(mod);
const index_ty = self.typeOf(bin_op.rhs);
@@ -4253,7 +4249,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_union_ty = self.typeOf(bin_op.lhs);
- const union_ty = ptr_union_ty.childType();
+ const union_ty = ptr_union_ty.childType(mod);
const tag_ty = self.typeOf(bin_op.rhs);
const layout = union_ty.unionGetLayout(mod);
@@ -4287,7 +4283,9 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
break :blk MCValue{ .register = reg };
} else ptr;
- var ptr_tag_pl = ptr_union_ty.ptrInfo();
+ var ptr_tag_pl: Type.Payload.Pointer = .{
+ .data = ptr_union_ty.ptrInfo(mod),
+ };
ptr_tag_pl.data.pointee_type = tag_ty;
const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base);
try self.store(ptr_tag_ty, adjusted_ptr, tag);
@@ -4924,14 +4922,11 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
- var vec_pl = Type.Payload.Array{
- .base = .{ .tag = .vector },
- .data = .{
- .len = @divExact(abi_size * 8, scalar_bits),
- .elem_type = try mod.intType(.signed, scalar_bits),
- },
- };
- const vec_ty = Type.initPayload(&vec_pl.base);
+ const vec_ty = try mod.vectorType(.{
+ .len = @divExact(abi_size * 8, scalar_bits),
+ .child = (try mod.intType(.signed, scalar_bits)).ip_index,
+ });
+
const sign_val = switch (tag) {
.neg => try vec_ty.minInt(stack.get(), mod),
.fabs => try vec_ty.maxInt(stack.get(), mod),
@@ -5034,15 +5029,15 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
- .Float => switch (ty.childType().floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen()) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(mod)) {
1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null,
else => null,
},
- 64 => switch (ty.vectorLen()) {
+ 64 => switch (ty.vectorLen(mod)) {
1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round },
3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null,
@@ -5131,9 +5126,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
- .Float => switch (ty.childType().floatBits(self.target.*)) {
- 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) {
1 => {
try self.asmRegisterRegister(
.{ .v_ps, .cvtph2 },
@@ -5184,13 +5179,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
},
else => null,
} else null,
- 32 => switch (ty.vectorLen()) {
+ 32 => switch (ty.vectorLen(mod)) {
1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt },
5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null,
else => null,
},
- 64 => switch (ty.vectorLen()) {
+ 64 => switch (ty.vectorLen(mod)) {
1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt },
3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null,
@@ -5292,7 +5287,7 @@ fn reuseOperandAdvanced(
fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_info = ptr_ty.ptrInfo(mod);
const val_ty = ptr_info.pointee_type;
const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
@@ -5365,7 +5360,8 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
}
fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
- const dst_ty = ptr_ty.childType();
+ const mod = self.bin_file.options.module.?;
+ const dst_ty = ptr_ty.childType(mod);
switch (ptr_mcv) {
.none,
.unreach,
@@ -5424,7 +5420,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
else
try self.allocRegOrMem(inst, true);
- if (ptr_ty.ptrInfo().data.host_size > 0) {
+ if (ptr_ty.ptrInfo(mod).host_size > 0) {
try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
} else {
try self.load(dst_mcv, ptr_ty, ptr_mcv);
@@ -5436,8 +5432,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
const mod = self.bin_file.options.module.?;
- const ptr_info = ptr_ty.ptrInfo().data;
- const src_ty = ptr_ty.childType();
+ const ptr_info = ptr_ty.ptrInfo(mod);
+ const src_ty = ptr_ty.childType(mod);
const limb_abi_size: u16 = @min(ptr_info.host_size, 8);
const limb_abi_bits = limb_abi_size * 8;
@@ -5509,7 +5505,8 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
}
fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
- const src_ty = ptr_ty.childType();
+ const mod = self.bin_file.options.module.?;
+ const src_ty = ptr_ty.childType(mod);
switch (ptr_mcv) {
.none,
.unreach,
@@ -5544,6 +5541,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr
}
fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+ const mod = self.bin_file.options.module.?;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -5553,7 +5551,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const ptr_mcv = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const src_mcv = try self.resolveInst(bin_op.rhs);
- if (ptr_ty.ptrInfo().data.host_size > 0) {
+ if (ptr_ty.ptrInfo(mod).host_size > 0) {
try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
} else {
try self.store(ptr_ty, ptr_mcv, src_mcv);
@@ -5578,11 +5576,11 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
const mod = self.bin_file.options.module.?;
const ptr_field_ty = self.typeOfIndex(inst);
const ptr_container_ty = self.typeOf(operand);
- const container_ty = ptr_container_ty.childType();
+ const container_ty = ptr_container_ty.childType(mod);
const field_offset = @intCast(i32, switch (container_ty.containerLayout()) {
.Auto, .Extern => container_ty.structFieldOffset(index, mod),
.Packed => if (container_ty.zigTypeTag(mod) == .Struct and
- ptr_field_ty.ptrInfo().data.host_size == 0)
+ ptr_field_ty.ptrInfo(mod).host_size == 0)
container_ty.packedStructFieldByteOffset(index, mod)
else
0,
@@ -5760,7 +5758,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const inst_ty = self.typeOfIndex(inst);
- const parent_ty = inst_ty.childType();
+ const parent_ty = inst_ty.childType(mod);
const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));
const src_mcv = try self.resolveInst(extra.field_ptr);
@@ -6680,10 +6678,10 @@ fn genBinOp(
80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
else => null,
- .Int => switch (lhs_ty.childType().intInfo(mod).bits) {
- 8 => switch (lhs_ty.vectorLen()) {
+ .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) {
+ 8 => switch (lhs_ty.vectorLen(mod)) {
1...16 => switch (air_tag) {
.add,
.addwrap,
@@ -6694,7 +6692,7 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .mins }
else if (self.hasFeature(.sse4_1))
@@ -6708,7 +6706,7 @@ fn genBinOp(
else
null,
},
- .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .maxs }
else if (self.hasFeature(.sse4_1))
@@ -6734,11 +6732,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null,
},
- .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
},
@@ -6746,7 +6744,7 @@ fn genBinOp(
},
else => null,
},
- 16 => switch (lhs_ty.vectorLen()) {
+ 16 => switch (lhs_ty.vectorLen(mod)) {
1...8 => switch (air_tag) {
.add,
.addwrap,
@@ -6760,7 +6758,7 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .mins }
else
@@ -6770,7 +6768,7 @@ fn genBinOp(
else
.{ .p_w, .minu },
},
- .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .maxs }
else
@@ -6795,11 +6793,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null,
},
- .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
},
@@ -6807,7 +6805,7 @@ fn genBinOp(
},
else => null,
},
- 32 => switch (lhs_ty.vectorLen()) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1...4 => switch (air_tag) {
.add,
.addwrap,
@@ -6826,7 +6824,7 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .mins }
else if (self.hasFeature(.sse4_1))
@@ -6840,7 +6838,7 @@ fn genBinOp(
else
null,
},
- .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .maxs }
else if (self.hasFeature(.sse4_1))
@@ -6869,11 +6867,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null,
},
- .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
+ .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
},
@@ -6881,7 +6879,7 @@ fn genBinOp(
},
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1...2 => switch (air_tag) {
.add,
.addwrap,
@@ -6910,8 +6908,8 @@ fn genBinOp(
},
else => null,
},
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen(mod)) {
1 => {
const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
@@ -7086,7 +7084,7 @@ fn genBinOp(
},
else => null,
} else null,
- 32 => switch (lhs_ty.vectorLen()) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
@@ -7124,7 +7122,7 @@ fn genBinOp(
} else null,
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1 => switch (air_tag) {
.add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
.sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
@@ -7236,14 +7234,14 @@ fn genBinOp(
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen()) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1 => .{ .v_ss, .cmp },
2...8 => .{ .v_ps, .cmp },
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1 => .{ .v_sd, .cmp },
2...4 => .{ .v_pd, .cmp },
else => null,
@@ -7270,13 +7268,13 @@ fn genBinOp(
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen()) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1...8 => .{ .v_ps, .blendv },
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1...4 => .{ .v_pd, .blendv },
else => null,
},
@@ -7304,14 +7302,14 @@ fn genBinOp(
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen()) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1 => .{ ._ss, .cmp },
2...4 => .{ ._ps, .cmp },
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1 => .{ ._sd, .cmp },
2 => .{ ._pd, .cmp },
else => null,
@@ -7337,13 +7335,13 @@ fn genBinOp(
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen()) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1...4 => .{ ._ps, .blendv },
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1...2 => .{ ._pd, .blendv },
else => null,
},
@@ -7368,13 +7366,13 @@ fn genBinOp(
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen()) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1...4 => .{ ._ps, .@"and" },
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1...2 => .{ ._pd, .@"and" },
else => null,
},
@@ -7398,13 +7396,13 @@ fn genBinOp(
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen()) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1...4 => .{ ._ps, .andn },
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1...2 => .{ ._pd, .andn },
else => null,
},
@@ -7428,13 +7426,13 @@ fn genBinOp(
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
- .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
- 32 => switch (lhs_ty.vectorLen()) {
+ .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (lhs_ty.vectorLen(mod)) {
1...4 => .{ ._ps, .@"or" },
else => null,
},
- 64 => switch (lhs_ty.vectorLen()) {
+ 64 => switch (lhs_ty.vectorLen(mod)) {
1...2 => .{ ._pd, .@"or" },
else => null,
},
@@ -7586,11 +7584,7 @@ fn genBinOpMir(
.load_got,
.load_tlv,
=> {
- var ptr_pl = Type.Payload.ElemType{
- .base = .{ .tag = .single_const_pointer },
- .data = ty,
- };
- const ptr_ty = Type.initPayload(&ptr_pl.base);
+ const ptr_ty = try mod.singleConstPtrType(ty);
const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address());
return self.genBinOpMir(mir_tag, ty, dst_mcv, .{
.indirect = .{ .reg = addr_reg },
@@ -8058,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
- .Pointer => ty.childType(),
+ .Pointer => ty.childType(mod),
else => unreachable,
};
@@ -8506,10 +8500,11 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
}
fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = self.typeOf(extra.data.ptr).childType();
+ const err_union_ty = self.typeOf(extra.data.ptr).childType(mod);
const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true);
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@@ -8683,8 +8678,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
- var pl_buf: Type.Payload.ElemType = undefined;
- const pl_ty = opt_ty.optionalChild(&pl_buf);
+ const pl_ty = opt_ty.optionalChild(mod);
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
@@ -8775,9 +8769,8 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
- const opt_ty = ptr_ty.childType();
- var pl_buf: Type.Payload.ElemType = undefined;
- const pl_ty = opt_ty.optionalChild(&pl_buf);
+ const opt_ty = ptr_ty.childType(mod);
+ const pl_ty = opt_ty.optionalChild(mod);
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
@@ -8919,6 +8912,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand_ptr = try self.resolveInst(un_op);
@@ -8939,7 +8933,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(un_op);
try self.load(operand, ptr_ty, operand_ptr);
- const result = try self.isErr(inst, ptr_ty.childType(), operand);
+ const result = try self.isErr(inst, ptr_ty.childType(mod), operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -8953,6 +8947,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand_ptr = try self.resolveInst(un_op);
@@ -8973,7 +8968,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(un_op);
try self.load(operand, ptr_ty, operand_ptr);
- const result = try self.isNonErr(inst, ptr_ty.childType(), operand);
+ const result = try self.isNonErr(inst, ptr_ty.childType(mod), operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -9452,9 +9447,9 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
else => {},
},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
- .Int => switch (ty.childType().intInfo(mod).bits) {
- 8 => switch (ty.vectorLen()) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Int => switch (ty.childType(mod).intInfo(mod).bits) {
+ 8 => switch (ty.vectorLen(mod)) {
1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{
.insert = .{ .vp_b, .insr },
.extract = .{ .vp_b, .extr },
@@ -9484,7 +9479,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
else => {},
},
- 16 => switch (ty.vectorLen()) {
+ 16 => switch (ty.vectorLen(mod)) {
1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
.insert = .{ .vp_w, .insr },
.extract = .{ .vp_w, .extr },
@@ -9507,7 +9502,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
else => {},
},
- 32 => switch (ty.vectorLen()) {
+ 32 => switch (ty.vectorLen(mod)) {
1 => return .{ .move = if (self.hasFeature(.avx))
.{ .v_d, .mov }
else
@@ -9523,7 +9518,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
else => {},
},
- 64 => switch (ty.vectorLen()) {
+ 64 => switch (ty.vectorLen(mod)) {
1 => return .{ .move = if (self.hasFeature(.avx))
.{ .v_q, .mov }
else
@@ -9535,7 +9530,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
else => {},
},
- 128 => switch (ty.vectorLen()) {
+ 128 => switch (ty.vectorLen(mod)) {
1 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -9543,15 +9538,15 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
else => {},
},
- 256 => switch (ty.vectorLen()) {
+ 256 => switch (ty.vectorLen(mod)) {
1 => if (self.hasFeature(.avx))
return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
else => {},
},
else => {},
},
- .Float => switch (ty.childType().floatBits(self.target.*)) {
- 16 => switch (ty.vectorLen()) {
+ .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ 16 => switch (ty.vectorLen(mod)) {
1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
.insert = .{ .vp_w, .insr },
.extract = .{ .vp_w, .extr },
@@ -9574,7 +9569,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
else => {},
},
- 32 => switch (ty.vectorLen()) {
+ 32 => switch (ty.vectorLen(mod)) {
1 => return .{ .move = if (self.hasFeature(.avx))
.{ .v_ss, .mov }
else
@@ -9590,7 +9585,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } },
else => {},
},
- 64 => switch (ty.vectorLen()) {
+ 64 => switch (ty.vectorLen(mod)) {
1 => return .{ .move = if (self.hasFeature(.avx))
.{ .v_sd, .mov }
else
@@ -9602,7 +9597,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
return .{ .move = if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } },
else => {},
},
- 128 => switch (ty.vectorLen()) {
+ 128 => switch (ty.vectorLen(mod)) {
1 => return .{ .move = if (self.hasFeature(.avx))
if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -10248,8 +10243,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const slice_ty = self.typeOfIndex(inst);
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
- const array_ty = ptr_ty.childType();
- const array_len = array_ty.arrayLen();
+ const array_ty = ptr_ty.childType(mod);
+ const array_len = array_ty.arrayLen(mod);
const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod));
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
@@ -10790,16 +10785,16 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod));
if (elem_abi_size == 1) {
- const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) {
+ const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
// TODO: this only handles slices stored in the stack
.Slice => dst_ptr,
.One => dst_ptr,
.C, .Many => unreachable,
};
- const len: MCValue = switch (dst_ptr_ty.ptrSize()) {
+ const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
// TODO: this only handles slices stored in the stack
.Slice => dst_ptr.address().offset(8).deref(),
- .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() },
+ .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
.C, .Many => unreachable,
};
const len_lock: ?RegisterLock = switch (len) {
@@ -10815,7 +10810,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
// Store the first element, and then rely on memcpy copying forwards.
// Length zero requires a runtime check - so we handle arrays specially
// here to elide it.
- switch (dst_ptr_ty.ptrSize()) {
+ switch (dst_ptr_ty.ptrSize(mod)) {
.Slice => {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf);
@@ -10858,13 +10853,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
try self.performReloc(skip_reloc);
},
.One => {
- var elem_ptr_pl = Type.Payload.ElemType{
- .base = .{ .tag = .single_mut_pointer },
- .data = elem_ty,
- };
- const elem_ptr_ty = Type.initPayload(&elem_ptr_pl.base);
+ const elem_ptr_ty = try mod.singleMutPtrType(elem_ty);
- const len = dst_ptr_ty.childType().arrayLen();
+ const len = dst_ptr_ty.childType(mod).arrayLen(mod);
assert(len != 0); // prevented by Sema
try self.store(elem_ptr_ty, dst_ptr, src_val);
@@ -10889,6 +10880,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
}
fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dst_ptr = try self.resolveInst(bin_op.lhs);
@@ -10906,9 +10898,9 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const len: MCValue = switch (dst_ptr_ty.ptrSize()) {
+ const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
.Slice => dst_ptr.address().offset(8).deref(),
- .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() },
+ .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
.C, .Many => unreachable,
};
const len_lock: ?RegisterLock = switch (len) {
@@ -11059,7 +11051,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
switch (scalar_ty.zigTypeTag(mod)) {
else => {},
.Float => switch (scalar_ty.floatBits(self.target.*)) {
- 32 => switch (vector_ty.vectorLen()) {
+ 32 => switch (vector_ty.vectorLen(mod)) {
1 => {
if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
@@ -11139,7 +11131,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
},
else => {},
},
- 64 => switch (vector_ty.vectorLen()) {
+ 64 => switch (vector_ty.vectorLen(mod)) {
1 => {
if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
@@ -11205,7 +11197,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
},
else => {},
},
- 128 => switch (vector_ty.vectorLen()) {
+ 128 => switch (vector_ty.vectorLen(mod)) {
1 => {
if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
const dst_reg = try self.register_manager.allocReg(inst, dst_rc);
@@ -11271,7 +11263,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const result_ty = self.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen());
+ const len = @intCast(usize, result_ty.arrayLen(mod));
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = result: {
@@ -11375,7 +11367,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
.Array => {
const frame_index =
try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
- const elem_ty = result_ty.childType();
+ const elem_ty = result_ty.childType(mod);
const elem_size = @intCast(u32, elem_ty.abiSize(mod));
for (elements, 0..) |elem, elem_i| {
@@ -11387,7 +11379,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elem_off = @intCast(i32, elem_size * elem_i);
try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv);
}
- if (result_ty.sentinel()) |sentinel| try self.genSetMem(
+ if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem(
.{ .frame = frame_index },
@intCast(i32, elem_size * elements.len),
elem_ty,
@@ -11512,14 +11504,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
- .Float => switch (ty.childType().floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen()) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(mod)) {
1 => .{ .v_ss, .fmadd132 },
2...8 => .{ .v_ps, .fmadd132 },
else => null,
},
- 64 => switch (ty.vectorLen()) {
+ 64 => switch (ty.vectorLen(mod)) {
1 => .{ .v_sd, .fmadd132 },
2...4 => .{ .v_pd, .fmadd132 },
else => null,
@@ -11539,14 +11531,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
- .Float => switch (ty.childType().floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen()) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(mod)) {
1 => .{ .v_ss, .fmadd213 },
2...8 => .{ .v_ps, .fmadd213 },
else => null,
},
- 64 => switch (ty.vectorLen()) {
+ 64 => switch (ty.vectorLen(mod)) {
1 => .{ .v_sd, .fmadd213 },
2...4 => .{ .v_pd, .fmadd213 },
else => null,
@@ -11566,14 +11558,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
- .Float => switch (ty.childType().floatBits(self.target.*)) {
- 32 => switch (ty.vectorLen()) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+ .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+ 32 => switch (ty.vectorLen(mod)) {
1 => .{ .v_ss, .fmadd231 },
2...8 => .{ .v_ps, .fmadd231 },
else => null,
},
- 64 => switch (ty.vectorLen()) {
+ 64 => switch (ty.vectorLen(mod)) {
1 => .{ .v_sd, .fmadd231 },
2...4 => .{ .v_pd, .fmadd231 },
else => null,
src/codegen/c/type.zig
@@ -1423,7 +1423,7 @@ pub const CType = extern union {
}),
.Pointer => {
- const info = ty.ptrInfo().data;
+ const info = ty.ptrInfo(mod);
switch (info.size) {
.Slice => {
if (switch (kind) {
@@ -1625,9 +1625,9 @@ pub const CType = extern union {
.Vector => .vector,
else => unreachable,
};
- if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| {
+ if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| {
self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{
- .len = ty.arrayLenIncludingSentinel(),
+ .len = ty.arrayLenIncludingSentinel(mod),
.elem_type = child_idx,
} } };
self.value = .{ .cty = initPayload(&self.storage.seq) };
@@ -1639,8 +1639,7 @@ pub const CType = extern union {
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
+ const payload_ty = ty.optionalChild(mod);
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (ty.optionalReprIsPayload(mod)) {
try self.initType(payload_ty, kind, lookup);
src/codegen/spirv/Module.zig
@@ -11,7 +11,8 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
-const ZigDecl = @import("../../Module.zig").Decl;
+const ZigModule = @import("../../Module.zig");
+const ZigDecl = ZigModule.Decl;
const spec = @import("spec.zig");
const Word = spec.Word;
src/codegen/c.zig
@@ -625,7 +625,9 @@ pub const DeclGen = struct {
// Ensure complete type definition is visible before accessing fields.
_ = try dg.typeToIndex(field_ptr.container_ty, .complete);
- var container_ptr_pl = ptr_ty.ptrInfo();
+ var container_ptr_pl: Type.Payload.Pointer = .{
+ .data = ptr_ty.ptrInfo(mod),
+ };
container_ptr_pl.data.pointee_type = field_ptr.container_ty;
const container_ptr_ty = Type.initPayload(&container_ptr_pl.base);
@@ -653,7 +655,9 @@ pub const DeclGen = struct {
try dg.writeCValue(writer, field);
},
.byte_offset => |byte_offset| {
- var u8_ptr_pl = ptr_ty.ptrInfo();
+ var u8_ptr_pl: Type.Payload.Pointer = .{
+ .data = ptr_ty.ptrInfo(mod),
+ };
u8_ptr_pl.data.pointee_type = Type.u8;
const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -692,11 +696,10 @@ pub const DeclGen = struct {
},
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- var elem_ptr_ty_pl: Type.Payload.ElemType = .{
- .base = .{ .tag = .c_mut_pointer },
- .data = elem_ptr.elem_ty,
- };
- const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
+ const elem_ptr_ty = try mod.ptrType(.{
+ .size = .C,
+ .elem_type = elem_ptr.elem_ty.ip_index,
+ });
try writer.writeAll("&(");
try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location);
@@ -704,11 +707,10 @@ pub const DeclGen = struct {
},
.opt_payload_ptr, .eu_payload_ptr => {
const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
- var container_ptr_ty_pl: Type.Payload.ElemType = .{
- .base = .{ .tag = .c_mut_pointer },
- .data = payload_ptr.container_ty,
- };
- const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base);
+ const container_ptr_ty = try mod.ptrType(.{
+ .elem_type = payload_ptr.container_ty.ip_index,
+ .size = .C,
+ });
// Ensure complete type definition is visible before accessing fields.
_ = try dg.typeToIndex(payload_ptr.container_ty, .complete);
@@ -794,8 +796,7 @@ pub const DeclGen = struct {
return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
},
.Optional => {
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&opt_buf);
+ const payload_ty = ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.renderValue(writer, Type.bool, val, location);
@@ -889,11 +890,11 @@ pub const DeclGen = struct {
return writer.writeAll(" }");
},
.Array, .Vector => {
- const ai = ty.arrayInfo();
+ const ai = ty.arrayInfo(mod);
if (ai.elem_type.eql(Type.u8, dg.module)) {
var literal = stringLiteral(writer);
try literal.start();
- const c_len = ty.arrayLenIncludingSentinel();
+ const c_len = ty.arrayLenIncludingSentinel(mod);
var index: u64 = 0;
while (index < c_len) : (index += 1)
try literal.writeChar(0xaa);
@@ -906,11 +907,11 @@ pub const DeclGen = struct {
}
try writer.writeByte('{');
- const c_len = ty.arrayLenIncludingSentinel();
+ const c_len = ty.arrayLenIncludingSentinel(mod);
var index: u64 = 0;
while (index < c_len) : (index += 1) {
if (index > 0) try writer.writeAll(", ");
- try dg.renderValue(writer, ty.childType(), val, initializer_type);
+ try dg.renderValue(writer, ty.childType(mod), val, initializer_type);
}
return writer.writeByte('}');
}
@@ -1110,7 +1111,7 @@ pub const DeclGen = struct {
// First try specific tag representations for more efficiency.
switch (val.tag()) {
.undef, .empty_struct_value, .empty_array => {
- const ai = ty.arrayInfo();
+ const ai = ty.arrayInfo(mod);
try writer.writeByte('{');
if (ai.sentinel) |s| {
try dg.renderValue(writer, ai.elem_type, s, initializer_type);
@@ -1128,9 +1129,9 @@ pub const DeclGen = struct {
},
else => unreachable,
};
- const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null;
+ const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null;
try writer.print("{s}", .{
- fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel),
+ fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel),
});
},
else => {
@@ -1142,7 +1143,7 @@ pub const DeclGen = struct {
// MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
const max_string_initializer_len = 65535;
- const ai = ty.arrayInfo();
+ const ai = ty.arrayInfo(mod);
if (ai.elem_type.eql(Type.u8, dg.module)) {
if (ai.len <= max_string_initializer_len) {
var literal = stringLiteral(writer);
@@ -1198,8 +1199,7 @@ pub const DeclGen = struct {
}
},
.Optional => {
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&opt_buf);
+ const payload_ty = ty.optionalChild(mod);
const is_null_val = Value.makeBool(val.tag() == .null_value);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -2410,12 +2410,13 @@ pub fn genGlobalAsm(mod: *Module, writer: anytype) !void {
}
pub fn genErrDecls(o: *Object) !void {
+ const mod = o.dg.module;
const writer = o.writer();
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
var max_name_len: usize = 0;
- for (o.dg.module.error_name_list.items, 0..) |name, value| {
+ for (mod.error_name_list.items, 0..) |name, value| {
max_name_len = std.math.max(name.len, max_name_len);
var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
@@ -2430,12 +2431,15 @@ pub fn genErrDecls(o: *Object) !void {
defer o.dg.gpa.free(name_buf);
@memcpy(name_buf[0..name_prefix.len], name_prefix);
- for (o.dg.module.error_name_list.items) |name| {
+ for (mod.error_name_list.items) |name| {
@memcpy(name_buf[name_prefix.len..][0..name.len], name);
const identifier = name_buf[0 .. name_prefix.len + name.len];
- var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len };
- const name_ty = Type.initPayload(&name_ty_pl.base);
+ const name_ty = try mod.arrayType(.{
+ .len = name.len,
+ .child = .u8_type,
+ .sentinel = .zero_u8,
+ });
var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
const name_val = Value.initPayload(&name_pl.base);
@@ -2448,15 +2452,15 @@ pub fn genErrDecls(o: *Object) !void {
}
var name_array_ty_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{
- .len = o.dg.module.error_name_list.items.len,
- .elem_type = Type.initTag(.const_slice_u8_sentinel_0),
+ .len = mod.error_name_list.items.len,
+ .elem_type = Type.const_slice_u8_sentinel_0,
} };
const name_array_ty = Type.initPayload(&name_array_ty_pl.base);
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete);
try writer.writeAll(" = {");
- for (o.dg.module.error_name_list.items, 0..) |name, value| {
+ for (mod.error_name_list.items, 0..) |name, value| {
if (value != 0) try writer.writeByte(',');
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
@@ -2487,6 +2491,7 @@ fn genExports(o: *Object) !void {
}
pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
+ const mod = o.dg.module;
const w = o.writer();
const key = lazy_fn.key_ptr.*;
const val = lazy_fn.value_ptr;
@@ -2495,7 +2500,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
.tag_name => {
const enum_ty = val.data.tag_name;
- const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const name_slice_ty = Type.const_slice_u8_sentinel_0;
try w.writeAll("static ");
try o.dg.renderType(w, name_slice_ty);
@@ -2514,11 +2519,11 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
var int_pl: Value.Payload.U64 = undefined;
const int_val = tag_val.enumToInt(enum_ty, &int_pl);
- var name_ty_pl = Type.Payload.Len{
- .base = .{ .tag = .array_u8_sentinel_0 },
- .data = name.len,
- };
- const name_ty = Type.initPayload(&name_ty_pl.base);
+ const name_ty = try mod.arrayType(.{
+ .len = name.len,
+ .child = .u8_type,
+ .sentinel = .zero_u8,
+ });
var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
const name_val = Value.initPayload(&name_pl.base);
@@ -2547,7 +2552,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try w.writeAll("}\n");
},
.never_tail, .never_inline => |fn_decl_index| {
- const fn_decl = o.dg.module.declPtr(fn_decl_index);
+ const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete);
const fn_info = fn_cty.cast(CType.Payload.Function).?.data;
@@ -3150,7 +3155,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType();
+ const elem_ty = ptr_ty.childType(mod);
const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);
const ptr = try f.resolveInst(bin_op.lhs);
@@ -3166,7 +3171,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
if (elem_has_bits) try writer.writeByte('&');
- if (elem_has_bits and ptr_ty.ptrSize() == .One) {
+ if (elem_has_bits and ptr_ty.ptrSize(mod) == .One) {
// It's a pointer to an array, so we need to de-reference.
try f.writeCValueDeref(writer, ptr);
} else try f.writeCValue(writer, ptr, .Other);
@@ -3264,7 +3269,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const mod = f.object.dg.module;
const inst_ty = f.typeOfIndex(inst);
- const elem_type = inst_ty.elemType();
+ const elem_type = inst_ty.childType(mod);
if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(
@@ -3280,7 +3285,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const mod = f.object.dg.module;
const inst_ty = f.typeOfIndex(inst);
- const elem_ty = inst_ty.elemType();
+ const elem_ty = inst_ty.childType(mod);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(
@@ -3323,7 +3328,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_ty = f.typeOf(ty_op.operand);
const ptr_scalar_ty = ptr_ty.scalarType(mod);
- const ptr_info = ptr_scalar_ty.ptrInfo().data;
+ const ptr_info = ptr_scalar_ty.ptrInfo(mod);
const src_ty = ptr_info.pointee_type;
if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -3412,7 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
const writer = f.object.writer();
const op_inst = Air.refToIndex(un_op);
const op_ty = f.typeOf(un_op);
- const ret_ty = if (is_ptr) op_ty.childType() else op_ty;
+ const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty;
var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
@@ -3601,7 +3606,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const ptr_ty = f.typeOf(bin_op.lhs);
const ptr_scalar_ty = ptr_ty.scalarType(mod);
- const ptr_info = ptr_scalar_ty.ptrInfo().data;
+ const ptr_info = ptr_scalar_ty.ptrInfo(mod);
const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.typeOf(bin_op.rhs);
@@ -4156,7 +4161,7 @@ fn airCall(
const callee_ty = f.typeOf(pl_op.operand);
const fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
- .Pointer => callee_ty.childType(),
+ .Pointer => callee_ty.childType(mod),
else => unreachable,
};
@@ -4331,10 +4336,11 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.TryPtr, ty_pl.payload);
const body = f.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = f.typeOf(extra.data.ptr).childType();
+ const err_union_ty = f.typeOf(extra.data.ptr).childType(mod);
return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true);
}
@@ -4826,7 +4832,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const is_reg = constraint[1] == '{';
if (is_reg) {
- const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType();
+ const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod);
try writer.writeAll("register ");
const alignment = 0;
const local_value = try f.allocLocalValue(output_ty, alignment);
@@ -5061,9 +5067,8 @@ fn airIsNull(
}
const operand_ty = f.typeOf(un_op);
- const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty;
- var payload_buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&payload_buf);
+ const optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
+ const payload_ty = optional_ty.optionalChild(mod);
var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -5097,8 +5102,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const opt_ty = f.typeOf(ty_op.operand);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
+ const payload_ty = opt_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return .none;
@@ -5132,10 +5136,10 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const ptr_ty = f.typeOf(ty_op.operand);
- const opt_ty = ptr_ty.childType();
+ const opt_ty = ptr_ty.childType(mod);
const inst_ty = f.typeOfIndex(inst);
- if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!inst_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod)) {
return .{ .undef = inst_ty };
}
@@ -5163,7 +5167,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const operand_ty = f.typeOf(ty_op.operand);
- const opt_ty = operand_ty.elemType();
+ const opt_ty = operand_ty.childType(mod);
const inst_ty = f.typeOfIndex(inst);
@@ -5221,7 +5225,7 @@ fn fieldLocation(
else
.{ .identifier = container_ty.structFieldName(next_field_index) } };
} else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
- .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0)
+ .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0)
.{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
else
.begin,
@@ -5243,7 +5247,7 @@ fn fieldLocation(
},
.Packed => .begin,
},
- .Pointer => switch (container_ty.ptrSize()) {
+ .Pointer => switch (container_ty.ptrSize(mod)) {
.Slice => switch (field_index) {
0 => .{ .field = .{ .identifier = "ptr" } },
1 => .{ .field = .{ .identifier = "len" } },
@@ -5280,7 +5284,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const container_ptr_ty = f.typeOfIndex(inst);
- const container_ty = container_ptr_ty.childType();
+ const container_ty = container_ptr_ty.childType(mod);
const field_ptr_ty = f.typeOf(extra.field_ptr);
const field_ptr_val = try f.resolveInst(extra.field_ptr);
@@ -5296,7 +5300,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) {
.begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
.field => |field| {
- var u8_ptr_pl = field_ptr_ty.ptrInfo();
+ var u8_ptr_pl: Type.Payload.Pointer = .{
+ .data = field_ptr_ty.ptrInfo(mod),
+ };
u8_ptr_pl.data.pointee_type = Type.u8;
const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -5311,7 +5317,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("))");
},
.byte_offset => |byte_offset| {
- var u8_ptr_pl = field_ptr_ty.ptrInfo();
+ var u8_ptr_pl: Type.Payload.Pointer = .{
+ .data = field_ptr_ty.ptrInfo(mod),
+ };
u8_ptr_pl.data.pointee_type = Type.u8;
const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -5345,7 +5353,7 @@ fn fieldPtr(
field_index: u32,
) !CValue {
const mod = f.object.dg.module;
- const container_ty = container_ptr_ty.elemType();
+ const container_ty = container_ptr_ty.childType(mod);
const field_ptr_ty = f.typeOfIndex(inst);
// Ensure complete type definition is visible before accessing fields.
@@ -5365,7 +5373,9 @@ fn fieldPtr(
try f.writeCValueDerefMember(writer, container_ptr_val, field);
},
.byte_offset => |byte_offset| {
- var u8_ptr_pl = field_ptr_ty.ptrInfo();
+ var u8_ptr_pl: Type.Payload.Pointer = .{
+ .data = field_ptr_ty.ptrInfo(mod),
+ };
u8_ptr_pl.data.pointee_type = Type.u8;
const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -5532,7 +5542,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer;
- const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+ const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const local = try f.allocLocal(inst, inst_ty);
@@ -5569,7 +5579,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const operand_ty = f.typeOf(ty_op.operand);
- const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
+ const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
const writer = f.object.writer();
if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) {
@@ -5673,7 +5683,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
- const error_union_ty = f.typeOf(ty_op.operand).childType();
+ const error_union_ty = f.typeOf(ty_op.operand).childType(mod);
const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
@@ -5761,7 +5771,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
try reap(f, inst, &.{un_op});
const operand_ty = f.typeOf(un_op);
const local = try f.allocLocal(inst, Type.bool);
- const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
+ const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
const payload_ty = err_union_ty.errorUnionPayload();
const error_ty = err_union_ty.errorUnionSet();
@@ -5795,7 +5805,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- const array_ty = f.typeOf(ty_op.operand).childType();
+ const array_ty = f.typeOf(ty_op.operand).childType(mod);
try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
try writer.writeAll(" = ");
@@ -5811,7 +5821,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
} else try f.writeCValue(writer, operand, .Initializer);
try writer.writeAll("; ");
- const array_len = array_ty.arrayLen();
+ const array_len = array_ty.arrayLen(mod);
var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len };
const len_val = Value.initPayload(&len_pl.base);
try f.writeCValueMember(writer, local, .{ .identifier = "len" });
@@ -6050,7 +6060,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
const expected_value = try f.resolveInst(extra.expected_value);
const new_value = try f.resolveInst(extra.new_value);
const ptr_ty = f.typeOf(extra.ptr);
- const ty = ptr_ty.childType();
+ const ty = ptr_ty.childType(mod);
const writer = f.object.writer();
const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value);
@@ -6152,7 +6162,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(pl_op.operand);
- const ty = ptr_ty.childType();
+ const ty = ptr_ty.childType(mod);
const ptr = try f.resolveInst(pl_op.operand);
const operand = try f.resolveInst(extra.operand);
@@ -6207,7 +6217,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr = try f.resolveInst(atomic_load.ptr);
try reap(f, inst, &.{atomic_load.ptr});
const ptr_ty = f.typeOf(atomic_load.ptr);
- const ty = ptr_ty.childType();
+ const ty = ptr_ty.childType(mod);
const repr_ty = if (ty.isRuntimeFloat())
mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
@@ -6241,7 +6251,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = f.typeOf(bin_op.lhs);
- const ty = ptr_ty.childType();
+ const ty = ptr_ty.childType(mod);
const ptr = try f.resolveInst(bin_op.lhs);
const element = try f.resolveInst(bin_op.rhs);
@@ -6299,7 +6309,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
try writer.writeAll("memset(");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
try writer.writeAll(", 0xaa, ");
@@ -6311,8 +6321,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
},
.One => {
- const array_ty = dest_ty.childType();
- const len = array_ty.arrayLen() * elem_abi_size;
+ const array_ty = dest_ty.childType(mod);
+ const len = array_ty.arrayLen(mod) * elem_abi_size;
try f.writeCValue(writer, dest_slice, .FunctionArgument);
try writer.print(", 0xaa, {d});\n", .{len});
@@ -6327,11 +6337,10 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
// For the assignment in this loop, the array pointer needs to get
// casted to a regular pointer, otherwise an error like this occurs:
// error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
- var elem_ptr_ty_pl: Type.Payload.ElemType = .{
- .base = .{ .tag = .c_mut_pointer },
- .data = elem_ty,
- };
- const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
+ const elem_ptr_ty = try mod.ptrType(.{
+ .size = .C,
+ .elem_type = elem_ty.ip_index,
+ });
const index = try f.allocLocal(inst, Type.usize);
@@ -6342,13 +6351,13 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
},
.One => {
- const array_ty = dest_ty.childType();
- try writer.print("{d}", .{array_ty.arrayLen()});
+ const array_ty = dest_ty.childType(mod);
+ try writer.print("{d}", .{array_ty.arrayLen(mod)});
},
.Many, .C => unreachable,
}
@@ -6377,7 +6386,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const bitcasted = try bitcast(f, Type.u8, value, elem_ty);
try writer.writeAll("memset(");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
try writer.writeAll(", ");
@@ -6387,8 +6396,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll(");\n");
},
.One => {
- const array_ty = dest_ty.childType();
- const len = array_ty.arrayLen() * elem_abi_size;
+ const array_ty = dest_ty.childType(mod);
+ const len = array_ty.arrayLen(mod) * elem_abi_size;
try f.writeCValue(writer, dest_slice, .FunctionArgument);
try writer.writeAll(", ");
@@ -6416,9 +6425,9 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(", ");
try writeSliceOrPtr(f, writer, src_ptr, src_ty);
try writer.writeAll(", ");
- switch (dest_ty.ptrSize()) {
+ switch (dest_ty.ptrSize(mod)) {
.Slice => {
- const elem_ty = dest_ty.childType();
+ const elem_ty = dest_ty.childType(mod);
const elem_abi_size = elem_ty.abiSize(mod);
try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
if (elem_abi_size > 1) {
@@ -6428,10 +6437,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
}
},
.One => {
- const array_ty = dest_ty.childType();
- const elem_ty = array_ty.childType();
+ const array_ty = dest_ty.childType(mod);
+ const elem_ty = array_ty.childType(mod);
const elem_abi_size = elem_ty.abiSize(mod);
- const len = array_ty.arrayLen() * elem_abi_size;
+ const len = array_ty.arrayLen(mod) * elem_abi_size;
try writer.print("{d});\n", .{len});
},
.Many, .C => unreachable,
@@ -6448,7 +6457,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
const new_tag = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const union_ty = f.typeOf(bin_op.lhs).childType();
+ const union_ty = f.typeOf(bin_op.lhs).childType(mod);
const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return .none;
const tag_ty = union_ty.unionTagTypeSafety().?;
@@ -6777,7 +6786,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const inst_ty = f.typeOfIndex(inst);
- const len = @intCast(usize, inst_ty.arrayLen());
+ const len = @intCast(usize, inst_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
@@ -6796,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const local = try f.allocLocal(inst, inst_ty);
switch (inst_ty.zigTypeTag(mod)) {
.Array, .Vector => {
- const elem_ty = inst_ty.childType();
+ const elem_ty = inst_ty.childType(mod);
const a = try Assignment.init(f, elem_ty);
for (resolved_elements, 0..) |element, i| {
try a.restart(f, writer);
@@ -6806,7 +6815,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, element, .Other);
try a.end(f, writer);
}
- if (inst_ty.sentinel()) |sentinel| {
+ if (inst_ty.sentinel(mod)) |sentinel| {
try a.restart(f, writer);
try f.writeCValue(writer, local, .Other);
try writer.print("[{d}]", .{resolved_elements.len});
@@ -7708,7 +7717,7 @@ const Vectorize = struct {
pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
const mod = f.object.dg.module;
return if (ty.zigTypeTag(mod) == .Vector) index: {
- var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
+ var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen(mod) };
const local = try f.allocLocal(inst, Type.usize);
src/codegen/llvm.zig
@@ -597,7 +597,7 @@ pub const Object = struct {
llvm_usize_ty,
};
const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False);
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_ty = Type.const_slice_u8_sentinel_0;
const slice_alignment = slice_ty.abiAlignment(mod);
const error_name_list = mod.error_name_list.items;
@@ -1071,7 +1071,7 @@ pub const Object = struct {
.slice => {
assert(!it.byval_attr);
const param_ty = fn_info.param_types[it.zig_index - 1];
- const ptr_info = param_ty.ptrInfo().data;
+ const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, it.zig_index - 1)) |i| {
if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
@@ -1596,7 +1596,7 @@ pub const Object = struct {
},
.Pointer => {
// Normalize everything that the debug info does not represent.
- const ptr_info = ty.ptrInfo().data;
+ const ptr_info = ty.ptrInfo(mod);
if (ptr_info.sentinel != null or
ptr_info.@"addrspace" != .generic or
@@ -1755,8 +1755,8 @@ pub const Object = struct {
const array_di_ty = dib.createArrayType(
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
- try o.lowerDebugType(ty.childType(), .full),
- @intCast(c_int, ty.arrayLen()),
+ try o.lowerDebugType(ty.childType(mod), .full),
+ @intCast(c_int, ty.arrayLen(mod)),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
@@ -1781,14 +1781,14 @@ pub const Object = struct {
break :blk dib.createBasicType(name, info.bits, dwarf_encoding);
},
.Bool => dib.createBasicType("bool", 1, DW.ATE.boolean),
- else => try o.lowerDebugType(ty.childType(), .full),
+ else => try o.lowerDebugType(ty.childType(mod), .full),
};
const vector_di_ty = dib.createVectorType(
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
elem_di_type,
- ty.vectorLen(),
+ ty.vectorLen(mod),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module });
@@ -1797,8 +1797,7 @@ pub const Object = struct {
.Optional => {
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = ty.optionalChild(&buf);
+ const child_ty = ty.optionalChild(mod);
if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const di_bits = 8; // lldb cannot handle non-byte sized types
const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean);
@@ -2350,11 +2349,7 @@ pub const Object = struct {
try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));
if (sret) {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = fn_info.return_type,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(fn_info.return_type);
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
} else {
@@ -2364,11 +2359,7 @@ pub const Object = struct {
if (fn_info.return_type.isError(mod) and
o.module.comp.bin_file.options.error_return_tracing)
{
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = o.getStackTraceType(),
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType());
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
@@ -2376,11 +2367,7 @@ pub const Object = struct {
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (isByRef(param_ty, mod)) {
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = param_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(param_ty);
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
} else {
try param_di_types.append(try o.lowerDebugType(param_ty, .full));
@@ -2843,7 +2830,7 @@ pub const DeclGen = struct {
};
return dg.context.structType(&fields, fields.len, .False);
}
- const ptr_info = t.ptrInfo().data;
+ const ptr_info = t.ptrInfo(mod);
const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target);
return dg.context.pointerType(llvm_addrspace);
},
@@ -2866,19 +2853,18 @@ pub const DeclGen = struct {
return llvm_struct_ty;
},
.Array => {
- const elem_ty = t.childType();
+ const elem_ty = t.childType(mod);
assert(elem_ty.onePossibleValue(mod) == null);
const elem_llvm_ty = try dg.lowerType(elem_ty);
- const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null);
+ const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null);
return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
},
.Vector => {
- const elem_type = try dg.lowerType(t.childType());
- return elem_type.vectorType(t.vectorLen());
+ const elem_type = try dg.lowerType(t.childType(mod));
+ return elem_type.vectorType(t.vectorLen(mod));
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = t.optionalChild(&buf);
+ const child_ty = t.optionalChild(mod);
if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.context.intType(8);
}
@@ -3173,11 +3159,7 @@ pub const DeclGen = struct {
if (fn_info.return_type.isError(mod) and
mod.comp.bin_file.options.error_return_tracing)
{
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = dg.object.getStackTraceType(),
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType());
try llvm_params.append(try dg.lowerType(ptr_ty));
}
@@ -3199,9 +3181,8 @@ pub const DeclGen = struct {
.slice => {
const param_ty = fn_info.param_types[it.zig_index - 1];
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- var opt_buf: Type.Payload.ElemType = undefined;
const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
- param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf)
+ param_ty.optionalChild(mod).slicePtrFieldType(&buf)
else
param_ty.slicePtrFieldType(&buf);
const ptr_llvm_ty = try dg.lowerType(ptr_ty);
@@ -3247,7 +3228,7 @@ pub const DeclGen = struct {
const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
.Opaque => true,
.Fn => !elem_ty.fnInfo().is_generic,
- .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod),
+ .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
};
const llvm_elem_ty = if (lower_elem_ty)
@@ -3417,7 +3398,7 @@ pub const DeclGen = struct {
return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
},
.field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
- return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo().data.bit_offset % 8 == 0);
+ return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0);
},
.null_value, .zero => {
const llvm_type = try dg.lowerType(tv.ty);
@@ -3425,7 +3406,7 @@ pub const DeclGen = struct {
},
.opt_payload => {
const payload = tv.val.castTag(.opt_payload).?.data;
- return dg.lowerParentPtr(payload, tv.ty.ptrInfo().data.bit_offset % 8 == 0);
+ return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0);
},
else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{
tv.ty.fmtDebug(), tag,
@@ -3436,14 +3417,14 @@ pub const DeclGen = struct {
const bytes = tv.val.castTag(.bytes).?.data;
return dg.context.constString(
bytes.ptr,
- @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()),
+ @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
.True, // Don't null terminate. Bytes has the sentinel, if any.
);
},
.str_lit => {
const str_lit = tv.val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- if (tv.ty.sentinel()) |sent_val| {
+ if (tv.ty.sentinel(mod)) |sent_val| {
const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
if (byte == 0 and bytes.len > 0) {
return dg.context.constString(
@@ -3472,9 +3453,9 @@ pub const DeclGen = struct {
},
.aggregate => {
const elem_vals = tv.val.castTag(.aggregate).?.data;
- const elem_ty = tv.ty.elemType();
+ const elem_ty = tv.ty.childType(mod);
const gpa = dg.gpa;
- const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel());
+ const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod));
const llvm_elems = try gpa.alloc(*llvm.Value, len);
defer gpa.free(llvm_elems);
var need_unnamed = false;
@@ -3498,9 +3479,9 @@ pub const DeclGen = struct {
},
.repeated => {
const val = tv.val.castTag(.repeated).?.data;
- const elem_ty = tv.ty.elemType();
- const sentinel = tv.ty.sentinel();
- const len = @intCast(usize, tv.ty.arrayLen());
+ const elem_ty = tv.ty.childType(mod);
+ const sentinel = tv.ty.sentinel(mod);
+ const len = @intCast(usize, tv.ty.arrayLen(mod));
const len_including_sent = len + @boolToInt(sentinel != null);
const gpa = dg.gpa;
const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
@@ -3534,8 +3515,8 @@ pub const DeclGen = struct {
}
},
.empty_array_sentinel => {
- const elem_ty = tv.ty.elemType();
- const sent_val = tv.ty.sentinel().?;
+ const elem_ty = tv.ty.childType(mod);
+ const sent_val = tv.ty.sentinel(mod).?;
const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val });
const llvm_elems: [1]*llvm.Value = .{sentinel};
const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
@@ -3550,8 +3531,7 @@ pub const DeclGen = struct {
},
.Optional => {
comptime assert(optional_layout_version == 3);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = tv.ty.optionalChild(&buf);
+ const payload_ty = tv.ty.optionalChild(mod);
const llvm_i8 = dg.context.intType(8);
const is_pl = !tv.val.isNull(mod);
@@ -3897,10 +3877,10 @@ pub const DeclGen = struct {
.bytes => {
// Note, sentinel is not stored even if the type has a sentinel.
const bytes = tv.val.castTag(.bytes).?.data;
- const vector_len = @intCast(usize, tv.ty.arrayLen());
+ const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
- const elem_ty = tv.ty.elemType();
+ const elem_ty = tv.ty.childType(mod);
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems, 0..) |*elem, i| {
@@ -3923,9 +3903,9 @@ pub const DeclGen = struct {
// Note, sentinel is not stored even if the type has a sentinel.
// The value includes the sentinel in those cases.
const elem_vals = tv.val.castTag(.aggregate).?.data;
- const vector_len = @intCast(usize, tv.ty.arrayLen());
+ const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
- const elem_ty = tv.ty.elemType();
+ const elem_ty = tv.ty.childType(mod);
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems, 0..) |*elem, i| {
@@ -3939,8 +3919,8 @@ pub const DeclGen = struct {
.repeated => {
// Note, sentinel is not stored even if the type has a sentinel.
const val = tv.val.castTag(.repeated).?.data;
- const elem_ty = tv.ty.elemType();
- const len = @intCast(usize, tv.ty.arrayLen());
+ const elem_ty = tv.ty.childType(mod);
+ const len = @intCast(usize, tv.ty.arrayLen(mod));
const llvm_elems = try dg.gpa.alloc(*llvm.Value, len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems) |*elem| {
@@ -3955,10 +3935,10 @@ pub const DeclGen = struct {
// Note, sentinel is not stored
const str_lit = tv.val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- const vector_len = @intCast(usize, tv.ty.arrayLen());
+ const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
assert(vector_len == bytes.len);
- const elem_ty = tv.ty.elemType();
+ const elem_ty = tv.ty.childType(mod);
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems, 0..) |*elem, i| {
@@ -4006,13 +3986,10 @@ pub const DeclGen = struct {
ptr_val: Value,
decl_index: Module.Decl.Index,
) Error!*llvm.Value {
- const decl = dg.module.declPtr(decl_index);
- dg.module.markDeclAlive(decl);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = decl.ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
+ mod.markDeclAlive(decl);
+ const ptr_ty = try mod.singleMutPtrType(decl.ty);
return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
}
@@ -4135,9 +4112,8 @@ pub const DeclGen = struct {
.opt_payload_ptr => {
const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
+ const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
payload_ty.optionalReprIsPayload(mod))
{
@@ -4251,7 +4227,8 @@ pub const DeclGen = struct {
}
fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value {
- const alignment = ptr_ty.ptrInfo().data.@"align";
+ const mod = dg.module;
+ const alignment = ptr_ty.ptrInfo(mod).@"align";
// Even though we are pointing at something which has zero bits (e.g. `void`),
// Pointers are defined to have bits. So we must return something here.
// The value cannot be undefined, because we use the `nonnull` annotation
@@ -4374,7 +4351,7 @@ pub const DeclGen = struct {
) void {
const mod = dg.module;
if (param_ty.isPtrAtRuntime(mod)) {
- const ptr_info = param_ty.ptrInfo().data;
+ const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, param_index)) |i| {
if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
@@ -4786,7 +4763,7 @@ pub const FuncGen = struct {
const mod = self.dg.module;
const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
- .Pointer => callee_ty.childType(),
+ .Pointer => callee_ty.childType(mod),
else => unreachable,
};
const fn_info = zig_fn_ty.fnInfo();
@@ -5014,7 +4991,7 @@ pub const FuncGen = struct {
.slice => {
assert(!it.byval_attr);
const param_ty = fn_info.param_types[it.zig_index - 1];
- const ptr_info = param_ty.ptrInfo().data;
+ const ptr_info = param_ty.ptrInfo(mod);
const llvm_arg_i = it.llvm_index - 2;
if (math.cast(u5, it.zig_index - 1)) |i| {
@@ -5098,11 +5075,7 @@ pub const FuncGen = struct {
const ret_ty = self.typeOf(un_op);
if (self.ret_ptr) |ret_ptr| {
const operand = try self.resolveInst(un_op);
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ret_ty,
- };
- const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const ptr_ty = try mod.singleMutPtrType(ret_ty);
try self.store(ret_ptr, ptr_ty, operand, .NotAtomic);
_ = self.builder.buildRetVoid();
return null;
@@ -5150,11 +5123,11 @@ pub const FuncGen = struct {
}
fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.typeOf(un_op);
- const ret_ty = ptr_ty.childType();
+ const ret_ty = ptr_ty.childType(mod);
const fn_info = self.dg.decl.ty.fnInfo();
- const mod = self.dg.module;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (fn_info.return_type.isError(mod)) {
// Functions with an empty error set are emitted with an error code
@@ -5301,15 +5274,13 @@ pub const FuncGen = struct {
operand_ty: Type,
op: math.CompareOperator,
) Allocator.Error!*llvm.Value {
- var opt_buffer: Type.Payload.ElemType = undefined;
-
const mod = self.dg.module;
const scalar_ty = operand_ty.scalarType(mod);
const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
.Enum => scalar_ty.intTagType(),
.Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
.Optional => blk: {
- const payload_ty = operand_ty.optionalChild(&opt_buffer);
+ const payload_ty = operand_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
operand_ty.optionalReprIsPayload(mod))
{
@@ -5506,11 +5477,12 @@ pub const FuncGen = struct {
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try self.resolveInst(extra.data.ptr);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
- const err_union_ty = self.typeOf(extra.data.ptr).childType();
+ const err_union_ty = self.typeOf(extra.data.ptr).childType(mod);
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused);
}
@@ -5661,9 +5633,9 @@ pub const FuncGen = struct {
const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
- const array_ty = operand_ty.childType();
+ const array_ty = operand_ty.childType(mod);
const llvm_usize = try self.dg.lowerType(Type.usize);
- const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
+ const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False);
const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
const operand = try self.resolveInst(ty_op.operand);
if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -5806,20 +5778,20 @@ pub const FuncGen = struct {
const mod = fg.dg.module;
const target = mod.getTarget();
const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
- switch (ty.ptrSize()) {
+ switch (ty.ptrSize(mod)) {
.Slice => {
const len = fg.builder.buildExtractValue(ptr, 1, "");
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
const abi_size = elem_ty.abiSize(mod);
if (abi_size == 1) return len;
const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
return fg.builder.buildMul(len, abi_size_llvm_val, "");
},
.One => {
- const array_ty = ty.childType();
- const elem_ty = array_ty.childType();
+ const array_ty = ty.childType(mod);
+ const elem_ty = array_ty.childType(mod);
const abi_size = elem_ty.abiSize(mod);
- return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
+ return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False);
},
.Many, .C => unreachable,
}
@@ -5832,10 +5804,11 @@ pub const FuncGen = struct {
}
fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const slice_ptr = try self.resolveInst(ty_op.operand);
const slice_ptr_ty = self.typeOf(ty_op.operand);
- const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType());
+ const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType(mod));
return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, "");
}
@@ -5847,7 +5820,7 @@ pub const FuncGen = struct {
const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const elem_ty = slice_ty.childType();
+ const elem_ty = slice_ty.childType(mod);
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
const base_ptr = self.builder.buildExtractValue(slice, 0, "");
const indices: [1]*llvm.Value = .{index};
@@ -5863,13 +5836,14 @@ pub const FuncGen = struct {
}
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType());
+ const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType(mod));
const base_ptr = self.builder.buildExtractValue(slice, 0, "");
const indices: [1]*llvm.Value = .{index};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
@@ -5884,7 +5858,7 @@ pub const FuncGen = struct {
const array_llvm_val = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const array_llvm_ty = try self.dg.lowerType(array_ty);
- const elem_ty = array_ty.childType();
+ const elem_ty = array_ty.childType(mod);
if (isByRef(array_ty, mod)) {
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
if (isByRef(elem_ty, mod)) {
@@ -5923,7 +5897,7 @@ pub const FuncGen = struct {
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType();
+ const elem_ty = ptr_ty.childType(mod);
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -5951,14 +5925,14 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType();
+ const elem_ty = ptr_ty.childType(mod);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const elem_ptr = self.air.getRefType(ty_pl.ty);
- if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr;
+ if (elem_ptr.ptrInfo(mod).vector_index != .none) return base_ptr;
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
if (ptr_ty.isSinglePointer(mod)) {
@@ -6098,7 +6072,7 @@ pub const FuncGen = struct {
const field_ptr = try self.resolveInst(extra.field_ptr);
const target = self.dg.module.getTarget();
- const parent_ty = self.air.getRefType(ty_pl.ty).childType();
+ const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod);
const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
@@ -6232,6 +6206,7 @@ pub const FuncGen = struct {
}
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const dib = self.dg.object.di_builder orelse return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const operand = try self.resolveInst(pl_op.operand);
@@ -6243,7 +6218,7 @@ pub const FuncGen = struct {
name.ptr,
self.di_file.?,
self.prev_dbg_line,
- try self.dg.object.lowerDebugType(ptr_ty.childType(), .full),
+ try self.dg.object.lowerDebugType(ptr_ty.childType(mod), .full),
true, // always preserve
0, // flags
);
@@ -6365,7 +6340,7 @@ pub const FuncGen = struct {
const output_inst = try self.resolveInst(output);
const output_ty = self.typeOf(output);
assert(output_ty.zigTypeTag(mod) == .Pointer);
- const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType());
+ const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType(mod));
if (llvm_ret_indirect[i]) {
// Pass the result by reference as an indirect output (e.g. "=*m")
@@ -6466,7 +6441,7 @@ pub const FuncGen = struct {
// an elementtype(<ty>) attribute.
if (constraint[0] == '*') {
llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse
- try self.dg.lowerPtrElemTy(arg_ty.childType());
+ try self.dg.lowerPtrElemTy(arg_ty.childType(mod));
} else {
llvm_param_attrs[llvm_param_i] = null;
}
@@ -6657,14 +6632,13 @@ pub const FuncGen = struct {
operand_is_ptr: bool,
pred: llvm.IntPredicate,
) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.typeOf(un_op);
- const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+ const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
- const mod = self.dg.module;
+ const payload_ty = optional_ty.optionalChild(mod);
if (optional_ty.optionalReprIsPayload(mod)) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(optional_llvm_ty, operand, "")
@@ -6709,7 +6683,7 @@ pub const FuncGen = struct {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.typeOf(un_op);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
const payload_ty = err_union_ty.errorUnionPayload();
const err_set_ty = try self.dg.lowerType(Type.anyerror);
const zero = err_set_ty.constNull();
@@ -6748,9 +6722,8 @@ pub const FuncGen = struct {
const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.typeOf(ty_op.operand).childType();
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
+ const optional_ty = self.typeOf(ty_op.operand).childType(mod);
+ const payload_ty = optional_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
@@ -6770,9 +6743,8 @@ pub const FuncGen = struct {
const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const optional_ty = self.typeOf(ty_op.operand).childType();
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
+ const optional_ty = self.typeOf(ty_op.operand).childType(mod);
+ const payload_ty = optional_ty.optionalChild(mod);
const non_null_bit = self.context.intType(8).constInt(1, .False);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
@@ -6827,9 +6799,9 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
const result_ty = self.typeOfIndex(inst);
- const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;
+ const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty;
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return if (operand_is_ptr) operand else null;
@@ -6862,7 +6834,7 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
- const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+ const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) {
const err_llvm_ty = try self.dg.lowerType(Type.anyerror);
if (operand_is_ptr) {
@@ -6895,7 +6867,7 @@ pub const FuncGen = struct {
const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
- const err_union_ty = self.typeOf(ty_op.operand).childType();
+ const err_union_ty = self.typeOf(ty_op.operand).childType(mod);
const payload_ty = err_union_ty.errorUnionPayload();
const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero });
@@ -6961,11 +6933,7 @@ pub const FuncGen = struct {
if (isByRef(optional_ty, mod)) {
const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, "");
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = payload_ty,
- };
- const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, "");
_ = self.builder.buildStore(non_null_bit, non_null_ptr);
@@ -6995,11 +6963,7 @@ pub const FuncGen = struct {
const store_inst = self.builder.buildStore(ok_err_code, err_ptr);
store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = payload_ty,
- };
- const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic);
return result_ptr;
}
@@ -7027,11 +6991,7 @@ pub const FuncGen = struct {
const store_inst = self.builder.buildStore(operand, err_ptr);
store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
- var ptr_ty_payload: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = payload_ty,
- };
- const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
// TODO store undef to payload_ptr
_ = payload_ptr;
_ = payload_ptr_ty;
@@ -7076,7 +7036,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(extra.rhs);
const loaded_vector = blk: {
- const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType());
+ const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod));
const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, "");
load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod));
load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr()));
@@ -7287,7 +7247,7 @@ pub const FuncGen = struct {
const inst_llvm_ty = try self.dg.lowerType(inst_ty);
const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
- const vec_len = inst_ty.vectorLen();
+ const vec_len = inst_ty.vectorLen(mod);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -7361,7 +7321,7 @@ pub const FuncGen = struct {
if (scalar_ty.isSignedInt(mod)) {
const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
- const vec_len = inst_ty.vectorLen();
+ const vec_len = inst_ty.vectorLen(mod);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -7384,13 +7344,14 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const ptr_ty = self.typeOf(bin_op.lhs);
- const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
- switch (ptr_ty.ptrSize()) {
+ const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod));
+ switch (ptr_ty.ptrSize(mod)) {
.One => {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset };
@@ -7409,14 +7370,15 @@ pub const FuncGen = struct {
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = self.builder.buildNeg(offset, "");
const ptr_ty = self.typeOf(bin_op.lhs);
- const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
- switch (ptr_ty.ptrSize()) {
+ const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod));
+ switch (ptr_ty.ptrSize(mod)) {
.One => {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
const indices: [2]*llvm.Value = .{
@@ -7587,7 +7549,7 @@ pub const FuncGen = struct {
};
if (ty.zigTypeTag(mod) == .Vector) {
- const vec_len = ty.vectorLen();
+ const vec_len = ty.vectorLen(mod);
const vector_result_ty = llvm_i32.vectorType(vec_len);
var result = vector_result_ty.getUndef();
@@ -7672,8 +7634,8 @@ pub const FuncGen = struct {
const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
const sign_mask = one.constShl(shift_amt);
const result = if (ty.zigTypeTag(mod) == .Vector) blk: {
- const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, "");
- const cast_ty = int_llvm_ty.vectorType(ty.vectorLen());
+ const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, "");
+ const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod));
const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, "");
} else blk: {
@@ -7720,7 +7682,7 @@ pub const FuncGen = struct {
const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
if (ty.zigTypeTag(mod) == .Vector) {
const result = llvm_ty.getUndef();
- return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen());
+ return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen(mod));
}
break :b libc_fn;
@@ -7887,7 +7849,7 @@ pub const FuncGen = struct {
const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False);
const lhs_max = lhs_scalar_llvm_ty.constAllOnes();
if (rhs_ty.zigTypeTag(mod) == .Vector) {
- const vec_len = rhs_ty.vectorLen();
+ const vec_len = rhs_ty.vectorLen(mod);
const bits_vec = self.builder.buildVectorSplat(vec_len, bits, "");
const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, "");
const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, "");
@@ -8059,7 +8021,7 @@ pub const FuncGen = struct {
}
if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
- const elem_ty = operand_ty.childType();
+ const elem_ty = operand_ty.childType(mod);
if (!result_is_ref) {
return self.dg.todo("implement bitcast vector to non-ref array", .{});
}
@@ -8074,7 +8036,7 @@ pub const FuncGen = struct {
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_u32 = self.context.intType(32);
const zero = llvm_usize.constNull();
- const vector_len = operand_ty.arrayLen();
+ const vector_len = operand_ty.arrayLen(mod);
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
const index_usize = llvm_usize.constInt(i, .False);
@@ -8087,7 +8049,7 @@ pub const FuncGen = struct {
}
return array_ptr;
} else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
- const elem_ty = operand_ty.childType();
+ const elem_ty = operand_ty.childType(mod);
const llvm_vector_ty = try self.dg.lowerType(inst_ty);
if (!operand_is_ref) {
return self.dg.todo("implement bitcast non-ref array to vector", .{});
@@ -8108,7 +8070,7 @@ pub const FuncGen = struct {
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_u32 = self.context.intType(32);
const zero = llvm_usize.constNull();
- const vector_len = operand_ty.arrayLen();
+ const vector_len = operand_ty.arrayLen(mod);
var vector = llvm_vector_ty.getUndef();
var i: u64 = 0;
while (i < vector_len) : (i += 1) {
@@ -8207,7 +8169,7 @@ pub const FuncGen = struct {
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const mod = self.dg.module;
const ptr_ty = self.typeOfIndex(inst);
- const pointee_type = ptr_ty.childType();
+ const pointee_type = ptr_ty.childType(mod);
if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
@@ -8218,7 +8180,7 @@ pub const FuncGen = struct {
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const mod = self.dg.module;
const ptr_ty = self.typeOfIndex(inst);
- const ret_ty = ptr_ty.childType();
+ const ret_ty = ptr_ty.childType(mod);
if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
if (self.ret_ptr) |ret_ptr| return ret_ptr;
const ret_llvm_ty = try self.dg.lowerType(ret_ty);
@@ -8232,11 +8194,11 @@ pub const FuncGen = struct {
}
fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
- const operand_ty = ptr_ty.childType();
- const mod = self.dg.module;
+ const operand_ty = ptr_ty.childType(mod);
const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
if (val_is_undef) {
@@ -8271,8 +8233,10 @@ pub const FuncGen = struct {
///
/// The first instruction of `body_tail` is the one whose copy we want to elide.
fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
+ const mod = fg.dg.module;
+ const ip = &mod.intern_pool;
for (body_tail[1..]) |body_inst| {
- switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0])) {
+ switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) {
.none => continue,
.write, .noret, .complex => return false,
.tomb => return true,
@@ -8288,7 +8252,7 @@ pub const FuncGen = struct {
const inst = body_tail[0];
const ty_op = fg.air.instructions.items(.data)[inst].ty_op;
const ptr_ty = fg.typeOf(ty_op.operand);
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_info = ptr_ty.ptrInfo(mod);
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
@@ -8363,7 +8327,7 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(extra.ptr);
var expected_value = try self.resolveInst(extra.expected_value);
var new_value = try self.resolveInst(extra.new_value);
- const operand_ty = self.typeOf(extra.ptr).elemType();
+ const operand_ty = self.typeOf(extra.ptr).childType(mod);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening and truncating
@@ -8409,7 +8373,7 @@ pub const FuncGen = struct {
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const ptr = try self.resolveInst(pl_op.operand);
const ptr_ty = self.typeOf(pl_op.operand);
- const operand_ty = ptr_ty.elemType();
+ const operand_ty = ptr_ty.childType(mod);
const operand = try self.resolveInst(extra.operand);
const is_signed_int = operand_ty.isSignedInt(mod);
const is_float = operand_ty.isRuntimeFloat();
@@ -8464,7 +8428,7 @@ pub const FuncGen = struct {
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.typeOf(atomic_load.ptr);
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = ptr_info.pointee_type;
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
return null;
@@ -8497,7 +8461,7 @@ pub const FuncGen = struct {
const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
- const operand_ty = ptr_ty.childType();
+ const operand_ty = ptr_ty.childType(mod);
if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
@@ -8595,9 +8559,9 @@ pub const FuncGen = struct {
const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd");
const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
- const len = switch (ptr_ty.ptrSize()) {
+ const len = switch (ptr_ty.ptrSize(mod)) {
.Slice => self.builder.buildExtractValue(dest_slice, 1, ""),
- .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False),
+ .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False),
.Many, .C => unreachable,
};
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -8665,7 +8629,7 @@ pub const FuncGen = struct {
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const un_ty = self.typeOf(bin_op.lhs).childType();
+ const un_ty = self.typeOf(bin_op.lhs).childType(mod);
const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return null;
const union_ptr = try self.resolveInst(bin_op.lhs);
@@ -8791,7 +8755,7 @@ pub const FuncGen = struct {
// The truncated result at the end will be the correct bswap
const scalar_llvm_ty = self.context.intType(bits + 8);
if (operand_ty.zigTypeTag(mod) == .Vector) {
- const vec_len = operand_ty.vectorLen();
+ const vec_len = operand_ty.vectorLen(mod);
operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len);
const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -8980,7 +8944,7 @@ pub const FuncGen = struct {
defer self.gpa.free(fqn);
const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_ty = Type.const_slice_u8_sentinel_0;
const llvm_ret_ty = try self.dg.lowerType(slice_ty);
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const slice_alignment = slice_ty.abiAlignment(mod);
@@ -9097,10 +9061,11 @@ pub const FuncGen = struct {
}
fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const scalar = try self.resolveInst(ty_op.operand);
const vector_ty = self.typeOfIndex(inst);
- const len = vector_ty.vectorLen();
+ const len = vector_ty.vectorLen(mod);
return self.builder.buildVectorSplat(len, scalar, "");
}
@@ -9122,7 +9087,7 @@ pub const FuncGen = struct {
const b = try self.resolveInst(extra.b);
const mask = self.air.values[extra.mask];
const mask_len = extra.mask_len;
- const a_len = self.typeOf(extra.a).vectorLen();
+ const a_len = self.typeOf(extra.a).vectorLen(mod);
// LLVM uses integers larger than the length of the first array to
// index into the second array. This was deemed unnecessarily fragile
@@ -9298,14 +9263,14 @@ pub const FuncGen = struct {
.ty = scalar_ty,
.val = Value.initPayload(&init_value_payload.base),
});
- return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(), init_value);
+ return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value);
}
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.typeOfIndex(inst);
- const len = @intCast(usize, result_ty.arrayLen());
+ const len = @intCast(usize, result_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const llvm_result_ty = try self.dg.lowerType(result_ty);
@@ -9400,7 +9365,7 @@ pub const FuncGen = struct {
const llvm_usize = try self.dg.lowerType(Type.usize);
const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));
- const array_info = result_ty.arrayInfo();
+ const array_info = result_ty.arrayInfo(mod);
var elem_ptr_payload: Type.Payload.Pointer = .{
.data = .{
.pointee_type = array_info.elem_type,
@@ -9720,7 +9685,7 @@ pub const FuncGen = struct {
}
const mod = self.dg.module;
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_ty = Type.const_slice_u8_sentinel_0;
const slice_alignment = slice_ty.abiAlignment(mod);
const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space
@@ -9763,9 +9728,8 @@ pub const FuncGen = struct {
opt_ty: Type,
can_elide_load: bool,
) !*llvm.Value {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = opt_ty.optionalChild(&buf);
const mod = fg.dg.module;
+ const payload_ty = opt_ty.optionalChild(mod);
if (isByRef(opt_ty, mod)) {
// We have a pointer and we need to return a pointer to the first field.
@@ -9827,13 +9791,13 @@ pub const FuncGen = struct {
struct_ptr_ty: Type,
field_index: u32,
) !?*llvm.Value {
- const struct_ty = struct_ptr_ty.childType();
const mod = self.dg.module;
+ const struct_ty = struct_ptr_ty.childType(mod);
switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout()) {
.Packed => {
const result_ty = self.typeOfIndex(inst);
- const result_ty_info = result_ty.ptrInfo().data;
+ const result_ty_info = result_ty.ptrInfo(mod);
if (result_ty_info.host_size != 0) {
// From LLVM's perspective, a pointer to a packed struct and a pointer
@@ -9919,7 +9883,7 @@ pub const FuncGen = struct {
/// For isByRef=false types, it creates a load instruction and returns it.
fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value {
const mod = self.dg.module;
- const info = ptr_ty.ptrInfo().data;
+ const info = ptr_ty.ptrInfo(mod);
if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null;
const ptr_alignment = info.alignment(mod);
@@ -9954,7 +9918,7 @@ pub const FuncGen = struct {
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod));
+ const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
@@ -9992,9 +9956,9 @@ pub const FuncGen = struct {
elem: *llvm.Value,
ordering: llvm.AtomicOrdering,
) !void {
- const info = ptr_ty.ptrInfo().data;
- const elem_ty = info.pointee_type;
const mod = self.dg.module;
+ const info = ptr_ty.ptrInfo(mod);
+ const elem_ty = info.pointee_type;
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
return;
}
@@ -10026,7 +9990,7 @@ pub const FuncGen = struct {
assert(ordering == .NotAtomic);
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod));
+ const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
const containing_int_ty = containing_int.typeOf();
const shift_amt = containing_int_ty.constInt(info.bit_offset, .False);
// Convert to equally-sized integer type in order to perform the bit
@@ -10864,8 +10828,7 @@ const ParamTypeIterator = struct {
.Unspecified, .Inline => {
it.zig_index += 1;
it.llvm_index += 1;
- var buf: Type.Payload.ElemType = undefined;
- if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice(mod))) {
+ if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod))) {
it.llvm_index += 1;
return .slice;
} else if (isByRef(ty, mod)) {
@@ -11185,8 +11148,7 @@ fn isByRef(ty: Type, mod: *const Module) bool {
return true;
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
+ const payload_ty = ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
src/codegen/spirv.zig
@@ -625,20 +625,20 @@ pub const DeclGen = struct {
.Array => switch (val.tag()) {
.aggregate => {
const elem_vals = val.castTag(.aggregate).?.data;
- const elem_ty = ty.elemType();
- const len = @intCast(u32, ty.arrayLenIncludingSentinel()); // TODO: limit spir-v to 32 bit arrays in a more elegant way.
+ const elem_ty = ty.childType(mod);
+ const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way.
for (elem_vals[0..len]) |elem_val| {
try self.lower(elem_ty, elem_val);
}
},
.repeated => {
const elem_val = val.castTag(.repeated).?.data;
- const elem_ty = ty.elemType();
- const len = @intCast(u32, ty.arrayLen());
+ const elem_ty = ty.childType(mod);
+ const len = @intCast(u32, ty.arrayLen(mod));
for (0..len) |_| {
try self.lower(elem_ty, elem_val);
}
- if (ty.sentinel()) |sentinel| {
+ if (ty.sentinel(mod)) |sentinel| {
try self.lower(elem_ty, sentinel);
}
},
@@ -646,7 +646,7 @@ pub const DeclGen = struct {
const str_lit = val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try self.addBytes(bytes);
- if (ty.sentinel()) |sentinel| {
+ if (ty.sentinel(mod)) |sentinel| {
try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod)));
}
},
@@ -706,8 +706,7 @@ pub const DeclGen = struct {
}
},
.Optional => {
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&opt_buf);
+ const payload_ty = ty.optionalChild(mod);
const has_payload = !val.isNull(mod);
const abi_size = ty.abiSize(mod);
@@ -1216,10 +1215,10 @@ pub const DeclGen = struct {
return try self.spv.resolve(.{ .float_type = .{ .bits = bits } });
},
.Array => {
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
const elem_ty_ref = try self.resolveType(elem_ty, .direct);
- const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse {
- return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()});
+ const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
+ return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
};
return self.spv.arrayType(total_len, elem_ty_ref);
},
@@ -1248,7 +1247,7 @@ pub const DeclGen = struct {
},
},
.Pointer => {
- const ptr_info = ty.ptrInfo().data;
+ const ptr_info = ty.ptrInfo(mod);
const storage_class = spvStorageClass(ptr_info.@"addrspace");
const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect);
@@ -1280,8 +1279,8 @@ pub const DeclGen = struct {
// TODO: Properly verify sizes and child type.
return try self.spv.resolve(.{ .vector_type = .{
- .component_type = try self.resolveType(ty.elemType(), repr),
- .component_count = @intCast(u32, ty.vectorLen()),
+ .component_type = try self.resolveType(ty.childType(mod), repr),
+ .component_count = @intCast(u32, ty.vectorLen(mod)),
} });
},
.Struct => {
@@ -1335,8 +1334,7 @@ pub const DeclGen = struct {
} });
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
+ const payload_ty = ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// Just use a bool.
// Note: Always generate the bool with indirect format, to save on some sanity
@@ -1685,7 +1683,8 @@ pub const DeclGen = struct {
}
fn load(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef) !IdRef {
- const value_ty = ptr_ty.childType();
+ const mod = self.module;
+ const value_ty = ptr_ty.childType(mod);
const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
@@ -1701,7 +1700,8 @@ pub const DeclGen = struct {
}
fn store(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, value_id: IdRef) !void {
- const value_ty = ptr_ty.childType();
+ const mod = self.module;
+ const value_ty = ptr_ty.childType(mod);
const indirect_value_id = try self.convertToIndirect(value_ty, value_id);
const access = spec.MemoryAccess.Extended{
.Volatile = ptr_ty.isVolatilePtr(),
@@ -2072,7 +2072,7 @@ pub const DeclGen = struct {
const b = try self.resolve(extra.b);
const mask = self.air.values[extra.mask];
const mask_len = extra.mask_len;
- const a_len = self.typeOf(extra.a).vectorLen();
+ const a_len = self.typeOf(extra.a).vectorLen(mod);
const result_id = self.spv.allocId();
const result_type_id = try self.resolveTypeId(ty);
@@ -2138,9 +2138,10 @@ pub const DeclGen = struct {
}
fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
+ const mod = self.module;
const result_ty_ref = try self.resolveType(result_ty, .direct);
- switch (ptr_ty.ptrSize()) {
+ switch (ptr_ty.ptrSize(mod)) {
.One => {
// Pointer to array
// TODO: Is this correct?
@@ -2498,7 +2499,7 @@ pub const DeclGen = struct {
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
const elem_ty_ref = try self.resolveType(elem_ty, .direct);
- const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace()));
+ const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
if (ptr_ty.isSinglePointer(mod)) {
// Pointer-to-array. In this case, the resulting pointer is not of the same type
// as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
@@ -2516,7 +2517,7 @@ pub const DeclGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.typeOf(bin_op.lhs);
- const elem_ty = ptr_ty.childType();
+ const elem_ty = ptr_ty.childType(mod);
// TODO: Make this return a null ptr or something
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
@@ -2526,6 +2527,7 @@ pub const DeclGen = struct {
}
fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const ptr_id = try self.resolve(bin_op.lhs);
@@ -2536,9 +2538,9 @@ pub const DeclGen = struct {
// If we have a pointer-to-array, construct an element pointer to use with load()
// If we pass ptr_ty directly, it will attempt to load the entire array rather than
// just an element.
- var elem_ptr_info = ptr_ty.ptrInfo();
- elem_ptr_info.data.size = .One;
- const elem_ptr_ty = Type.initPayload(&elem_ptr_info.base);
+ var elem_ptr_info = ptr_ty.ptrInfo(mod);
+ elem_ptr_info.size = .One;
+ const elem_ptr_ty = try Type.ptr(undefined, mod, elem_ptr_info);
return try self.load(elem_ptr_ty, elem_ptr_id);
}
@@ -2586,7 +2588,7 @@ pub const DeclGen = struct {
field_index: u32,
) !?IdRef {
const mod = self.module;
- const object_ty = object_ptr_ty.childType();
+ const object_ty = object_ptr_ty.childType(mod);
switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout()) {
.Packed => unreachable, // TODO
@@ -2662,9 +2664,10 @@ pub const DeclGen = struct {
fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ptr_ty = self.typeOfIndex(inst);
- assert(ptr_ty.ptrAddressSpace() == .generic);
- const child_ty = ptr_ty.childType();
+ assert(ptr_ty.ptrAddressSpace(mod) == .generic);
+ const child_ty = ptr_ty.childType(mod);
const child_ty_ref = try self.resolveType(child_ty, .indirect);
return try self.alloc(child_ty_ref, null);
}
@@ -2834,7 +2837,7 @@ pub const DeclGen = struct {
const mod = self.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.typeOf(un_op);
- const ret_ty = ptr_ty.childType();
+ const ret_ty = ptr_ty.childType(mod);
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
@@ -2971,8 +2974,7 @@ pub const DeclGen = struct {
const operand_id = try self.resolve(un_op);
const optional_ty = self.typeOf(un_op);
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = optional_ty.optionalChild(&buf);
+ const payload_ty = optional_ty.optionalChild(mod);
const bool_ty_ref = try self.resolveType(Type.bool, .direct);
src/link/Dwarf.zig
@@ -219,8 +219,7 @@ pub const DeclState = struct {
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
} else {
// Non-pointer optionals are structs: struct { .maybe = *, .val = * }
- var buf = try arena.create(Type.Payload.ElemType);
- const payload_ty = ty.optionalChild(buf);
+ const payload_ty = ty.optionalChild(mod);
// DW.AT.structure_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
@@ -304,7 +303,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
}
},
.Array => {
@@ -315,7 +314,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
// DW.AT.subrange_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
// DW.AT.type, DW.FORM.ref4
@@ -323,7 +322,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.count, DW.FORM.udata
- const len = ty.arrayLenIncludingSentinel();
+ const len = ty.arrayLenIncludingSentinel(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), len);
// DW.AT.array_type delimit children
try dbg_info_buffer.append(0);
@@ -688,7 +687,7 @@ pub const DeclState = struct {
const mod = self.mod;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- const child_ty = if (is_ptr) ty.childType() else ty;
+ const child_ty = if (is_ptr) ty.childType(mod) else ty;
switch (loc) {
.register => |reg| {
src/link/Wasm.zig
@@ -2931,7 +2931,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
const atom_index = try wasm.createAtom();
const atom = wasm.getAtomPtr(atom_index);
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_ty = Type.const_slice_u8_sentinel_0;
const mod = wasm.base.options.module.?;
atom.alignment = slice_ty.abiAlignment(mod);
const sym_index = atom.sym_index;
@@ -2988,7 +2988,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
for (mod.error_name_list.items) |error_name| {
const len = @intCast(u32, error_name.len + 1); // names are 0-termianted
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ const slice_ty = Type.const_slice_u8_sentinel_0;
const offset = @intCast(u32, atom.code.items.len);
// first we create the data for the slice of the name
try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated
src/Liveness/Verify.zig
@@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.aggregate_init => {
const ty_pl = data[inst].ty_pl;
const aggregate_ty = self.air.getRefType(ty_pl.ty);
- const len = @intCast(usize, aggregate_ty.arrayLen());
+ const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*));
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
var bt = self.liveness.iterateBigTomb(inst);
src/Air.zig
@@ -1375,7 +1375,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
.bool_to_int => return Type.u1,
- .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0),
+ .tag_name, .error_name => return Type.const_slice_u8_sentinel_0,
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
@@ -1384,18 +1384,21 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip);
- return ptr_ty.elemType();
+ return ptr_ty.childTypeIp(ip);
},
.atomic_load => {
const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip);
- return ptr_ty.elemType();
+ return ptr_ty.childTypeIp(ip);
},
.atomic_rmw => {
const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip);
- return ptr_ty.elemType();
+ return ptr_ty.childTypeIp(ip);
},
- .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand, ip).childType(),
+ .reduce, .reduce_optimized => {
+ const operand_ty = air.typeOf(datas[inst].reduce.operand, ip);
+ return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType();
+ },
.mul_add => return air.typeOf(datas[inst].pl_op.operand, ip),
.select => {
src/codegen.zig
@@ -230,7 +230,7 @@ pub fn generateSymbol(
.Array => switch (typed_value.val.tag()) {
.bytes => {
const bytes = typed_value.val.castTag(.bytes).?.data;
- const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel());
+ const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod));
// The bytes payload already includes the sentinel, if any
try code.ensureUnusedCapacity(len);
code.appendSliceAssumeCapacity(bytes[0..len]);
@@ -241,7 +241,7 @@ pub fn generateSymbol(
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try code.ensureUnusedCapacity(bytes.len + 1);
code.appendSliceAssumeCapacity(bytes);
- if (typed_value.ty.sentinel()) |sent_val| {
+ if (typed_value.ty.sentinel(mod)) |sent_val| {
const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
code.appendAssumeCapacity(byte);
}
@@ -249,8 +249,8 @@ pub fn generateSymbol(
},
.aggregate => {
const elem_vals = typed_value.val.castTag(.aggregate).?.data;
- const elem_ty = typed_value.ty.elemType();
- const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel());
+ const elem_ty = typed_value.ty.childType(mod);
+ const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod));
for (elem_vals[0..len]) |elem_val| {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = elem_ty,
@@ -264,9 +264,9 @@ pub fn generateSymbol(
},
.repeated => {
const array = typed_value.val.castTag(.repeated).?.data;
- const elem_ty = typed_value.ty.childType();
- const sentinel = typed_value.ty.sentinel();
- const len = typed_value.ty.arrayLen();
+ const elem_ty = typed_value.ty.childType(mod);
+ const sentinel = typed_value.ty.sentinel(mod);
+ const len = typed_value.ty.arrayLen(mod);
var index: u64 = 0;
while (index < len) : (index += 1) {
@@ -292,8 +292,8 @@ pub fn generateSymbol(
return Result.ok;
},
.empty_array_sentinel => {
- const elem_ty = typed_value.ty.childType();
- const sentinel_val = typed_value.ty.sentinel().?;
+ const elem_ty = typed_value.ty.childType(mod);
+ const sentinel_val = typed_value.ty.sentinel(mod).?;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = elem_ty,
.val = sentinel_val,
@@ -618,8 +618,7 @@ pub fn generateSymbol(
return Result.ok;
},
.Optional => {
- var opt_buf: Type.Payload.ElemType = undefined;
- const payload_type = typed_value.ty.optionalChild(&opt_buf);
+ const payload_type = typed_value.ty.optionalChild(mod);
const is_pl = !typed_value.val.isNull(mod);
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
@@ -751,7 +750,7 @@ pub fn generateSymbol(
.Vector => switch (typed_value.val.tag()) {
.bytes => {
const bytes = typed_value.val.castTag(.bytes).?.data;
- const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
+ const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse
return error.Overflow;
try code.ensureUnusedCapacity(len + padding);
@@ -761,8 +760,8 @@ pub fn generateSymbol(
},
.aggregate => {
const elem_vals = typed_value.val.castTag(.aggregate).?.data;
- const elem_ty = typed_value.ty.elemType();
- const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
+ const elem_ty = typed_value.ty.childType(mod);
+ const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
(math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
error.DivisionByZero => unreachable,
@@ -782,8 +781,8 @@ pub fn generateSymbol(
},
.repeated => {
const array = typed_value.val.castTag(.repeated).?.data;
- const elem_ty = typed_value.ty.childType();
- const len = typed_value.ty.arrayLen();
+ const elem_ty = typed_value.ty.childType(mod);
+ const len = typed_value.ty.arrayLen(mod);
const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
(math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
error.DivisionByZero => unreachable,
@@ -1188,7 +1187,7 @@ pub fn genTypedValue(
switch (typed_value.ty.zigTypeTag(mod)) {
.Void => return GenResult.mcv(.none),
- .Pointer => switch (typed_value.ty.ptrSize()) {
+ .Pointer => switch (typed_value.ty.ptrSize(mod)) {
.Slice => {},
else => {
switch (typed_value.val.tag()) {
@@ -1219,9 +1218,8 @@ pub fn genTypedValue(
if (typed_value.ty.isPtrLikeOptional(mod)) {
if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 });
- var buf: Type.Payload.ElemType = undefined;
return genTypedValue(bin_file, src_loc, .{
- .ty = typed_value.ty.optionalChild(&buf),
+ .ty = typed_value.ty.optionalChild(mod),
.val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val,
}, owner_decl_index);
} else if (typed_value.ty.abiSize(mod) == 1) {
src/InternPool.zig
@@ -31,28 +31,10 @@ const KeyAdapter = struct {
pub const Key = union(enum) {
int_type: IntType,
- ptr_type: struct {
- elem_type: Index,
- sentinel: Index = .none,
- alignment: u16 = 0,
- size: std.builtin.Type.Pointer.Size,
- is_const: bool = false,
- is_volatile: bool = false,
- is_allowzero: bool = false,
- address_space: std.builtin.AddressSpace = .generic,
- },
- array_type: struct {
- len: u64,
- child: Index,
- sentinel: Index,
- },
- vector_type: struct {
- len: u32,
- child: Index,
- },
- optional_type: struct {
- payload_type: Index,
- },
+ ptr_type: PtrType,
+ array_type: ArrayType,
+ vector_type: VectorType,
+ opt_type: Index,
error_union_type: struct {
error_set_type: Index,
payload_type: Index,
@@ -87,6 +69,47 @@ pub const Key = union(enum) {
pub const IntType = std.builtin.Type.Int;
+ pub const PtrType = struct {
+ elem_type: Index,
+ sentinel: Index = .none,
+ /// If zero use pointee_type.abiAlignment()
+ /// When creating pointer types, if alignment is equal to pointee type
+ /// abi alignment, this value should be set to 0 instead.
+ alignment: u16 = 0,
+ /// If this is non-zero it means the pointer points to a sub-byte
+ /// range of data, which is backed by a "host integer" with this
+ /// number of bytes.
+ /// When host_size=pointee_abi_size and bit_offset=0, this must be
+ /// represented with host_size=0 instead.
+ host_size: u16 = 0,
+ bit_offset: u16 = 0,
+ vector_index: VectorIndex = .none,
+ size: std.builtin.Type.Pointer.Size = .One,
+ is_const: bool = false,
+ is_volatile: bool = false,
+ is_allowzero: bool = false,
+ /// See src/target.zig defaultAddressSpace function for how to obtain
+ /// an appropriate value for this field.
+ address_space: std.builtin.AddressSpace = .generic,
+
+ pub const VectorIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ runtime = std.math.maxInt(u32) - 1,
+ _,
+ };
+ };
+
+ pub const ArrayType = struct {
+ len: u64,
+ child: Index,
+ sentinel: Index,
+ };
+
+ pub const VectorType = struct {
+ len: u32,
+ child: Index,
+ };
+
pub fn hash32(key: Key) u32 {
return @truncate(u32, key.hash64());
}
@@ -106,7 +129,7 @@ pub const Key = union(enum) {
.ptr_type,
.array_type,
.vector_type,
- .optional_type,
+ .opt_type,
.error_union_type,
.simple_type,
.simple_value,
@@ -159,8 +182,8 @@ pub const Key = union(enum) {
const b_info = b.vector_type;
return std.meta.eql(a_info, b_info);
},
- .optional_type => |a_info| {
- const b_info = b.optional_type;
+ .opt_type => |a_info| {
+ const b_info = b.opt_type;
return std.meta.eql(a_info, b_info);
},
.error_union_type => |a_info| {
@@ -220,7 +243,7 @@ pub const Key = union(enum) {
.ptr_type,
.array_type,
.vector_type,
- .optional_type,
+ .opt_type,
.error_union_type,
.simple_type,
.struct_type,
@@ -630,6 +653,7 @@ pub const Tag = enum(u8) {
/// data is payload to Vector.
type_vector,
/// A fully explicitly specified pointer type.
+ /// TODO: this encoding is still missing some pointer attributes, such as bit_offset.
/// data is payload to Pointer.
type_pointer,
/// An optional type.
@@ -893,7 +917,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
} };
},
- .type_optional => .{ .optional_type = .{ .payload_type = @intToEnum(Index, data) } },
+ .type_optional => .{ .opt_type = @intToEnum(Index, data) },
.type_error_union => @panic("TODO"),
.type_enum_simple => @panic("TODO"),
@@ -971,10 +995,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}),
});
},
- .optional_type => |optional_type| {
+ .opt_type => |opt_type| {
ip.items.appendAssumeCapacity(.{
.tag = .type_optional,
- .data = @enumToInt(optional_type.payload_type),
+ .data = @enumToInt(opt_type),
});
},
.error_union_type => |error_union_type| {
@@ -1192,3 +1216,13 @@ test "basic usage" {
} });
try std.testing.expect(another_array_i32 == array_i32);
}
+
+pub fn childType(ip: InternPool, i: Index) Index {
+ return switch (ip.indexToKey(i)) {
+ .ptr_type => |ptr_type| ptr_type.elem_type,
+ .vector_type => |vector_type| vector_type.child,
+ .array_type => |array_type| array_type.child,
+ .opt_type => |child| child,
+ else => unreachable,
+ };
+}
src/Liveness.zig
@@ -225,6 +225,7 @@ pub fn categorizeOperand(
air: Air,
inst: Air.Inst.Index,
operand: Air.Inst.Index,
+ ip: InternPool,
) OperandCategory {
const air_tags = air.instructions.items(.tag);
const air_datas = air.instructions.items(.data);
@@ -534,7 +535,7 @@ pub fn categorizeOperand(
.aggregate_init => {
const ty_pl = air_datas[inst].ty_pl;
const aggregate_ty = air.getRefType(ty_pl.ty);
- const len = @intCast(usize, aggregate_ty.arrayLen());
+ const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
@@ -625,7 +626,7 @@ pub fn categorizeOperand(
var operand_live: bool = true;
for (air.extra[cond_extra.end..][0..2]) |cond_inst| {
- if (l.categorizeOperand(air, cond_inst, operand) == .tomb)
+ if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb)
operand_live = false;
switch (air_tags[cond_inst]) {
@@ -872,6 +873,7 @@ fn analyzeInst(
data: *LivenessPassData(pass),
inst: Air.Inst.Index,
) Allocator.Error!void {
+ const ip = a.intern_pool;
const inst_tags = a.air.instructions.items(.tag);
const inst_datas = a.air.instructions.items(.data);
@@ -1140,7 +1142,7 @@ fn analyzeInst(
.aggregate_init => {
const ty_pl = inst_datas[inst].ty_pl;
const aggregate_ty = a.air.getRefType(ty_pl.ty);
- const len = @intCast(usize, aggregate_ty.arrayLen());
+ const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*));
const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
src/Module.zig
@@ -5805,7 +5805,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// is unused so it just has to be a no-op.
sema.air_instructions.set(ptr_inst.*, .{
.tag = .alloc,
- .data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) },
+ .data = .{ .ty = Type.single_const_pointer_to_comptime_int },
});
}
}
@@ -6545,7 +6545,7 @@ pub fn populateTestFunctions(
}
const decl = mod.declPtr(decl_index);
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType();
+ const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).childType(mod);
const array_decl_index = d: {
// Add mod.test_functions to an array decl then make the test_functions
@@ -6575,7 +6575,7 @@ pub fn populateTestFunctions(
errdefer name_decl_arena.deinit();
const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice);
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
- .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len),
+ .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod),
.val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes),
});
try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena);
@@ -6609,7 +6609,12 @@ pub fn populateTestFunctions(
{
// This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
- const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena));
+ const new_ty = try Type.ptr(arena, mod, .{
+ .size = .Slice,
+ .pointee_type = try tmp_test_fn_ty.copy(arena),
+ .mutable = false,
+ .@"addrspace" = .generic,
+ });
const new_var = try gpa.create(Var);
errdefer gpa.destroy(new_var);
new_var.* = decl.val.castTag(.variable).?.data.*;
@@ -6819,6 +6824,34 @@ pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allo
return i.toType();
}
+pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type {
+ const i = try intern(mod, .{ .array_type = info });
+ return i.toType();
+}
+
+pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type {
+ const i = try intern(mod, .{ .vector_type = info });
+ return i.toType();
+}
+
+pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type {
+ const i = try intern(mod, .{ .opt_type = child_type });
+ return i.toType();
+}
+
+pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
+ const i = try intern(mod, .{ .ptr_type = info });
+ return i.toType();
+}
+
+pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+ return ptrType(mod, .{ .elem_type = child_type.ip_index });
+}
+
+pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+ return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true });
+}
+
pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
}
src/print_air.zig
@@ -433,9 +433,10 @@ const Writer = struct {
}
fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const mod = w.module;
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const vector_ty = w.air.getRefType(ty_pl.ty);
- const len = @intCast(usize, vector_ty.arrayLen());
+ const len = @intCast(usize, vector_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
try w.writeType(s, vector_ty);
@@ -512,10 +513,11 @@ const Writer = struct {
}
fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ const mod = w.module;
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
- const elem_ty = w.typeOfIndex(inst).childType();
+ const elem_ty = w.typeOfIndex(inst).childType(mod);
try w.writeType(s, elem_ty);
try s.writeAll(", ");
try w.writeOperand(s, inst, 0, pl_op.operand);
src/Sema.zig
@@ -585,13 +585,18 @@ pub const Block = struct {
}
fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref {
+ const sema = block.sema;
+ const mod = sema.mod;
return block.addInst(.{
.tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
.data = .{ .ty_pl = .{
- .ty = try block.sema.addType(
- try Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool),
+ .ty = try sema.addType(
+ try mod.vectorType(.{
+ .len = sema.typeOf(lhs).vectorLen(mod),
+ .child = .bool_type,
+ }),
),
- .payload = try block.sema.addExtra(Air.VectorCmp{
+ .payload = try sema.addExtra(Air.VectorCmp{
.lhs = lhs,
.rhs = rhs,
.op = Air.VectorCmp.encodeOp(cmp_op),
@@ -1760,7 +1765,7 @@ pub fn resolveConstString(
reason: []const u8,
) ![]u8 {
const air_inst = try sema.resolveInst(zir_ref);
- const wanted_type = Type.initTag(.const_slice_u8);
+ const wanted_type = Type.const_slice_u8;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod);
@@ -1788,7 +1793,8 @@ fn analyzeAsType(
}
pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void {
- if (!sema.mod.backendSupportsFeature(.error_return_trace)) return;
+ const mod = sema.mod;
+ if (!mod.backendSupportsFeature(.error_return_trace)) return;
assert(!block.is_comptime);
var err_trace_block = block.makeSubBlock();
@@ -1798,13 +1804,13 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
// var addrs: [err_return_trace_addr_count]usize = undefined;
const err_return_trace_addr_count = 32;
- const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, sema.mod);
- const addrs_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, addr_arr_ty));
+ const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, mod);
+ const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));
// var st: StackTrace = undefined;
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
- const st_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty));
+ const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));
// st.instruction_addresses = &addrs;
const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true);
@@ -2101,11 +2107,10 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError
fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError {
const mod = sema.mod;
- const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType() else object_ty;
+ const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty;
if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = inner_ty.optionalChild(&buf);
+ const child_ty = inner_ty.optionalChild(mod);
if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
const msg = msg: {
const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
@@ -2132,7 +2137,7 @@ fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8)
switch (ty.zigTypeTag(mod)) {
.Array => return mem.eql(u8, field_name, "len"),
.Pointer => {
- const ptr_info = ty.ptrInfo().data;
+ const ptr_info = ty.ptrInfo(mod);
if (ptr_info.size == .Slice) {
return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len");
} else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
@@ -2504,6 +2509,7 @@ fn coerceResultPtr(
dummy_operand: Air.Inst.Ref,
trash_block: *Block,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const target = sema.mod.getTarget();
const addr_space = target_util.defaultAddressSpace(target, .local);
const pointee_ty = sema.typeOf(dummy_operand);
@@ -2547,7 +2553,7 @@ fn coerceResultPtr(
return sema.addConstant(ptr_ty, ptr_val);
}
if (pointee_ty.eql(Type.null, sema.mod)) {
- const opt_ty = sema.typeOf(new_ptr).childType();
+ const opt_ty = sema.typeOf(new_ptr).childType(mod);
const null_inst = try sema.addConstant(opt_ty, Value.null);
_ = try block.addBinOp(.store, new_ptr, null_inst);
return Air.Inst.Ref.void_value;
@@ -3394,7 +3400,7 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
- operand_ty.childType()
+ operand_ty.childType(mod)
else
operand_ty;
if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;
@@ -3430,7 +3436,7 @@ fn indexablePtrLen(
const mod = sema.mod;
const object_ty = sema.typeOf(object);
const is_pointer_to = object_ty.isSinglePointer(mod);
- const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty;
+ const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty;
try checkIndexable(sema, block, src, indexable_ty);
return sema.fieldVal(block, src, object, "len", src);
}
@@ -3441,9 +3447,10 @@ fn indexablePtrLenOrNone(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
try checkMemOperand(sema, block, src, operand_ty);
- if (operand_ty.ptrSize() == .Many) return .none;
+ if (operand_ty.ptrSize(mod) == .Many) return .none;
return sema.fieldVal(block, src, operand, "len", src);
}
@@ -3529,11 +3536,12 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
}
fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const alloc = try sema.resolveInst(inst_data.operand);
const alloc_ty = sema.typeOf(alloc);
- var ptr_info = alloc_ty.ptrInfo().data;
+ var ptr_info = alloc_ty.ptrInfo(mod);
const elem_ty = ptr_info.pointee_type;
// Detect if all stores to an `.alloc` were comptime-known.
@@ -3589,9 +3597,10 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const alloc_ty = sema.typeOf(alloc);
- var ptr_info = alloc_ty.ptrInfo().data;
+ var ptr_info = alloc_ty.ptrInfo(mod);
ptr_info.mutable = false;
const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
@@ -3947,13 +3956,13 @@ fn zirArrayBasePtr(
const start_ptr = try sema.resolveInst(inst_data.operand);
var base_ptr = start_ptr;
- while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) {
+ while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
.ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
.Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
else => break,
};
- const elem_ty = sema.typeOf(base_ptr).childType();
+ const elem_ty = sema.typeOf(base_ptr).childType(mod);
switch (elem_ty.zigTypeTag(mod)) {
.Array, .Vector => return base_ptr,
.Struct => if (elem_ty.isTuple()) {
@@ -3962,7 +3971,7 @@ fn zirArrayBasePtr(
},
else => {},
}
- return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType());
+ return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod));
}
fn zirFieldBasePtr(
@@ -3976,18 +3985,18 @@ fn zirFieldBasePtr(
const start_ptr = try sema.resolveInst(inst_data.operand);
var base_ptr = start_ptr;
- while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) {
+ while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) {
.ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
.Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
else => break,
};
- const elem_ty = sema.typeOf(base_ptr).childType();
+ const elem_ty = sema.typeOf(base_ptr).childType(mod);
switch (elem_ty.zigTypeTag(mod)) {
.Struct, .Union => return base_ptr,
else => {},
}
- return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType());
+ return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod));
}
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4129,7 +4138,7 @@ fn validateArrayInitTy(
switch (ty.zigTypeTag(mod)) {
.Array => {
- const array_len = ty.arrayLen();
+ const array_len = ty.arrayLen(mod);
if (extra.init_count != array_len) {
return sema.fail(block, src, "expected {d} array elements; found {d}", .{
array_len, extra.init_count,
@@ -4138,7 +4147,7 @@ fn validateArrayInitTy(
return;
},
.Vector => {
- const array_len = ty.arrayLen();
+ const array_len = ty.arrayLen(mod);
if (extra.init_count != array_len) {
return sema.fail(block, src, "expected {d} vector elements; found {d}", .{
array_len, extra.init_count,
@@ -4148,7 +4157,7 @@ fn validateArrayInitTy(
},
.Struct => if (ty.isTuple()) {
_ = try sema.resolveTypeFields(ty);
- const array_len = ty.arrayLen();
+ const array_len = ty.arrayLen(mod);
if (extra.init_count > array_len) {
return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
array_len, extra.init_count,
@@ -4194,7 +4203,7 @@ fn zirValidateStructInit(
const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
- const agg_ty = sema.typeOf(object_ptr).childType();
+ const agg_ty = sema.typeOf(object_ptr).childType(mod);
switch (agg_ty.zigTypeTag(mod)) {
.Struct => return sema.validateStructInit(
block,
@@ -4350,6 +4359,7 @@ fn validateStructInit(
init_src: LazySrcLoc,
instrs: []const Zir.Inst.Index,
) CompileError!void {
+ const mod = sema.mod;
const gpa = sema.gpa;
// Maps field index to field_ptr index of where it was already initialized.
@@ -4425,14 +4435,13 @@ fn validateStructInit(
try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
else
try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
- const field_ty = sema.typeOf(default_field_ptr).childType();
+ const field_ty = sema.typeOf(default_field_ptr).childType(mod);
const init = try sema.addConstant(field_ty, default_val);
try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
}
if (root_msg) |msg| {
if (struct_ty.castTag(.@"struct")) |struct_obj| {
- const mod = sema.mod;
const fqn = try struct_obj.data.getFullyQualifiedName(mod);
defer gpa.free(fqn);
try mod.errNoteNonLazy(
@@ -4605,7 +4614,7 @@ fn validateStructInit(
try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true)
else
try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
- const field_ty = sema.typeOf(default_field_ptr).childType();
+ const field_ty = sema.typeOf(default_field_ptr).childType(mod);
const init = try sema.addConstant(field_ty, field_values[i]);
try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
}
@@ -4624,8 +4633,8 @@ fn zirValidateArrayInit(
const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node;
const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data;
const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr);
- const array_ty = sema.typeOf(array_ptr).childType();
- const array_len = array_ty.arrayLen();
+ const array_ty = sema.typeOf(array_ptr).childType(mod);
+ const array_len = array_ty.arrayLen(mod);
if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
.Struct => {
@@ -4670,10 +4679,10 @@ fn zirValidateArrayInit(
// at comptime so we have almost nothing to do here. However, in case of a
// sentinel-terminated array, the sentinel will not have been populated by
// any ZIR instructions at comptime; we need to do that here.
- if (array_ty.sentinel()) |sentinel_val| {
+ if (array_ty.sentinel(mod)) |sentinel_val| {
const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len);
const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true);
- const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val);
+ const sentinel = try sema.addConstant(array_ty.childType(mod), sentinel_val);
try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store);
}
return;
@@ -4685,7 +4694,7 @@ fn zirValidateArrayInit(
// Collect the comptime element values in case the array literal ends up
// being comptime-known.
- const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel());
+ const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod));
const element_vals = try sema.arena.alloc(Value, array_len_s);
const opt_opv = try sema.typeHasOnePossibleValue(array_ty);
const air_tags = sema.air_instructions.items(.tag);
@@ -4784,7 +4793,7 @@ fn zirValidateArrayInit(
// Our task is to delete all the `elem_ptr` and `store` instructions, and insert
// instead a single `store` to the array_ptr with a comptime struct value.
// Also to populate the sentinel value, if any.
- if (array_ty.sentinel()) |sentinel_val| {
+ if (array_ty.sentinel(mod)) |sentinel_val| {
element_vals[instrs.len] = sentinel_val;
}
@@ -4806,13 +4815,13 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
if (operand_ty.zigTypeTag(mod) != .Pointer) {
return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)});
- } else switch (operand_ty.ptrSize()) {
+ } else switch (operand_ty.ptrSize(mod)) {
.One, .C => {},
.Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}),
.Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}),
}
- if ((try sema.typeHasOnePossibleValue(operand_ty.childType())) != null) {
+ if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) {
// No need to validate the actual pointer value, we don't need it!
return;
}
@@ -5132,7 +5141,7 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air
defer anon_decl.deinit();
const decl_index = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), gop.key_ptr.len),
+ try Type.array(anon_decl.arena(), gop.key_ptr.len, Value.zero, Type.u8, mod),
try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*),
0, // default alignment
);
@@ -6003,10 +6012,11 @@ fn addDbgVar(
air_tag: Air.Inst.Tag,
name: []const u8,
) CompileError!void {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
switch (air_tag) {
.dbg_var_ptr => {
- if (!(try sema.typeHasRuntimeBits(operand_ty.childType()))) return;
+ if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return;
},
.dbg_var_val => {
if (!(try sema.typeHasRuntimeBits(operand_ty))) return;
@@ -6238,7 +6248,7 @@ fn popErrorReturnTrace(
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
- const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty);
+ const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true);
try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store);
@@ -6263,7 +6273,7 @@ fn popErrorReturnTrace(
// If non-error, then pop the error return trace by restoring the index.
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
- const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty);
+ const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true);
try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store);
@@ -6456,16 +6466,15 @@ fn checkCallArgumentCount(
switch (callee_ty.zigTypeTag(mod)) {
.Fn => break :func_ty callee_ty,
.Pointer => {
- const ptr_info = callee_ty.ptrInfo().data;
+ const ptr_info = callee_ty.ptrInfo(mod);
if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
break :func_ty ptr_info.pointee_type;
}
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const opt_child = callee_ty.optionalChild(&buf);
+ const opt_child = callee_ty.optionalChild(mod);
if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and
- opt_child.childType().zigTypeTag(mod) == .Fn))
+ opt_child.childType(mod).zigTypeTag(mod) == .Fn))
{
const msg = msg: {
const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{
@@ -6529,7 +6538,7 @@ fn callBuiltin(
switch (callee_ty.zigTypeTag(mod)) {
.Fn => break :func_ty callee_ty,
.Pointer => {
- const ptr_info = callee_ty.ptrInfo().data;
+ const ptr_info = callee_ty.ptrInfo(mod);
if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
break :func_ty ptr_info.pointee_type;
}
@@ -7929,7 +7938,7 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
} else if (child_type.zigTypeTag(mod) == .Null) {
return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
}
- const opt_type = try Type.optional(sema.arena, child_type);
+ const opt_type = try Type.optional(sema.arena, child_type, mod);
return sema.addType(opt_type);
}
@@ -7949,16 +7958,17 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
}
fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
- const len = try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known");
+ const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"));
const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs);
try sema.checkVectorElemType(block, elem_type_src, elem_type);
- const vector_type = try Type.Tag.vector.create(sema.arena, .{
- .len = @intCast(u32, len),
- .elem_type = elem_type,
+ const vector_type = try mod.vectorType(.{
+ .len = len,
+ .child = elem_type.ip_index,
});
return sema.addType(vector_type);
}
@@ -8377,16 +8387,16 @@ fn analyzeOptionalPayloadPtr(
const optional_ptr_ty = sema.typeOf(optional_ptr);
assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer);
- const opt_type = optional_ptr_ty.elemType();
+ const opt_type = optional_ptr_ty.childType(mod);
if (opt_type.zigTypeTag(mod) != .Optional) {
return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)});
}
- const child_type = try opt_type.optionalChildAlloc(sema.arena);
+ const child_type = opt_type.optionalChild(mod);
const child_pointer = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = child_type,
.mutable = !optional_ptr_ty.isConstPtr(),
- .@"addrspace" = optional_ptr_ty.ptrAddressSpace(),
+ .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| {
@@ -8401,7 +8411,7 @@ fn analyzeOptionalPayloadPtr(
child_pointer,
try Value.Tag.opt_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
- .container_ty = optional_ptr_ty.childType(),
+ .container_ty = optional_ptr_ty.childType(mod),
}),
);
}
@@ -8414,7 +8424,7 @@ fn analyzeOptionalPayloadPtr(
child_pointer,
try Value.Tag.opt_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
- .container_ty = optional_ptr_ty.childType(),
+ .container_ty = optional_ptr_ty.childType(mod),
}),
);
}
@@ -8448,14 +8458,14 @@ fn zirOptionalPayload(
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const result_ty = switch (operand_ty.zigTypeTag(mod)) {
- .Optional => try operand_ty.optionalChildAlloc(sema.arena),
+ .Optional => operand_ty.optionalChild(mod),
.Pointer => t: {
- if (operand_ty.ptrSize() != .C) {
+ if (operand_ty.ptrSize(mod) != .C) {
return sema.failWithExpectedOptionalType(block, src, operand_ty);
}
// TODO https://github.com/ziglang/zig/issues/6597
if (true) break :t operand_ty;
- const ptr_info = operand_ty.ptrInfo().data;
+ const ptr_info = operand_ty.ptrInfo(mod);
break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = try ptr_info.pointee_type.copy(sema.arena),
.@"align" = ptr_info.@"align",
@@ -8569,18 +8579,18 @@ fn analyzeErrUnionPayloadPtr(
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(mod) == .Pointer);
- if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) {
+ if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
- operand_ty.elemType().fmt(sema.mod),
+ operand_ty.childType(mod).fmt(sema.mod),
});
}
- const err_union_ty = operand_ty.elemType();
+ const err_union_ty = operand_ty.childType(mod);
const payload_ty = err_union_ty.errorUnionPayload();
const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = payload_ty,
.mutable = !operand_ty.isConstPtr(),
- .@"addrspace" = operand_ty.ptrAddressSpace(),
+ .@"addrspace" = operand_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| {
@@ -8596,7 +8606,7 @@ fn analyzeErrUnionPayloadPtr(
operand_pointer_ty,
try Value.Tag.eu_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
- .container_ty = operand_ty.elemType(),
+ .container_ty = operand_ty.childType(mod),
}),
);
}
@@ -8609,7 +8619,7 @@ fn analyzeErrUnionPayloadPtr(
operand_pointer_ty,
try Value.Tag.eu_payload_ptr.create(sema.arena, .{
.container_ptr = ptr_val,
- .container_ty = operand_ty.elemType(),
+ .container_ty = operand_ty.childType(mod),
}),
);
}
@@ -8674,13 +8684,13 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const operand_ty = sema.typeOf(operand);
assert(operand_ty.zigTypeTag(mod) == .Pointer);
- if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) {
+ if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
- operand_ty.elemType().fmt(sema.mod),
+ operand_ty.childType(mod).fmt(sema.mod),
});
}
- const result_ty = operand_ty.elemType().errorUnionSet();
+ const result_ty = operand_ty.childType(mod).errorUnionSet();
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
@@ -10119,7 +10129,7 @@ fn zirSwitchCapture(
const operand_is_ref = cond_tag == .switch_cond_ref;
const operand_ptr = try sema.resolveInst(cond_info.operand);
const operand_ptr_ty = sema.typeOf(operand_ptr);
- const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty;
+ const operand_ty = if (operand_is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty;
if (block.inline_case_capture != .none) {
const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable;
@@ -10131,9 +10141,9 @@ fn zirSwitchCapture(
if (is_ref) {
const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = field_ty,
- .mutable = operand_ptr_ty.ptrIsMutable(),
+ .mutable = operand_ptr_ty.ptrIsMutable(mod),
.@"volatile" = operand_ptr_ty.isVolatilePtr(),
- .@"addrspace" = operand_ptr_ty.ptrAddressSpace(),
+ .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod),
});
return sema.addConstant(
ptr_field_ty,
@@ -10150,9 +10160,9 @@ fn zirSwitchCapture(
if (is_ref) {
const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = field_ty,
- .mutable = operand_ptr_ty.ptrIsMutable(),
+ .mutable = operand_ptr_ty.ptrIsMutable(mod),
.@"volatile" = operand_ptr_ty.isVolatilePtr(),
- .@"addrspace" = operand_ptr_ty.ptrAddressSpace(),
+ .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod),
});
return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty);
} else {
@@ -10235,7 +10245,7 @@ fn zirSwitchCapture(
const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = first_field.ty,
.@"addrspace" = .generic,
- .mutable = operand_ptr_ty.ptrIsMutable(),
+ .mutable = operand_ptr_ty.ptrIsMutable(mod),
});
if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| {
@@ -10311,7 +10321,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
const cond_data = zir_datas[Zir.refToIndex(inst_data.operand).?].un_node;
const operand_ptr = try sema.resolveInst(cond_data.operand);
const operand_ptr_ty = sema.typeOf(operand_ptr);
- const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty;
+ const operand_ty = if (is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty;
if (operand_ty.zigTypeTag(mod) != .Union) {
const msg = msg: {
@@ -10448,7 +10458,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const cond_index = Zir.refToIndex(extra.data.operand).?;
const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable;
const target_ty = sema.typeOf(raw_operand);
- break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty;
+ break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.childType(mod) else target_ty;
};
const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union;
@@ -12132,7 +12142,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
// into the final binary, and never loads the data into memory.
// - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
embed_file.owner_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len),
+ try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null),
0, // default alignment
);
@@ -12200,7 +12210,7 @@ fn zirShl(
const bit_value = Value.initPayload(&bits_payload.base);
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
- while (i < rhs_ty.vectorLen()) : (i += 1) {
+ while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
@@ -12220,7 +12230,7 @@ fn zirShl(
}
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
- while (i < rhs_ty.vectorLen()) : (i += 1) {
+ while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
@@ -12388,7 +12398,7 @@ fn zirShr(
const bit_value = Value.initPayload(&bits_payload.base);
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
- while (i < rhs_ty.vectorLen()) : (i += 1) {
+ while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
@@ -12408,7 +12418,7 @@ fn zirShr(
}
if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
- while (i < rhs_ty.vectorLen()) : (i += 1) {
+ while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
@@ -12571,7 +12581,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (val.isUndef()) {
return sema.addConstUndef(operand_type);
} else if (operand_type.zigTypeTag(mod) == .Vector) {
- const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
+ const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
var elem_val_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
@@ -12768,8 +12778,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod);
const mod = sema.mod;
const ptr_addrspace = p: {
- if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace();
- if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace();
+ if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
+ if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
break :p null;
};
@@ -12883,9 +12893,9 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
switch (operand_ty.zigTypeTag(mod)) {
- .Array => return operand_ty.arrayInfo(),
+ .Array => return operand_ty.arrayInfo(mod),
.Pointer => {
- const ptr_info = operand_ty.ptrInfo().data;
+ const ptr_info = operand_ty.ptrInfo(mod);
switch (ptr_info.size) {
// TODO: in the Many case here this should only work if the type
// has a sentinel, and this code should compute the length based
@@ -12900,7 +12910,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
},
.One => {
if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
- return ptr_info.pointee_type.arrayInfo();
+ return ptr_info.pointee_type.arrayInfo(mod);
}
},
.C => {},
@@ -12912,7 +12922,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
return .{
.elem_type = peer_ty.elemType2(mod),
.sentinel = null,
- .len = operand_ty.arrayLen(),
+ .len = operand_ty.arrayLen(mod),
};
}
},
@@ -13035,7 +13045,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod);
- const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null;
+ const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
@@ -14022,7 +14032,7 @@ fn intRem(
) CompileError!Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -14484,7 +14494,10 @@ fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
const mod = sema.mod;
- const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
+ const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{
+ .len = ty.vectorLen(mod),
+ .child = .u1_type,
+ }) else Type.u1;
const types = try sema.arena.alloc(Type, 2);
const values = try sema.arena.alloc(Value, 2);
@@ -14520,7 +14533,7 @@ fn analyzeArithmetic(
const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
+ if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) {
.One, .Slice => {},
.Many, .C => {
const air_tag: Air.Inst.Tag = switch (zir_tag) {
@@ -14993,9 +15006,9 @@ fn analyzePtrArithmetic(
const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr);
const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
const ptr_ty = sema.typeOf(ptr);
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array)
- ptr_info.pointee_type.childType()
+ ptr_info.pointee_type.childType(mod)
else
ptr_info.pointee_type;
@@ -15466,7 +15479,10 @@ fn cmpSelf(
if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
if (resolved_type.zigTypeTag(mod) == .Vector) {
- const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
+ const result_ty = try mod.vectorType(.{
+ .len = resolved_type.vectorLen(mod),
+ .child = .bool_type,
+ });
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
return sema.addConstant(result_ty, cmp_val);
}
@@ -15767,6 +15783,7 @@ fn zirBuiltinSrc(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
@@ -15778,7 +15795,7 @@ fn zirBuiltinSrc(
const name = std.mem.span(fn_owner_decl.name);
const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]);
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1),
+ try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes),
0, // default alignment
);
@@ -15791,7 +15808,7 @@ fn zirBuiltinSrc(
// The compiler must not call realpath anywhere.
const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena());
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len),
+ try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]),
0, // default alignment
);
@@ -16024,7 +16041,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
},
.Pointer => {
- const info = ty.ptrInfo().data;
+ const info = ty.ptrInfo(mod);
const alignment = if (info.@"align" != 0)
try Value.Tag.int_u64.create(sema.arena, info.@"align")
else
@@ -16059,7 +16076,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
},
.Array => {
- const info = ty.arrayInfo();
+ const info = ty.arrayInfo(mod);
const field_values = try sema.arena.alloc(Value, 3);
// len: comptime_int,
field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
@@ -16077,7 +16094,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
},
.Vector => {
- const info = ty.arrayInfo();
+ const info = ty.arrayInfo(mod);
const field_values = try sema.arena.alloc(Value, 2);
// len: comptime_int,
field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
@@ -16095,7 +16112,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.Optional => {
const field_values = try sema.arena.alloc(Value, 1);
// child: type,
- field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena));
+ field_values[0] = try Value.Tag.ty.create(sema.arena, ty.optionalChild(mod));
return sema.addConstant(
type_info_ty,
@@ -16141,7 +16158,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
@@ -16250,7 +16267,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
@@ -16338,7 +16355,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
@@ -16448,7 +16465,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i});
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
@@ -16490,7 +16507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
@@ -16666,14 +16683,15 @@ fn typeInfoNamespaceDecls(
decl_vals: *std.ArrayList(Value),
seen_namespaces: *std.AutoHashMap(*Namespace, void),
) !void {
+ const mod = sema.mod;
const gop = try seen_namespaces.getOrPut(namespace);
if (gop.found_existing) return;
const decls = namespace.decls.keys();
for (decls) |decl_index| {
- const decl = sema.mod.declPtr(decl_index);
+ const decl = mod.declPtr(decl_index);
if (decl.kind == .@"usingnamespace") {
if (decl.analysis == .in_progress) continue;
- try sema.mod.ensureDeclAnalyzed(decl_index);
+ try mod.ensureDeclAnalyzed(decl_index);
const new_ns = decl.val.toType().getNamespace().?;
try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces);
continue;
@@ -16684,7 +16702,7 @@ fn typeInfoNamespaceDecls(
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0));
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
@@ -16770,9 +16788,9 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
.Vector => {
const elem_ty = operand.elemType2(mod);
const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
- return Type.Tag.vector.create(sema.arena, .{
- .len = operand.vectorLen(),
- .elem_type = log2_elem_ty,
+ return mod.vectorType(.{
+ .len = operand.vectorLen(mod),
+ .child = log2_elem_ty.ip_index,
});
},
else => {},
@@ -17207,7 +17225,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
_ = try sema.analyzeBodyInner(&sub_block, body);
const operand_ty = sema.typeOf(operand);
- const ptr_info = operand_ty.ptrInfo().data;
+ const ptr_info = operand_ty.ptrInfo(mod);
const res_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = err_union_ty.errorUnionPayload(),
.@"addrspace" = ptr_info.@"addrspace",
@@ -17398,6 +17416,7 @@ fn retWithErrTracing(
ret_tag: Air.Inst.Tag,
operand: Air.Inst.Ref,
) CompileError!Zir.Inst.Index {
+ const mod = sema.mod;
const need_check = switch (is_non_err) {
.bool_true => {
_ = try block.addUnOp(ret_tag, operand);
@@ -17409,7 +17428,7 @@ fn retWithErrTracing(
const gpa = sema.gpa;
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
- const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty);
+ const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
const return_err_fn = try sema.getBuiltin("returnError");
const args: [1]Air.Inst.Ref = .{err_return_trace};
@@ -17755,7 +17774,7 @@ fn structInitEmpty(
fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
const mod = sema.mod;
- const arr_len = obj_ty.arrayLen();
+ const arr_len = obj_ty.arrayLen(mod);
if (arr_len != 0) {
if (obj_ty.zigTypeTag(mod) == .Array) {
return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
@@ -17763,7 +17782,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com
return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
}
}
- if (obj_ty.sentinel()) |sentinel| {
+ if (obj_ty.sentinel(mod)) |sentinel| {
const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel);
return sema.addConstant(obj_ty, val);
} else {
@@ -18199,6 +18218,7 @@ fn zirArrayInit(
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -18208,8 +18228,7 @@ fn zirArrayInit(
assert(args.len >= 2); // array_ty + at least one element
const array_ty = try sema.resolveType(block, src, args[0]);
- const sentinel_val = array_ty.sentinel();
- const mod = sema.mod;
+ const sentinel_val = array_ty.sentinel(mod);
const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null));
defer gpa.free(resolved_args);
@@ -18489,14 +18508,16 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
}
fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
- const opt_ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty);
+ const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
+ const opt_ptr_stack_trace_ty = try Type.optional(sema.arena, ptr_stack_trace_ty, mod);
if (sema.owner_func != null and
sema.owner_func.?.calls_or_awaits_errorable_fn and
- sema.mod.comp.bin_file.options.error_return_tracing and
- sema.mod.backendSupportsFeature(.error_return_trace))
+ mod.comp.bin_file.options.error_return_tracing and
+ mod.backendSupportsFeature(.error_return_trace))
{
return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
}
@@ -18585,8 +18606,11 @@ fn zirUnaryMath(
switch (operand_ty.zigTypeTag(mod)) {
.Vector => {
const scalar_ty = operand_ty.scalarType(mod);
- const vec_len = operand_ty.vectorLen();
- const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty);
+ const vec_len = operand_ty.vectorLen(mod);
+ const result_ty = try mod.vectorType(.{
+ .len = vec_len,
+ .child = scalar_ty.ip_index,
+ });
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef())
return sema.addConstUndef(result_ty);
@@ -18730,12 +18754,15 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const len_val = struct_val[0];
const child_val = struct_val[1];
- const len = len_val.toUnsignedInt(mod);
+ const len = @intCast(u32, len_val.toUnsignedInt(mod));
const child_ty = child_val.toType();
try sema.checkVectorElemType(block, src, child_ty);
- const ty = try Type.vector(sema.arena, len, try child_ty.copy(sema.arena));
+ const ty = try mod.vectorType(.{
+ .len = len,
+ .child = child_ty.ip_index,
+ });
return sema.addType(ty);
},
.Float => {
@@ -18872,7 +18899,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const child_ty = try child_val.toType().copy(sema.arena);
- const ty = try Type.optional(sema.arena, child_ty);
+ const ty = try Type.optional(sema.arena, child_ty, mod);
return sema.addType(ty);
},
.ErrorUnion => {
@@ -18912,7 +18939,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
// TODO use reflection instead of magic numbers here
// error_set: type,
const name_val = struct_val[0];
- const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, sema.mod);
+ const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, sema.mod);
const kv = try mod.getErrorValue(name_str);
const gop = names.getOrPutAssumeCapacity(kv.key);
@@ -19038,7 +19065,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const value_val = field_struct_val[1];
const field_name = try name_val.toAllocatedBytes(
- Type.initTag(.const_slice_u8),
+ Type.const_slice_u8,
new_decl_arena_allocator,
sema.mod,
);
@@ -19215,7 +19242,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const alignment_val = field_struct_val[2];
const field_name = try name_val.toAllocatedBytes(
- Type.initTag(.const_slice_u8),
+ Type.const_slice_u8,
new_decl_arena_allocator,
sema.mod,
);
@@ -19482,7 +19509,7 @@ fn reifyStruct(
}
const field_name = try name_val.toAllocatedBytes(
- Type.initTag(.const_slice_u8),
+ Type.const_slice_u8,
new_decl_arena_allocator,
mod,
);
@@ -19626,7 +19653,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
try sema.checkPtrOperand(block, ptr_src, ptr_ty);
- var ptr_info = ptr_ty.ptrInfo().data;
+ var ptr_info = ptr_ty.ptrInfo(mod);
const src_addrspace = ptr_info.@"addrspace";
if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) {
const msg = msg: {
@@ -19641,7 +19668,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
ptr_info.@"addrspace" = dest_addrspace;
const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional)
- try Type.optional(sema.arena, dest_ptr_ty)
+ try Type.optional(sema.arena, dest_ptr_ty, mod)
else
dest_ptr_ty;
@@ -19731,6 +19758,7 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
}
fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, ty_src, inst_data.operand);
@@ -19738,10 +19766,10 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
- const bytes = try ty.nameAllocArena(anon_decl.arena(), sema.mod);
+ const bytes = try ty.nameAllocArena(anon_decl.arena(), mod);
const new_decl = try anon_decl.finish(
- try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
@@ -19842,7 +19870,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const elem_ty = ptr_ty.elemType2(mod);
const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);
- if (ptr_ty.isSlice()) {
+ if (ptr_ty.isSlice(mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -19987,8 +20015,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
try sema.checkPtrType(block, dest_ty_src, dest_ty);
try sema.checkPtrOperand(block, operand_src, operand_ty);
- const operand_info = operand_ty.ptrInfo().data;
- const dest_info = dest_ty.ptrInfo().data;
+ const operand_info = operand_ty.ptrInfo(mod);
+ const dest_info = dest_ty.ptrInfo(mod);
if (!operand_info.mutable and dest_info.mutable) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
@@ -20042,12 +20070,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
// Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
if (dest_ty.zigTypeTag(mod) == .Optional) {
- var buf: Type.Payload.ElemType = undefined;
- var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data;
+ var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod);
dest_ptr_info.@"align" = operand_align;
- break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info));
+ break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod);
} else {
- var dest_ptr_info = dest_ty.ptrInfo().data;
+ var dest_ptr_info = dest_ty.ptrInfo(mod);
dest_ptr_info.@"align" = operand_align;
break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
}
@@ -20110,6 +20137,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20117,7 +20145,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
const operand_ty = sema.typeOf(operand);
try sema.checkPtrOperand(block, operand_src, operand_ty);
- var ptr_info = operand_ty.ptrInfo().data;
+ var ptr_info = operand_ty.ptrInfo(mod);
ptr_info.mutable = true;
const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
@@ -20130,6 +20158,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
}
fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20137,7 +20166,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const operand_ty = sema.typeOf(operand);
try sema.checkPtrOperand(block, operand_src, operand_ty);
- var ptr_info = operand_ty.ptrInfo().data;
+ var ptr_info = operand_ty.ptrInfo(mod);
ptr_info.@"volatile" = false;
const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
@@ -20163,7 +20192,10 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
const dest_ty = if (is_vector)
- try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty)
+ try mod.vectorType(.{
+ .len = operand_ty.vectorLen(mod),
+ .child = dest_scalar_ty.ip_index,
+ })
else
dest_scalar_ty;
@@ -20218,7 +20250,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
}
var elem_buf: Value.ElemValueBuffer = undefined;
- const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
+ const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod));
for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
@@ -20245,7 +20277,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
try sema.checkPtrOperand(block, ptr_src, ptr_ty);
- var ptr_info = ptr_ty.ptrInfo().data;
+ var ptr_info = ptr_ty.ptrInfo(mod);
ptr_info.@"align" = dest_align;
var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
if (ptr_ty.zigTypeTag(mod) == .Optional) {
@@ -20314,8 +20346,11 @@ fn zirBitCount(
const result_scalar_ty = try mod.smallestUnsignedInt(bits);
switch (operand_ty.zigTypeTag(mod)) {
.Vector => {
- const vec_len = operand_ty.vectorLen();
- const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty);
+ const vec_len = operand_ty.vectorLen(mod);
+ const result_ty = try mod.vectorType(.{
+ .len = vec_len,
+ .child = result_scalar_ty.ip_index,
+ });
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(result_ty);
@@ -20388,7 +20423,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (val.isUndef())
return sema.addConstUndef(operand_ty);
- const vec_len = operand_ty.vectorLen();
+ const vec_len = operand_ty.vectorLen(mod);
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
@@ -20437,7 +20472,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (val.isUndef())
return sema.addConstUndef(operand_ty);
- const vec_len = operand_ty.vectorLen();
+ const vec_len = operand_ty.vectorLen(mod);
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
@@ -20546,7 +20581,7 @@ fn checkInvalidPtrArithmetic(
) CompileError!void {
const mod = sema.mod;
switch (try ty.zigTypeTagOrPoison(mod)) {
- .Pointer => switch (ty.ptrSize()) {
+ .Pointer => switch (ty.ptrSize(mod)) {
.One, .Slice => return,
.Many, .C => return sema.fail(
block,
@@ -20676,7 +20711,7 @@ fn checkNumericType(
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
- .Vector => switch (ty.childType().zigTypeTag(mod)) {
+ .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
},
@@ -20726,7 +20761,7 @@ fn checkAtomicPtrOperand(
const ptr_ty = sema.typeOf(ptr);
const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
- .Pointer => ptr_ty.ptrInfo().data,
+ .Pointer => ptr_ty.ptrInfo(mod),
else => {
const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
_ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
@@ -20797,7 +20832,7 @@ fn checkIntOrVector(
switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int => return operand_ty,
.Vector => {
- const elem_ty = operand_ty.childType();
+ const elem_ty = operand_ty.childType(mod);
switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
@@ -20821,7 +20856,7 @@ fn checkIntOrVectorAllowComptime(
switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return operand_ty,
.Vector => {
- const elem_ty = operand_ty.childType();
+ const elem_ty = operand_ty.childType(mod);
switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
@@ -20870,7 +20905,7 @@ fn checkSimdBinOp(
const rhs_ty = sema.typeOf(uncasted_rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null;
+ var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
@@ -20912,8 +20947,8 @@ fn checkVectorizableBinaryOperands(
};
if (lhs_is_vector and rhs_is_vector) {
- const lhs_len = lhs_ty.arrayLen();
- const rhs_len = rhs_ty.arrayLen();
+ const lhs_len = lhs_ty.arrayLen(mod);
+ const rhs_len = rhs_ty.arrayLen(mod);
if (lhs_len != rhs_len) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
@@ -20966,7 +21001,7 @@ fn resolveExportOptions(
const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
- const name_ty = Type.initTag(.const_slice_u8);
+ const name_ty = Type.const_slice_u8;
const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
@@ -20975,7 +21010,7 @@ fn resolveExportOptions(
const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
- const section_ty = Type.initTag(.const_slice_u8);
+ const section_ty = Type.const_slice_u8;
const section = if (section_opt_val.optionalValue(mod)) |section_val|
try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
else
@@ -21087,7 +21122,7 @@ fn zirCmpxchg(
return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
}
- const result_ty = try Type.optional(sema.arena, elem_ty);
+ const result_ty = try Type.optional(sema.arena, elem_ty, mod);
// special case zero bit types
if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
@@ -21133,6 +21168,7 @@ fn zirCmpxchg(
}
fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -21141,9 +21177,9 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const scalar = try sema.resolveInst(extra.rhs);
const scalar_ty = sema.typeOf(scalar);
try sema.checkVectorElemType(block, scalar_src, scalar_ty);
- const vector_ty = try Type.Tag.vector.create(sema.arena, .{
+ const vector_ty = try mod.vectorType(.{
.len = len,
- .elem_type = scalar_ty,
+ .child = scalar_ty.ip_index,
});
if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty);
@@ -21172,7 +21208,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
}
- const scalar_ty = operand_ty.childType();
+ const scalar_ty = operand_ty.childType(mod);
// Type-check depending on operation.
switch (operation) {
@@ -21190,7 +21226,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
},
}
- const vec_len = operand_ty.vectorLen();
+ const vec_len = operand_ty.vectorLen(mod);
if (vec_len == 0) {
// TODO re-evaluate if we should introduce a "neutral value" for some operations,
// e.g. zero for add and one for mul.
@@ -21243,12 +21279,12 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
var mask_ty = sema.typeOf(mask);
const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
- .Array, .Vector => sema.typeOf(mask).arrayLen(),
+ .Array, .Vector => sema.typeOf(mask).arrayLen(mod),
else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
};
- mask_ty = try Type.Tag.vector.create(sema.arena, .{
- .len = mask_len,
- .elem_type = Type.i32,
+ mask_ty = try mod.vectorType(.{
+ .len = @intCast(u32, mask_len),
+ .child = .i32_type,
});
mask = try sema.coerce(block, mask_ty, mask, mask_src);
const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known");
@@ -21272,13 +21308,13 @@ fn analyzeShuffle(
var a = a_arg;
var b = b_arg;
- const res_ty = try Type.Tag.vector.create(sema.arena, .{
+ const res_ty = try mod.vectorType(.{
.len = mask_len,
- .elem_type = elem_ty,
+ .child = elem_ty.ip_index,
});
var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
- .Array, .Vector => sema.typeOf(a).arrayLen(),
+ .Array, .Vector => sema.typeOf(a).arrayLen(mod),
.Undefined => null,
else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
elem_ty.fmt(sema.mod),
@@ -21286,7 +21322,7 @@ fn analyzeShuffle(
}),
};
var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
- .Array, .Vector => sema.typeOf(b).arrayLen(),
+ .Array, .Vector => sema.typeOf(b).arrayLen(mod),
.Undefined => null,
else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
elem_ty.fmt(sema.mod),
@@ -21296,16 +21332,16 @@ fn analyzeShuffle(
if (maybe_a_len == null and maybe_b_len == null) {
return sema.addConstUndef(res_ty);
}
- const a_len = maybe_a_len orelse maybe_b_len.?;
- const b_len = maybe_b_len orelse a_len;
+ const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?);
+ const b_len = @intCast(u32, maybe_b_len orelse a_len);
- const a_ty = try Type.Tag.vector.create(sema.arena, .{
+ const a_ty = try mod.vectorType(.{
.len = a_len,
- .elem_type = elem_ty,
+ .child = elem_ty.ip_index,
});
- const b_ty = try Type.Tag.vector.create(sema.arena, .{
+ const b_ty = try mod.vectorType(.{
.len = b_len,
- .elem_type = elem_ty,
+ .child = elem_ty.ip_index,
});
if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
@@ -21437,15 +21473,21 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
const pred_ty = sema.typeOf(pred_uncoerced);
const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
- .Vector, .Array => pred_ty.arrayLen(),
+ .Vector, .Array => pred_ty.arrayLen(mod),
else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
};
- const vec_len = try sema.usizeCast(block, pred_src, vec_len_u64);
+ const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64));
- const bool_vec_ty = try Type.vector(sema.arena, vec_len, Type.bool);
+ const bool_vec_ty = try mod.vectorType(.{
+ .len = vec_len,
+ .child = .bool_type,
+ });
const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);
- const vec_ty = try Type.vector(sema.arena, vec_len, elem_ty);
+ const vec_ty = try mod.vectorType(.{
+ .len = vec_len,
+ .child = elem_ty.ip_index,
+ });
const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);
@@ -21854,7 +21896,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
}
try sema.checkPtrOperand(block, ptr_src, field_ptr_ty);
- const field_ptr_ty_info = field_ptr_ty.ptrInfo().data;
+ const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);
var ptr_ty_data: Type.Payload.Pointer.Data = .{
.pointee_type = parent_ty.structFieldType(field_index),
@@ -22052,8 +22094,8 @@ fn analyzeMinMax(
}
const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: {
- const elem_ty = orig_ty.childType();
- const len = orig_ty.vectorLen();
+ const elem_ty = orig_ty.childType(mod);
+ const len = orig_ty.vectorLen(mod);
if (len == 0) break :blk orig_ty;
if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
@@ -22068,7 +22110,10 @@ fn analyzeMinMax(
}
const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
- break :blk try Type.vector(sema.arena, len, refined_elem_ty);
+ break :blk try mod.vectorType(.{
+ .len = len,
+ .child = refined_elem_ty.ip_index,
+ });
} else blk: {
if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
if (val.isUndef()) break :blk orig_ty; // can't refine undef
@@ -22129,8 +22174,8 @@ fn analyzeMinMax(
if (known_undef) break :refine; // can't refine undef
const unrefined_ty = sema.typeOf(cur_minmax.?);
const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector;
- const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty;
- const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty;
+ const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty;
+ const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty;
if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats
@@ -22150,7 +22195,10 @@ fn analyzeMinMax(
const final_elem_ty = try mod.intFittingRange(min_val, max_val);
const final_ty = if (is_vector)
- try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty)
+ try mod.vectorType(.{
+ .len = unrefined_ty.vectorLen(mod),
+ .child = final_elem_ty.ip_index,
+ })
else
final_elem_ty;
@@ -22165,7 +22213,7 @@ fn analyzeMinMax(
fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
const mod = sema.mod;
- const info = sema.typeOf(ptr).ptrInfo().data;
+ const info = sema.typeOf(ptr).ptrInfo(mod);
if (info.size == .One) {
// Already an array pointer.
return ptr;
@@ -22659,7 +22707,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
- const ty = Type.initTag(.const_slice_u8);
+ const ty = Type.const_slice_u8;
const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
if (val.isGenericPoison()) {
break :blk FuncLinkSection{ .generic = {} };
@@ -22943,7 +22991,7 @@ fn resolveExternOptions(
const name_ref = try sema.fieldVal(block, src, options, "name", name_src);
const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known");
- const name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
+ const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src);
const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known");
@@ -22957,7 +23005,7 @@ fn resolveExternOptions(
const library_name = if (!library_name_val.isNull(mod)) blk: {
const payload = library_name_val.castTag(.opt_payload).?.data;
- const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
+ const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
if (library_name.len == 0) {
return sema.fail(block, library_src, "library name cannot be empty", .{});
}
@@ -22994,7 +23042,7 @@ fn zirBuiltinExtern(
if (!ty.isPtrAtRuntime(mod)) {
return sema.fail(block, ty_src, "expected (optional) pointer", .{});
}
- if (!try sema.validateExternType(ty.childType(), .other)) {
+ if (!try sema.validateExternType(ty.childType(mod), .other)) {
const msg = msg: {
const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
@@ -23014,7 +23062,7 @@ fn zirBuiltinExtern(
};
if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
- ty = try Type.optional(sema.arena, ty);
+ ty = try Type.optional(sema.arena, ty, mod);
}
// TODO check duplicate extern
@@ -23194,7 +23242,7 @@ fn validateRunTimeType(
=> return false,
.Pointer => {
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
switch (elem_ty.zigTypeTag(mod)) {
.Opaque => return true,
.Fn => return elem_ty.isFnOrHasRuntimeBits(mod),
@@ -23204,11 +23252,10 @@ fn validateRunTimeType(
.Opaque => return is_extern,
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = ty.optionalChild(&buf);
+ const child_ty = ty.optionalChild(mod);
return sema.validateRunTimeType(child_ty, is_extern);
},
- .Array, .Vector => ty = ty.elemType(),
+ .Array, .Vector => ty = ty.childType(mod),
.ErrorUnion => ty = ty.errorUnionPayload(),
@@ -23277,7 +23324,7 @@ fn explainWhyTypeIsComptimeInner(
},
.Array, .Vector => {
- try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
+ try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
},
.Pointer => {
const elem_ty = ty.elemType2(mod);
@@ -23295,12 +23342,11 @@ fn explainWhyTypeIsComptimeInner(
}
return;
}
- try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
+ try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(&buf), type_set);
+ try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
},
.ErrorUnion => {
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set);
@@ -23451,7 +23497,7 @@ fn explainWhyTypeIsNotExtern(
if (ty.isSlice(mod)) {
try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
} else {
- const pointee_ty = ty.childType();
+ const pointee_ty = ty.childType(mod);
try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)});
try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty);
}
@@ -23698,7 +23744,7 @@ fn panicWithMsg(
.@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
});
const null_stack_trace = try sema.addConstant(
- try Type.optional(arena, ptr_stack_trace_ty),
+ try Type.optional(arena, ptr_stack_trace_ty, mod),
Value.null,
);
const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value };
@@ -23927,7 +23973,7 @@ fn fieldVal(
const is_pointer_to = object_ty.isSinglePointer(mod);
const inner_ty = if (is_pointer_to)
- object_ty.childType()
+ object_ty.childType(mod)
else
object_ty;
@@ -23936,12 +23982,12 @@ fn fieldVal(
if (mem.eql(u8, field_name, "len")) {
return sema.addConstant(
Type.usize,
- try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()),
+ try Value.Tag.int_u64.create(arena, inner_ty.arrayLen(mod)),
);
} else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) {
- const ptr_info = object_ty.ptrInfo().data;
+ const ptr_info = object_ty.ptrInfo(mod);
const result_ty = try Type.ptr(sema.arena, sema.mod, .{
- .pointee_type = ptr_info.pointee_type.childType(),
+ .pointee_type = ptr_info.pointee_type.childType(mod),
.sentinel = ptr_info.sentinel,
.@"align" = ptr_info.@"align",
.@"addrspace" = ptr_info.@"addrspace",
@@ -23964,7 +24010,7 @@ fn fieldVal(
}
},
.Pointer => {
- const ptr_info = inner_ty.ptrInfo().data;
+ const ptr_info = inner_ty.ptrInfo(mod);
if (ptr_info.size == .Slice) {
if (mem.eql(u8, field_name, "ptr")) {
const slice = if (is_pointer_to)
@@ -24107,7 +24153,7 @@ fn fieldPtr(
const object_ptr_src = src; // TODO better source location
const object_ptr_ty = sema.typeOf(object_ptr);
const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
- .Pointer => object_ptr_ty.elemType(),
+ .Pointer => object_ptr_ty.childType(mod),
else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}),
};
@@ -24117,7 +24163,7 @@ fn fieldPtr(
const is_pointer_to = object_ty.isSinglePointer(mod);
const inner_ty = if (is_pointer_to)
- object_ty.childType()
+ object_ty.childType(mod)
else
object_ty;
@@ -24128,7 +24174,7 @@ fn fieldPtr(
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
Type.usize,
- try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()),
+ try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen(mod)),
0, // default alignment
));
} else {
@@ -24154,9 +24200,9 @@ fn fieldPtr(
const result_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = slice_ptr_ty,
- .mutable = attr_ptr_ty.ptrIsMutable(),
+ .mutable = attr_ptr_ty.ptrIsMutable(mod),
.@"volatile" = attr_ptr_ty.isVolatilePtr(),
- .@"addrspace" = attr_ptr_ty.ptrAddressSpace(),
+ .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
@@ -24175,9 +24221,9 @@ fn fieldPtr(
} else if (mem.eql(u8, field_name, "len")) {
const result_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = Type.usize,
- .mutable = attr_ptr_ty.ptrIsMutable(),
+ .mutable = attr_ptr_ty.ptrIsMutable(mod),
.@"volatile" = attr_ptr_ty.isVolatilePtr(),
- .@"addrspace" = attr_ptr_ty.ptrAddressSpace(),
+ .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
@@ -24329,14 +24375,14 @@ fn fieldCallBind(
const mod = sema.mod;
const raw_ptr_src = src; // TODO better source location
const raw_ptr_ty = sema.typeOf(raw_ptr);
- const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C))
- raw_ptr_ty.childType()
+ const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C))
+ raw_ptr_ty.childType(mod)
else
return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)});
// Optionally dereference a second pointer to get the concrete type.
- const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One;
- const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
+ const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One;
+ const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty;
const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
const object_ptr = if (is_double_ptr)
try sema.analyzeLoad(block, src, raw_ptr, src)
@@ -24404,9 +24450,9 @@ fn fieldCallBind(
// zig fmt: off
if (first_param_type.isGenericPoison() or (
first_param_type.zigTypeTag(mod) == .Pointer and
- (first_param_type.ptrSize() == .One or
- first_param_type.ptrSize() == .C) and
- first_param_type.childType().eql(concrete_ty, sema.mod)))
+ (first_param_type.ptrSize(mod) == .One or
+ first_param_type.ptrSize(mod) == .C) and
+ first_param_type.childType(mod).eql(concrete_ty, sema.mod)))
{
// zig fmt: on
// Note that if the param type is generic poison, we know that it must
@@ -24425,8 +24471,7 @@ fn fieldCallBind(
.arg0_inst = deref,
} };
} else if (first_param_type.zigTypeTag(mod) == .Optional) {
- var opt_buf: Type.Payload.ElemType = undefined;
- const child = first_param_type.optionalChild(&opt_buf);
+ const child = first_param_type.optionalChild(mod);
if (child.eql(concrete_ty, sema.mod)) {
const deref = try sema.analyzeLoad(block, src, object_ptr, src);
return .{ .method = .{
@@ -24434,8 +24479,8 @@ fn fieldCallBind(
.arg0_inst = deref,
} };
} else if (child.zigTypeTag(mod) == .Pointer and
- child.ptrSize() == .One and
- child.childType().eql(concrete_ty, sema.mod))
+ child.ptrSize(mod) == .One and
+ child.childType(mod).eql(concrete_ty, sema.mod))
{
return .{ .method = .{
.func_inst = decl_val,
@@ -24482,15 +24527,15 @@ fn finishFieldCallBind(
field_index: u32,
object_ptr: Air.Inst.Ref,
) CompileError!ResolvedFieldCallee {
+ const mod = sema.mod;
const arena = sema.arena;
const ptr_field_ty = try Type.ptr(arena, sema.mod, .{
.pointee_type = field_ty,
- .mutable = ptr_ty.ptrIsMutable(),
- .@"addrspace" = ptr_ty.ptrAddressSpace(),
+ .mutable = ptr_ty.ptrIsMutable(mod),
+ .@"addrspace" = ptr_ty.ptrAddressSpace(mod),
});
- const mod = sema.mod;
- const container_ty = ptr_ty.childType();
+ const container_ty = ptr_ty.childType(mod);
if (container_ty.zigTypeTag(mod) == .Struct) {
if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| {
return .{ .direct = try sema.addConstant(field_ty, default_val) };
@@ -24618,7 +24663,7 @@ fn structFieldPtrByIndex(
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field = struct_obj.fields.values()[field_index];
const struct_ptr_ty = sema.typeOf(struct_ptr);
- const struct_ptr_ty_info = struct_ptr_ty.ptrInfo().data;
+ const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
var ptr_ty_data: Type.Payload.Pointer.Data = .{
.pointee_type = field.ty,
@@ -24696,7 +24741,7 @@ fn structFieldPtrByIndex(
ptr_field_ty,
try Value.Tag.field_ptr.create(sema.arena, .{
.container_ptr = struct_ptr_val,
- .container_ty = struct_ptr_ty.childType(),
+ .container_ty = struct_ptr_ty.childType(mod),
.field_index = field_index,
}),
);
@@ -24846,9 +24891,9 @@ fn unionFieldPtr(
const field = union_obj.fields.values()[field_index];
const ptr_field_ty = try Type.ptr(arena, sema.mod, .{
.pointee_type = field.ty,
- .mutable = union_ptr_ty.ptrIsMutable(),
+ .mutable = union_ptr_ty.ptrIsMutable(mod),
.@"volatile" = union_ptr_ty.isVolatilePtr(),
- .@"addrspace" = union_ptr_ty.ptrAddressSpace(),
+ .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod),
});
const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);
@@ -25009,7 +25054,7 @@ fn elemPtr(
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
- .Pointer => indexable_ptr_ty.elemType(),
+ .Pointer => indexable_ptr_ty.childType(mod),
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
};
try checkIndexable(sema, block, src, indexable_ty);
@@ -25046,7 +25091,7 @@ fn elemPtrOneLayerOnly(
try checkIndexable(sema, block, src, indexable_ty);
- switch (indexable_ty.ptrSize()) {
+ switch (indexable_ty.ptrSize(mod)) {
.Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -25065,7 +25110,7 @@ fn elemPtrOneLayerOnly(
return block.addPtrElemPtr(indexable, elem_index, result_ty);
},
.One => {
- assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
+ assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
},
}
@@ -25091,7 +25136,7 @@ fn elemVal(
const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
switch (indexable_ty.zigTypeTag(mod)) {
- .Pointer => switch (indexable_ty.ptrSize()) {
+ .Pointer => switch (indexable_ty.ptrSize(mod)) {
.Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -25112,7 +25157,7 @@ fn elemVal(
return block.addBinOp(.ptr_elem_val, indexable, elem_index);
},
.One => {
- assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
+ assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
@@ -25171,7 +25216,7 @@ fn tupleFieldPtr(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const tuple_ptr_ty = sema.typeOf(tuple_ptr);
- const tuple_ty = tuple_ptr_ty.childType();
+ const tuple_ty = tuple_ptr_ty.childType(mod);
_ = try sema.resolveTypeFields(tuple_ty);
const field_count = tuple_ty.structFieldCount();
@@ -25188,9 +25233,9 @@ fn tupleFieldPtr(
const field_ty = tuple_ty.structFieldType(field_index);
const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = field_ty,
- .mutable = tuple_ptr_ty.ptrIsMutable(),
+ .mutable = tuple_ptr_ty.ptrIsMutable(mod),
.@"volatile" = tuple_ptr_ty.isVolatilePtr(),
- .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(),
+ .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod),
});
if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
@@ -25271,10 +25316,10 @@ fn elemValArray(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const array_ty = sema.typeOf(array);
- const array_sent = array_ty.sentinel();
- const array_len = array_ty.arrayLen();
+ const array_sent = array_ty.sentinel(mod);
+ const array_len = array_ty.arrayLen(mod);
const array_len_s = array_len + @boolToInt(array_sent != null);
- const elem_ty = array_ty.childType();
+ const elem_ty = array_ty.childType(mod);
if (array_len_s == 0) {
return sema.fail(block, array_src, "indexing into empty array is not allowed", .{});
@@ -25335,9 +25380,9 @@ fn elemPtrArray(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const array_ptr_ty = sema.typeOf(array_ptr);
- const array_ty = array_ptr_ty.childType();
- const array_sent = array_ty.sentinel() != null;
- const array_len = array_ty.arrayLen();
+ const array_ty = array_ptr_ty.childType(mod);
+ const array_sent = array_ty.sentinel(mod) != null;
+ const array_len = array_ty.arrayLen(mod);
const array_len_s = array_len + @boolToInt(array_sent);
if (array_len_s == 0) {
@@ -25396,7 +25441,7 @@ fn elemValSlice(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const slice_ty = sema.typeOf(slice);
- const slice_sent = slice_ty.sentinel() != null;
+ const slice_sent = slice_ty.sentinel(mod) != null;
const elem_ty = slice_ty.elemType2(mod);
var runtime_src = slice_src;
@@ -25453,7 +25498,7 @@ fn elemPtrSlice(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const slice_ty = sema.typeOf(slice);
- const slice_sent = slice_ty.sentinel() != null;
+ const slice_sent = slice_ty.sentinel(mod) != null;
const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);
// The index must not be undefined since it can be out of bounds.
@@ -25614,7 +25659,7 @@ fn coerceExtra(
}
// T to ?T
- const child_type = try dest_ty.optionalChildAlloc(sema.arena);
+ const child_type = dest_ty.optionalChild(mod);
const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
if (in_memory_result == .no_match) {
@@ -25628,7 +25673,7 @@ fn coerceExtra(
return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
},
.Pointer => pointer: {
- const dest_info = dest_ty.ptrInfo().data;
+ const dest_info = dest_ty.ptrInfo(mod);
// Function body to function pointer.
if (inst_ty.zigTypeTag(mod) == .Fn) {
@@ -25643,11 +25688,11 @@ fn coerceExtra(
if (dest_info.size != .One) break :single_item;
if (!inst_ty.isSinglePointer(mod)) break :single_item;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
- const ptr_elem_ty = inst_ty.childType();
+ const ptr_elem_ty = inst_ty.childType(mod);
const array_ty = dest_info.pointee_type;
if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
- const array_elem_ty = array_ty.childType();
- if (array_ty.arrayLen() != 1) break :single_item;
+ const array_elem_ty = array_ty.childType(mod);
+ if (array_ty.arrayLen(mod) != 1) break :single_item;
const dest_is_mut = dest_info.mutable;
switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
.ok => {},
@@ -25660,9 +25705,9 @@ fn coerceExtra(
src_array_ptr: {
if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
- const array_ty = inst_ty.childType();
+ const array_ty = inst_ty.childType(mod);
if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
- const array_elem_type = array_ty.childType();
+ const array_elem_type = array_ty.childType(mod);
const dest_is_mut = dest_info.mutable;
const dst_elem_type = dest_info.pointee_type;
@@ -25680,7 +25725,7 @@ fn coerceExtra(
}
if (dest_info.sentinel) |dest_sent| {
- if (array_ty.sentinel()) |inst_sent| {
+ if (array_ty.sentinel(mod)) |inst_sent| {
if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) {
in_memory_result = .{ .ptr_sentinel = .{
.actual = inst_sent,
@@ -25721,7 +25766,7 @@ fn coerceExtra(
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
// In this case we must add a safety check because the C pointer
// could be null.
- const src_elem_ty = inst_ty.childType();
+ const src_elem_ty = inst_ty.childType(mod);
const dest_is_mut = dest_info.mutable;
const dst_elem_type = dest_info.pointee_type;
switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
@@ -25784,7 +25829,7 @@ fn coerceExtra(
},
.Pointer => p: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
- const inst_info = inst_ty.ptrInfo().data;
+ const inst_info = inst_ty.ptrInfo(mod);
switch (try sema.coerceInMemoryAllowed(
block,
dest_info.pointee_type,
@@ -25814,7 +25859,7 @@ fn coerceExtra(
.Union => {
// pointer to anonymous struct to pointer to union
if (inst_ty.isSinglePointer(mod) and
- inst_ty.childType().isAnonStruct() and
+ inst_ty.childType(mod).isAnonStruct() and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25823,7 +25868,7 @@ fn coerceExtra(
.Struct => {
// pointer to anonymous struct to pointer to struct
if (inst_ty.isSinglePointer(mod) and
- inst_ty.childType().isAnonStruct() and
+ inst_ty.childType(mod).isAnonStruct() and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
@@ -25835,7 +25880,7 @@ fn coerceExtra(
.Array => {
// pointer to tuple to pointer to array
if (inst_ty.isSinglePointer(mod) and
- inst_ty.childType().isTuple() and
+ inst_ty.childType(mod).isTuple() and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25854,7 +25899,7 @@ fn coerceExtra(
}
if (!inst_ty.isSinglePointer(mod)) break :to_slice;
- const inst_child_ty = inst_ty.childType();
+ const inst_child_ty = inst_ty.childType(mod);
if (!inst_child_ty.isTuple()) break :to_slice;
// empty tuple to zero-length slice
@@ -25887,7 +25932,7 @@ fn coerceExtra(
.Many => p: {
if (!inst_ty.isSlice(mod)) break :p;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
- const inst_info = inst_ty.ptrInfo().data;
+ const inst_info = inst_ty.ptrInfo(mod);
switch (try sema.coerceInMemoryAllowed(
block,
@@ -26196,9 +26241,8 @@ fn coerceExtra(
}
// ?T to T
- var buf: Type.Payload.ElemType = undefined;
if (inst_ty.zigTypeTag(mod) == .Optional and
- (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
+ (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
@@ -26399,10 +26443,8 @@ const InMemoryCoercionResult = union(enum) {
cur = pair.child;
},
.optional_shape => |pair| {
- var buf_actual: Type.Payload.ElemType = undefined;
- var buf_wanted: Type.Payload.ElemType = undefined;
try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
- pair.actual.optionalChild(&buf_actual).fmt(sema.mod), pair.wanted.optionalChild(&buf_wanted).fmt(sema.mod),
+ pair.actual.optionalChild(mod).fmt(sema.mod), pair.wanted.optionalChild(mod).fmt(sema.mod),
});
break;
},
@@ -26640,10 +26682,8 @@ fn coerceInMemoryAllowed(
}
// Pointers / Pointer-like Optionals
- var dest_buf: Type.Payload.ElemType = undefined;
- var src_buf: Type.Payload.ElemType = undefined;
- const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty, &dest_buf);
- const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty, &src_buf);
+ const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty);
+ const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty);
if (maybe_dest_ptr_ty) |dest_ptr_ty| {
if (maybe_src_ptr_ty) |src_ptr_ty| {
return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src);
@@ -26685,8 +26725,8 @@ fn coerceInMemoryAllowed(
// Arrays
if (dest_tag == .Array and src_tag == .Array) {
- const dest_info = dest_ty.arrayInfo();
- const src_info = src_ty.arrayInfo();
+ const dest_info = dest_ty.arrayInfo(mod);
+ const src_info = src_ty.arrayInfo(mod);
if (dest_info.len != src_info.len) {
return InMemoryCoercionResult{ .array_len = .{
.actual = src_info.len,
@@ -26717,8 +26757,8 @@ fn coerceInMemoryAllowed(
// Vectors
if (dest_tag == .Vector and src_tag == .Vector) {
- const dest_len = dest_ty.vectorLen();
- const src_len = src_ty.vectorLen();
+ const dest_len = dest_ty.vectorLen(mod);
+ const src_len = src_ty.vectorLen(mod);
if (dest_len != src_len) {
return InMemoryCoercionResult{ .vector_len = .{
.actual = src_len,
@@ -26748,8 +26788,8 @@ fn coerceInMemoryAllowed(
.wanted = dest_ty,
} };
}
- const dest_child_type = dest_ty.optionalChild(&dest_buf);
- const src_child_type = src_ty.optionalChild(&src_buf);
+ const dest_child_type = dest_ty.optionalChild(mod);
+ const src_child_type = src_ty.optionalChild(mod);
const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src);
if (child != .ok) {
@@ -27019,8 +27059,8 @@ fn coerceInMemoryAllowedPtrs(
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
const mod = sema.mod;
- const dest_info = dest_ptr_ty.ptrInfo().data;
- const src_info = src_ptr_ty.ptrInfo().data;
+ const dest_info = dest_ptr_ty.ptrInfo(mod);
+ const src_info = src_ptr_ty.ptrInfo(mod);
const ok_ptr_size = src_info.size == dest_info.size or
src_info.size == .C or dest_info.size == .C;
@@ -27206,11 +27246,12 @@ fn storePtr2(
operand_src: LazySrcLoc,
air_tag: Air.Inst.Tag,
) CompileError!void {
+ const mod = sema.mod;
const ptr_ty = sema.typeOf(ptr);
if (ptr_ty.isConstPtr())
return sema.fail(block, ptr_src, "cannot assign to constant", .{});
- const elem_ty = ptr_ty.childType();
+ const elem_ty = ptr_ty.childType(mod);
// To generate better code for tuples, we detect a tuple operand here, and
// analyze field loads and stores directly. This avoids an extra allocation + memcpy
@@ -27221,7 +27262,6 @@ fn storePtr2(
// this code does not handle tuple-to-struct coercion which requires dealing with missing
// fields.
const operand_ty = sema.typeOf(uncasted_operand);
- const mod = sema.mod;
if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) {
const field_count = operand_ty.structFieldCount();
var i: u32 = 0;
@@ -27247,7 +27287,7 @@ fn storePtr2(
// as well as working around an LLVM bug:
// https://github.com/ziglang/zig/issues/11154
if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
- const vector_ty = sema.typeOf(vector_ptr).childType();
+ const vector_ty = sema.typeOf(vector_ptr).childType(mod);
const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
@@ -27288,7 +27328,7 @@ fn storePtr2(
try sema.requireRuntimeBlock(block, src, runtime_src);
try sema.queueFullTypeResolution(elem_ty);
- if (ptr_ty.ptrInfo().data.vector_index == .runtime) {
+ if (ptr_ty.ptrInfo(mod).vector_index == .runtime) {
const ptr_inst = Air.refToIndex(ptr).?;
const air_tags = sema.air_instructions.items(.tag);
if (air_tags[ptr_inst] == .ptr_elem_ptr) {
@@ -27322,8 +27362,8 @@ fn storePtr2(
/// pointer. Only if the final element type matches the vector element type, and the
/// lengths match.
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
- const array_ty = sema.typeOf(ptr).childType();
const mod = sema.mod;
+ const array_ty = sema.typeOf(ptr).childType(mod);
if (array_ty.zigTypeTag(mod) != .Array) return null;
var ptr_inst = Air.refToIndex(ptr) orelse return null;
const air_datas = sema.air_instructions.items(.data);
@@ -27332,7 +27372,6 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
const prev_ptr = air_datas[ptr_inst].ty_op.operand;
const prev_ptr_ty = sema.typeOf(prev_ptr);
const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) {
- .single_mut_pointer => prev_ptr_ty.castTag(.single_mut_pointer).?.data,
.pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type,
else => return null,
};
@@ -27342,9 +27381,9 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
// We have a pointer-to-array and a pointer-to-vector. If the elements and
// lengths match, return the result.
- const vector_ty = sema.typeOf(prev_ptr).childType();
- if (array_ty.childType().eql(vector_ty.childType(), sema.mod) and
- array_ty.arrayLen() == vector_ty.vectorLen())
+ const vector_ty = sema.typeOf(prev_ptr).childType(mod);
+ if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and
+ array_ty.arrayLen(mod) == vector_ty.vectorLen(mod))
{
return prev_ptr;
} else {
@@ -27476,14 +27515,14 @@ fn beginComptimePtrMutation(
switch (parent.pointee) {
.direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
.Array, .Vector => {
- const check_len = parent.ty.arrayLenIncludingSentinel();
+ const check_len = parent.ty.arrayLenIncludingSentinel(mod);
if (elem_ptr.index >= check_len) {
// TODO have the parent include the decl so we can say "declared here"
return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{
elem_ptr.index, check_len,
});
}
- const elem_ty = parent.ty.childType();
+ const elem_ty = parent.ty.childType(mod);
// We might have a pointer to multiple elements of the array (e.g. a pointer
// to a sub-array). In this case, we just have to reinterpret the relevant
@@ -27510,7 +27549,7 @@ fn beginComptimePtrMutation(
defer parent.finishArena(sema.mod);
const array_len_including_sentinel =
- try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
+ try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
@memset(elems, Value.undef);
@@ -27536,7 +27575,7 @@ fn beginComptimePtrMutation(
defer parent.finishArena(sema.mod);
const bytes = val_ptr.castTag(.bytes).?.data;
- const dest_len = parent.ty.arrayLenIncludingSentinel();
+ const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
// bytes.len may be one greater than dest_len because of the case when
// assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
assert(bytes.len >= dest_len);
@@ -27567,13 +27606,13 @@ fn beginComptimePtrMutation(
defer parent.finishArena(sema.mod);
const str_lit = val_ptr.castTag(.str_lit).?.data;
- const dest_len = parent.ty.arrayLenIncludingSentinel();
+ const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
for (bytes, 0..) |byte, i| {
elems[i] = try Value.Tag.int_u64.create(arena, byte);
}
- if (parent.ty.sentinel()) |sent_val| {
+ if (parent.ty.sentinel(mod)) |sent_val| {
assert(elems.len == bytes.len + 1);
elems[bytes.len] = sent_val;
}
@@ -27603,7 +27642,7 @@ fn beginComptimePtrMutation(
const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena);
const array_len_including_sentinel =
- try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
+ try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
if (elems.len > 0) elems[0] = repeated_val;
for (elems[1..]) |*elem| {
@@ -27906,12 +27945,12 @@ fn beginComptimePtrMutation(
},
.opt_payload_ptr => {
const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else {
- return sema.beginComptimePtrMutation(block, src, ptr_val, try ptr_elem_ty.optionalChildAlloc(sema.arena));
+ return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod));
};
var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty);
switch (parent.pointee) {
.direct => |val_ptr| {
- const payload_ty = try parent.ty.optionalChildAlloc(sema.arena);
+ const payload_ty = parent.ty.optionalChild(mod);
switch (val_ptr.tag()) {
.undef, .null_value => {
// An optional has been initialized to undefined at comptime and now we
@@ -27984,7 +28023,7 @@ fn beginComptimePtrMutationInner(
// Handle the case that the decl is an array and we're actually trying to point to an element.
if (decl_ty.isArrayOrVector(mod)) {
- const decl_elem_ty = decl_ty.childType();
+ const decl_elem_ty = decl_ty.childType(mod);
if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
@@ -28105,7 +28144,7 @@ fn beginComptimePtrLoad(
// If we're loading an elem_ptr that was derived from a different type
// than the true type of the underlying decl, we cannot deref directly
const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
- const deref_elem_ty = deref.pointee.?.ty.childType();
+ const deref_elem_ty = deref.pointee.?.ty.childType(mod);
break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
} else false;
@@ -28115,12 +28154,12 @@ fn beginComptimePtrLoad(
}
var array_tv = deref.pointee.?;
- const check_len = array_tv.ty.arrayLenIncludingSentinel();
+ const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
if (maybe_array_ty) |load_ty| {
// It's possible that we're loading a [N]T, in which case we'd like to slice
// the pointee array directly from our parent array.
- if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) {
- const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel());
+ if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) {
+ const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
.ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod),
.val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
@@ -28134,7 +28173,7 @@ fn beginComptimePtrLoad(
break :blk deref;
}
if (elem_ptr.index == check_len - 1) {
- if (array_tv.ty.sentinel()) |sent| {
+ if (array_tv.ty.sentinel(mod)) |sent| {
deref.pointee = TypedValue{
.ty = elem_ty,
.val = sent,
@@ -28226,7 +28265,7 @@ fn beginComptimePtrLoad(
const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
const payload_ty = switch (ptr_val.tag()) {
.eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(),
- .opt_payload_ptr => try payload_ptr.container_ty.optionalChildAlloc(sema.arena),
+ .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod),
else => unreachable,
};
var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty);
@@ -28357,12 +28396,13 @@ fn coerceArrayPtrToSlice(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
if (try sema.resolveMaybeUndefVal(inst)) |val| {
const ptr_array_ty = sema.typeOf(inst);
- const array_ty = ptr_array_ty.childType();
+ const array_ty = ptr_array_ty.childType(mod);
const slice_val = try Value.Tag.slice.create(sema.arena, .{
.ptr = val,
- .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
+ .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)),
});
return sema.addConstant(dest_ty, slice_val);
}
@@ -28371,11 +28411,11 @@ fn coerceArrayPtrToSlice(
}
fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
- const dest_info = dest_ty.ptrInfo().data;
- const inst_info = inst_ty.ptrInfo().data;
const mod = sema.mod;
- const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or
- (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
+ const dest_info = dest_ty.ptrInfo(mod);
+ const inst_info = inst_ty.ptrInfo(mod);
+ const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or
+ (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
(inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0);
const ok_cv_qualifiers =
@@ -28647,7 +28687,8 @@ fn coerceAnonStructToUnionPtrs(
ptr_anon_struct: Air.Inst.Ref,
anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
- const union_ty = ptr_union_ty.childType();
+ const mod = sema.mod;
+ const union_ty = ptr_union_ty.childType(mod);
const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src);
return sema.analyzeRef(block, union_ty_src, union_inst);
@@ -28661,7 +28702,8 @@ fn coerceAnonStructToStructPtrs(
ptr_anon_struct: Air.Inst.Ref,
anon_struct_src: LazySrcLoc,
) !Air.Inst.Ref {
- const struct_ty = ptr_struct_ty.childType();
+ const mod = sema.mod;
+ const struct_ty = ptr_struct_ty.childType(mod);
const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src);
const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src);
return sema.analyzeRef(block, struct_ty_src, struct_inst);
@@ -28676,15 +28718,16 @@ fn coerceArrayLike(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
+ const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
- const inst_len = inst_ty.arrayLen();
- const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen());
- const target = sema.mod.getTarget();
+ const inst_len = inst_ty.arrayLen(mod);
+ const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod));
+ const target = mod.getTarget();
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
- dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
+ dest_ty.fmt(mod), inst_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
@@ -28694,8 +28737,8 @@ fn coerceArrayLike(
return sema.failWithOwnedErrorMsg(msg);
}
- const dest_elem_ty = dest_ty.childType();
- const inst_elem_ty = inst_ty.childType();
+ const dest_elem_ty = dest_ty.childType(mod);
+ const inst_elem_ty = inst_ty.childType(mod);
const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src);
if (in_memory_result == .ok) {
if (try sema.resolveMaybeUndefVal(inst)) |inst_val| {
@@ -28749,9 +28792,10 @@ fn coerceTupleToArray(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
+ const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
- const inst_len = inst_ty.arrayLen();
- const dest_len = dest_ty.arrayLen();
+ const inst_len = inst_ty.arrayLen(mod);
+ const dest_len = dest_ty.arrayLen(mod);
if (dest_len != inst_len) {
const msg = msg: {
@@ -28766,16 +28810,16 @@ fn coerceTupleToArray(
return sema.failWithOwnedErrorMsg(msg);
}
- const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel());
+ const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod));
const element_vals = try sema.arena.alloc(Value, dest_elems);
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems);
- const dest_elem_ty = dest_ty.childType();
+ const dest_elem_ty = dest_ty.childType(mod);
var runtime_src: ?LazySrcLoc = null;
for (element_vals, 0..) |*elem, i_usize| {
const i = @intCast(u32, i_usize);
if (i_usize == inst_len) {
- elem.* = dest_ty.sentinel().?;
+ elem.* = dest_ty.sentinel(mod).?;
element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*);
break;
}
@@ -28812,9 +28856,10 @@ fn coerceTupleToSlicePtrs(
ptr_tuple: Air.Inst.Ref,
tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
- const tuple_ty = sema.typeOf(ptr_tuple).childType();
+ const mod = sema.mod;
+ const tuple_ty = sema.typeOf(ptr_tuple).childType(mod);
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
- const slice_info = slice_ty.ptrInfo().data;
+ const slice_info = slice_ty.ptrInfo(mod);
const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod);
const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src);
if (slice_info.@"align" != 0) {
@@ -28833,8 +28878,9 @@ fn coerceTupleToArrayPtrs(
ptr_tuple: Air.Inst.Ref,
tuple_src: LazySrcLoc,
) !Air.Inst.Ref {
+ const mod = sema.mod;
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
- const ptr_info = ptr_array_ty.ptrInfo().data;
+ const ptr_info = ptr_array_ty.ptrInfo(mod);
const array_ty = ptr_info.pointee_type;
const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
if (ptr_info.@"align" != 0) {
@@ -29231,7 +29277,7 @@ fn analyzeLoad(
const mod = sema.mod;
const ptr_ty = sema.typeOf(ptr);
const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
- .Pointer => ptr_ty.childType(),
+ .Pointer => ptr_ty.childType(mod),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
};
@@ -29245,7 +29291,7 @@ fn analyzeLoad(
}
}
- if (ptr_ty.ptrInfo().data.vector_index == .runtime) {
+ if (ptr_ty.ptrInfo(mod).vector_index == .runtime) {
const ptr_inst = Air.refToIndex(ptr).?;
const air_tags = sema.air_instructions.items(.tag);
if (air_tags[ptr_inst] == .ptr_elem_ptr) {
@@ -29318,8 +29364,7 @@ fn analyzeIsNull(
const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
const operand_ty = sema.typeOf(operand);
- var buf: Type.Payload.ElemType = undefined;
- if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) {
+ if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) {
return inverted_non_null_res;
}
if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) {
@@ -29339,7 +29384,7 @@ fn analyzePtrIsNonErrComptimeOnly(
const mod = sema.mod;
const ptr_ty = sema.typeOf(operand);
assert(ptr_ty.zigTypeTag(mod) == .Pointer);
- const child_ty = ptr_ty.childType();
+ const child_ty = ptr_ty.childType(mod);
const child_tag = child_ty.zigTypeTag(mod);
if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true;
@@ -29495,7 +29540,7 @@ fn analyzeSlice(
// the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
const ptr_ptr_ty = sema.typeOf(ptr_ptr);
const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
- .Pointer => ptr_ptr_ty.elemType(),
+ .Pointer => ptr_ptr_ty.childType(mod),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}),
};
@@ -29506,30 +29551,30 @@ fn analyzeSlice(
var ptr_sentinel: ?Value = null;
switch (ptr_ptr_child_ty.zigTypeTag(mod)) {
.Array => {
- ptr_sentinel = ptr_ptr_child_ty.sentinel();
- elem_ty = ptr_ptr_child_ty.childType();
+ ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
+ elem_ty = ptr_ptr_child_ty.childType(mod);
},
- .Pointer => switch (ptr_ptr_child_ty.ptrSize()) {
+ .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) {
.One => {
- const double_child_ty = ptr_ptr_child_ty.childType();
+ const double_child_ty = ptr_ptr_child_ty.childType(mod);
if (double_child_ty.zigTypeTag(mod) == .Array) {
- ptr_sentinel = double_child_ty.sentinel();
+ ptr_sentinel = double_child_ty.sentinel(mod);
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = double_child_ty;
- elem_ty = double_child_ty.childType();
+ elem_ty = double_child_ty.childType(mod);
} else {
return sema.fail(block, src, "slice of single-item pointer", .{});
}
},
.Many, .C => {
- ptr_sentinel = ptr_ptr_child_ty.sentinel();
+ ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = ptr_ptr_child_ty;
- elem_ty = ptr_ptr_child_ty.childType();
+ elem_ty = ptr_ptr_child_ty.childType(mod);
- if (ptr_ptr_child_ty.ptrSize() == .C) {
+ if (ptr_ptr_child_ty.ptrSize(mod) == .C) {
if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| {
if (ptr_val.isNull(mod)) {
return sema.fail(block, src, "slice of null pointer", .{});
@@ -29538,11 +29583,11 @@ fn analyzeSlice(
}
},
.Slice => {
- ptr_sentinel = ptr_ptr_child_ty.sentinel();
+ ptr_sentinel = ptr_ptr_child_ty.sentinel(mod);
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
array_ty = ptr_ptr_child_ty;
- elem_ty = ptr_ptr_child_ty.childType();
+ elem_ty = ptr_ptr_child_ty.childType(mod);
},
},
else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}),
@@ -29563,7 +29608,7 @@ fn analyzeSlice(
var end_is_len = uncasted_end_opt == .none;
const end = e: {
if (array_ty.zigTypeTag(mod) == .Array) {
- const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen());
+ const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod));
if (!end_is_len) {
const end = if (by_length) end: {
@@ -29574,10 +29619,10 @@ fn analyzeSlice(
if (try sema.resolveMaybeUndefVal(end)) |end_val| {
const len_s_val = try Value.Tag.int_u64.create(
sema.arena,
- array_ty.arrayLenIncludingSentinel(),
+ array_ty.arrayLenIncludingSentinel(mod),
);
if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
- const sentinel_label: []const u8 = if (array_ty.sentinel() != null)
+ const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null)
" +1 (sentinel)"
else
"";
@@ -29617,7 +29662,7 @@ fn analyzeSlice(
if (slice_val.isUndef()) {
return sema.fail(block, src, "slice of undefined", .{});
}
- const has_sentinel = slice_ty.sentinel() != null;
+ const has_sentinel = slice_ty.sentinel(mod) != null;
var int_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel),
@@ -29751,8 +29796,8 @@ fn analyzeSlice(
try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false);
const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
- const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data;
- const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C;
+ const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo(mod);
+ const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C;
if (opt_new_len_val) |new_len_val| {
const new_len_int = new_len_val.toUnsignedInt(mod);
@@ -29780,7 +29825,7 @@ fn analyzeSlice(
if (slice_ty.isSlice(mod)) {
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
- const actual_len = if (slice_ty.sentinel() == null)
+ const actual_len = if (slice_ty.sentinel(mod) == null)
slice_len_inst
else
try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
@@ -29839,7 +29884,7 @@ fn analyzeSlice(
// requirement: end <= len
const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array)
- try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel())
+ try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel(mod))
else if (slice_ty.isSlice(mod)) blk: {
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
// we don't need to add one for sentinels because the
@@ -29848,7 +29893,7 @@ fn analyzeSlice(
}
const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice);
- if (slice_ty.sentinel() == null) break :blk slice_len_inst;
+ if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst;
// we have to add one because slice lengths don't include the sentinel
break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true);
@@ -30284,7 +30329,10 @@ fn cmpVector(
const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src);
- const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.bool);
+ const result_ty = try mod.vectorType(.{
+ .len = lhs_ty.vectorLen(mod),
+ .child = .bool_type,
+ });
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
@@ -30484,12 +30532,12 @@ fn resolvePeerTypes(
}
continue;
},
- .Pointer => if (chosen_ty.ptrSize() == .C) continue,
+ .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue,
else => {},
},
.ComptimeInt => switch (chosen_ty_tag) {
.Int, .Float, .ComptimeFloat => continue,
- .Pointer => if (chosen_ty.ptrSize() == .C) continue,
+ .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue,
else => {},
},
.Float => switch (chosen_ty_tag) {
@@ -30654,10 +30702,10 @@ fn resolvePeerTypes(
},
},
.Pointer => {
- const cand_info = candidate_ty.ptrInfo().data;
+ const cand_info = candidate_ty.ptrInfo(mod);
switch (chosen_ty_tag) {
.Pointer => {
- const chosen_info = chosen_ty.ptrInfo().data;
+ const chosen_info = chosen_ty.ptrInfo(mod);
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
@@ -30690,8 +30738,8 @@ fn resolvePeerTypes(
chosen_info.pointee_type.zigTypeTag(mod) == .Array and
cand_info.pointee_type.zigTypeTag(mod) == .Array)
{
- const chosen_elem_ty = chosen_info.pointee_type.childType();
- const cand_elem_ty = cand_info.pointee_type.childType();
+ const chosen_elem_ty = chosen_info.pointee_type.childType(mod);
+ const cand_elem_ty = cand_info.pointee_type.childType(mod);
const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src);
if (chosen_ok) {
@@ -30757,10 +30805,9 @@ fn resolvePeerTypes(
}
},
.Optional => {
- var opt_child_buf: Type.Payload.ElemType = undefined;
- const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf);
+ const chosen_ptr_ty = chosen_ty.optionalChild(mod);
if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) {
- const chosen_info = chosen_ptr_ty.ptrInfo().data;
+ const chosen_info = chosen_ptr_ty.ptrInfo(mod);
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
@@ -30777,7 +30824,7 @@ fn resolvePeerTypes(
.ErrorUnion => {
const chosen_ptr_ty = chosen_ty.errorUnionPayload();
if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) {
- const chosen_info = chosen_ptr_ty.ptrInfo().data;
+ const chosen_info = chosen_ptr_ty.ptrInfo(mod);
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
@@ -30802,8 +30849,7 @@ fn resolvePeerTypes(
}
},
.Optional => {
- var opt_child_buf: Type.Payload.ElemType = undefined;
- const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf);
+ const opt_child_ty = candidate_ty.optionalChild(mod);
if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) {
seen_const = seen_const or opt_child_ty.isConstPtr();
any_are_null = true;
@@ -30818,13 +30864,13 @@ fn resolvePeerTypes(
},
.Vector => switch (chosen_ty_tag) {
.Vector => {
- const chosen_len = chosen_ty.vectorLen();
- const candidate_len = candidate_ty.vectorLen();
+ const chosen_len = chosen_ty.vectorLen(mod);
+ const candidate_len = candidate_ty.vectorLen(mod);
if (chosen_len != candidate_len)
continue;
- const chosen_child_ty = chosen_ty.childType();
- const candidate_child_ty = candidate_ty.childType();
+ const chosen_child_ty = chosen_ty.childType(mod);
+ const candidate_child_ty = candidate_ty.childType(mod);
if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) {
const chosen_info = chosen_child_ty.intInfo(mod);
const candidate_info = candidate_child_ty.intInfo(mod);
@@ -30853,8 +30899,8 @@ fn resolvePeerTypes(
.Vector => continue,
else => {},
},
- .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) {
- if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) {
+ .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) {
+ if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) {
continue;
}
},
@@ -30874,8 +30920,7 @@ fn resolvePeerTypes(
continue;
},
.Optional => {
- var opt_child_buf: Type.Payload.ElemType = undefined;
- const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf);
+ const opt_child_ty = chosen_ty.optionalChild(mod);
if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) {
continue;
}
@@ -30949,16 +30994,16 @@ fn resolvePeerTypes(
if (convert_to_slice) {
// turn *[N]T => []T
- const chosen_child_ty = chosen_ty.childType();
- var info = chosen_ty.ptrInfo();
- info.data.sentinel = chosen_child_ty.sentinel();
- info.data.size = .Slice;
- info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr());
- info.data.pointee_type = chosen_child_ty.elemType2(mod);
-
- const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data);
+ const chosen_child_ty = chosen_ty.childType(mod);
+ var info = chosen_ty.ptrInfo(mod);
+ info.sentinel = chosen_child_ty.sentinel(mod);
+ info.size = .Slice;
+ info.mutable = !(seen_const or chosen_child_ty.isConstPtr());
+ info.pointee_type = chosen_child_ty.elemType2(mod);
+
+ const new_ptr_ty = try Type.ptr(sema.arena, mod, info);
const opt_ptr_ty = if (any_are_null)
- try Type.optional(sema.arena, new_ptr_ty)
+ try Type.optional(sema.arena, new_ptr_ty, mod)
else
new_ptr_ty;
const set_ty = err_set_ty orelse return opt_ptr_ty;
@@ -30970,22 +31015,22 @@ fn resolvePeerTypes(
switch (chosen_ty.zigTypeTag(mod)) {
.ErrorUnion => {
const ptr_ty = chosen_ty.errorUnionPayload();
- var info = ptr_ty.ptrInfo();
- info.data.mutable = false;
- const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data);
+ var info = ptr_ty.ptrInfo(mod);
+ info.mutable = false;
+ const new_ptr_ty = try Type.ptr(sema.arena, mod, info);
const opt_ptr_ty = if (any_are_null)
- try Type.optional(sema.arena, new_ptr_ty)
+ try Type.optional(sema.arena, new_ptr_ty, mod)
else
new_ptr_ty;
const set_ty = err_set_ty orelse chosen_ty.errorUnionSet();
return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod);
},
.Pointer => {
- var info = chosen_ty.ptrInfo();
- info.data.mutable = false;
- const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data);
+ var info = chosen_ty.ptrInfo(mod);
+ info.mutable = false;
+ const new_ptr_ty = try Type.ptr(sema.arena, mod, info);
const opt_ptr_ty = if (any_are_null)
- try Type.optional(sema.arena, new_ptr_ty)
+ try Type.optional(sema.arena, new_ptr_ty, mod)
else
new_ptr_ty;
const set_ty = err_set_ty orelse return opt_ptr_ty;
@@ -30998,7 +31043,7 @@ fn resolvePeerTypes(
if (any_are_null) {
const opt_ty = switch (chosen_ty.zigTypeTag(mod)) {
.Null, .Optional => chosen_ty,
- else => try Type.optional(sema.arena, chosen_ty),
+ else => try Type.optional(sema.arena, chosen_ty, mod),
};
const set_ty = err_set_ty orelse return opt_ty;
return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod);
@@ -31077,13 +31122,12 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
.Struct => return sema.resolveStructLayout(ty),
.Union => return sema.resolveUnionLayout(ty),
.Array => {
- if (ty.arrayLenIncludingSentinel() == 0) return;
- const elem_ty = ty.childType();
+ if (ty.arrayLenIncludingSentinel(mod) == 0) return;
+ const elem_ty = ty.childType(mod);
return sema.resolveTypeLayout(elem_ty);
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
+ const payload_ty = ty.optionalChild(mod);
// In case of querying the ABI alignment of this optional, we will ask
// for hasRuntimeBits() of the payload type, so we need "requires comptime"
// to be known already before this function returns.
@@ -31343,10 +31387,10 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Pointer) {
- switch (ty.ptrSize()) {
+ switch (ty.ptrSize(mod)) {
.Slice, .Many, .C => return,
.One => {
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
if (elem_ty.zigTypeTag(mod) == .Array) return;
// TODO https://github.com/ziglang/zig/issues/15479
// if (elem_ty.isTuple()) return;
@@ -31418,8 +31462,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.int_type => false,
.ptr_type => @panic("TODO"),
.array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()),
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| switch (t) {
.f16,
@@ -31478,12 +31522,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
};
return switch (ty.tag()) {
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .anyerror_void_error_union,
.empty_struct_literal,
.empty_struct,
.error_set,
@@ -31491,34 +31529,20 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.error_set_inferred,
.error_set_merged,
.@"opaque",
- .array_u8,
- .array_u8_sentinel_0,
.enum_simple,
=> false,
- .single_const_pointer_to_comptime_int,
- .function,
- => true,
+ .function => true,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.array,
.array_sentinel,
- .vector,
- => return sema.resolveTypeRequiresComptime(ty.childType()),
+ => return sema.resolveTypeRequiresComptime(ty.childType(mod)),
- .pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- => {
- const child_ty = ty.childType();
+ .pointer => {
+ const child_ty = ty.childType(mod);
if (child_ty.zigTypeTag(mod) == .Fn) {
return child_ty.fnInfo().is_generic;
} else {
@@ -31526,12 +31550,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
},
- .optional,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- => {
- var buf: Type.Payload.ElemType = undefined;
- return sema.resolveTypeRequiresComptime(ty.optionalChild(&buf));
+ .optional => {
+ return sema.resolveTypeRequiresComptime(ty.optionalChild(mod));
},
.tuple, .anon_struct => {
@@ -31609,7 +31629,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
switch (ty.zigTypeTag(mod)) {
.Pointer => {
- const child_ty = try sema.resolveTypeFields(ty.childType());
+ const child_ty = try sema.resolveTypeFields(ty.childType(mod));
return sema.resolveTypeFully(child_ty);
},
.Struct => switch (ty.tag()) {
@@ -31624,10 +31644,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
else => {},
},
.Union => return sema.resolveUnionFully(ty),
- .Array => return sema.resolveTypeFully(ty.childType()),
+ .Array => return sema.resolveTypeFully(ty.childType(mod)),
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- return sema.resolveTypeFully(ty.optionalChild(&buf));
+ return sema.resolveTypeFully(ty.optionalChild(mod));
},
.ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()),
.Fn => {
@@ -32897,10 +32916,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return null;
}
},
- .ptr_type => @panic("TODO"),
+ .ptr_type => return null,
.array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .vector_type => |vector_type| {
+ if (vector_type.len == 0) return Value.initTag(.empty_array);
+ if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v;
+ return null;
+ },
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| switch (t) {
.f16,
@@ -32963,34 +32986,15 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.error_set_merged,
.error_union,
.function,
- .single_const_pointer_to_comptime_int,
.array_sentinel,
- .array_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .const_slice,
- .mut_slice,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- .anyerror_void_error_union,
.error_set_inferred,
.@"opaque",
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
.anyframe_T,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .single_const_pointer,
- .single_mut_pointer,
.pointer,
=> return null,
.optional => {
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = ty.optionalChild(&buf);
+ const child_ty = ty.optionalChild(mod);
if (child_ty.isNoReturn()) {
return Value.null;
} else {
@@ -33111,10 +33115,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),
- .vector, .array, .array_u8 => {
- if (ty.arrayLen() == 0)
+ .array => {
+ if (ty.arrayLen(mod) == 0)
return Value.initTag(.empty_array);
- if ((try sema.typeHasOnePossibleValue(ty.elemType())) != null) {
+ if ((try sema.typeHasOnePossibleValue(ty.childType(mod))) != null) {
return Value.initTag(.the_only_possible_value);
}
return null;
@@ -33147,20 +33151,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
.data = .{ .interned = ty.ip_index },
});
return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+ } else {
+ try sema.air_instructions.append(sema.gpa, .{
+ .tag = .const_ty,
+ .data = .{ .ty = ty },
+ });
+ return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
}
- switch (ty.tag()) {
- .manyptr_u8 => return .manyptr_u8_type,
- .manyptr_const_u8 => return .manyptr_const_u8_type,
- .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type,
- .const_slice_u8 => return .const_slice_u8_type,
- .anyerror_void_error_union => return .anyerror_void_error_union_type,
- else => {},
- }
- try sema.air_instructions.append(sema.gpa, .{
- .tag = .const_ty,
- .data = .{ .ty = ty },
- });
- return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
}
fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref {
@@ -33173,6 +33170,15 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref {
pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref {
const gpa = sema.gpa;
+ if (val.ip_index != .none) {
+ if (@enumToInt(val.ip_index) < Air.ref_start_index)
+ return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index));
+ try sema.air_instructions.append(gpa, .{
+ .tag = .interned,
+ .data = .{ .interned = val.ip_index },
+ });
+ return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+ }
const ty_inst = try sema.addType(ty);
try sema.air_values.append(gpa, val);
try sema.air_instructions.append(gpa, .{
@@ -33331,7 +33337,8 @@ pub fn analyzeAddressSpace(
/// Asserts the value is a pointer and dereferences it.
/// Returns `null` if the pointer contents cannot be loaded at comptime.
fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
- const load_ty = ptr_ty.childType();
+ const mod = sema.mod;
+ const load_ty = ptr_ty.childType(mod);
const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true);
switch (res) {
.runtime_load => return null,
@@ -33422,11 +33429,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
/// This can return `error.AnalysisFail` because it sometimes requires resolving whether
/// a type has zero bits, which can cause a "foo depends on itself" compile error.
/// This logic must be kept in sync with `Type.isPtrLikeOptional`.
-fn typePtrOrOptionalPtrTy(
- sema: *Sema,
- ty: Type,
- buf: *Type.Payload.ElemType,
-) !?Type {
+fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
const mod = sema.mod;
if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
@@ -33435,14 +33438,14 @@ fn typePtrOrOptionalPtrTy(
.C => return ptr_type.elem_type.toType(),
.One, .Many => return ty,
},
- .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) {
+ .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice, .C => return null,
.Many, .One => {
if (ptr_type.is_allowzero) return null;
// optionals of zero sized types behave like bools, not pointers
- const payload_ty = o.payload_type.toType();
+ const payload_ty = opt_child.toType();
if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
return null;
}
@@ -33456,25 +33459,9 @@ fn typePtrOrOptionalPtrTy(
};
switch (ty.tag()) {
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- => return ty.optionalChild(buf),
-
- .single_const_pointer_to_comptime_int,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => return ty,
-
- .pointer => switch (ty.ptrSize()) {
+ .pointer => switch (ty.ptrSize(mod)) {
.Slice => return null,
- .C => return ty.optionalChild(buf),
+ .C => return ty.optionalChild(mod),
else => return ty,
},
@@ -33482,10 +33469,10 @@ fn typePtrOrOptionalPtrTy(
.inferred_alloc_mut => unreachable,
.optional => {
- const child_type = ty.optionalChild(buf);
+ const child_type = ty.optionalChild(mod);
if (child_type.zigTypeTag(mod) != .Pointer) return null;
- const info = child_type.ptrInfo().data;
+ const info = child_type.ptrInfo(mod);
switch (info.size) {
.Slice, .C => return null,
.Many, .One => {
@@ -33518,8 +33505,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.int_type => return false,
.ptr_type => @panic("TODO"),
.array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()),
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| return switch (t) {
.f16,
@@ -33578,12 +33565,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
}
return switch (ty.tag()) {
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .anyerror_void_error_union,
.empty_struct_literal,
.empty_struct,
.error_set,
@@ -33591,34 +33572,20 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.error_set_inferred,
.error_set_merged,
.@"opaque",
- .array_u8,
- .array_u8_sentinel_0,
.enum_simple,
=> false,
- .single_const_pointer_to_comptime_int,
- .function,
- => true,
+ .function => true,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.array,
.array_sentinel,
- .vector,
- => return sema.typeRequiresComptime(ty.childType()),
+ => return sema.typeRequiresComptime(ty.childType(mod)),
- .pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- => {
- const child_ty = ty.childType();
+ .pointer => {
+ const child_ty = ty.childType(mod);
if (child_ty.zigTypeTag(mod) == .Fn) {
return child_ty.fnInfo().is_generic;
} else {
@@ -33626,12 +33593,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
},
- .optional,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- => {
- var buf: Type.Payload.ElemType = undefined;
- return sema.typeRequiresComptime(ty.optionalChild(&buf));
+ .optional => {
+ return sema.typeRequiresComptime(ty.optionalChild(mod));
},
.tuple, .anon_struct => {
@@ -33814,7 +33777,7 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33874,7 +33837,7 @@ fn intSub(
) !Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33934,7 +33897,7 @@ fn floatAdd(
) !Value {
const mod = sema.mod;
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+ const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33992,7 +33955,7 @@ fn floatSub(
) !Value {
const mod = sema.mod;
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+ const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34050,8 +34013,8 @@ fn intSubWithOverflow(
) !Value.OverflowArithmeticResult {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
- const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
- const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34105,8 +34068,8 @@ fn floatToInt(
) CompileError!Value {
const mod = sema.mod;
if (float_ty.zigTypeTag(mod) == .Vector) {
- const elem_ty = float_ty.childType();
- const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
+ const elem_ty = float_ty.childType(mod);
+ const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
@@ -34383,8 +34346,8 @@ fn intAddWithOverflow(
) !Value.OverflowArithmeticResult {
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
- const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
- const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34442,7 +34405,7 @@ fn compareAll(
const mod = sema.mod;
if (ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
- while (i < ty.vectorLen()) : (i += 1) {
+ while (i < ty.vectorLen(mod)) : (i += 1) {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -34490,7 +34453,7 @@ fn compareVector(
) !Value {
const mod = sema.mod;
assert(ty.zigTypeTag(mod) == .Vector);
- const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+ const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34511,10 +34474,10 @@ fn compareVector(
/// This code is duplicated in `analyzePtrArithmetic`.
fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
const mod = sema.mod;
- const ptr_info = ptr_ty.ptrInfo().data;
+ const ptr_info = ptr_ty.ptrInfo(mod);
const elem_ty = ptr_ty.elemType2(mod);
const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
- const parent_ty = ptr_ty.childType();
+ const parent_ty = ptr_ty.childType(mod);
const VI = Type.Payload.Pointer.Data.VectorIndex;
@@ -34522,14 +34485,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
host_size: u16 = 0,
alignment: u32 = 0,
vector_index: VI = .none,
- } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: {
+ } = if (parent_ty.isVector(mod) and ptr_info.size == .One) blk: {
const elem_bits = elem_ty.bitSize(mod);
if (elem_bits == 0) break :blk .{};
const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
if (!is_packed) break :blk .{};
break :blk .{
- .host_size = @intCast(u16, parent_ty.arrayLen()),
+ .host_size = @intCast(u16, parent_ty.arrayLen(mod)),
.alignment = @intCast(u16, parent_ty.abiAlignment(mod)),
.vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime,
};
src/type.zig
@@ -40,7 +40,7 @@ pub const Type = struct {
.ptr_type => return .Pointer,
.array_type => return .Array,
.vector_type => return .Vector,
- .optional_type => return .Optional,
+ .opt_type => return .Optional,
.error_union_type => return .ErrorUnion,
.struct_type => return .Struct,
.union_type => return .Union,
@@ -118,38 +118,17 @@ pub const Type = struct {
.function => return .Fn,
.array,
- .array_u8_sentinel_0,
- .array_u8,
.array_sentinel,
=> return .Array,
- .vector => return .Vector,
-
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
.pointer,
.inferred_alloc_const,
.inferred_alloc_mut,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
=> return .Pointer,
- .optional,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- => return .Optional,
+ .optional => return .Optional,
- .anyerror_void_error_union, .error_union => return .ErrorUnion,
+ .error_union => return .ErrorUnion,
.anyframe_T => return .AnyFrame,
@@ -177,8 +156,7 @@ pub const Type = struct {
return switch (self.zigTypeTag(mod)) {
.ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod),
.Optional => {
- var buf: Payload.ElemType = undefined;
- return self.optionalChild(&buf).baseZigTypeTag(mod);
+ return self.optionalChild(mod).baseZigTypeTag(mod);
},
else => |t| t,
};
@@ -218,8 +196,7 @@ pub const Type = struct {
.Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()),
.Optional => {
if (!is_equality_cmp) return false;
- var buf: Payload.ElemType = undefined;
- return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp);
+ return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp);
},
};
}
@@ -275,9 +252,8 @@ pub const Type = struct {
}
pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
- if (self.ip_index != .none) {
- return null;
- }
+ assert(self.ip_index == .none);
+
if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count)
return null;
@@ -287,281 +263,61 @@ pub const Type = struct {
return null;
}
- pub fn castPointer(self: Type) ?*Payload.ElemType {
- return switch (self.tag()) {
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => self.cast(Payload.ElemType),
-
- .inferred_alloc_const => unreachable,
- .inferred_alloc_mut => unreachable,
-
- else => null,
- };
- }
-
/// If it is a function pointer, returns the function type. Otherwise returns null.
pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type {
if (ty.zigTypeTag(mod) != .Pointer) return null;
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
if (elem_ty.zigTypeTag(mod) != .Fn) return null;
return elem_ty;
}
- pub fn ptrIsMutable(ty: Type) bool {
- return switch (ty.tag()) {
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .single_const_pointer,
- .many_const_pointer,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- .c_const_pointer,
- .const_slice,
- => false,
-
- .single_mut_pointer,
- .many_mut_pointer,
- .manyptr_u8,
- .c_mut_pointer,
- .mut_slice,
- => true,
-
- .pointer => ty.castTag(.pointer).?.data.mutable,
-
- else => unreachable,
+ pub fn ptrIsMutable(ty: Type, mod: *const Module) bool {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .pointer => ty.castTag(.pointer).?.data.mutable,
+ else => unreachable,
+ },
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .ptr_type => |ptr_type| !ptr_type.is_const,
+ else => unreachable,
+ },
};
}
- pub const ArrayInfo = struct { elem_type: Type, sentinel: ?Value = null, len: u64 };
- pub fn arrayInfo(self: Type) ArrayInfo {
+ pub const ArrayInfo = struct {
+ elem_type: Type,
+ sentinel: ?Value = null,
+ len: u64,
+ };
+
+ pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo {
return .{
- .len = self.arrayLen(),
- .sentinel = self.sentinel(),
- .elem_type = self.elemType(),
+ .len = self.arrayLen(mod),
+ .sentinel = self.sentinel(mod),
+ .elem_type = self.childType(mod),
};
}
- pub fn ptrInfo(self: Type) Payload.Pointer {
- switch (self.ip_index) {
- .none => switch (self.tag()) {
- .single_const_pointer_to_comptime_int => return .{ .data = .{
- .pointee_type = Type.comptime_int,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .One,
- } },
- .const_slice_u8 => return .{ .data = .{
- .pointee_type = Type.u8,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .Slice,
- } },
- .const_slice_u8_sentinel_0 => return .{ .data = .{
- .pointee_type = Type.u8,
- .sentinel = Value.zero,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .Slice,
- } },
- .single_const_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .One,
- } },
- .single_mut_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = true,
- .@"volatile" = false,
- .size = .One,
- } },
- .many_const_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .Many,
- } },
- .manyptr_const_u8 => return .{ .data = .{
- .pointee_type = Type.u8,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .Many,
- } },
- .manyptr_const_u8_sentinel_0 => return .{ .data = .{
- .pointee_type = Type.u8,
- .sentinel = Value.zero,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .Many,
- } },
- .many_mut_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = true,
- .@"volatile" = false,
- .size = .Many,
- } },
- .manyptr_u8 => return .{ .data = .{
- .pointee_type = Type.u8,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = true,
- .@"volatile" = false,
- .size = .Many,
- } },
- .c_const_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = true,
- .mutable = false,
- .@"volatile" = false,
- .size = .C,
- } },
- .c_mut_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = true,
- .mutable = true,
- .@"volatile" = false,
- .size = .C,
- } },
- .const_slice => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .Slice,
- } },
- .mut_slice => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = true,
- .@"volatile" = false,
- .size = .Slice,
- } },
-
- .pointer => return self.castTag(.pointer).?.*,
-
- .optional_single_mut_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = true,
- .@"volatile" = false,
- .size = .One,
- } },
- .optional_single_const_pointer => return .{ .data = .{
- .pointee_type = self.castPointer().?.data,
- .sentinel = null,
- .@"align" = 0,
- .@"addrspace" = .generic,
- .bit_offset = 0,
- .host_size = 0,
- .@"allowzero" = false,
- .mutable = false,
- .@"volatile" = false,
- .size = .One,
- } },
- .optional => {
- var buf: Payload.ElemType = undefined;
- const child_type = self.optionalChild(&buf);
- return child_type.ptrInfo();
+ pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .pointer => ty.castTag(.pointer).?.data,
+ .optional => b: {
+ const child_type = ty.optionalChild(mod);
+ break :b child_type.ptrInfo(mod);
},
else => unreachable,
},
- else => @panic("TODO"),
- }
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .ptr_type => |p| Payload.Pointer.Data.fromKey(p),
+ .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
+ .ptr_type => |p| Payload.Pointer.Data.fromKey(p),
+ else => unreachable,
+ },
+ else => unreachable,
+ },
+ };
}
pub fn eql(a: Type, b: Type, mod: *Module) bool {
@@ -658,20 +414,17 @@ pub const Type = struct {
},
.array,
- .array_u8_sentinel_0,
- .array_u8,
.array_sentinel,
- .vector,
=> {
if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false;
- if (a.arrayLen() != b.arrayLen())
+ if (a.arrayLen(mod) != b.arrayLen(mod))
return false;
- const elem_ty = a.elemType();
- if (!elem_ty.eql(b.elemType(), mod))
+ const elem_ty = a.childType(mod);
+ if (!elem_ty.eql(b.childType(mod), mod))
return false;
- const sentinel_a = a.sentinel();
- const sentinel_b = b.sentinel();
+ const sentinel_a = a.sentinel(mod);
+ const sentinel_b = b.sentinel(mod);
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
return sa.eql(sb, elem_ty, mod);
@@ -683,28 +436,14 @@ pub const Type = struct {
}
},
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
.pointer,
.inferred_alloc_const,
.inferred_alloc_mut,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
=> {
if (b.zigTypeTag(mod) != .Pointer) return false;
- const info_a = a.ptrInfo().data;
- const info_b = b.ptrInfo().data;
+ const info_a = a.ptrInfo(mod);
+ const info_b = b.ptrInfo(mod);
if (!info_a.pointee_type.eql(info_b.pointee_type, mod))
return false;
if (info_a.@"align" != info_b.@"align")
@@ -743,18 +482,13 @@ pub const Type = struct {
return true;
},
- .optional,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- => {
+ .optional => {
if (b.zigTypeTag(mod) != .Optional) return false;
- var buf_a: Payload.ElemType = undefined;
- var buf_b: Payload.ElemType = undefined;
- return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), mod);
+ return a.optionalChild(mod).eql(b.optionalChild(mod), mod);
},
- .anyerror_void_error_union, .error_union => {
+ .error_union => {
if (b.zigTypeTag(mod) != .ErrorUnion) return false;
const a_set = a.errorUnionSet();
@@ -947,47 +681,23 @@ pub const Type = struct {
},
.array,
- .array_u8_sentinel_0,
- .array_u8,
.array_sentinel,
=> {
std.hash.autoHash(hasher, std.builtin.TypeId.Array);
- const elem_ty = ty.elemType();
- std.hash.autoHash(hasher, ty.arrayLen());
+ const elem_ty = ty.childType(mod);
+ std.hash.autoHash(hasher, ty.arrayLen(mod));
hashWithHasher(elem_ty, hasher, mod);
- hashSentinel(ty.sentinel(), elem_ty, hasher, mod);
+ hashSentinel(ty.sentinel(mod), elem_ty, hasher, mod);
},
- .vector => {
- std.hash.autoHash(hasher, std.builtin.TypeId.Vector);
-
- const elem_ty = ty.elemType();
- std.hash.autoHash(hasher, ty.vectorLen());
- hashWithHasher(elem_ty, hasher, mod);
- },
-
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
.pointer,
.inferred_alloc_const,
.inferred_alloc_mut,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
=> {
std.hash.autoHash(hasher, std.builtin.TypeId.Pointer);
- const info = ty.ptrInfo().data;
+ const info = ty.ptrInfo(mod);
hashWithHasher(info.pointee_type, hasher, mod);
hashSentinel(info.sentinel, info.pointee_type, hasher, mod);
std.hash.autoHash(hasher, info.@"align");
@@ -1001,17 +711,13 @@ pub const Type = struct {
std.hash.autoHash(hasher, info.size);
},
- .optional,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- => {
+ .optional => {
std.hash.autoHash(hasher, std.builtin.TypeId.Optional);
- var buf: Payload.ElemType = undefined;
- hashWithHasher(ty.optionalChild(&buf), hasher, mod);
+ hashWithHasher(ty.optionalChild(mod), hasher, mod);
},
- .anyerror_void_error_union, .error_union => {
+ .error_union => {
std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion);
const set_ty = ty.errorUnionSet();
@@ -1023,7 +729,7 @@ pub const Type = struct {
.anyframe_T => {
std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame);
- hashWithHasher(ty.childType(), hasher, mod);
+ hashWithHasher(ty.childType(mod), hasher, mod);
},
.empty_struct => {
@@ -1129,33 +835,12 @@ pub const Type = struct {
.legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
};
} else switch (self.legacy.ptr_otherwise.tag) {
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .anyerror_void_error_union,
.inferred_alloc_const,
.inferred_alloc_mut,
.empty_struct_literal,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
=> unreachable,
- .array_u8,
- .array_u8_sentinel_0,
- => return self.copyPayloadShallow(allocator, Payload.Len),
-
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
.optional,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
.anyframe_T,
=> {
const payload = self.cast(Payload.ElemType).?;
@@ -1170,13 +855,6 @@ pub const Type = struct {
};
},
- .vector => {
- const payload = self.castTag(.vector).?.data;
- return Tag.vector.create(allocator, .{
- .len = payload.len,
- .elem_type = try payload.elem_type.copy(allocator),
- });
- },
.array => {
const payload = self.castTag(.array).?.data;
return Tag.array.create(allocator, .{
@@ -1408,13 +1086,6 @@ pub const Type = struct {
});
},
- .anyerror_void_error_union => return writer.writeAll("anyerror!void"),
- .const_slice_u8 => return writer.writeAll("[]const u8"),
- .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"),
- .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"),
- .manyptr_u8 => return writer.writeAll("[*]u8"),
- .manyptr_const_u8 => return writer.writeAll("[*]const u8"),
- .manyptr_const_u8_sentinel_0 => return writer.writeAll("[*:0]const u8"),
.function => {
const payload = ty.castTag(.function).?.data;
try writer.writeAll("fn(");
@@ -1447,20 +1118,6 @@ pub const Type = struct {
ty = return_type;
continue;
},
- .array_u8 => {
- const len = ty.castTag(.array_u8).?.data;
- return writer.print("[{d}]u8", .{len});
- },
- .array_u8_sentinel_0 => {
- const len = ty.castTag(.array_u8_sentinel_0).?.data;
- return writer.print("[{d}:0]u8", .{len});
- },
- .vector => {
- const payload = ty.castTag(.vector).?.data;
- try writer.print("@Vector({d}, ", .{payload.len});
- try payload.elem_type.dump("", .{}, writer);
- return writer.writeAll(")");
- },
.array => {
const payload = ty.castTag(.array).?.data;
try writer.print("[{d}]", .{payload.len});
@@ -1512,72 +1169,12 @@ pub const Type = struct {
try writer.writeAll("}");
return;
},
- .single_const_pointer => {
- const pointee_type = ty.castTag(.single_const_pointer).?.data;
- try writer.writeAll("*const ");
- ty = pointee_type;
- continue;
- },
- .single_mut_pointer => {
- const pointee_type = ty.castTag(.single_mut_pointer).?.data;
- try writer.writeAll("*");
- ty = pointee_type;
- continue;
- },
- .many_const_pointer => {
- const pointee_type = ty.castTag(.many_const_pointer).?.data;
- try writer.writeAll("[*]const ");
- ty = pointee_type;
- continue;
- },
- .many_mut_pointer => {
- const pointee_type = ty.castTag(.many_mut_pointer).?.data;
- try writer.writeAll("[*]");
- ty = pointee_type;
- continue;
- },
- .c_const_pointer => {
- const pointee_type = ty.castTag(.c_const_pointer).?.data;
- try writer.writeAll("[*c]const ");
- ty = pointee_type;
- continue;
- },
- .c_mut_pointer => {
- const pointee_type = ty.castTag(.c_mut_pointer).?.data;
- try writer.writeAll("[*c]");
- ty = pointee_type;
- continue;
- },
- .const_slice => {
- const pointee_type = ty.castTag(.const_slice).?.data;
- try writer.writeAll("[]const ");
- ty = pointee_type;
- continue;
- },
- .mut_slice => {
- const pointee_type = ty.castTag(.mut_slice).?.data;
- try writer.writeAll("[]");
- ty = pointee_type;
- continue;
- },
.optional => {
const child_type = ty.castTag(.optional).?.data;
try writer.writeByte('?');
ty = child_type;
continue;
},
- .optional_single_const_pointer => {
- const pointee_type = ty.castTag(.optional_single_const_pointer).?.data;
- try writer.writeAll("?*const ");
- ty = pointee_type;
- continue;
- },
- .optional_single_mut_pointer => {
- const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data;
- try writer.writeAll("?*");
- ty = pointee_type;
- continue;
- },
.pointer => {
const payload = ty.castTag(.pointer).?.data;
@@ -1680,7 +1277,7 @@ pub const Type = struct {
.ptr_type => @panic("TODO"),
.array_type => @panic("TODO"),
.vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |s| return writer.writeAll(@tagName(s)),
.struct_type => @panic("TODO"),
@@ -1733,14 +1330,6 @@ pub const Type = struct {
try decl.renderFullyQualifiedName(mod, writer);
},
- .anyerror_void_error_union => try writer.writeAll("anyerror!void"),
- .const_slice_u8 => try writer.writeAll("[]const u8"),
- .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"),
- .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"),
- .manyptr_u8 => try writer.writeAll("[*]u8"),
- .manyptr_const_u8 => try writer.writeAll("[*]const u8"),
- .manyptr_const_u8_sentinel_0 => try writer.writeAll("[*:0]const u8"),
-
.error_set_inferred => {
const func = ty.castTag(.error_set_inferred).?.data.func;
@@ -1799,20 +1388,6 @@ pub const Type = struct {
try print(error_union.payload, writer, mod);
},
- .array_u8 => {
- const len = ty.castTag(.array_u8).?.data;
- try writer.print("[{d}]u8", .{len});
- },
- .array_u8_sentinel_0 => {
- const len = ty.castTag(.array_u8_sentinel_0).?.data;
- try writer.print("[{d}:0]u8", .{len});
- },
- .vector => {
- const payload = ty.castTag(.vector).?.data;
- try writer.print("@Vector({d}, ", .{payload.len});
- try print(payload.elem_type, writer, mod);
- try writer.writeAll(")");
- },
.array => {
const payload = ty.castTag(.array).?.data;
try writer.print("[{d}]", .{payload.len});
@@ -1865,17 +1440,8 @@ pub const Type = struct {
try writer.writeAll("}");
},
- .pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- => {
- const info = ty.ptrInfo().data;
+ .pointer => {
+ const info = ty.ptrInfo(mod);
if (info.sentinel) |s| switch (info.size) {
.One, .C => unreachable,
@@ -1920,16 +1486,6 @@ pub const Type = struct {
try writer.writeByte('?');
try print(child_type, writer, mod);
},
- .optional_single_mut_pointer => {
- const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data;
- try writer.writeAll("?*");
- try print(pointee_type, writer, mod);
- },
- .optional_single_const_pointer => {
- const pointee_type = ty.castTag(.optional_single_const_pointer).?.data;
- try writer.writeAll("?*const ");
- try print(pointee_type, writer, mod);
- },
.anyframe_T => {
const return_type = ty.castTag(.anyframe_T).?.data;
try writer.print("anyframe->", .{});
@@ -1963,12 +1519,6 @@ pub const Type = struct {
pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
if (self.ip_index != .none) return self.ip_index.toValue();
switch (self.tag()) {
- .single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined },
- .const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined },
- .const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined },
- .manyptr_u8 => return Value{ .ip_index = .manyptr_u8_type, .legacy = undefined },
- .manyptr_const_u8 => return Value{ .ip_index = .manyptr_const_u8_type, .legacy = undefined },
- .manyptr_const_u8_sentinel_0 => return Value{ .ip_index = .manyptr_const_u8_sentinel_0_type, .legacy = undefined },
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
else => return Value.Tag.ty.create(allocator, self),
@@ -1996,10 +1546,41 @@ pub const Type = struct {
) RuntimeBitsError!bool {
if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| return int_type.bits != 0,
- .ptr_type => @panic("TODO"),
- .array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .ptr_type => |ptr_type| {
+ // Pointers to zero-bit types still have a runtime address; however, pointers
+ // to comptime-only types do not, with the exception of function pointers.
+ if (ignore_comptime_only) return true;
+ const child_ty = ptr_type.elem_type.toType();
+ if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic;
+ if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty));
+ return !comptimeOnly(ty, mod);
+ },
+ .array_type => |array_type| {
+ if (array_type.sentinel != .none) {
+ return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
+ } else {
+ return array_type.len > 0 and
+ try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
+ }
+ },
+ .vector_type => |vector_type| {
+ return vector_type.len > 0 and
+ try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
+ },
+ .opt_type => |child| {
+ const child_ty = child.toType();
+ if (child_ty.isNoReturn()) {
+ // Then the optional is comptime-known to be null.
+ return false;
+ }
+ if (ignore_comptime_only) {
+ return true;
+ } else if (strat == .sema) {
+ return !(try strat.sema.typeRequiresComptime(child_ty));
+ } else {
+ return !comptimeOnly(child_ty, mod);
+ }
+ },
.error_union_type => @panic("TODO"),
.simple_type => |t| return switch (t) {
.f16,
@@ -2058,14 +1639,7 @@ pub const Type = struct {
.enum_tag => unreachable, // it's a value, not a type
};
switch (ty.tag()) {
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .array_u8_sentinel_0,
- .anyerror_void_error_union,
.error_set_inferred,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
.@"opaque",
.error_set_single,
@@ -2077,22 +1651,12 @@ pub const Type = struct {
// Pointers to zero-bit types still have a runtime address; however, pointers
// to comptime-only types do not, with the exception of function pointers.
.anyframe_T,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
.pointer,
=> {
if (ignore_comptime_only) {
return true;
- } else if (ty.childType().zigTypeTag(mod) == .Fn) {
- return !ty.childType().fnInfo().is_generic;
+ } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) {
+ return !ty.childType(mod).fnInfo().is_generic;
} else if (strat == .sema) {
return !(try strat.sema.typeRequiresComptime(ty));
} else {
@@ -2101,7 +1665,6 @@ pub const Type = struct {
},
// These are false because they are comptime-only types.
- .single_const_pointer_to_comptime_int,
.empty_struct,
.empty_struct_literal,
// These are function *bodies*, not pointers.
@@ -2111,8 +1674,7 @@ pub const Type = struct {
=> return false,
.optional => {
- var buf: Payload.ElemType = undefined;
- const child_ty = ty.optionalChild(&buf);
+ const child_ty = ty.optionalChild(mod);
if (child_ty.isNoReturn()) {
// Then the optional is comptime-known to be null.
return false;
@@ -2200,10 +1762,9 @@ pub const Type = struct {
}
},
- .array, .vector => return ty.arrayLen() != 0 and
- try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
- .array_u8 => return ty.arrayLen() != 0,
- .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ .array => return ty.arrayLen(mod) != 0 and
+ try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
@@ -2224,14 +1785,14 @@ pub const Type = struct {
/// readFrom/writeToMemory are supported only for types with a well-
/// defined memory layout
pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool {
- if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .int_type => return true,
- .ptr_type => @panic("TODO"),
- .array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
- .error_union_type => @panic("TODO"),
- .simple_type => |t| return switch (t) {
+ if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => true,
+ .ptr_type => true,
+ .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod),
+ .vector_type => true,
+ .opt_type => |child| child.toType().isPtrLikeOptional(mod),
+ .error_union_type => false,
+ .simple_type => |t| switch (t) {
.f16,
.f32,
.f64,
@@ -2287,23 +1848,8 @@ pub const Type = struct {
.enum_tag => unreachable, // it's a value, not a type
};
return switch (ty.tag()) {
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- .array_u8,
- .array_u8_sentinel_0,
.pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .single_const_pointer_to_comptime_int,
.enum_numbered,
- .vector,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
=> true,
.error_set,
@@ -2313,13 +1859,8 @@ pub const Type = struct {
.@"opaque",
// These are function bodies, not function pointers.
.function,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .const_slice,
- .mut_slice,
.enum_simple,
.error_union,
- .anyerror_void_error_union,
.anyframe_T,
.tuple,
.anon_struct,
@@ -2336,7 +1877,7 @@ pub const Type = struct {
.array,
.array_sentinel,
- => ty.childType().hasWellDefinedLayout(mod),
+ => ty.childType(mod).hasWellDefinedLayout(mod),
.optional => ty.isPtrLikeOptional(mod),
.@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto,
@@ -2417,76 +1958,36 @@ pub const Type = struct {
}
pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 {
- switch (ty.tag()) {
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- => {
- const child_type = ty.cast(Payload.ElemType).?.data;
- if (opt_sema) |sema| {
- const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema });
- return res.scalar;
- }
- return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
- },
-
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- => return 1,
+ switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .pointer => {
+ const ptr_info = ty.castTag(.pointer).?.data;
+ if (ptr_info.@"align" != 0) {
+ return ptr_info.@"align";
+ } else if (opt_sema) |sema| {
+ const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema });
+ return res.scalar;
+ } else {
+ return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
+ }
+ },
+ .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema),
- .pointer => {
- const ptr_info = ty.castTag(.pointer).?.data;
- if (ptr_info.@"align" != 0) {
- return ptr_info.@"align";
- } else if (opt_sema) |sema| {
- const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema });
- return res.scalar;
- } else {
- return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
- }
+ else => unreachable,
+ },
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => @panic("TODO"),
},
- .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema),
-
- else => unreachable,
}
}
- pub fn ptrAddressSpace(self: Type) std.builtin.AddressSpace {
+ pub fn ptrAddressSpace(self: Type, mod: *const Module) std.builtin.AddressSpace {
return switch (self.tag()) {
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- .inferred_alloc_const,
- .inferred_alloc_mut,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => .generic,
-
.pointer => self.castTag(.pointer).?.data.@"addrspace",
.optional => {
- var buf: Payload.ElemType = undefined;
- const child_type = self.optionalChild(&buf);
- return child_type.ptrAddressSpace();
+ const child_type = self.optionalChild(mod);
+ return child_type.ptrAddressSpace(mod);
},
else => unreachable,
@@ -2530,15 +2031,31 @@ pub const Type = struct {
) Module.CompileError!AbiAlignmentAdvanced {
const target = mod.getTarget();
+ const opt_sema = switch (strat) {
+ .sema => |sema| sema,
+ else => null,
+ };
+
if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => |int_type| {
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) };
},
- .ptr_type => @panic("TODO"),
- .array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .ptr_type => {
+ return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
+ },
+ .array_type => |array_type| {
+ return array_type.child.toType().abiAlignmentAdvanced(mod, strat);
+ },
+ .vector_type => |vector_type| {
+ const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema);
+ const bits = @intCast(u32, bits_u64);
+ const bytes = ((bits * vector_type.len) + 7) / 8;
+ const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
+ return AbiAlignmentAdvanced{ .scalar = alignment };
+ },
+
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| switch (t) {
.bool,
@@ -2617,15 +2134,8 @@ pub const Type = struct {
.enum_tag => unreachable, // it's a value, not a type
};
- const opt_sema = switch (strat) {
- .sema => |sema| sema,
- else => null,
- };
switch (ty.tag()) {
- .array_u8_sentinel_0,
- .array_u8,
- .@"opaque",
- => return AbiAlignmentAdvanced{ .scalar = 1 },
+ .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 },
// represents machine code; not a pointer
.function => {
@@ -2634,47 +2144,21 @@ pub const Type = struct {
return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
},
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
.pointer,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
.anyframe_T,
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
// TODO revisit this when we have the concept of the error tag type
- .anyerror_void_error_union,
.error_set_inferred,
.error_set_single,
.error_set,
.error_set_merged,
=> return AbiAlignmentAdvanced{ .scalar = 2 },
- .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat),
-
- .vector => {
- const len = ty.arrayLen();
- const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema);
- const bytes = ((bits * len) + 7) / 8;
- const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes);
- return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) };
- },
+ .array, .array_sentinel => return ty.childType(mod).abiAlignmentAdvanced(mod, strat),
.optional => {
- var buf: Payload.ElemType = undefined;
- const child_type = ty.optionalChild(&buf);
+ const child_type = ty.optionalChild(mod);
switch (child_type.zigTypeTag(mod)) {
.Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
@@ -2933,8 +2417,29 @@ pub const Type = struct {
},
.ptr_type => @panic("TODO"),
.array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .vector_type => |vector_type| {
+ const opt_sema = switch (strat) {
+ .sema => |sema| sema,
+ .eager => null,
+ .lazy => |arena| return AbiSizeAdvanced{
+ .val = try Value.Tag.lazy_size.create(arena, ty),
+ },
+ };
+ const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
+ const elem_bits = @intCast(u32, elem_bits_u64);
+ const total_bits = elem_bits * vector_type.len;
+ const total_bytes = (total_bits + 7) / 8;
+ const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
+ .scalar => |x| x,
+ .val => return AbiSizeAdvanced{
+ .val = try Value.Tag.lazy_size.create(strat.lazy, ty),
+ },
+ };
+ const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment);
+ return AbiSizeAdvanced{ .scalar = result };
+ },
+
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| switch (t) {
.bool,
@@ -3014,7 +2519,6 @@ pub const Type = struct {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
- .single_const_pointer_to_comptime_int,
.empty_struct_literal,
.empty_struct,
=> return AbiSizeAdvanced{ .scalar = 0 },
@@ -3068,8 +2572,6 @@ pub const Type = struct {
return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true);
},
- .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data },
- .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 },
.array => {
const payload = ty.castTag(.array).?.data;
switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) {
@@ -3093,47 +2595,7 @@ pub const Type = struct {
}
},
- .vector => {
- const payload = ty.castTag(.vector).?.data;
- const opt_sema = switch (strat) {
- .sema => |sema| sema,
- .eager => null,
- .lazy => |arena| return AbiSizeAdvanced{
- .val = try Value.Tag.lazy_size.create(arena, ty),
- },
- };
- const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema);
- const total_bits = elem_bits * payload.len;
- const total_bytes = (total_bits + 7) / 8;
- const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
- .scalar => |x| x,
- .val => return AbiSizeAdvanced{
- .val = try Value.Tag.lazy_size.create(strat.lazy, ty),
- },
- };
- const result = std.mem.alignForwardGeneric(u64, total_bytes, alignment);
- return AbiSizeAdvanced{ .scalar = result };
- },
-
- .anyframe_T,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
-
- .const_slice,
- .mut_slice,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
+ .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
.pointer => switch (ty.castTag(.pointer).?.data.size) {
.Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
@@ -3141,7 +2603,6 @@ pub const Type = struct {
},
// TODO revisit this when we have the concept of the error tag type
- .anyerror_void_error_union,
.error_set_inferred,
.error_set,
.error_set_merged,
@@ -3149,8 +2610,7 @@ pub const Type = struct {
=> return AbiSizeAdvanced{ .scalar = 2 },
.optional => {
- var buf: Payload.ElemType = undefined;
- const child_type = ty.optionalChild(&buf);
+ const child_type = ty.optionalChild(mod);
if (child_type.isNoReturn()) {
return AbiSizeAdvanced{ .scalar = 0 };
@@ -3272,8 +2732,12 @@ pub const Type = struct {
.int_type => |int_type| return int_type.bits,
.ptr_type => @panic("TODO"),
.array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .vector_type => |vector_type| {
+ const child_ty = vector_type.child.toType();
+ const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema);
+ return elem_bit_size * vector_type.len;
+ },
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| switch (t) {
.f16 => return 16,
@@ -3339,7 +2803,6 @@ pub const Type = struct {
switch (ty.tag()) {
.function => unreachable, // represents machine code; not a pointer
- .single_const_pointer_to_comptime_int => unreachable,
.empty_struct => unreachable,
.empty_struct_literal => unreachable,
.inferred_alloc_const => unreachable,
@@ -3388,13 +2851,6 @@ pub const Type = struct {
return size;
},
- .vector => {
- const payload = ty.castTag(.vector).?.data;
- const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
- return elem_bit_size * payload.len;
- },
- .array_u8 => return 8 * ty.castTag(.array_u8).?.data,
- .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1),
.array => {
const payload = ty.castTag(.array).?.data;
const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod));
@@ -3415,43 +2871,13 @@ pub const Type = struct {
.anyframe_T => return target.ptrBitWidth(),
- .const_slice,
- .mut_slice,
- => return target.ptrBitWidth() * 2,
-
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- => return target.ptrBitWidth() * 2,
-
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- => {
- return target.ptrBitWidth();
- },
-
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- => {
- return target.ptrBitWidth();
- },
-
.pointer => switch (ty.castTag(.pointer).?.data.size) {
.Slice => return target.ptrBitWidth() * 2,
else => return target.ptrBitWidth(),
},
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => return target.ptrBitWidth(),
-
.error_set,
.error_set_single,
- .anyerror_void_error_union,
.error_set_inferred,
.error_set_merged,
=> return 16, // TODO revisit this when we have the concept of the error tag type
@@ -3481,12 +2907,11 @@ pub const Type = struct {
return true;
},
.Array => {
- if (ty.arrayLenIncludingSentinel() == 0) return true;
- return ty.childType().layoutIsResolved(mod);
+ if (ty.arrayLenIncludingSentinel(mod) == 0) return true;
+ return ty.childType(mod).layoutIsResolved(mod);
},
.Optional => {
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buf);
+ const payload_ty = ty.optionalChild(mod);
return payload_ty.layoutIsResolved(mod);
},
.ErrorUnion => {
@@ -3500,9 +2925,6 @@ pub const Type = struct {
pub fn isSinglePointer(ty: Type, mod: *const Module) bool {
switch (ty.ip_index) {
.none => return switch (ty.tag()) {
- .single_const_pointer,
- .single_mut_pointer,
- .single_const_pointer_to_comptime_int,
.inferred_alloc_const,
.inferred_alloc_mut,
=> true,
@@ -3519,54 +2941,33 @@ pub const Type = struct {
}
/// Asserts `ty` is a pointer.
- pub fn ptrSize(ty: Type) std.builtin.Type.Pointer.Size {
- return ptrSizeOrNull(ty).?;
+ pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size {
+ return ptrSizeOrNull(ty, mod).?;
}
/// Returns `null` if `ty` is not a pointer.
- pub fn ptrSizeOrNull(ty: Type) ?std.builtin.Type.Pointer.Size {
- return switch (ty.tag()) {
- .const_slice,
- .mut_slice,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- => .Slice,
-
- .many_const_pointer,
- .many_mut_pointer,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => .Many,
-
- .c_const_pointer,
- .c_mut_pointer,
- => .C,
-
- .single_const_pointer,
- .single_mut_pointer,
- .single_const_pointer_to_comptime_int,
- .inferred_alloc_const,
- .inferred_alloc_mut,
- => .One,
+ pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .inferred_alloc_const,
+ .inferred_alloc_mut,
+ => .One,
- .pointer => ty.castTag(.pointer).?.data.size,
+ .pointer => ty.castTag(.pointer).?.data.size,
- else => null,
+ else => null,
+ },
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .ptr_type => |ptr_info| ptr_info.size,
+ else => null,
+ },
};
}
pub fn isSlice(ty: Type, mod: *const Module) bool {
return switch (ty.ip_index) {
.none => switch (ty.tag()) {
- .const_slice,
- .mut_slice,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- => true,
-
.pointer => ty.castTag(.pointer).?.data.size == .Slice,
-
else => false,
},
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
@@ -3583,78 +2984,28 @@ pub const Type = struct {
pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type {
switch (self.tag()) {
- .const_slice_u8 => return Type.initTag(.manyptr_const_u8),
- .const_slice_u8_sentinel_0 => return Type.initTag(.manyptr_const_u8_sentinel_0),
-
- .const_slice => {
- const elem_type = self.castTag(.const_slice).?.data;
- buffer.* = .{
- .elem_type = .{
- .base = .{ .tag = .many_const_pointer },
- .data = elem_type,
- },
- };
- return Type.initPayload(&buffer.elem_type.base);
- },
- .mut_slice => {
- const elem_type = self.castTag(.mut_slice).?.data;
- buffer.* = .{
- .elem_type = .{
- .base = .{ .tag = .many_mut_pointer },
- .data = elem_type,
- },
- };
- return Type.initPayload(&buffer.elem_type.base);
- },
-
.pointer => {
const payload = self.castTag(.pointer).?.data;
assert(payload.size == .Slice);
- if (payload.sentinel != null or
- payload.@"align" != 0 or
- payload.@"addrspace" != .generic or
- payload.bit_offset != 0 or
- payload.host_size != 0 or
- payload.vector_index != .none or
- payload.@"allowzero" or
- payload.@"volatile")
- {
- buffer.* = .{
- .pointer = .{
- .data = .{
- .pointee_type = payload.pointee_type,
- .sentinel = payload.sentinel,
- .@"align" = payload.@"align",
- .@"addrspace" = payload.@"addrspace",
- .bit_offset = payload.bit_offset,
- .host_size = payload.host_size,
- .vector_index = payload.vector_index,
- .@"allowzero" = payload.@"allowzero",
- .mutable = payload.mutable,
- .@"volatile" = payload.@"volatile",
- .size = .Many,
- },
- },
- };
- return Type.initPayload(&buffer.pointer.base);
- } else if (payload.mutable) {
- buffer.* = .{
- .elem_type = .{
- .base = .{ .tag = .many_mut_pointer },
- .data = payload.pointee_type,
- },
- };
- return Type.initPayload(&buffer.elem_type.base);
- } else {
- buffer.* = .{
- .elem_type = .{
- .base = .{ .tag = .many_const_pointer },
- .data = payload.pointee_type,
+ buffer.* = .{
+ .pointer = .{
+ .data = .{
+ .pointee_type = payload.pointee_type,
+ .sentinel = payload.sentinel,
+ .@"align" = payload.@"align",
+ .@"addrspace" = payload.@"addrspace",
+ .bit_offset = payload.bit_offset,
+ .host_size = payload.host_size,
+ .vector_index = payload.vector_index,
+ .@"allowzero" = payload.@"allowzero",
+ .mutable = payload.mutable,
+ .@"volatile" = payload.@"volatile",
+ .size = .Many,
},
- };
- return Type.initPayload(&buffer.elem_type.base);
- }
+ },
+ };
+ return Type.initPayload(&buffer.pointer.base);
},
else => unreachable,
@@ -3663,19 +3014,7 @@ pub const Type = struct {
pub fn isConstPtr(self: Type) bool {
return switch (self.tag()) {
- .single_const_pointer,
- .many_const_pointer,
- .c_const_pointer,
- .single_const_pointer_to_comptime_int,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .const_slice,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => true,
-
.pointer => !self.castTag(.pointer).?.data.mutable,
-
else => false,
};
}
@@ -3702,49 +3041,46 @@ pub const Type = struct {
pub fn isCPtr(self: Type) bool {
return switch (self.tag()) {
- .c_const_pointer,
- .c_mut_pointer,
- => return true,
-
.pointer => self.castTag(.pointer).?.data.size == .C,
else => return false,
};
}
- pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool {
- switch (self.tag()) {
- .c_const_pointer,
- .c_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- .manyptr_u8,
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- .single_const_pointer,
- .single_const_pointer_to_comptime_int,
- .single_mut_pointer,
- => return true,
+ pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool {
+ switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .pointer => switch (ty.castTag(.pointer).?.data.size) {
+ .Slice => return false,
+ .One, .Many, .C => return true,
+ },
- .pointer => switch (self.castTag(.pointer).?.data.size) {
- .Slice => return false,
- .One, .Many, .C => return true,
- },
+ .optional => {
+ const child_type = ty.optionalChild(mod);
+ if (child_type.zigTypeTag(mod) != .Pointer) return false;
+ const info = child_type.ptrInfo(mod);
+ switch (info.size) {
+ .Slice, .C => return false,
+ .Many, .One => return !info.@"allowzero",
+ }
+ },
- .optional => {
- var buf: Payload.ElemType = undefined;
- const child_type = self.optionalChild(&buf);
- if (child_type.zigTypeTag(mod) != .Pointer) return false;
- const info = child_type.ptrInfo().data;
- switch (info.size) {
- .Slice, .C => return false,
- .Many, .One => return !info.@"allowzero",
- }
+ else => return false,
+ },
+ else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .ptr_type => |ptr_type| switch (ptr_type.size) {
+ .Slice => false,
+ .One, .Many, .C => true,
+ },
+ .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
+ .ptr_type => |p| switch (p.size) {
+ .Slice, .C => false,
+ .Many, .One => !p.is_allowzero,
+ },
+ else => false,
+ },
+ else => false,
},
-
- else => return false,
}
}
@@ -3754,23 +3090,17 @@ pub const Type = struct {
if (ty.isPtrLikeOptional(mod)) {
return true;
}
- return ty.ptrInfo().data.@"allowzero";
+ return ty.ptrInfo(mod).@"allowzero";
}
/// See also `isPtrLikeOptional`.
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool {
switch (ty.tag()) {
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- => return true,
-
.optional => {
const child_ty = ty.castTag(.optional).?.data;
switch (child_ty.zigTypeTag(mod)) {
.Pointer => {
- const info = child_ty.ptrInfo().data;
+ const info = child_ty.ptrInfo(mod);
switch (info.size) {
.C => return false,
.Slice, .Many, .One => return !info.@"allowzero",
@@ -3793,7 +3123,7 @@ pub const Type = struct {
pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool {
if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.ptr_type => |ptr_type| ptr_type.size == .C,
- .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) {
+ .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice, .C => false,
.Many, .One => !ptr_type.is_allowzero,
@@ -3803,16 +3133,10 @@ pub const Type = struct {
else => false,
};
switch (ty.tag()) {
- .optional_single_const_pointer,
- .optional_single_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- => return true,
-
.optional => {
const child_ty = ty.castTag(.optional).?.data;
if (child_ty.zigTypeTag(mod) != .Pointer) return false;
- const info = child_ty.ptrInfo().data;
+ const info = child_ty.ptrInfo(mod);
switch (info.size) {
.Slice, .C => return false,
.Many, .One => return !info.@"allowzero",
@@ -3828,43 +3152,24 @@ pub const Type = struct {
/// For *[N]T, returns [N]T.
/// For *T, returns T.
/// For [*]T, returns T.
- pub fn childType(ty: Type) Type {
- return switch (ty.tag()) {
- .vector => ty.castTag(.vector).?.data.elem_type,
- .array => ty.castTag(.array).?.data.elem_type,
- .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- => ty.castPointer().?.data,
-
- .array_u8,
- .array_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => Type.u8,
-
- .single_const_pointer_to_comptime_int => Type.comptime_int,
- .pointer => ty.castTag(.pointer).?.data.pointee_type,
+ pub fn childType(ty: Type, mod: *const Module) Type {
+ return childTypeIp(ty, mod.intern_pool);
+ }
- else => unreachable,
+ pub fn childTypeIp(ty: Type, ip: InternPool) Type {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .array => ty.castTag(.array).?.data.elem_type,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
+
+ .pointer => ty.castTag(.pointer).?.data.pointee_type,
+
+ else => unreachable,
+ },
+ else => ip.childType(ty.ip_index).toType(),
};
}
- /// Asserts the type is a pointer or array type.
- /// TODO this is deprecated in favor of `childType`.
- pub const elemType = childType;
-
/// For *[N]T, returns T.
/// For ?*T, returns T.
/// For ?*[N]T, returns T.
@@ -3875,54 +3180,42 @@ pub const Type = struct {
/// For []T, returns T.
/// For anyframe->T, returns T.
pub fn elemType2(ty: Type, mod: *const Module) Type {
- return switch (ty.tag()) {
- .vector => ty.castTag(.vector).?.data.elem_type,
- .array => ty.castTag(.array).?.data.elem_type,
- .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- => ty.castPointer().?.data,
-
- .single_const_pointer,
- .single_mut_pointer,
- => ty.castPointer().?.data.shallowElemType(mod),
-
- .array_u8,
- .array_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- => Type.u8,
-
- .single_const_pointer_to_comptime_int => Type.comptime_int,
- .pointer => {
- const info = ty.castTag(.pointer).?.data;
- const child_ty = info.pointee_type;
- if (info.size == .One) {
- return child_ty.shallowElemType(mod);
- } else {
- return child_ty;
- }
- },
- .optional => ty.castTag(.optional).?.data.childType(),
- .optional_single_mut_pointer => ty.castPointer().?.data,
- .optional_single_const_pointer => ty.castPointer().?.data,
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .array => ty.castTag(.array).?.data.elem_type,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
+
+ .pointer => {
+ const info = ty.castTag(.pointer).?.data;
+ const child_ty = info.pointee_type;
+ if (info.size == .One) {
+ return child_ty.shallowElemType(mod);
+ } else {
+ return child_ty;
+ }
+ },
+ .optional => ty.castTag(.optional).?.data.childType(mod),
- .anyframe_T => ty.castTag(.anyframe_T).?.data,
+ .anyframe_T => ty.castTag(.anyframe_T).?.data,
- else => unreachable,
+ else => unreachable,
+ },
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .ptr_type => |ptr_type| switch (ptr_type.size) {
+ .One => ptr_type.elem_type.toType().shallowElemType(mod),
+ .Many, .C, .Slice => ptr_type.elem_type.toType(),
+ },
+ .vector_type => |vector_type| vector_type.child.toType(),
+ .array_type => |array_type| array_type.child.toType(),
+ .opt_type => |child| mod.intern_pool.childType(child).toType(),
+ else => unreachable,
+ },
};
}
fn shallowElemType(child_ty: Type, mod: *const Module) Type {
return switch (child_ty.zigTypeTag(mod)) {
- .Array, .Vector => child_ty.childType(),
+ .Array, .Vector => child_ty.childType(mod),
else => child_ty,
};
}
@@ -3930,7 +3223,7 @@ pub const Type = struct {
/// For vectors, returns the element type. Otherwise returns self.
pub fn scalarType(ty: Type, mod: *const Module) Type {
return switch (ty.zigTypeTag(mod)) {
- .Vector => ty.childType(),
+ .Vector => ty.childType(mod),
else => ty,
};
}
@@ -3938,51 +3231,25 @@ pub const Type = struct {
/// Asserts that the type is an optional.
/// Resulting `Type` will have inner memory referencing `buf`.
/// Note that for C pointers this returns the type unmodified.
- pub fn optionalChild(ty: Type, buf: *Payload.ElemType) Type {
- return switch (ty.tag()) {
- .optional => ty.castTag(.optional).?.data,
- .optional_single_mut_pointer => {
- buf.* = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = ty.castPointer().?.data,
- };
- return Type.initPayload(&buf.base);
- },
- .optional_single_const_pointer => {
- buf.* = .{
- .base = .{ .tag = .single_const_pointer },
- .data = ty.castPointer().?.data,
- };
- return Type.initPayload(&buf.base);
- },
+ pub fn optionalChild(ty: Type, mod: *const Module) Type {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .optional => ty.castTag(.optional).?.data,
- .pointer, // here we assume it is a C pointer
- .c_const_pointer,
- .c_mut_pointer,
- => return ty,
+ .pointer, // here we assume it is a C pointer
+ => return ty,
- else => unreachable,
- };
- }
-
- /// Asserts that the type is an optional.
- /// Same as `optionalChild` but allocates the buffer if needed.
- pub fn optionalChildAlloc(ty: Type, allocator: Allocator) !Type {
- switch (ty.tag()) {
- .optional => return ty.castTag(.optional).?.data,
- .optional_single_mut_pointer => {
- return Tag.single_mut_pointer.create(allocator, ty.castPointer().?.data);
+ else => unreachable,
},
- .optional_single_const_pointer => {
- return Tag.single_const_pointer.create(allocator, ty.castPointer().?.data);
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .opt_type => |child| child.toType(),
+ .ptr_type => |ptr_type| b: {
+ assert(ptr_type.size == .C);
+ break :b ty;
+ },
+ else => unreachable,
},
- .pointer, // here we assume it is a C pointer
- .c_const_pointer,
- .c_mut_pointer,
- => return ty,
-
- else => unreachable,
- }
+ };
}
/// Returns the tag type of a union, if the type is a union and it has a tag type.
@@ -4071,19 +3338,25 @@ pub const Type = struct {
}
/// Asserts that the type is an error union.
- pub fn errorUnionPayload(self: Type) Type {
- return switch (self.tag()) {
- .anyerror_void_error_union => Type.void,
- .error_union => self.castTag(.error_union).?.data.payload,
- else => unreachable,
+ pub fn errorUnionPayload(ty: Type) Type {
+ return switch (ty.ip_index) {
+ .anyerror_void_error_union_type => Type.void,
+ .none => switch (ty.tag()) {
+ .error_union => ty.castTag(.error_union).?.data.payload,
+ else => unreachable,
+ },
+ else => @panic("TODO"),
};
}
- pub fn errorUnionSet(self: Type) Type {
- return switch (self.tag()) {
- .anyerror_void_error_union => Type.anyerror,
- .error_union => self.castTag(.error_union).?.data.error_set,
- else => unreachable,
+ pub fn errorUnionSet(ty: Type) Type {
+ return switch (ty.ip_index) {
+ .anyerror_void_error_union_type => Type.anyerror,
+ .none => switch (ty.tag()) {
+ .error_union => ty.castTag(.error_union).?.data.error_set,
+ else => unreachable,
+ },
+ else => @panic("TODO"),
};
}
@@ -4168,67 +3441,73 @@ pub const Type = struct {
}
/// Asserts the type is an array or vector or struct.
- pub fn arrayLen(ty: Type) u64 {
- return switch (ty.tag()) {
- .vector => ty.castTag(.vector).?.data.len,
- .array => ty.castTag(.array).?.data.len,
- .array_sentinel => ty.castTag(.array_sentinel).?.data.len,
- .array_u8 => ty.castTag(.array_u8).?.data,
- .array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data,
- .tuple => ty.castTag(.tuple).?.data.types.len,
- .anon_struct => ty.castTag(.anon_struct).?.data.types.len,
- .@"struct" => ty.castTag(.@"struct").?.data.fields.count(),
- .empty_struct, .empty_struct_literal => 0,
+ pub fn arrayLen(ty: Type, mod: *const Module) u64 {
+ return arrayLenIp(ty, mod.intern_pool);
+ }
- else => unreachable,
+ pub fn arrayLenIp(ty: Type, ip: InternPool) u64 {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .array => ty.castTag(.array).?.data.len,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.len,
+ .tuple => ty.castTag(.tuple).?.data.types.len,
+ .anon_struct => ty.castTag(.anon_struct).?.data.types.len,
+ .@"struct" => ty.castTag(.@"struct").?.data.fields.count(),
+ .empty_struct, .empty_struct_literal => 0,
+
+ else => unreachable,
+ },
+ else => switch (ip.indexToKey(ty.ip_index)) {
+ .vector_type => |vector_type| vector_type.len,
+ .array_type => |array_type| array_type.len,
+ else => unreachable,
+ },
};
}
- pub fn arrayLenIncludingSentinel(ty: Type) u64 {
- return ty.arrayLen() + @boolToInt(ty.sentinel() != null);
+ pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 {
+ return ty.arrayLen(mod) + @boolToInt(ty.sentinel(mod) != null);
}
- pub fn vectorLen(ty: Type) u32 {
- return switch (ty.tag()) {
- .vector => @intCast(u32, ty.castTag(.vector).?.data.len),
- .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len),
- .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len),
- else => unreachable,
+ pub fn vectorLen(ty: Type, mod: *const Module) u32 {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len),
+ .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len),
+ else => unreachable,
+ },
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .vector_type => |vector_type| vector_type.len,
+ else => unreachable,
+ },
};
}
/// Asserts the type is an array, pointer or vector.
- pub fn sentinel(self: Type) ?Value {
- return switch (self.tag()) {
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .single_const_pointer_to_comptime_int,
- .vector,
- .array,
- .array_u8,
- .manyptr_u8,
- .manyptr_const_u8,
- .const_slice_u8,
- .const_slice,
- .mut_slice,
- .tuple,
- .empty_struct_literal,
- .@"struct",
- => return null,
+ pub fn sentinel(ty: Type, mod: *const Module) ?Value {
+ return switch (ty.ip_index) {
+ .none => switch (ty.tag()) {
+ .array,
+ .tuple,
+ .empty_struct_literal,
+ .@"struct",
+ => null,
- .pointer => return self.castTag(.pointer).?.data.sentinel,
- .array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel,
+ .pointer => ty.castTag(.pointer).?.data.sentinel,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.sentinel,
- .array_u8_sentinel_0,
- .const_slice_u8_sentinel_0,
- .manyptr_const_u8_sentinel_0,
- => return Value.zero,
+ else => unreachable,
+ },
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .vector_type,
+ .struct_type,
+ => null,
- else => unreachable,
+ .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null,
+ .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null,
+
+ else => unreachable,
+ },
};
}
@@ -4292,8 +3571,6 @@ pub const Type = struct {
return .{ .signedness = .unsigned, .bits = 16 };
},
- .vector => ty = ty.castTag(.vector).?.data.elem_type,
-
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout == .Packed);
@@ -4321,8 +3598,9 @@ pub const Type = struct {
.int_type => |int_type| return int_type,
.ptr_type => unreachable,
.array_type => unreachable,
- .vector_type => @panic("TODO"),
- .optional_type => unreachable,
+ .vector_type => |vector_type| ty = vector_type.child.toType(),
+
+ .opt_type => unreachable,
.error_union_type => unreachable,
.simple_type => unreachable, // handled via Index enum tag above
.struct_type => @panic("TODO"),
@@ -4426,7 +3704,11 @@ pub const Type = struct {
/// Asserts the type is a function or a function pointer.
pub fn fnReturnType(ty: Type) Type {
- const fn_ty = if (ty.castPointer()) |p| p.data else ty;
+ const fn_ty = switch (ty.tag()) {
+ .pointer => ty.castTag(.pointer).?.data.pointee_type,
+ .function => ty,
+ else => unreachable,
+ };
return fn_ty.castTag(.function).?.data.return_type;
}
@@ -4516,8 +3798,12 @@ pub const Type = struct {
},
.ptr_type => @panic("TODO"),
.array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .vector_type => |vector_type| {
+ if (vector_type.len == 0) return Value.initTag(.empty_array);
+ if (vector_type.child.toType().onePossibleValue(mod)) |v| return v;
+ return null;
+ },
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| switch (t) {
.f16,
@@ -4580,34 +3866,15 @@ pub const Type = struct {
.error_set,
.error_set_merged,
.function,
- .single_const_pointer_to_comptime_int,
.array_sentinel,
- .array_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .const_slice,
- .mut_slice,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- .anyerror_void_error_union,
.error_set_inferred,
.@"opaque",
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
.anyframe_T,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .single_const_pointer,
- .single_mut_pointer,
.pointer,
=> return null,
.optional => {
- var buf: Payload.ElemType = undefined;
- const child_ty = ty.optionalChild(&buf);
+ const child_ty = ty.optionalChild(mod);
if (child_ty.isNoReturn()) {
return Value.null;
} else {
@@ -4690,10 +3957,10 @@ pub const Type = struct {
.empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),
- .vector, .array, .array_u8 => {
- if (ty.arrayLen() == 0)
+ .array => {
+ if (ty.arrayLen(mod) == 0)
return Value.initTag(.empty_array);
- if (ty.elemType().onePossibleValue(mod) != null)
+ if (ty.childType(mod).onePossibleValue(mod) != null)
return Value.initTag(.the_only_possible_value);
return null;
},
@@ -4711,9 +3978,9 @@ pub const Type = struct {
if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => false,
.ptr_type => @panic("TODO"),
- .array_type => @panic("TODO"),
- .vector_type => @panic("TODO"),
- .optional_type => @panic("TODO"),
+ .array_type => |array_type| return array_type.child.toType().comptimeOnly(mod),
+ .vector_type => |vector_type| return vector_type.child.toType().comptimeOnly(mod),
+ .opt_type => @panic("TODO"),
.error_union_type => @panic("TODO"),
.simple_type => |t| switch (t) {
.f16,
@@ -4772,12 +4039,6 @@ pub const Type = struct {
};
return switch (ty.tag()) {
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
- .anyerror_void_error_union,
.empty_struct_literal,
.empty_struct,
.error_set,
@@ -4785,35 +4046,21 @@ pub const Type = struct {
.error_set_inferred,
.error_set_merged,
.@"opaque",
- .array_u8,
- .array_u8_sentinel_0,
.enum_simple,
=> false,
- .single_const_pointer_to_comptime_int,
// These are function bodies, not function pointers.
- .function,
- => true,
+ .function => true,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.array,
.array_sentinel,
- .vector,
- => return ty.childType().comptimeOnly(mod),
+ => return ty.childType(mod).comptimeOnly(mod),
- .pointer,
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
- => {
- const child_ty = ty.childType();
+ .pointer => {
+ const child_ty = ty.childType(mod);
if (child_ty.zigTypeTag(mod) == .Fn) {
return false;
} else {
@@ -4821,12 +4068,8 @@ pub const Type = struct {
}
},
- .optional,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
- => {
- var buf: Type.Payload.ElemType = undefined;
- return ty.optionalChild(&buf).comptimeOnly(mod);
+ .optional => {
+ return ty.optionalChild(mod).comptimeOnly(mod);
},
.tuple, .anon_struct => {
@@ -4882,6 +4125,10 @@ pub const Type = struct {
};
}
+ pub fn isVector(ty: Type, mod: *const Module) bool {
+ return ty.zigTypeTag(mod) == .Vector;
+ }
+
pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => true,
@@ -4892,9 +4139,9 @@ pub const Type = struct {
pub fn isIndexable(ty: Type, mod: *const Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => true,
- .Pointer => switch (ty.ptrSize()) {
+ .Pointer => switch (ty.ptrSize(mod)) {
.Slice, .Many, .C => true,
- .One => ty.elemType().zigTypeTag(mod) == .Array,
+ .One => ty.childType(mod).zigTypeTag(mod) == .Array,
},
.Struct => ty.isTuple(),
else => false,
@@ -4904,10 +4151,10 @@ pub const Type = struct {
pub fn indexableHasLen(ty: Type, mod: *const Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => true,
- .Pointer => switch (ty.ptrSize()) {
+ .Pointer => switch (ty.ptrSize(mod)) {
.Many, .C => false,
.Slice => true,
- .One => ty.elemType().zigTypeTag(mod) == .Array,
+ .One => ty.childType(mod).zigTypeTag(mod) == .Array,
},
.Struct => ty.isTuple(),
else => false,
@@ -5527,14 +4774,6 @@ pub const Type = struct {
/// with different enum tags, because the former requires more payload data than the latter.
/// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`.
pub const Tag = enum(usize) {
- // The first section of this enum are tags that require no payload.
- manyptr_u8,
- manyptr_const_u8,
- manyptr_const_u8_sentinel_0,
- single_const_pointer_to_comptime_int,
- const_slice_u8,
- const_slice_u8_sentinel_0,
- anyerror_void_error_union,
/// Same as `empty_struct` except it has an empty namespace.
empty_struct_literal,
/// This is a special value that tracks a set of types that have been stored
@@ -5545,28 +4784,15 @@ pub const Type = struct {
inferred_alloc_const, // See last_no_payload_tag below.
// After this, the tag requires a payload.
- array_u8,
- array_u8_sentinel_0,
array,
array_sentinel,
- vector,
/// Possible Value tags for this: @"struct"
tuple,
/// Possible Value tags for this: @"struct"
anon_struct,
pointer,
- single_const_pointer,
- single_mut_pointer,
- many_const_pointer,
- many_mut_pointer,
- c_const_pointer,
- c_mut_pointer,
- const_slice,
- mut_slice,
function,
optional,
- optional_single_mut_pointer,
- optional_single_const_pointer,
error_union,
anyframe_T,
error_set,
@@ -5590,33 +4816,12 @@ pub const Type = struct {
pub fn Type(comptime t: Tag) type {
return switch (t) {
- .single_const_pointer_to_comptime_int,
- .anyerror_void_error_union,
- .const_slice_u8,
- .const_slice_u8_sentinel_0,
.inferred_alloc_const,
.inferred_alloc_mut,
.empty_struct_literal,
- .manyptr_u8,
- .manyptr_const_u8,
- .manyptr_const_u8_sentinel_0,
=> @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
- .array_u8,
- .array_u8_sentinel_0,
- => Payload.Len,
-
- .single_const_pointer,
- .single_mut_pointer,
- .many_const_pointer,
- .many_mut_pointer,
- .c_const_pointer,
- .c_mut_pointer,
- .const_slice,
- .mut_slice,
.optional,
- .optional_single_mut_pointer,
- .optional_single_const_pointer,
.anyframe_T,
=> Payload.ElemType,
@@ -5624,7 +4829,7 @@ pub const Type = struct {
.error_set_inferred => Payload.ErrorSetInferred,
.error_set_merged => Payload.ErrorSetMerged,
- .array, .vector => Payload.Array,
+ .array => Payload.Array,
.array_sentinel => Payload.ArraySentinel,
.pointer => Payload.Pointer,
.function => Payload.Function,
@@ -5847,15 +5052,28 @@ pub const Type = struct {
@"volatile": bool = false,
size: std.builtin.Type.Pointer.Size = .One,
- pub const VectorIndex = enum(u32) {
- none = std.math.maxInt(u32),
- runtime = std.math.maxInt(u32) - 1,
- _,
- };
+ pub const VectorIndex = InternPool.Key.PtrType.VectorIndex;
+
pub fn alignment(data: Data, mod: *const Module) u32 {
if (data.@"align" != 0) return data.@"align";
return abiAlignment(data.pointee_type, mod);
}
+
+ pub fn fromKey(p: InternPool.Key.PtrType) Data {
+ return .{
+ .pointee_type = p.elem_type.toType(),
+ .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null,
+ .@"align" = p.alignment,
+ .@"addrspace" = p.address_space,
+ .bit_offset = p.bit_offset,
+ .host_size = p.host_size,
+ .vector_index = p.vector_index,
+ .@"allowzero" = p.is_allowzero,
+ .mutable = !p.is_const,
+ .@"volatile" = p.is_volatile,
+ .size = p.size,
+ };
+ }
};
};
@@ -5986,6 +5204,17 @@ pub const Type = struct {
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined };
+ pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type, .legacy = undefined };
+ pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type, .legacy = undefined };
+ pub const single_const_pointer_to_comptime_int: Type = .{
+ .ip_index = .single_const_pointer_to_comptime_int_type,
+ .legacy = undefined,
+ };
+ pub const const_slice_u8_sentinel_0: Type = .{
+ .ip_index = .const_slice_u8_sentinel_0_type,
+ .legacy = undefined,
+ };
+
pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined };
pub const err_int = Type.u16;
@@ -6019,50 +5248,6 @@ pub const Type = struct {
}
}
- if (d.@"align" == 0 and d.@"addrspace" == .generic and
- d.bit_offset == 0 and d.host_size == 0 and d.vector_index == .none and
- !d.@"allowzero" and !d.@"volatile")
- {
- if (d.sentinel) |sent| {
- if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
- switch (d.size) {
- .Slice => {
- if (sent.compareAllWithZero(.eq, mod)) {
- return Type.initTag(.const_slice_u8_sentinel_0);
- }
- },
- .Many => {
- if (sent.compareAllWithZero(.eq, mod)) {
- return Type.initTag(.manyptr_const_u8_sentinel_0);
- }
- },
- else => {},
- }
- }
- } else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
- switch (d.size) {
- .Slice => return Type.initTag(.const_slice_u8),
- .Many => return Type.initTag(.manyptr_const_u8),
- else => {},
- }
- } else {
- const T = Type.Tag;
- const type_payload = try arena.create(Type.Payload.ElemType);
- type_payload.* = .{
- .base = .{
- .tag = switch (d.size) {
- .One => if (d.mutable) T.single_mut_pointer else T.single_const_pointer,
- .Many => if (d.mutable) T.many_mut_pointer else T.many_const_pointer,
- .C => if (d.mutable) T.c_mut_pointer else T.c_const_pointer,
- .Slice => if (d.mutable) T.mut_slice else T.const_slice,
- },
- },
- .data = d.pointee_type,
- };
- return Type.initPayload(&type_payload.base);
- }
- }
-
return Type.Tag.pointer.create(arena, d);
}
@@ -6073,13 +5258,21 @@ pub const Type = struct {
elem_type: Type,
mod: *Module,
) Allocator.Error!Type {
- if (elem_type.eql(Type.u8, mod)) {
- if (sent) |some| {
- if (some.eql(Value.zero, elem_type, mod)) {
- return Tag.array_u8_sentinel_0.create(arena, len);
+ if (elem_type.ip_index != .none) {
+ if (sent) |s| {
+ if (s.ip_index != .none) {
+ return mod.arrayType(.{
+ .len = len,
+ .child = elem_type.ip_index,
+ .sentinel = s.ip_index,
+ });
}
} else {
- return Tag.array_u8.create(arena, len);
+ return mod.arrayType(.{
+ .len = len,
+ .child = elem_type.ip_index,
+ .sentinel = .none,
+ });
}
}
@@ -6097,24 +5290,11 @@ pub const Type = struct {
});
}
- pub fn vector(arena: Allocator, len: u64, elem_type: Type) Allocator.Error!Type {
- return Tag.vector.create(arena, .{
- .len = len,
- .elem_type = elem_type,
- });
- }
-
- pub fn optional(arena: Allocator, child_type: Type) Allocator.Error!Type {
- switch (child_type.tag()) {
- .single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
- arena,
- child_type.elemType(),
- ),
- .single_mut_pointer => return Type.Tag.optional_single_mut_pointer.create(
- arena,
- child_type.elemType(),
- ),
- else => return Type.Tag.optional.create(arena, child_type),
+ pub fn optional(arena: Allocator, child_type: Type, mod: *Module) Allocator.Error!Type {
+ if (child_type.ip_index != .none) {
+ return mod.optionalType(child_type.ip_index);
+ } else {
+ return Type.Tag.optional.create(arena, child_type);
}
}
@@ -6125,12 +5305,6 @@ pub const Type = struct {
mod: *Module,
) Allocator.Error!Type {
assert(error_set.zigTypeTag(mod) == .ErrorSet);
- if (error_set.eql(Type.anyerror, mod) and
- payload.eql(Type.void, mod))
- {
- return Type.initTag(.anyerror_void_error_union);
- }
-
return Type.Tag.error_union.create(arena, .{
.error_set = error_set,
.payload = payload,
src/TypedValue.zig
@@ -77,15 +77,6 @@ pub fn print(
return writer.writeAll("(variable)");
while (true) switch (val.tag()) {
- .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"),
- .const_slice_u8_type => return writer.writeAll("[]const u8"),
- .const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"),
- .anyerror_void_error_union_type => return writer.writeAll("anyerror!void"),
-
- .manyptr_u8_type => return writer.writeAll("[*]u8"),
- .manyptr_const_u8_type => return writer.writeAll("[*]const u8"),
- .manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"),
-
.empty_struct_value, .aggregate => {
if (level == 0) {
return writer.writeAll(".{ ... }");
@@ -112,7 +103,7 @@ pub fn print(
return writer.writeAll("}");
} else {
const elem_ty = ty.elemType2(mod);
- const len = ty.arrayLen();
+ const len = ty.arrayLen(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @intCast(usize, std.math.min(len, max_string_len));
@@ -288,7 +279,7 @@ pub fn print(
.ty = ty.elemType2(mod),
.val = val.castTag(.repeated).?.data,
};
- const len = ty.arrayLen();
+ const len = ty.arrayLen(mod);
const max_len = std.math.min(len, max_aggregate_items);
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
@@ -306,7 +297,7 @@ pub fn print(
try writer.writeAll(".{ ");
try print(.{
.ty = ty.elemType2(mod),
- .val = ty.sentinel().?,
+ .val = ty.sentinel(mod).?,
}, writer, level - 1, mod);
return writer.writeAll(" }");
},
@@ -364,8 +355,7 @@ pub fn print(
},
.opt_payload => {
val = val.castTag(.opt_payload).?.data;
- var buf: Type.Payload.ElemType = undefined;
- ty = ty.optionalChild(&buf);
+ ty = ty.optionalChild(mod);
return print(.{ .ty = ty, .val = val }, writer, level, mod);
},
.eu_payload_ptr => {
@@ -386,13 +376,8 @@ pub fn print(
try writer.writeAll(", &(payload of ");
- var ptr_ty: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = data.container_ty,
- };
-
try print(.{
- .ty = Type.initPayload(&ptr_ty.base),
+ .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
.val = data.container_ptr,
}, writer, level - 1, mod);
@@ -415,13 +400,8 @@ pub fn print(
try writer.writeAll(", &(payload of ");
- var ptr_ty: Type.Payload.ElemType = .{
- .base = .{ .tag = .single_mut_pointer },
- .data = data.container_ty,
- };
-
try print(.{
- .ty = Type.initPayload(&ptr_ty.base),
+ .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
.val = data.container_ptr,
}, writer, level - 1, mod);
src/value.zig
@@ -33,14 +33,6 @@ pub const Value = struct {
// Keep in sync with tools/stage2_pretty_printers_common.py
pub const Tag = enum(usize) {
// The first section of this enum are tags that require no payload.
- manyptr_u8_type,
- manyptr_const_u8_type,
- manyptr_const_u8_sentinel_0_type,
- single_const_pointer_to_comptime_int_type,
- const_slice_u8_type,
- const_slice_u8_sentinel_0_type,
- anyerror_void_error_union_type,
-
undef,
zero,
one,
@@ -140,11 +132,6 @@ pub const Value = struct {
pub fn Type(comptime t: Tag) type {
return switch (t) {
- .single_const_pointer_to_comptime_int_type,
- .const_slice_u8_type,
- .const_slice_u8_sentinel_0_type,
- .anyerror_void_error_union_type,
-
.undef,
.zero,
.one,
@@ -153,9 +140,6 @@ pub const Value = struct {
.empty_struct_value,
.empty_array,
.null_value,
- .manyptr_u8_type,
- .manyptr_const_u8_type,
- .manyptr_const_u8_sentinel_0_type,
=> @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"),
.int_big_positive,
@@ -280,9 +264,7 @@ pub const Value = struct {
}
pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() {
- if (self.ip_index != .none) {
- return null;
- }
+ assert(self.ip_index == .none);
if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count)
return null;
@@ -305,11 +287,6 @@ pub const Value = struct {
.legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
};
} else switch (self.legacy.ptr_otherwise.tag) {
- .single_const_pointer_to_comptime_int_type,
- .const_slice_u8_type,
- .const_slice_u8_sentinel_0_type,
- .anyerror_void_error_union_type,
-
.undef,
.zero,
.one,
@@ -318,9 +295,6 @@ pub const Value = struct {
.empty_array,
.null_value,
.empty_struct_value,
- .manyptr_u8_type,
- .manyptr_const_u8_type,
- .manyptr_const_u8_sentinel_0_type,
=> unreachable,
.ty, .lazy_align, .lazy_size => {
@@ -553,14 +527,6 @@ pub const Value = struct {
}
var val = start_val;
while (true) switch (val.tag()) {
- .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
- .const_slice_u8_type => return out_stream.writeAll("[]const u8"),
- .const_slice_u8_sentinel_0_type => return out_stream.writeAll("[:0]const u8"),
- .anyerror_void_error_union_type => return out_stream.writeAll("anyerror!void"),
- .manyptr_u8_type => return out_stream.writeAll("[*]u8"),
- .manyptr_const_u8_type => return out_stream.writeAll("[*]const u8"),
- .manyptr_const_u8_sentinel_0_type => return out_stream.writeAll("[*:0]const u8"),
-
.empty_struct_value => return out_stream.writeAll("struct {}{}"),
.aggregate => {
return out_stream.writeAll("(aggregate)");
@@ -674,7 +640,7 @@ pub const Value = struct {
switch (val.tag()) {
.bytes => {
const bytes = val.castTag(.bytes).?.data;
- const adjusted_len = bytes.len - @boolToInt(ty.sentinel() != null);
+ const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null);
const adjusted_bytes = bytes[0..adjusted_len];
return allocator.dupe(u8, adjusted_bytes);
},
@@ -686,7 +652,7 @@ pub const Value = struct {
.enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
.repeated => {
const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod));
- const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen()));
+ const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod)));
@memset(result, byte);
return result;
},
@@ -701,7 +667,7 @@ pub const Value = struct {
const slice = val.castTag(.slice).?.data;
return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod);
},
- else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod),
+ else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
}
}
@@ -720,13 +686,6 @@ pub const Value = struct {
if (self.ip_index != .none) return self.ip_index.toType();
return switch (self.tag()) {
.ty => self.castTag(.ty).?.data,
- .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
- .const_slice_u8_type => Type.initTag(.const_slice_u8),
- .const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0),
- .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union),
- .manyptr_u8_type => Type.initTag(.manyptr_u8),
- .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8),
- .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0),
else => unreachable,
};
@@ -1096,8 +1055,8 @@ pub const Value = struct {
else => unreachable,
},
.Array => {
- const len = ty.arrayLen();
- const elem_ty = ty.childType();
+ const len = ty.arrayLen(mod);
+ const elem_ty = ty.childType(mod);
const elem_size = @intCast(usize, elem_ty.abiSize(mod));
var elem_i: usize = 0;
var elem_value_buf: ElemValueBuffer = undefined;
@@ -1150,8 +1109,7 @@ pub const Value = struct {
},
.Optional => {
if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout;
- var buf: Type.Payload.ElemType = undefined;
- const child = ty.optionalChild(&buf);
+ const child = ty.optionalChild(mod);
const opt_val = val.optionalValue(mod);
if (opt_val) |some| {
return some.writeToMemory(child, mod, buffer);
@@ -1220,9 +1178,9 @@ pub const Value = struct {
else => unreachable,
},
.Vector => {
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
- const len = @intCast(usize, ty.arrayLen());
+ const len = @intCast(usize, ty.arrayLen(mod));
var bits: u16 = 0;
var elem_i: usize = 0;
@@ -1267,8 +1225,7 @@ pub const Value = struct {
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
- var buf: Type.Payload.ElemType = undefined;
- const child = ty.optionalChild(&buf);
+ const child = ty.optionalChild(mod);
const opt_val = val.optionalValue(mod);
if (opt_val) |some| {
return some.writeToPackedMemory(child, mod, buffer, bit_offset);
@@ -1335,9 +1292,9 @@ pub const Value = struct {
else => unreachable,
},
.Array => {
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
- const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
+ const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod)));
var offset: usize = 0;
for (elems) |*elem| {
elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena);
@@ -1386,8 +1343,7 @@ pub const Value = struct {
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
- var buf: Type.Payload.ElemType = undefined;
- const child = ty.optionalChild(&buf);
+ const child = ty.optionalChild(mod);
return readFromMemory(child, mod, buffer, arena);
},
else => @panic("TODO implement readFromMemory for more types"),
@@ -1449,8 +1405,8 @@ pub const Value = struct {
else => unreachable,
},
.Vector => {
- const elem_ty = ty.childType();
- const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
+ const elem_ty = ty.childType(mod);
+ const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod)));
var bits: u16 = 0;
const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
@@ -1483,8 +1439,7 @@ pub const Value = struct {
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
- var buf: Type.Payload.ElemType = undefined;
- const child = ty.optionalChild(&buf);
+ const child = ty.optionalChild(mod);
return readFromPackedMemory(child, mod, buffer, bit_offset, arena);
},
else => @panic("TODO implement readFromPackedMemory for more types"),
@@ -1956,7 +1911,7 @@ pub const Value = struct {
pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
if (ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
- while (i < ty.vectorLen()) : (i += 1) {
+ while (i < ty.vectorLen(mod)) : (i += 1) {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -2092,8 +2047,7 @@ pub const Value = struct {
.opt_payload => {
const a_payload = a.castTag(.opt_payload).?.data;
const b_payload = b.castTag(.opt_payload).?.data;
- var buffer: Type.Payload.ElemType = undefined;
- const payload_ty = ty.optionalChild(&buffer);
+ const payload_ty = ty.optionalChild(mod);
return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema);
},
.slice => {
@@ -2175,7 +2129,7 @@ pub const Value = struct {
return true;
}
- const elem_ty = ty.childType();
+ const elem_ty = ty.childType(mod);
for (a_field_vals, 0..) |a_elem, i| {
const b_elem = b_field_vals[i];
@@ -2239,8 +2193,8 @@ pub const Value = struct {
return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
},
.Array, .Vector => {
- const len = ty.arrayLen();
- const elem_ty = ty.childType();
+ const len = ty.arrayLen(mod);
+ const elem_ty = ty.childType(mod);
var i: usize = 0;
var a_buf: ElemValueBuffer = undefined;
var b_buf: ElemValueBuffer = undefined;
@@ -2253,11 +2207,11 @@ pub const Value = struct {
}
return true;
},
- .Pointer => switch (ty.ptrSize()) {
+ .Pointer => switch (ty.ptrSize(mod)) {
.Slice => {
- const a_len = switch (a_ty.ptrSize()) {
+ const a_len = switch (a_ty.ptrSize(mod)) {
.Slice => a.sliceLen(mod),
- .One => a_ty.childType().arrayLen(),
+ .One => a_ty.childType(mod).arrayLen(mod),
else => unreachable,
};
if (a_len != b.sliceLen(mod)) {
@@ -2266,7 +2220,7 @@ pub const Value = struct {
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
- const a_ptr = switch (a_ty.ptrSize()) {
+ const a_ptr = switch (a_ty.ptrSize(mod)) {
.Slice => a.slicePtr(),
.One => a,
else => unreachable,
@@ -2412,8 +2366,8 @@ pub const Value = struct {
else => return hashPtr(val, hasher, mod),
},
.Array, .Vector => {
- const len = ty.arrayLen();
- const elem_ty = ty.childType();
+ const len = ty.arrayLen(mod);
+ const elem_ty = ty.childType(mod);
var index: usize = 0;
var elem_value_buf: ElemValueBuffer = undefined;
while (index < len) : (index += 1) {
@@ -2438,8 +2392,7 @@ pub const Value = struct {
if (val.castTag(.opt_payload)) |payload| {
std.hash.autoHash(hasher, true); // non-null
const sub_val = payload.data;
- var buffer: Type.Payload.ElemType = undefined;
- const sub_ty = ty.optionalChild(&buffer);
+ const sub_ty = ty.optionalChild(mod);
sub_val.hash(sub_ty, hasher, mod);
} else {
std.hash.autoHash(hasher, false); // null
@@ -2534,8 +2487,8 @@ pub const Value = struct {
else => val.hashPtr(hasher, mod),
},
.Array, .Vector => {
- const len = ty.arrayLen();
- const elem_ty = ty.childType();
+ const len = ty.arrayLen(mod);
+ const elem_ty = ty.childType(mod);
var index: usize = 0;
var elem_value_buf: ElemValueBuffer = undefined;
while (index < len) : (index += 1) {
@@ -2544,8 +2497,7 @@ pub const Value = struct {
}
},
.Optional => if (val.castTag(.opt_payload)) |payload| {
- var buf: Type.Payload.ElemType = undefined;
- const child_ty = ty.optionalChild(&buf);
+ const child_ty = ty.optionalChild(mod);
payload.data.hashUncoerced(child_ty, hasher, mod);
} else std.hash.autoHash(hasher, std.builtin.TypeId.Null),
.ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else {
@@ -2720,7 +2672,7 @@ pub const Value = struct {
const decl_index = val.castTag(.decl_ref).?.data;
const decl = mod.declPtr(decl_index);
if (decl.ty.zigTypeTag(mod) == .Array) {
- return decl.ty.arrayLen();
+ return decl.ty.arrayLen(mod);
} else {
return 1;
}
@@ -2729,7 +2681,7 @@ pub const Value = struct {
const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
const decl = mod.declPtr(decl_index);
if (decl.ty.zigTypeTag(mod) == .Array) {
- return decl.ty.arrayLen();
+ return decl.ty.arrayLen(mod);
} else {
return 1;
}
@@ -2737,7 +2689,7 @@ pub const Value = struct {
.comptime_field_ptr => {
const payload = val.castTag(.comptime_field_ptr).?.data;
if (payload.field_ty.zigTypeTag(mod) == .Array) {
- return payload.field_ty.arrayLen();
+ return payload.field_ty.arrayLen(mod);
} else {
return 1;
}
@@ -3137,7 +3089,7 @@ pub const Value = struct {
pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
if (int_ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, int_ty.vectorLen());
+ const result_data = try arena.alloc(Value, int_ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -3250,7 +3202,7 @@ pub const Value = struct {
mod: *Module,
) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3298,7 +3250,7 @@ pub const Value = struct {
mod: *Module,
) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3345,8 +3297,8 @@ pub const Value = struct {
mod: *Module,
) !OverflowArithmeticResult {
if (ty.zigTypeTag(mod) == .Vector) {
- const overflowed_data = try arena.alloc(Value, ty.vectorLen());
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod));
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3408,7 +3360,7 @@ pub const Value = struct {
mod: *Module,
) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3452,7 +3404,7 @@ pub const Value = struct {
mod: *Module,
) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3527,7 +3479,7 @@ pub const Value = struct {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -3565,7 +3517,7 @@ pub const Value = struct {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3601,7 +3553,7 @@ pub const Value = struct {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3631,7 +3583,7 @@ pub const Value = struct {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3666,7 +3618,7 @@ pub const Value = struct {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3701,7 +3653,7 @@ pub const Value = struct {
pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3741,7 +3693,7 @@ pub const Value = struct {
pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3781,7 +3733,7 @@ pub const Value = struct {
pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3857,7 +3809,7 @@ pub const Value = struct {
pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3904,7 +3856,7 @@ pub const Value = struct {
pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3950,7 +3902,7 @@ pub const Value = struct {
pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -3986,7 +3938,7 @@ pub const Value = struct {
pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4007,7 +3959,7 @@ pub const Value = struct {
mod: *Module,
) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4038,7 +3990,7 @@ pub const Value = struct {
pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4078,8 +4030,8 @@ pub const Value = struct {
mod: *Module,
) !OverflowArithmeticResult {
if (ty.zigTypeTag(mod) == .Vector) {
- const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod));
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4136,7 +4088,7 @@ pub const Value = struct {
mod: *Module,
) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4184,7 +4136,7 @@ pub const Value = struct {
mod: *Module,
) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, ty.vectorLen());
+ const result_data = try arena.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4212,7 +4164,7 @@ pub const Value = struct {
pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag(mod) == .Vector) {
- const result_data = try allocator.alloc(Value, ty.vectorLen());
+ const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4264,7 +4216,7 @@ pub const Value = struct {
) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4300,7 +4252,7 @@ pub const Value = struct {
) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4359,7 +4311,7 @@ pub const Value = struct {
) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4418,7 +4370,7 @@ pub const Value = struct {
) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4477,7 +4429,7 @@ pub const Value = struct {
) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -4530,7 +4482,7 @@ pub const Value = struct {
pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4570,7 +4522,7 @@ pub const Value = struct {
pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4610,7 +4562,7 @@ pub const Value = struct {
pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4650,7 +4602,7 @@ pub const Value = struct {
pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4690,7 +4642,7 @@ pub const Value = struct {
pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4730,7 +4682,7 @@ pub const Value = struct {
pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4770,7 +4722,7 @@ pub const Value = struct {
pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4810,7 +4762,7 @@ pub const Value = struct {
pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4850,7 +4802,7 @@ pub const Value = struct {
pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4890,7 +4842,7 @@ pub const Value = struct {
pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4930,7 +4882,7 @@ pub const Value = struct {
pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -4970,7 +4922,7 @@ pub const Value = struct {
pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -5010,7 +4962,7 @@ pub const Value = struct {
pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -5050,7 +5002,7 @@ pub const Value = struct {
pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -5097,7 +5049,7 @@ pub const Value = struct {
) !Value {
const target = mod.getTarget();
if (float_type.zigTypeTag(mod) == .Vector) {
- const result_data = try arena.alloc(Value, float_type.vectorLen());
+ const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
for (result_data, 0..) |*scalar, i| {
var mulend1_buf: Value.ElemValueBuffer = undefined;
const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);