Commit afa74c6b21
Changed files (17)
src/Air/types_resolved.zig
src/arch/aarch64/CodeGen.zig
src/arch/arm/CodeGen.zig
src/arch/riscv64/CodeGen.zig
src/arch/sparc64/CodeGen.zig
src/arch/wasm/CodeGen.zig
src/arch/x86_64/CodeGen.zig
src/codegen/c.zig
src/codegen/llvm.zig
src/codegen/spirv.zig
src/Liveness/Verify.zig
src/Air.zig
src/Liveness.zig
src/print_air.zig
src/Sema.zig
src/target.zig
src/Zcu.zig
src/Air/types_resolved.zig
@@ -208,8 +208,6 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
- .int_from_ptr,
- .int_from_bool,
.ret,
.ret_safe,
.ret_load,
src/arch/aarch64/CodeGen.zig
@@ -734,7 +734,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.fpext => try self.airFpext(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
- .int_from_bool => try self.airIntFromBool(inst),
.is_non_null => try self.airIsNonNull(inst),
.is_non_null_ptr => try self.airIsNonNullPtr(inst),
.is_null => try self.airIsNull(inst),
@@ -746,7 +745,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
.not => try self.airNot(inst),
- .int_from_ptr => try self.airIntFromPtr(inst),
.ret => try self.airRet(inst),
.ret_safe => try self.airRet(inst), // TODO
.ret_load => try self.airRetLoad(inst),
@@ -1294,13 +1292,6 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airIntFromBool(self: *Self, inst: Air.Inst.Index) InnerError!void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try self.resolveInst(un_op);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
- return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const pt = self.pt;
@@ -5906,12 +5897,6 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
}
}
-fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const result = try self.resolveInst(un_op);
- return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airBitCast(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = if (self.liveness.isUnused(inst)) .dead else result: {
src/arch/arm/CodeGen.zig
@@ -723,7 +723,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.fpext => try self.airFpext(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
- .int_from_bool => try self.airIntFromBool(inst),
.is_non_null => try self.airIsNonNull(inst),
.is_non_null_ptr => try self.airIsNonNullPtr(inst),
.is_null => try self.airIsNull(inst),
@@ -735,7 +734,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
.not => try self.airNot(inst),
- .int_from_ptr => try self.airIntFromPtr(inst),
.ret => try self.airRet(inst),
.ret_safe => try self.airRet(inst), // TODO
.ret_load => try self.airRetLoad(inst),
@@ -1258,13 +1256,6 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try self.resolveInst(un_op);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
- return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const pt = self.pt;
@@ -5874,12 +5865,6 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
}
}
-fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const result = try self.resolveInst(un_op);
- return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result = if (self.liveness.isUnused(inst)) .dead else result: {
src/arch/riscv64/CodeGen.zig
@@ -1557,7 +1557,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.fpext => try func.airFpext(inst),
.intcast => try func.airIntCast(inst),
.trunc => try func.airTrunc(inst),
- .int_from_bool => try func.airIntFromBool(inst),
.is_non_null => try func.airIsNonNull(inst),
.is_non_null_ptr => try func.airIsNonNullPtr(inst),
.is_null => try func.airIsNull(inst),
@@ -1569,7 +1568,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.load => try func.airLoad(inst),
.loop => try func.airLoop(inst),
.not => try func.airNot(inst),
- .int_from_ptr => try func.airIntFromPtr(inst),
.ret => try func.airRet(inst, false),
.ret_safe => try func.airRet(inst, true),
.ret_load => try func.airRetLoad(inst),
@@ -2299,13 +2297,6 @@ fn airTrunc(func: *Func, inst: Air.Inst.Index) !void {
return func.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
}
-fn airIntFromBool(func: *Func, inst: Air.Inst.Index) !void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try func.resolveInst(un_op);
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else operand;
- return func.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airNot(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
@@ -7262,21 +7253,6 @@ fn genSetMem(
}
}
-fn airIntFromPtr(func: *Func, inst: Air.Inst.Index) !void {
- const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const result = result: {
- const src_mcv = try func.resolveInst(un_op);
- const src_ty = func.typeOfIndex(inst);
- if (func.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;
-
- const dst_mcv = try func.allocRegOrMem(src_ty, inst, true);
- const dst_ty = func.typeOfIndex(inst);
- try func.genCopy(dst_ty, dst_mcv, src_mcv);
- break :result dst_mcv;
- };
- return func.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
const pt = func.pt;
const zcu = pt.zcu;
@@ -7285,8 +7261,9 @@ fn airBitCast(func: *Func, inst: Air.Inst.Index) !void {
const result = if (func.liveness.isUnused(inst)) .unreach else result: {
const src_mcv = try func.resolveInst(ty_op.operand);
- const dst_ty = func.typeOfIndex(inst);
const src_ty = func.typeOf(ty_op.operand);
+ if (src_ty.toIntern() == .bool_type) break :result src_mcv;
+ const dst_ty = func.typeOfIndex(inst);
const src_lock = if (src_mcv.getReg()) |reg| func.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| func.register_manager.unlockReg(lock);
src/arch/sparc64/CodeGen.zig
@@ -577,7 +577,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.fpext => @panic("TODO try self.airFpext(inst)"),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
- .int_from_bool => try self.airIntFromBool(inst),
.is_non_null => try self.airIsNonNull(inst),
.is_non_null_ptr => @panic("TODO try self.airIsNonNullPtr(inst)"),
.is_null => try self.airIsNull(inst),
@@ -589,7 +588,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.load => try self.airLoad(inst),
.loop => try self.airLoop(inst),
.not => try self.airNot(inst),
- .int_from_ptr => try self.airIntFromPtr(inst),
.ret => try self.airRet(inst),
.ret_safe => try self.airRet(inst), // TODO
.ret_load => try self.airRetLoad(inst),
@@ -1077,13 +1075,6 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airIntFromBool(self: *Self, inst: Air.Inst.Index) !void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try self.resolveInst(un_op);
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else operand;
- return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -2230,12 +2221,6 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airIntFromPtr(self: *Self, inst: Air.Inst.Index) !void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const result = try self.resolveInst(un_op);
- return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airRem(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
src/arch/wasm/CodeGen.zig
@@ -1926,7 +1926,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.br => cg.airBr(inst),
.repeat => cg.airRepeat(inst),
.switch_dispatch => return cg.fail("TODO implement `switch_dispatch`", .{}),
- .int_from_bool => cg.airIntFromBool(inst),
.cond_br => cg.airCondBr(inst),
.intcast => cg.airIntcast(inst),
.fptrunc => cg.airFptrunc(inst),
@@ -1972,7 +1971,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.ptr_sub => cg.airPtrBinOp(inst, .sub),
.ptr_elem_ptr => cg.airPtrElemPtr(inst),
.ptr_elem_val => cg.airPtrElemVal(inst),
- .int_from_ptr => cg.airIntFromPtr(inst),
.ret => cg.airRet(inst),
.ret_safe => cg.airRet(inst), // TODO
.ret_ptr => cg.airRetPtr(inst),
@@ -3777,7 +3775,11 @@ fn airBitcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try cg.wrapOperand(operand, wanted_ty);
}
- break :result cg.reuseOperand(ty_op.operand, operand);
+ break :result switch (operand) {
+ // for stack offset, return a pointer to this offset.
+ .stack_offset => try cg.buildPointerOffset(operand, 0, .new),
+ else => cg.reuseOperand(ty_op.operand, operand),
+ };
};
return cg.finishAir(inst, result, &.{ty_op.operand});
}
@@ -4637,14 +4639,6 @@ fn trunc(cg: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerEr
return result;
}
-fn airIntFromBool(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try cg.resolveInst(un_op);
- const result = cg.reuseOperand(un_op, operand);
-
- return cg.finishAir(inst, result, &.{un_op});
-}
-
fn airArrayToSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const zcu = cg.pt.zcu;
const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
@@ -4668,21 +4662,6 @@ fn airArrayToSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return cg.finishAir(inst, slice_local, &.{ty_op.operand});
}
-fn airIntFromPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const zcu = cg.pt.zcu;
- const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try cg.resolveInst(un_op);
- const ptr_ty = cg.typeOf(un_op);
- const result = if (ptr_ty.isSlice(zcu))
- try cg.slicePtr(operand)
- else switch (operand) {
- // for stack offset, return a pointer to this offset.
- .stack_offset => try cg.buildPointerOffset(operand, 0, .new),
- else => cg.reuseOperand(un_op, operand),
- };
- return cg.finishAir(inst, result, &.{un_op});
-}
-
fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const zcu = cg.pt.zcu;
const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
src/arch/x86_64/CodeGen.zig
@@ -21853,17 +21853,6 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
}, cg);
try res.finish(inst, &.{ty_op.operand}, &ops, cg);
},
- .int_from_ptr => if (use_old) try cg.airIntFromPtr(inst) else {
- const un_op = air_datas[@intFromEnum(inst)].un_op;
- var ops = try cg.tempsFromOperands(inst, .{un_op});
- try ops[0].toSlicePtr(cg);
- try ops[0].finish(inst, &.{un_op}, &ops, cg);
- },
- .int_from_bool => if (use_old) try cg.airIntFromBool(inst) else {
- const un_op = air_datas[@intFromEnum(inst)].un_op;
- const ops = try cg.tempsFromOperands(inst, .{un_op});
- try ops[0].finish(inst, &.{un_op}, &ops, cg);
- },
.ret => try cg.airRet(inst, false),
.ret_safe => try cg.airRet(inst, true),
.ret_load => try cg.airRetLoad(inst),
@@ -24493,19 +24482,6 @@ fn airTrunc(self: *CodeGen, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn airIntFromBool(self: *CodeGen, inst: Air.Inst.Index) !void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const ty = self.typeOfIndex(inst);
-
- const operand = try self.resolveInst(un_op);
- const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand))
- operand
- else
- try self.copyToRegisterWithInstTracking(inst, ty, operand);
-
- return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
-}
-
fn airSlice(self: *CodeGen, inst: Air.Inst.Index) !void {
const zcu = self.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
@@ -37205,21 +37181,6 @@ fn genLazySymbolRef(
}
}
-fn airIntFromPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const result = result: {
- // TODO: handle case where the operand is a slice not a raw pointer
- const src_mcv = try self.resolveInst(un_op);
- if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;
-
- const dst_mcv = try self.allocRegOrMem(inst, true);
- const dst_ty = self.typeOfIndex(inst);
- try self.genCopy(dst_ty, dst_mcv, src_mcv, .{});
- break :result dst_mcv;
- };
- return self.finishAir(inst, result, .{ un_op, .none, .none });
-}
-
fn airBitCast(self: *CodeGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const zcu = pt.zcu;
src/codegen/c.zig
@@ -3325,7 +3325,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.bitcast => try airBitcast(f, inst),
.intcast => try airIntCast(f, inst),
.trunc => try airTrunc(f, inst),
- .int_from_bool => try airIntFromBool(f, inst),
.load => try airLoad(f, inst),
.store => try airStore(f, inst, false),
.store_safe => try airStore(f, inst, true),
@@ -3371,8 +3370,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.fpext,
=> try airFloatCast(f, inst),
- .int_from_ptr => try airIntFromPtr(f, inst),
-
.atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.unordered)),
.atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.monotonic)),
.atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.release)),
@@ -3983,21 +3980,6 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airIntFromBool(f: *Function, inst: Air.Inst.Index) !CValue {
- const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try f.resolveInst(un_op);
- try reap(f, inst, &.{un_op});
- const writer = f.object.writer();
- const inst_ty = f.typeOfIndex(inst);
- const local = try f.allocLocal(inst, inst_ty);
- const a = try Assignment.start(f, writer, try f.ctypeFromType(inst_ty, .complete));
- try f.writeCValue(writer, local, .Other);
- try a.assign(f, writer);
- try f.writeCValue(writer, operand, .Other);
- try a.end(f, writer);
- return local;
-}
-
fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
@@ -4970,7 +4952,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
src_info.bits == dest_info.bits) return operand;
}
- if (dest_ty.isPtrAtRuntime(zcu) and operand_ty.isPtrAtRuntime(zcu)) {
+ if (dest_ty.isPtrAtRuntime(zcu) or operand_ty.isPtrAtRuntime(zcu)) {
const local = try f.allocLocal(null, dest_ty);
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = (");
@@ -6455,30 +6437,6 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airIntFromPtr(f: *Function, inst: Air.Inst.Index) !CValue {
- const pt = f.object.dg.pt;
- const zcu = pt.zcu;
- const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-
- const operand = try f.resolveInst(un_op);
- const operand_ty = f.typeOf(un_op);
- try reap(f, inst, &.{un_op});
- const inst_ty = f.typeOfIndex(inst);
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, inst_ty);
- try f.writeCValue(writer, local, .Other);
-
- try writer.writeAll(" = (");
- try f.renderType(writer, inst_ty);
- try writer.writeByte(')');
- if (operand_ty.isSlice(zcu))
- try f.writeCValueMember(writer, operand, .{ .identifier = "ptr" })
- else
- try f.writeCValue(writer, operand, .Other);
- try writer.writeAll(";\n");
- return local;
-}
-
fn airUnBuiltinCall(
f: *Function,
inst: Air.Inst.Index,
src/codegen/llvm.zig
@@ -5154,7 +5154,6 @@ pub const FuncGen = struct {
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
.bitcast => try self.airBitCast(inst),
- .int_from_bool => try self.airIntFromBool(inst),
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
@@ -5167,7 +5166,6 @@ pub const FuncGen = struct {
.trunc => try self.airTrunc(inst),
.fptrunc => try self.airFptrunc(inst),
.fpext => try self.airFpext(inst),
- .int_from_ptr => try self.airIntFromPtr(inst),
.load => try self.airLoad(body[i..]),
.not => try self.airNot(inst),
.store => try self.airStore(inst, false),
@@ -9435,16 +9433,6 @@ pub const FuncGen = struct {
}
}
- fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const o = self.ng.object;
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try self.resolveInst(un_op);
- const ptr_ty = self.typeOf(un_op);
- const operand_ptr = try self.sliceOrArrayPtr(operand, ptr_ty);
- const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst));
- return self.wip.cast(.ptrtoint, operand_ptr, dest_llvm_ty, "");
- }
-
fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
@@ -9476,6 +9464,10 @@ pub const FuncGen = struct {
return self.wip.cast(.inttoptr, operand, llvm_dest_ty, "");
}
+ if (operand_ty.isPtrAtRuntime(zcu) and inst_ty.zigTypeTag(zcu) == .int) {
+ return self.wip.cast(.ptrtoint, operand, llvm_dest_ty, "");
+ }
+
if (operand_ty.zigTypeTag(zcu) == .vector and inst_ty.zigTypeTag(zcu) == .array) {
const elem_ty = operand_ty.childType(zcu);
if (!result_is_ref) {
@@ -9564,12 +9556,6 @@ pub const FuncGen = struct {
return self.wip.cast(.bitcast, operand, llvm_dest_ty, "");
}
- fn airIntFromBool(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try self.resolveInst(un_op);
- return operand;
- }
-
fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = o.pt;
src/codegen/spirv.zig
@@ -3449,10 +3449,8 @@ const NavGen = struct {
.bitcast => try self.airBitCast(inst),
.intcast, .trunc => try self.airIntCast(inst),
- .int_from_ptr => try self.airIntFromPtr(inst),
.float_from_int => try self.airFloatFromInt(inst),
.int_from_float => try self.airIntFromFloat(inst),
- .int_from_bool => try self.airIntFromBool(inst),
.fpext, .fptrunc => try self.airFloatCast(inst),
.not => try self.airNot(inst),
@@ -4706,9 +4704,14 @@ const NavGen = struct {
fn airBitCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const operand_id = try self.resolve(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
const result_ty = self.typeOfIndex(inst);
+ if (operand_ty.toIntern() == .bool_type) {
+ const operand = try self.temporary(ty_op.operand);
+ const result = try self.intFromBool(operand);
+ return try result.materialize(self);
+ }
+ const operand_id = try self.resolve(ty_op.operand);
return try self.bitCast(result_ty, operand_ty, operand_id);
}
@@ -4749,12 +4752,6 @@ const NavGen = struct {
return result_id;
}
- fn airIntFromPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand_id = try self.resolve(un_op);
- return try self.intFromPtr(operand_id);
- }
-
fn airFloatFromInt(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_ty = self.typeOf(ty_op.operand);
@@ -4808,13 +4805,6 @@ const NavGen = struct {
return result_id;
}
- fn airIntFromBool(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
- const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
- const operand = try self.temporary(un_op);
- const result = try self.intFromBool(operand);
- return try result.materialize(self);
- }
-
fn airFloatCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
src/Liveness/Verify.zig
@@ -130,8 +130,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
- .int_from_ptr,
- .int_from_bool,
.is_named_enum_value,
.tag_name,
.error_name,
src/Air.zig
@@ -266,7 +266,8 @@ pub const Inst = struct {
/// Boolean or binary NOT.
/// Uses the `ty_op` field.
not,
- /// Reinterpret the memory representation of a value as a different type.
+ /// Reinterpret the bits of a value as a different type. This is like `@bitCast` but
+ /// also supports enums and pointers.
/// Uses the `ty_op` field.
bitcast,
/// Uses the `ty_pl` field with payload `Block`. A block runs its body which always ends
@@ -517,14 +518,6 @@ pub const Inst = struct {
/// Read a value from a pointer.
/// Uses the `ty_op` field.
load,
- /// Converts a pointer to its address. Result type is always `usize`.
- /// Pointer type size may be any, including slice.
- /// Uses the `un_op` field.
- int_from_ptr,
- /// Given a boolean, returns 0 or 1.
- /// Result type is always `u1`.
- /// Uses the `un_op` field.
- int_from_bool,
/// Return a value from a function.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `un_op` field.
@@ -1542,7 +1535,6 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.c_va_end,
=> return Type.void,
- .int_from_ptr,
.slice_len,
.ret_addr,
.frame_addr,
@@ -1552,8 +1544,6 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.wasm_memory_grow => return Type.isize,
.wasm_memory_size => return Type.usize,
- .int_from_bool => return Type.u1,
-
.tag_name, .error_name => return Type.slice_const_u8_sentinel_0,
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
@@ -1815,8 +1805,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.is_non_err_ptr,
.bool_and,
.bool_or,
- .int_from_ptr,
- .int_from_bool,
.fptrunc,
.fpext,
.intcast,
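Illustration (assumed user code, not part of this commit): after the removals above, the builtins that used to produce the dedicated `int_from_ptr` and `int_from_bool` AIR instructions are lowered by Sema as `bitcast` to `usize` and `u1` respectively.

// Hedged sketch in plain Zig; names are illustrative only.
fn addrAndFlag(p: *const u8, b: bool) struct { usize, u1 } {
    const addr = @intFromPtr(p); // now lowers to `bitcast` to usize
    const bit = @intFromBool(b); // now lowers to `bitcast` to u1
    return .{ addr, bit };
}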
src/Liveness.zig
@@ -402,8 +402,6 @@ pub fn categorizeOperand(
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
- .int_from_ptr,
- .int_from_bool,
.is_named_enum_value,
.tag_name,
.error_name,
@@ -1028,8 +1026,6 @@ fn analyzeInst(
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
- .int_from_ptr,
- .int_from_bool,
.is_named_enum_value,
.tag_name,
.error_name,
src/print_air.zig
@@ -172,8 +172,6 @@ const Writer = struct {
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
- .int_from_ptr,
- .int_from_bool,
.ret,
.ret_safe,
.ret_load,
src/Sema.zig
@@ -10054,6 +10054,8 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
};
return sema.failWithOwnedErrorMsg(block, msg);
}
+ const len = if (is_vector) operand_ty.vectorLen(zcu) else undefined;
+ const dest_ty: Type = if (is_vector) try pt.vectorType(.{ .child = .usize_type, .len = len }) else .usize;
if (try sema.resolveValueIntable(operand)) |operand_val| ct: {
if (!is_vector) {
if (operand_val.isUndef(zcu)) {
@@ -10064,8 +10066,6 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
(try operand_val.toUnsignedIntSema(pt)),
)).toIntern());
}
- const len = operand_ty.vectorLen(zcu);
- const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len });
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
const ptr_val = try operand_val.elemValue(pt, i);
@@ -10089,16 +10089,14 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src);
try sema.validateRuntimeValue(block, ptr_src, operand);
- if (!is_vector) {
- return block.addUnOp(.int_from_ptr, operand);
+ if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
+ return block.addBitCast(dest_ty, operand);
}
- const len = operand_ty.vectorLen(zcu);
- const dest_ty = try pt.vectorType(.{ .child = .usize_type, .len = len });
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
- new_elem.* = try block.addUnOp(.int_from_ptr, old_elem);
+ new_elem.* = try block.addBitCast(.usize, old_elem);
}
return block.addAggregateInit(dest_ty, new_elems);
}
@@ -10585,7 +10583,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (dst_bits >= src_bits) {
return sema.coerce(block, dest_ty, operand, operand_src);
}
- if (!is_vector) {
+ if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
return block.addTyOp(.fptrunc, dest_ty, operand);
}
const vec_len = operand_ty.vectorLen(zcu);
@@ -14762,7 +14760,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
});
const many_ty = slice_ty.slicePtrFieldType(zcu);
- const many_alloc = try block.addTyOp(.bitcast, many_ty, mutable_alloc);
+ const many_alloc = try block.addBitCast(many_ty, mutable_alloc);
// lhs_dest_slice = dest[0..lhs.len]
const slice_ty_ref = Air.internedToRef(slice_ty.toIntern());
@@ -14812,7 +14810,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
- return block.addTyOp(.bitcast, constant_alloc_ty, mutable_alloc);
+ return block.addBitCast(constant_alloc_ty, mutable_alloc);
}
var elem_i: u32 = 0;
@@ -14845,7 +14843,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
}
- return block.addTyOp(.bitcast, constant_alloc_ty, mutable_alloc);
+ return block.addBitCast(constant_alloc_ty, mutable_alloc);
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, result_len);
@@ -16612,8 +16610,8 @@ fn analyzeArithmetic(
};
try sema.requireRuntimeBlock(block, src, runtime_src);
- const lhs_int = try block.addUnOp(.int_from_ptr, lhs);
- const rhs_int = try block.addUnOp(.int_from_ptr, rhs);
+ const lhs_int = try block.addBitCast(.usize, lhs);
+ const rhs_int = try block.addBitCast(.usize, rhs);
const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int);
return try block.addBinOp(.div_exact, address, try pt.intRef(Type.usize, elem_size));
}
@@ -21231,14 +21229,14 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (operand_scalar_ty.toIntern() != .bool_type) {
return sema.fail(block, src, "expected 'bool', found '{}'", .{operand_scalar_ty.zigTypeTag(zcu)});
}
+ const len = if (is_vector) operand_ty.vectorLen(zcu) else undefined;
+ const dest_ty: Type = if (is_vector) try pt.vectorType(.{ .child = .u1_type, .len = len }) else .u1;
if (try sema.resolveValue(operand)) |val| {
if (!is_vector) {
if (val.isUndef(zcu)) return pt.undefRef(Type.u1);
if (val.toBool()) return Air.internedToRef((try pt.intValue(Type.u1, 1)).toIntern());
return Air.internedToRef((try pt.intValue(Type.u1, 0)).toIntern());
}
- const len = operand_ty.vectorLen(zcu);
- const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len });
if (val.isUndef(zcu)) return pt.undefRef(dest_ty);
const new_elems = try sema.arena.alloc(InternPool.Index, len);
for (new_elems, 0..) |*new_elem, i| {
@@ -21256,16 +21254,14 @@ fn zirIntFromBool(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.storage = .{ .elems = new_elems },
} }));
}
- if (!is_vector) {
- return block.addUnOp(.int_from_bool, operand);
+ if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
+ return block.addBitCast(dest_ty, operand);
}
- const len = operand_ty.vectorLen(zcu);
- const dest_ty = try pt.vectorType(.{ .child = .u1_type, .len = len });
const new_elems = try sema.arena.alloc(Air.Inst.Ref, len);
for (new_elems, 0..) |*new_elem, i| {
const idx_ref = try pt.intRef(Type.usize, i);
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
- new_elem.* = try block.addUnOp(.int_from_bool, old_elem);
+ new_elem.* = try block.addBitCast(.u1, old_elem);
}
return block.addAggregateInit(dest_ty, new_elems);
}
@@ -22858,14 +22854,27 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
.storage = .{ .repeated_elem = (try pt.intValue(dest_scalar_ty, 0)).toIntern() },
} }));
}
- if (!is_vector) {
+ if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand);
if (block.wantSafety()) {
const back = try block.addTyOp(.float_from_int, operand_ty, result);
- const diff = try block.addBinOp(.sub, operand, back);
- const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_ty, 1.0)).toIntern()));
- const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_ty, -1.0)).toIntern()));
- const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
+ const diff = try block.addBinOp(if (block.float_mode == .optimized) .sub_optimized else .sub, operand, back);
+ const ok = if (is_vector) ok: {
+ const ok_pos = try block.addCmpVector(diff, Air.internedToRef((try sema.splat(operand_ty, try pt.floatValue(operand_scalar_ty, 1.0))).toIntern()), .lt);
+ const ok_neg = try block.addCmpVector(diff, Air.internedToRef((try sema.splat(operand_ty, try pt.floatValue(operand_scalar_ty, -1.0))).toIntern()), .gt);
+ const ok = try block.addBinOp(.bit_and, ok_pos, ok_neg);
+ break :ok try block.addInst(.{
+ .tag = .reduce,
+ .data = .{ .reduce = .{
+ .operand = ok,
+ .operation = .And,
+ } },
+ });
+ } else ok: {
+ const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try pt.floatValue(operand_ty, 1.0)).toIntern()));
+ const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try pt.floatValue(operand_ty, -1.0)).toIntern()));
+ break :ok try block.addBinOp(.bool_and, ok_pos, ok_neg);
+ };
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
}
return result;
@@ -22917,7 +22926,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
try sema.requireRuntimeBlock(block, src, operand_src);
- if (!is_vector) {
+ if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
return block.addTyOp(.float_from_int, dest_ty, operand);
}
const len = operand_ty.vectorLen(zcu);
@@ -22996,17 +23005,37 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
});
}
try sema.requireRuntimeBlock(block, src, operand_src);
- if (!is_vector) {
+ if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) {
if (!ptr_ty.isAllowzeroPtr(zcu)) {
- const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
+ const is_non_zero = if (is_vector) all_non_zero: {
+ const zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern());
+ const is_non_zero = try block.addCmpVector(operand_coerced, zero_usize, .neq);
+ break :all_non_zero try block.addInst(.{
+ .tag = .reduce,
+ .data = .{ .reduce = .{
+ .operand = is_non_zero,
+ .operation = .And,
+ } },
+ });
+ } else try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
}
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
- const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
+ const align_minus_1 = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue(Type.usize, align_bytes_minus_1))).toIntern());
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
- const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
+ const is_aligned = if (is_vector) all_aligned: {
+ const splat_zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern());
+ const is_aligned = try block.addCmpVector(remainder, splat_zero_usize, .eq);
+ break :all_aligned try block.addInst(.{
+ .tag = .reduce,
+ .data = .{ .reduce = .{
+ .operand = is_aligned,
+ .operation = .And,
+ } },
+ });
+ } else try block.addBinOp(.cmp_eq, remainder, .zero_usize);
try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
}
}
@@ -23559,7 +23588,11 @@ fn ptrCastFull(
if (block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu) and
(try Type.fromInterned(dest_info.child).hasRuntimeBitsSema(pt) or Type.fromInterned(dest_info.child).zigTypeTag(zcu) == .@"fn"))
{
- const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
+ const actual_ptr = if (src_info.flags.size == .slice)
+ try sema.analyzeSlicePtr(block, src, ptr, operand_ty)
+ else
+ ptr;
+ const ptr_int = try block.addBitCast(.usize, actual_ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
const ok = if (src_info.flags.size == .slice and dest_info.flags.size == .slice) ok: {
const len = try sema.analyzeSliceLen(block, operand_src, ptr);
@@ -23575,7 +23608,11 @@ fn ptrCastFull(
{
const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
- const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
+ const actual_ptr = if (src_info.flags.size == .slice)
+ try sema.analyzeSlicePtr(block, src, ptr, operand_ty)
+ else
+ ptr;
+ const ptr_int = try block.addBitCast(.usize, actual_ptr);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
const ok = if (src_info.flags.size == .slice and dest_info.flags.size == .slice) ok: {
@@ -31403,7 +31440,7 @@ fn coerceCompatiblePtrs(
try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
else
inst;
- const ptr_int = try block.addUnOp(.int_from_ptr, actual_ptr);
+ const ptr_int = try block.addBitCast(.usize, actual_ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
const ok = if (inst_ty.isSlice(zcu)) ok: {
const len = try sema.analyzeSliceLen(block, inst_src, inst);
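The Sema hunks above gate the vector forms on the new `all_vector_instructions` backend feature: when the backend reports support, a single vector `bitcast` is emitted; otherwise the operation is scalarized with `array_elem_val` plus `aggregate_init`. A hedged sketch of user code that reaches those vector paths (assumed example, not from the commit):

// Vector operands for @intFromPtr / @intFromBool, illustrative only.
fn vecCasts(ptrs: @Vector(4, *const u8), flags: @Vector(4, bool)) void {
    // One vector `bitcast` if the backend has `all_vector_instructions`,
    // otherwise element-wise `bitcast`s gathered by `aggregate_init`.
    const addrs: @Vector(4, usize) = @intFromPtr(ptrs);
    const bits: @Vector(4, u1) = @intFromBool(flags);
    _ = addrs;
    _ = bits;
}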
src/target.zig
@@ -745,5 +745,8 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
.stage2_llvm => false,
else => true,
},
+ .all_vector_instructions => switch (backend) {
+ else => false,
+ },
};
}
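Every backend currently reports `all_vector_instructions` as unsupported, so Sema keeps scalarizing. A hedged, hypothetical sketch (not part of this commit) of how a backend would opt in once it lowers every vector instruction listed under `Zcu.Feature.all_vector_instructions`:

// Hypothetical future change to backendSupportsFeature in src/target.zig:
.all_vector_instructions => switch (backend) {
    .stage2_llvm => true, // assumption: this backend handles all listed vector forms
    else => false,
},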
src/Zcu.zig
@@ -3336,6 +3336,15 @@ pub const Feature = enum {
safety_checked_instructions,
/// If the backend supports running from another thread.
separate_thread,
+ /// If the backend supports the following AIR instructions with vector types:
+ /// * `Air.Inst.Tag.bit_and`
+ /// * `Air.Inst.Tag.bit_or`
+ /// * `Air.Inst.Tag.bitcast`
+ /// * `Air.Inst.Tag.float_from_int`
+ /// * `Air.Inst.Tag.fptrunc`
+ /// * `Air.Inst.Tag.int_from_float`
+ /// If not supported, Sema will scalarize the operation.
+ all_vector_instructions,
};
pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {