Commit f10950526e
Changed files (6)
src/arch/wasm/CodeGen.zig
@@ -2896,7 +2896,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout == .Packed);
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
- val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0);
+ val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
var payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = std.mem.readIntLittle(u64, &buf),
@@ -2907,7 +2907,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
.Vector => {
assert(determineSimdStoreStrategy(ty, target) == .direct);
var buf: [16]u8 = undefined;
- val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf);
+ val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
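Note on the pattern above: writeToMemory and writeToPackedMemory now return error{ReinterpretDeclRef}!void, so every caller must discharge the error. The wasm backend here (and generateSymbol in src/codegen.zig below) uses catch unreachable, since a packed integer or vector constant cannot contain a decl reference at that point, while Sema.storePtrVal further down switches exhaustively so that any error added to the set later forces the site to be revisited. A minimal, self-contained sketch of the two call-site patterns, with a toy WriteError and writeByte standing in for the real API:

```zig
const std = @import("std");

// Toy stand-in for the single-error error set introduced by this commit.
const WriteError = error{ReinterpretDeclRef};

fn writeByte(buf: []u8, v: u8, is_decl_ref: bool) WriteError!void {
    if (is_decl_ref) return error.ReinterpretDeclRef;
    buf[0] = v;
}

pub fn main() void {
    var buf: [1]u8 = .{0};
    // Backend pattern: the caller knows the operand cannot be a decl reference here.
    writeByte(&buf, 0xaa, false) catch unreachable;
    // Sema.storePtrVal pattern: exhaustive switch, so adding a new error to the
    // set later turns this call site into a compile error instead of compiling silently.
    writeByte(&buf, 0xbb, false) catch |err| switch (err) {
        error.ReinterpretDeclRef => unreachable,
    };
    std.debug.print("buf[0] = 0x{x}\n", .{buf[0]});
}
```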
src/codegen.zig
@@ -527,7 +527,7 @@ pub fn generateSymbol(
.fail => |em| return Result{ .fail = em },
}
} else {
- field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits);
+ field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @intCast(u16, field_ty.bitSize(target));
}
src/Sema.zig
@@ -2529,7 +2529,7 @@ fn coerceResultPtr(
_ = try block.addBinOp(.store, new_ptr, null_inst);
return Air.Inst.Ref.void_value;
}
- return sema.bitCast(block, ptr_ty, new_ptr, src);
+ return sema.bitCast(block, ptr_ty, new_ptr, src, null);
}
const trash_inst = trash_block.instructions.pop();
@@ -2545,7 +2545,7 @@ fn coerceResultPtr(
if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val);
} else {
- new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src);
+ new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
}
},
.wrap_optional => {
@@ -9655,7 +9655,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.Vector,
=> {},
}
- return sema.bitCast(block, dest_ty, operand, operand_src);
+ return sema.bitCast(block, dest_ty, operand, inst_data.src(), operand_src);
}
fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9888,7 +9888,7 @@ fn zirSwitchCapture(
switch (operand_ty.zigTypeTag()) {
.ErrorSet => if (block.switch_else_err_ty) |some| {
- return sema.bitCast(block, some, operand, operand_src);
+ return sema.bitCast(block, some, operand, operand_src, null);
} else {
try block.addUnreachable(false);
return Air.Inst.Ref.unreachable_value;
@@ -9988,14 +9988,14 @@ fn zirSwitchCapture(
Module.ErrorSet.sortNames(&names);
const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names);
- return sema.bitCast(block, else_error_ty, operand, operand_src);
+ return sema.bitCast(block, else_error_ty, operand, operand_src, null);
} else {
const item_ref = try sema.resolveInst(items[0]);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?);
- return sema.bitCast(block, item_ty, operand, operand_src);
+ return sema.bitCast(block, item_ty, operand, operand_src, null);
}
},
else => {
@@ -19953,7 +19953,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
} else is_aligned;
try sema.addSafetyCheck(block, ok, .incorrect_alignment);
}
- return sema.bitCast(block, dest_ty, ptr, ptr_src);
+ return sema.bitCast(block, dest_ty, ptr, ptr_src, null);
}
fn zirBitCount(
@@ -24141,8 +24141,9 @@ fn unionFieldVal(
return sema.addConstant(field.ty, tag_and_val.val);
} else {
const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod);
- const new_val = try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0);
- return sema.addConstant(field.ty, new_val);
+ if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| {
+ return sema.addConstant(field.ty, new_val);
+ }
}
},
}
@@ -26514,8 +26515,12 @@ fn storePtrVal(
const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
- reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer);
- operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]);
+ reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) {
+ error.ReinterpretDeclRef => unreachable,
+ };
+ operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
+ error.ReinterpretDeclRef => unreachable,
+ };
const arena = mut_kit.beginArena(sema.mod);
defer mut_kit.finishArena(sema.mod);
@@ -27398,6 +27403,7 @@ fn bitCast(
dest_ty_unresolved: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
+ operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
try sema.resolveTypeLayout(dest_ty);
@@ -27419,10 +27425,11 @@ fn bitCast(
}
if (try sema.resolveMaybeUndefVal(inst)) |val| {
- const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0);
- return sema.addConstant(dest_ty, result_val);
+ if (try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0)) |result_val| {
+ return sema.addConstant(dest_ty, result_val);
+ }
}
- try sema.requireRuntimeBlock(block, inst_src, null);
+ try sema.requireRuntimeBlock(block, inst_src, operand_src);
return block.addBitCast(dest_ty, inst);
}
@@ -27434,7 +27441,7 @@ fn bitCastVal(
old_ty: Type,
new_ty: Type,
buffer_offset: usize,
-) !Value {
+) !?Value {
const target = sema.mod.getTarget();
if (old_ty.eql(new_ty, sema.mod)) return val;
@@ -27443,8 +27450,10 @@ fn bitCastVal(
const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
- val.writeToMemory(old_ty, sema.mod, buffer);
- return Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
+ val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) {
+ error.ReinterpretDeclRef => return null,
+ };
+ return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
}
fn coerceArrayPtrToSlice(
@@ -27551,7 +27560,7 @@ fn coerceCompatiblePtrs(
} else is_non_zero;
try sema.addSafetyCheck(block, ok, .cast_to_null);
}
- return sema.bitCast(block, dest_ty, inst, inst_src);
+ return sema.bitCast(block, dest_ty, inst, inst_src, null);
}
fn coerceEnumToUnion(
@@ -28291,7 +28300,7 @@ fn analyzeRef(
try sema.storePtr(block, src, alloc, operand);
// TODO: Replace with sema.coerce when that supports adding pointer constness.
- return sema.bitCast(block, ptr_type, alloc, src);
+ return sema.bitCast(block, ptr_type, alloc, src, null);
}
fn analyzeLoad(
@@ -32327,11 +32336,11 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value
// Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty))
- return DerefResult{ .val = try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0) };
+ return DerefResult{ .val = (try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0)) orelse return .runtime_load };
// If that fails, try to bit-cast from the largest parent value with a well-defined layout
if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty))
- return DerefResult{ .val = try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset) };
+ return DerefResult{ .val = (try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset)) orelse return .runtime_load };
if (deref.ty_without_well_defined_layout) |bad_ty| {
// We got no parent for bit-casting, or the parent we got was too small. Either way, the problem
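bitCastVal now returns ?Value: a null result means the value contains a reference to a declaration, whose address is only known at link time, so there is no comptime-known bit pattern and the caller must fall back to a runtime operation, as unionFieldVal, bitCast, and pointerDerefExtra do above. A hedged, self-contained sketch of that caller contract (tryComptimeBitCastU32 is an illustrative stand-in, not Sema's API):

```zig
const std = @import("std");

// A null result means "no comptime-known bytes; emit a runtime bitcast
// instead of folding the value".
fn tryComptimeBitCastU32(bytes: ?[]const u8) ?u32 {
    const b = bytes orelse return null; // e.g. the value points at a decl
    return std.mem.readIntLittle(u32, b[0..4]);
}

pub fn main() void {
    const raw = [_]u8{ 0x78, 0x56, 0x34, 0x12 };
    if (tryComptimeBitCastU32(&raw)) |v| {
        std.debug.print("folded to 0x{x}\n", .{v}); // comptime path
    } else {
        std.debug.print("fall back to a runtime bitcast\n", .{});
    }
}
```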
src/value.zig
@@ -1249,11 +1249,22 @@ pub const Value = extern union {
};
}
+ fn isDeclRef(val: Value) bool {
+ var check = val;
+ while (true) switch (check.tag()) {
+ .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true,
+ .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr,
+ .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr,
+ .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
+ else => return false,
+ };
+ }
+
/// Write a Value's contents to `buffer`.
///
/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
/// the end of the value in memory.
- pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) void {
+ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ReinterpretDeclRef}!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
@@ -1309,7 +1320,7 @@ pub const Value = extern union {
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
- elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
+ try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
buf_off += elem_size;
}
},
@@ -1317,7 +1328,7 @@ pub const Value = extern union {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
- writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
.Struct => switch (ty.containerLayout()) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
@@ -1326,12 +1337,12 @@ pub const Value = extern union {
const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
- writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
+ try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
}
},
.Packed => {
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
- writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
.ErrorSet => {
@@ -1345,9 +1356,14 @@ pub const Value = extern union {
.Extern => @panic("TODO implement writeToMemory for extern unions"),
.Packed => {
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
- writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
+ return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
+ .Pointer => {
+ assert(!ty.isSlice()); // No well defined layout.
+ if (val.isDeclRef()) return error.ReinterpretDeclRef;
+ return val.writeToMemory(Type.usize, mod, buffer);
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
@@ -1356,7 +1372,7 @@ pub const Value = extern union {
///
/// Both the start and the end of the provided buffer must be tight, since
/// big-endian packed memory layouts start at the end of the buffer.
- pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) void {
+ pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) error{ReinterpretDeclRef}!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
@@ -1420,7 +1436,7 @@ pub const Value = extern union {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
- elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
+ try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
bits += elem_bit_size;
}
},
@@ -1433,7 +1449,7 @@ pub const Value = extern union {
const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
- field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
+ try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
},
@@ -1446,9 +1462,14 @@ pub const Value = extern union {
const field_type = ty.unionFields().values()[field_index.?].ty;
const field_val = val.fieldValue(field_type, field_index.?);
- field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
+ return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
},
},
+ .Pointer => {
+ assert(!ty.isSlice()); // No well defined layout.
+ if (val.isDeclRef()) return error.ReinterpretDeclRef;
+ return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
+ },
else => @panic("TODO implement writeToPackedMemory for more types"),
}
}
@@ -1553,6 +1574,10 @@ pub const Value = extern union {
};
return Value.initPayload(&payload.base);
},
+ .Pointer => {
+ assert(!ty.isSlice()); // No well defined layout.
+ return readFromMemory(Type.usize, mod, buffer, arena);
+ },
else => @panic("TODO implement readFromMemory for more types"),
}
}
@@ -1640,6 +1665,10 @@ pub const Value = extern union {
return Tag.aggregate.create(arena, field_vals);
},
},
+ .Pointer => {
+ assert(!ty.isSlice()); // No well defined layout.
+ return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
+ },
else => @panic("TODO implement readFromPackedMemory for more types"),
}
}
test/cases/compile_errors/bitCast_same_size_but_bit_count_mismatch.zig
@@ -7,4 +7,4 @@ export fn entry(byte: u8) void {
// backend=stage2
// target=native
//
-// :2:29: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits
+// :2:16: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits
test/cases/compile_errors/bitCast_with_different_sizes_inside_an_expression.zig
@@ -7,4 +7,4 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :2:29: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits
+// :2:16: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits