Commit 6ee3cabe5c
lib/std/fs/path.zig
@@ -32,7 +32,7 @@ pub fn isSep(byte: u8) bool {
/// This is different from mem.join in that the separator will not be repeated if
/// it is found at the end or beginning of a pair of consecutive paths.
fn joinSep(allocator: *Allocator, separator: u8, paths: []const []const u8) ![]u8 {
- if (paths.len == 0) return (([*]u8)(undefined))[0..0];
+ if (paths.len == 0) return &[0]u8{};
const total_len = blk: {
var sum: usize = paths[0].len;
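For reference, `&[0]u8{}` is a pointer to a zero-length array, which coerces to a slice; unlike the old `([*]u8)(undefined)[0..0]` dance, no undefined pointer is ever formed. A minimal sketch of the coercion (illustrative, not part of the commit):

    const std = @import("std");

    test "pointer to empty array coerces to a slice" {
        const empty: []u8 = &[0]u8{}; // *[0]u8 -> []u8, len == 0
        std.testing.expect(empty.len == 0);
    }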
lib/std/heap.zig
@@ -41,8 +41,7 @@ var direct_allocator_state = Allocator{
const DirectAllocator = struct {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
- if (n == 0)
- return (([*]u8)(undefined))[0..0];
+ if (n == 0) return &[0]u8{};
if (builtin.os == .windows) {
const w = os.windows;
@@ -261,8 +260,7 @@ pub const HeapAllocator = switch (builtin.os) {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
- if (n == 0)
- return (([*]u8)(undefined))[0..0];
+ if (n == 0) return &[0]u8{};
const amt = n + alignment + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
@@ -677,7 +675,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
) catch {
const result = try self.fallback_allocator.reallocFn(
self.fallback_allocator,
- ([*]u8)(undefined)[0..0],
+ &[0]u8{},
undefined,
new_size,
new_align,
lib/std/mem.zig
@@ -122,7 +122,7 @@ pub const Allocator = struct {
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
- const byte_slice = try self.reallocFn(self, ([*]u8)(undefined)[0..0], undefined, byte_count, a);
+ const byte_slice = try self.reallocFn(self, &[0]u8{}, undefined, byte_count, a);
assert(byte_slice.len == byte_count);
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
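In this interface, plain allocation is expressed as a realloc from an empty slice, so the `&[0]u8{}` argument and the `undefined` old alignment are sentinels that implementations must not inspect beyond `.len`. A hypothetical reallocFn illustrating the convention (sketch only; `backendAlloc` is made up):

    const Allocator = @import("std").mem.Allocator;

    // stand-in for a real backend; hypothetical
    fn backendAlloc(n: usize, alignment: u29) error{OutOfMemory}![]u8 {
        return error.OutOfMemory;
    }

    fn reallocSketch(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) error{OutOfMemory}![]u8 {
        if (old_mem.len == 0) {
            // fresh allocation: old_mem.ptr (&[0]u8{}) and old_align (undefined)
            // carry no information and must not be read
            return backendAlloc(new_size, new_align);
        }
        // grow, shrink, or move the existing block; elided in this sketch
        return backendAlloc(new_size, new_align);
    }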
@@ -976,7 +976,7 @@ pub const SplitIterator = struct {
/// Naively combines a series of slices with a separator.
/// Allocates memory for the result, which must be freed by the caller.
pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
- if (slices.len == 0) return (([*]u8)(undefined))[0..0];
+ if (slices.len == 0) return &[0]u8{};
const total_len = blk: {
var sum: usize = separator.len * (slices.len - 1);
@@ -1011,7 +1011,7 @@ test "mem.join" {
/// Copies each T from slices into a new slice that exactly holds all the elements.
pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T {
- if (slices.len == 0) return (([*]T)(undefined))[0..0];
+ if (slices.len == 0) return &[0]T{};
const total_len = blk: {
var sum: usize = 0;
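With the new form, callers of join and concat get a well-defined empty slice for empty input. A usage sketch (assumes the era's std.heap.direct_allocator, whose state is visible in the heap.zig hunk above):

    const std = @import("std");

    test "join of no slices is empty" {
        const no_slices = [0][]const u8{};
        const result = try std.mem.join(std.heap.direct_allocator, ", ", no_slices[0..]);
        std.testing.expect(result.len == 0);
    }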
src/ir.cpp
@@ -10494,6 +10494,50 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT
continue;
}
+ // *[N]T to []T
+ // *[N]T to E![]T
+ if (cur_type->id == ZigTypeIdPointer &&
+ cur_type->data.pointer.child_type->id == ZigTypeIdArray &&
+ ((prev_type->id == ZigTypeIdErrorUnion && is_slice(prev_type->data.error_union.payload_type)) ||
+ is_slice(prev_type)))
+ {
+ ZigType *array_type = cur_type->data.pointer.child_type;
+ ZigType *slice_type = (prev_type->id == ZigTypeIdErrorUnion) ?
+ prev_type->data.error_union.payload_type : prev_type;
+ ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index].type_entry;
+ if ((slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
+ types_match_const_cast_only(ira,
+ slice_ptr_type->data.pointer.child_type,
+ array_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
+ {
+ convert_to_const_slice = false;
+ continue;
+ }
+ }
+
+ // *[N]T to []T
+ // *[N]T to E![]T
+ if (prev_type->id == ZigTypeIdPointer &&
+ prev_type->data.pointer.child_type->id == ZigTypeIdArray &&
+ ((cur_type->id == ZigTypeIdErrorUnion && is_slice(cur_type->data.error_union.payload_type)) ||
+ is_slice(cur_type)))
+ {
+ ZigType *array_type = prev_type->data.pointer.child_type;
+ ZigType *slice_type = (cur_type->id == ZigTypeIdErrorUnion) ?
+ cur_type->data.error_union.payload_type : cur_type;
+ ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index].type_entry;
+ if ((slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
+ types_match_const_cast_only(ira,
+ slice_ptr_type->data.pointer.child_type,
+ array_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
+ {
+ prev_inst = cur_inst;
+ convert_to_const_slice = false;
+ continue;
+ }
+ }
+
+ // [N]T to []T
if (cur_type->id == ZigTypeIdArray && is_slice(prev_type) &&
(prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
cur_type->data.array.len == 0) &&
@@ -10505,6 +10549,7 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT
continue;
}
+ // [N]T to []T
if (prev_type->id == ZigTypeIdArray && is_slice(cur_type) &&
(cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
prev_type->data.array.len == 0) &&
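These two mirrored branches let an if/else peer a pointer-to-array against a slice, or against an error union of a slice, in either operand order. A hypothetical variant of the tests at the bottom of this commit, with a nonzero-length array (allowed because the slice element type is const, satisfying the is_const check above):

    const std = @import("std");

    test "peer *[3]u8 with anyerror![]const u8" {
        var arr = [3]u8{ 1, 2, 3 };
        var fallback: anyerror![]const u8 = error.Unexpected;
        var b = true;
        var y = if (b) &arr else fallback; // resolves to anyerror![]const u8
        std.testing.expect((y catch unreachable).len == 3);
    }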
@@ -12642,12 +12687,71 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// *[N]T to []T
- if (is_slice(wanted_type) &&
+ // *[N]T to E![]T
+ if ((is_slice(wanted_type) ||
+ (wanted_type->id == ZigTypeIdErrorUnion &&
+ is_slice(wanted_type->data.error_union.payload_type))) &&
+ actual_type->id == ZigTypeIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == ZigTypeIdArray)
+ {
+ ZigType *slice_type = (wanted_type->id == ZigTypeIdErrorUnion) ?
+ wanted_type->data.error_union.payload_type : wanted_type;
+ ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index].type_entry;
+ assert(slice_ptr_type->id == ZigTypeIdPointer);
+ ZigType *array_type = actual_type->data.pointer.child_type;
+ bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0
+ || !actual_type->data.pointer.is_const);
+ if (const_ok && types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ array_type->data.array.child_type, source_node,
+ !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ // If both pointers have ABI alignment, the cast works.
+ // Likewise, if the array length is 0, alignment doesn't matter.
+ bool ok_align = array_type->data.array.len == 0 ||
+ (slice_ptr_type->data.pointer.explicit_alignment == 0 &&
+ actual_type->data.pointer.explicit_alignment == 0);
+ if (!ok_align) {
+ // If either one has non-ABI alignment, we have to resolve both
+ if ((err = type_resolve(ira->codegen, actual_type->data.pointer.child_type,
+ ResolveStatusAlignmentKnown)))
+ {
+ return ira->codegen->invalid_instruction;
+ }
+ if ((err = type_resolve(ira->codegen, slice_ptr_type->data.pointer.child_type,
+ ResolveStatusAlignmentKnown)))
+ {
+ return ira->codegen->invalid_instruction;
+ }
+ ok_align = get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, slice_ptr_type);
+ }
+ if (ok_align) {
+ if (wanted_type->id == ZigTypeIdErrorUnion) {
+ IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, slice_type, value, nullptr);
+ if (type_is_invalid(cast1->value.type))
+ return ira->codegen->invalid_instruction;
+
+ IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1, result_loc);
+ if (type_is_invalid(cast2->value.type))
+ return ira->codegen->invalid_instruction;
+
+ return cast2;
+ } else {
+ return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, slice_type, result_loc);
+ }
+ }
+ }
+ }
+
+ // *[N]T to E![]T
+ if (wanted_type->id == ZigTypeIdErrorUnion &&
+ is_slice(wanted_type->data.error_union.payload_type) &&
actual_type->id == ZigTypeIdPointer &&
actual_type->data.pointer.ptr_len == PtrLenSingle &&
actual_type->data.pointer.child_type->id == ZigTypeIdArray)
{
- ZigType *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ ZigType *slice_type = wanted_type->data.error_union.payload_type;
+ ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index].type_entry;
assert(slice_ptr_type->id == ZigTypeIdPointer);
ZigType *array_type = actual_type->data.pointer.child_type;
bool const_ok = (slice_ptr_type->data.pointer.is_const || array_type->data.array.len == 0
@@ -12674,7 +12778,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
ok_align = get_ptr_align(ira->codegen, actual_type) >= get_ptr_align(ira->codegen, slice_ptr_type);
}
if (ok_align) {
- return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type, result_loc);
+ return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, slice_type, result_loc);
}
}
}
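For the error-union target, the cast is split in two: the value is first resolved to the payload slice type, then the slice is wrapped into the error union (the cast1/cast2 pair above). In user-level terms the decomposition looks like this (illustrative):

    const std = @import("std");

    test "two-step view of *[N]T to E![]T" {
        var arr = [2]u8{ 7, 9 };
        const as_slice: []const u8 = &arr; // step 1: *[2]u8 -> []const u8
        const as_union: anyerror![]const u8 = as_slice; // step 2: wrap in the error union
        std.testing.expect((as_union catch unreachable)[1] == 9);
    }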
test/stage1/behavior/cast.zig
@@ -538,3 +538,24 @@ test "implicit cast comptime_int to comptime_float" {
comptime expect(comptime_float(10) == f32(10));
expect(2 == 2.0);
}
+
+test "implicit cast *[0]T to E![]const u8" {
+ var x = (anyerror![]const u8)(&[0]u8{});
+ expect((x catch unreachable).len == 0);
+}
+
+test "peer cast *[0]T to E![]const T" {
+ var buffer: [5]u8 = "abcde";
+ var buf: anyerror![]const u8 = buffer[0..];
+ var b = false;
+ var y = if (b) &[0]u8{} else buf;
+ expect(mem.eql(u8, "abcde", y catch unreachable));
+}
+
+test "peer cast *[0]T to []const T" {
+ var buffer: [5]u8 = "abcde";
+ var buf: []const u8 = buffer[0..];
+ var b = false;
+ var y = if (b) &[0]u8{} else buf;
+ expect(mem.eql(u8, "abcde", y));
+}