Commit a3104a4a78
Changed files (11)
src/codegen/llvm/bindings.zig
@@ -194,7 +194,7 @@ pub const Type = opaque {
extern fn LLVMConstReal(RealTy: *const Type, N: f64) *const Value;
pub const constArray = LLVMConstArray;
- extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: [*]*const Value, Length: c_uint) *const Value;
+ extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: [*]const *const Value, Length: c_uint) *const Value;
pub const constNamedStruct = LLVMConstNamedStruct;
extern fn LLVMConstNamedStruct(
src/codegen/llvm.zig
@@ -1031,54 +1031,60 @@ pub const DeclGen = struct {
},
else => |tag| return self.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
},
- .Array => {
- const gpa = self.gpa;
- if (tv.val.castTag(.bytes)) |payload| {
- const zero_sentinel = if (tv.ty.sentinel()) |sentinel| blk: {
- if (sentinel.tag() == .zero) break :blk true;
- return self.todo("handle other sentinel values", .{});
- } else false;
-
+ .Array => switch (tv.val.tag()) {
+ .bytes => {
+ const bytes = tv.val.castTag(.bytes).?.data;
return self.context.constString(
- payload.data.ptr,
- @intCast(c_uint, payload.data.len),
- llvm.Bool.fromBool(!zero_sentinel),
+ bytes.ptr,
+ @intCast(c_uint, bytes.len),
+ .True, // don't null terminate. bytes has the sentinel, if any.
);
- }
- if (tv.val.castTag(.array)) |payload| {
+ },
+ .array => {
+ const elem_vals = tv.val.castTag(.array).?.data;
const elem_ty = tv.ty.elemType();
- const elem_vals = payload.data;
- const sento = tv.ty.sentinel();
- const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len + @boolToInt(sento != null));
+ const gpa = self.gpa;
+ const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len);
defer gpa.free(llvm_elems);
for (elem_vals) |elem_val, i| {
llvm_elems[i] = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
}
- if (sento) |sent| llvm_elems[elem_vals.len] = try self.genTypedValue(.{ .ty = elem_ty, .val = sent });
const llvm_elem_ty = try self.llvmType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
- }
- if (tv.val.castTag(.repeated)) |payload| {
- const val = payload.data;
+ },
+ .repeated => {
+ const val = tv.val.castTag(.repeated).?.data;
const elem_ty = tv.ty.elemType();
+ const sentinel = tv.ty.sentinel();
const len = tv.ty.arrayLen();
-
- const llvm_elems = try gpa.alloc(*const llvm.Value, len);
+ const len_including_sent = len + @boolToInt(sentinel != null);
+ const gpa = self.gpa;
+ const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent);
defer gpa.free(llvm_elems);
- var i: u64 = 0;
- while (i < len) : (i += 1) {
- llvm_elems[i] = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
+ for (llvm_elems[0..len]) |*elem| {
+ elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
+ }
+ if (sentinel) |sent| {
+ llvm_elems[len] = try self.genTypedValue(.{ .ty = elem_ty, .val = sent });
}
const llvm_elem_ty = try self.llvmType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
- }
- return self.todo("handle more array values", .{});
+ },
+ .empty_array_sentinel => {
+ const elem_ty = tv.ty.elemType();
+ const sent_val = tv.ty.sentinel().?;
+ const sentinel = try self.genTypedValue(.{ .ty = elem_ty, .val = sent_val });
+ const llvm_elems: [1]*const llvm.Value = .{sentinel};
+ const llvm_elem_ty = try self.llvmType(elem_ty);
+ return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
+ },
+ else => unreachable,
},
.Optional => {
var buf: Type.Payload.ElemType = undefined;
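Note on the rewritten `.Array` switch above: for illustration only (not part of the commit), here is a hypothetical sketch of user-level Zig constants that exercise the three lowering branches; the `Value.Tag` mapping in the comments is an assumption inferred from the switch cases.

    const std = @import("std");

    test "constants that hit the array lowering paths" {
        const msg = "hi";             // *const [2:0]u8; payload presumably Value.Tag.bytes, sentinel included
        const xs = [3]u16{ 1, 2, 3 }; // per-element values, i.e. Value.Tag.array
        const zs = [0:0xde]u8{};      // zero length but sentinel-terminated, i.e. .empty_array_sentinel
        try std.testing.expect(msg.len == 2 and xs.len == 3 and zs.len == 0);
    }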
src/AstGen.zig
@@ -1235,7 +1235,15 @@ fn arrayInitExpr(
};
} else {
const sentinel = try comptimeExpr(gz, scope, .{ .ty = elem_type }, array_type.ast.sentinel);
- const array_type_inst = try gz.addArrayTypeSentinel(len_inst, elem_type, sentinel);
+ const array_type_inst = try gz.addPlNode(
+ .array_type_sentinel,
+ array_init.ast.type_expr,
+ Zir.Inst.ArrayTypeSentinel{
+ .len = len_inst,
+ .elem_type = elem_type,
+ .sentinel = sentinel,
+ },
+ );
break :inst .{
.array = array_type_inst,
.elem = elem_type,
@@ -1425,7 +1433,15 @@ fn structInitExpr(
break :blk try gz.addBin(.array_type, .zero_usize, elem_type);
} else blk: {
const sentinel = try comptimeExpr(gz, scope, .{ .ty = elem_type }, array_type.ast.sentinel);
- break :blk try gz.addArrayTypeSentinel(.zero_usize, elem_type, sentinel);
+ break :blk try gz.addPlNode(
+ .array_type_sentinel,
+ struct_init.ast.type_expr,
+ Zir.Inst.ArrayTypeSentinel{
+ .len = .zero_usize,
+ .elem_type = elem_type,
+ .sentinel = sentinel,
+ },
+ );
};
const result = try gz.addUnNode(.struct_init_empty, array_type_inst, node);
return rvalue(gz, rl, result, node);
@@ -2976,11 +2992,15 @@ fn arrayTypeSentinel(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.I
{
return astgen.failNode(len_node, "unable to infer array size", .{});
}
- const len = try expr(gz, scope, .{ .coerced_ty = .usize_type }, len_node);
+ const len = try reachableExpr(gz, scope, .{ .coerced_ty = .usize_type }, len_node, node);
const elem_type = try typeExpr(gz, scope, extra.elem_type);
- const sentinel = try expr(gz, scope, .{ .coerced_ty = elem_type }, extra.sentinel);
+ const sentinel = try reachableExpr(gz, scope, .{ .coerced_ty = elem_type }, extra.sentinel, node);
- const result = try gz.addArrayTypeSentinel(len, elem_type, sentinel);
+ const result = try gz.addPlNode(.array_type_sentinel, node, Zir.Inst.ArrayTypeSentinel{
+ .len = len,
+ .elem_type = elem_type,
+ .sentinel = sentinel,
+ });
return rvalue(gz, rl, result, node);
}
@@ -10017,32 +10037,6 @@ const GenZir = struct {
return indexToRef(new_index);
}
- fn addArrayTypeSentinel(
- gz: *GenZir,
- len: Zir.Inst.Ref,
- sentinel: Zir.Inst.Ref,
- elem_type: Zir.Inst.Ref,
- ) !Zir.Inst.Ref {
- const gpa = gz.astgen.gpa;
- try gz.instructions.ensureUnusedCapacity(gpa, 1);
- try gz.astgen.instructions.ensureUnusedCapacity(gpa, 1);
-
- const payload_index = try gz.astgen.addExtra(Zir.Inst.ArrayTypeSentinel{
- .sentinel = sentinel,
- .elem_type = elem_type,
- });
- const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len);
- gz.astgen.instructions.appendAssumeCapacity(.{
- .tag = .array_type_sentinel,
- .data = .{ .array_type_sentinel = .{
- .len = len,
- .payload_index = payload_index,
- } },
- });
- gz.instructions.appendAssumeCapacity(new_index);
- return indexToRef(new_index);
- }
-
fn addUnTok(
gz: *GenZir,
tag: Zir.Inst.Tag,
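For orientation, a hypothetical sketch (not part of the commit) of the source forms handled by the three call sites that now emit `.array_type_sentinel` via `addPlNode` instead of the removed `addArrayTypeSentinel` helper:

    // arrayInitExpr, structInitExpr, and arrayTypeSentinel respectively:
    const a = [2:0]u8{ 1, 2 }; // array init with an explicit sentinel
    const b = [_:0]u8{};       // empty init of a sentinel-terminated array (len inferred as 0)
    const T = [4:0]u8;         // bare `[N:S]T` type expression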
src/Module.zig
@@ -1885,6 +1885,55 @@ pub const SrcLoc = struct {
const token_starts = tree.tokens.items(.start);
return token_starts[tok_index];
},
+
+ .node_offset_array_type_len => |node_off| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node_tags = tree.nodes.items(.tag);
+ const parent_node = src_loc.declRelativeToNodeIndex(node_off);
+
+ const full: Ast.full.ArrayType = switch (node_tags[parent_node]) {
+ .array_type => tree.arrayType(parent_node),
+ .array_type_sentinel => tree.arrayTypeSentinel(parent_node),
+ else => unreachable,
+ };
+ const node = full.ast.elem_count;
+ const main_tokens = tree.nodes.items(.main_token);
+ const tok_index = main_tokens[node];
+ const token_starts = tree.tokens.items(.start);
+ return token_starts[tok_index];
+ },
+ .node_offset_array_type_sentinel => |node_off| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node_tags = tree.nodes.items(.tag);
+ const parent_node = src_loc.declRelativeToNodeIndex(node_off);
+
+ const full: Ast.full.ArrayType = switch (node_tags[parent_node]) {
+ .array_type => tree.arrayType(parent_node),
+ .array_type_sentinel => tree.arrayTypeSentinel(parent_node),
+ else => unreachable,
+ };
+ const node = full.ast.sentinel;
+ const main_tokens = tree.nodes.items(.main_token);
+ const tok_index = main_tokens[node];
+ const token_starts = tree.tokens.items(.start);
+ return token_starts[tok_index];
+ },
+ .node_offset_array_type_elem => |node_off| {
+ const tree = try src_loc.file_scope.getTree(gpa);
+ const node_tags = tree.nodes.items(.tag);
+ const parent_node = src_loc.declRelativeToNodeIndex(node_off);
+
+ const full: Ast.full.ArrayType = switch (node_tags[parent_node]) {
+ .array_type => tree.arrayType(parent_node),
+ .array_type_sentinel => tree.arrayTypeSentinel(parent_node),
+ else => unreachable,
+ };
+ const node = full.ast.elem_type;
+ const main_tokens = tree.nodes.items(.main_token);
+ const tok_index = main_tokens[node];
+ const token_starts = tree.tokens.items(.start);
+ return token_starts[tok_index];
+ },
}
}
@@ -2085,6 +2134,24 @@ pub const LazySrcLoc = union(enum) {
/// expression AST node. Next, navigate to the string literal of the `extern "foo"`.
/// The Decl is determined contextually.
node_offset_lib_name: i32,
+ /// The source location points to the len expression of an `[N:S]T`
+ /// expression, found by taking this AST node index offset from the containing
+ /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate
+ /// to the len expression.
+ /// The Decl is determined contextually.
+ node_offset_array_type_len: i32,
+ /// The source location points to the sentinel expression of an `[N:S]T`
+ /// expression, found by taking this AST node index offset from the containing
+ /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate
+ /// to the sentinel expression.
+ /// The Decl is determined contextually.
+ node_offset_array_type_sentinel: i32,
+ /// The source location points to the elem expression of an `[N:S]T`
+ /// expression, found by taking this AST node index offset from the containing
+ /// Decl AST node, which points to an `[N:S]T` expression AST node. Next, navigate
+ /// to the elem expression.
+ /// The Decl is determined contextually.
+ node_offset_array_type_elem: i32,
/// Upgrade to a `SrcLoc` based on the `Decl` provided.
pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl) SrcLoc {
@@ -2130,6 +2197,9 @@ pub const LazySrcLoc = union(enum) {
.node_offset_fn_type_ret_ty,
.node_offset_anyframe_type,
.node_offset_lib_name,
+ .node_offset_array_type_len,
+ .node_offset_array_type_sentinel,
+ .node_offset_array_type_elem,
=> .{
.file_scope = decl.getFileScope(),
.parent_decl_node = decl.src_node,
@@ -4125,36 +4195,6 @@ pub fn optionalType(arena: *Allocator, child_type: Type) Allocator.Error!Type {
}
}
-pub fn arrayType(
- arena: *Allocator,
- len: u64,
- sentinel: ?Value,
- elem_type: Type,
-) Allocator.Error!Type {
- if (elem_type.eql(Type.initTag(.u8))) {
- if (sentinel) |some| {
- if (some.eql(Value.initTag(.zero), elem_type)) {
- return Type.Tag.array_u8_sentinel_0.create(arena, len);
- }
- } else {
- return Type.Tag.array_u8.create(arena, len);
- }
- }
-
- if (sentinel) |some| {
- return Type.Tag.array_sentinel.create(arena, .{
- .len = len,
- .sentinel = some,
- .elem_type = elem_type,
- });
- }
-
- return Type.Tag.array.create(arena, .{
- .len = len,
- .elem_type = elem_type,
- });
-}
-
pub fn errorUnionType(
arena: *Allocator,
error_set: Type,
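A hypothetical example (not part of the commit) of which sub-expressions the three new `LazySrcLoc` variants select for an `[N:S]T` node; `computeLen` is made up purely to give the len expression a distinct AST node.

    fn computeLen() usize {
        return 4;
    }
    const T = [computeLen():0]u8;
    // .node_offset_array_type_len      -> `computeLen()`
    // .node_offset_array_type_sentinel -> `0`
    // .node_offset_array_type_elem     -> `u8`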
src/print_zir.zig
@@ -542,14 +542,15 @@ const Writer = struct {
stream: anytype,
inst: Zir.Inst.Index,
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
- const inst_data = self.code.instructions.items(.data)[inst].array_type_sentinel;
+ const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
- try self.writeInstRef(stream, inst_data.len);
+ try self.writeInstRef(stream, extra.len);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.sentinel);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.elem_type);
- try stream.writeAll(")");
+ try stream.writeAll(") ");
+ try self.writeSrc(stream, inst_data.src());
}
fn writePtrTypeSimple(
src/Sema.zig
@@ -1021,7 +1021,7 @@ fn resolveConstString(
const wanted_type = Type.initTag(.const_slice_u8);
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst);
- return val.toAllocatedBytes(sema.arena);
+ return val.toAllocatedBytes(wanted_type, sema.arena);
}
pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
@@ -2436,10 +2436,10 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
- const bytes = try new_decl_arena.allocator.dupe(u8, zir_bytes);
+ const bytes = try new_decl_arena.allocator.dupeZ(u8, zir_bytes);
const decl_ty = try Type.Tag.array_u8_sentinel_0.create(&new_decl_arena.allocator, bytes.len);
- const decl_val = try Value.Tag.bytes.create(&new_decl_arena.allocator, bytes);
+ const decl_val = try Value.Tag.bytes.create(&new_decl_arena.allocator, bytes[0 .. bytes.len + 1]);
const new_decl = try sema.mod.createAnonymousDecl(block, .{
.ty = decl_ty,
@@ -3747,7 +3747,7 @@ fn analyzeCall(
.code = fn_zir,
.owner_decl = new_decl,
.func = null,
- .fn_ret_ty = Type.initTag(.void),
+ .fn_ret_ty = Type.void,
.owner_func = null,
.comptime_args = try new_decl_arena.allocator.alloc(TypedValue, uncasted_args.len),
.comptime_args_fn_inst = module_fn.zir_body_inst,
@@ -4040,9 +4040,9 @@ fn zirArrayType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
defer tracy.end();
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
- const len = try sema.resolveInt(block, .unneeded, bin_inst.lhs, Type.initTag(.usize));
+ const len = try sema.resolveInt(block, .unneeded, bin_inst.lhs, Type.usize);
const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs);
- const array_ty = try Module.arrayType(sema.arena, len, null, elem_type);
+ const array_ty = try Type.array(sema.arena, len, null, elem_type);
return sema.addType(array_ty);
}
@@ -4051,14 +4051,17 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
const tracy = trace(@src());
defer tracy.end();
- const inst_data = sema.code.instructions.items(.data)[inst].array_type_sentinel;
- const len = try sema.resolveInt(block, .unneeded, inst_data.len, Type.initTag(.usize));
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
- const elem_type = try sema.resolveType(block, .unneeded, extra.elem_type);
+ const len_src: LazySrcLoc = .{ .node_offset_array_type_len = inst_data.src_node };
+ const sentinel_src: LazySrcLoc = .{ .node_offset_array_type_sentinel = inst_data.src_node };
+ const elem_src: LazySrcLoc = .{ .node_offset_array_type_elem = inst_data.src_node };
+ const len = try sema.resolveInt(block, len_src, extra.len, Type.usize);
+ const elem_type = try sema.resolveType(block, elem_src, extra.elem_type);
const uncasted_sentinel = sema.resolveInst(extra.sentinel);
- const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, .unneeded);
- const sentinel_val = try sema.resolveConstValue(block, .unneeded, sentinel);
- const array_ty = try Module.arrayType(sema.arena, len, sentinel_val, elem_type);
+ const sentinel = try sema.coerce(block, elem_type, uncasted_sentinel, sentinel_src);
+ const sentinel_val = try sema.resolveConstValue(block, sentinel_src, sentinel);
+ const array_ty = try Type.array(sema.arena, len, sentinel_val, elem_type);
return sema.addType(array_ty);
}
@@ -4658,7 +4661,7 @@ fn funcCommon(
// the function as generic.
var is_generic = false;
const bare_return_type: Type = ret_ty: {
- if (ret_ty_body.len == 0) break :ret_ty Type.initTag(.void);
+ if (ret_ty_body.len == 0) break :ret_ty Type.void;
const err = err: {
// Make sure any nested param instructions don't clobber our work.
@@ -6560,13 +6563,14 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| {
const final_len = lhs_info.len + rhs_info.len;
+ const final_len_including_sent = final_len + @boolToInt(res_sent != null);
const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const lhs_sub_val = if (is_pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
const rhs_sub_val = if (is_pointer) (try rhs_val.pointerDeref(anon_decl.arena())).? else rhs_val;
- const buf = try anon_decl.arena().alloc(Value, final_len);
+ const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
{
var i: u64 = 0;
while (i < lhs_info.len) : (i += 1) {
@@ -6581,10 +6585,17 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
buf[lhs_info.len + i] = try val.copy(anon_decl.arena());
}
}
- const ty = if (res_sent) |rs|
- try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type, .sentinel = rs })
- else
- try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type });
+ const ty = if (res_sent) |rs| ty: {
+ buf[final_len] = try rs.copy(anon_decl.arena());
+ break :ty try Type.Tag.array_sentinel.create(anon_decl.arena(), .{
+ .len = final_len,
+ .elem_type = lhs_info.elem_type,
+ .sentinel = rs,
+ });
+ } else try Type.Tag.array.create(anon_decl.arena(), .{
+ .len = final_len,
+ .elem_type = lhs_info.elem_type,
+ });
const val = try Value.Tag.array.create(anon_decl.arena(), buf);
return if (is_pointer)
sema.analyzeDeclRef(try anon_decl.finish(ty, val))
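A behavior sketch (not from the commit) of what the `final_len_including_sent` change above enables, assuming `++` of operands with matching sentinels keeps that sentinel, as the `res_sent` handling implements:

    const std = @import("std");

    test "comptime ++ writes the shared sentinel" {
        const a = [2:0]u8{ 1, 2 };
        const b = [2:0]u8{ 3, 4 };
        const c = a ++ b;                  // res_sent is 0, so the buffer gets one extra slot
        try std.testing.expect(c.len == 4);
        try std.testing.expect(c[4] == 0); // index `len` reads the stored sentinel
    }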
@@ -6623,20 +6634,31 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
// In `**` rhs has to be comptime-known, but lhs can be runtime-known
- const tomulby = try sema.resolveInt(block, rhs_src, extra.rhs, Type.initTag(.usize));
+ const tomulby = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize);
const mulinfo = getArrayCatInfo(lhs_ty) orelse
return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty});
- const final_len = std.math.mul(u64, mulinfo.len, tomulby) catch return sema.fail(block, rhs_src, "operation results in overflow", .{});
+ const final_len = std.math.mul(u64, mulinfo.len, tomulby) catch
+ return sema.fail(block, rhs_src, "operation results in overflow", .{});
+ const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null);
+
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
+
const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
const final_ty = if (mulinfo.sentinel) |sent|
- try Type.Tag.array_sentinel.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type, .sentinel = sent })
+ try Type.Tag.array_sentinel.create(anon_decl.arena(), .{
+ .len = final_len,
+ .elem_type = mulinfo.elem_type,
+ .sentinel = sent,
+ })
else
- try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = mulinfo.elem_type });
- const buf = try anon_decl.arena().alloc(Value, final_len);
+ try Type.Tag.array.create(anon_decl.arena(), .{
+ .len = final_len,
+ .elem_type = mulinfo.elem_type,
+ });
+ const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
// handles the optimisation where arr.len == 0 : [_]T { X } ** N
const val = if (mulinfo.len == 1) blk: {
@@ -6652,6 +6674,9 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
buf[mulinfo.len * i + j] = try val.copy(anon_decl.arena());
}
}
+ if (mulinfo.sentinel) |sent| {
+ buf[final_len] = try sent.copy(anon_decl.arena());
+ }
break :blk try Value.Tag.array.create(anon_decl.arena(), buf);
};
if (lhs_ty.zigTypeTag() == .Pointer) {
@@ -6760,7 +6785,7 @@ fn analyzeArithmetic(
};
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
- const casted_rhs = try sema.coerce(block, Type.initTag(.usize), rhs, rhs_src);
+ const casted_rhs = try sema.coerce(block, Type.usize, rhs, rhs_src);
const runtime_src = runtime_src: {
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveDefinedValue(block, rhs_src, casted_rhs)) |rhs_val| {
@@ -8521,9 +8546,21 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const struct_type = try sema.resolveType(block, src, inst_data.operand);
+ const obj_ty = try sema.resolveType(block, src, inst_data.operand);
- return sema.addConstant(struct_type, Value.initTag(.empty_struct_value));
+ switch (obj_ty.zigTypeTag()) {
+ .Struct => return sema.addConstant(obj_ty, Value.initTag(.empty_struct_value)),
+ .Array => {
+ if (obj_ty.sentinel()) |sentinel| {
+ const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel);
+ return sema.addConstant(obj_ty, val);
+ } else {
+ return sema.addConstant(obj_ty, Value.initTag(.empty_array));
+ }
+ },
+ .Void => return sema.addConstant(obj_ty, Value.void),
+ else => unreachable,
+ }
}
fn zirUnionInitPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
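For illustration, a minimal test (a sketch mirroring the new behavior test added later in this commit) of the `.Array` branch in `zirStructInitEmpty`, where an empty sentinel-terminated array becomes an `empty_array_sentinel` value:

    const std = @import("std");

    test "empty init of a sentinel-terminated array" {
        const zero_sized = [0:0xde]u8{};                        // struct_init_empty on an .Array type
        comptime try std.testing.expect(zero_sized[0] == 0xde); // index 0 is the sentinel slot
    }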
@@ -8973,7 +9010,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand_res = sema.resolveInst(extra.rhs);
- const operand_coerced = try sema.coerce(block, Type.initTag(.usize), operand_res, operand_src);
+ const operand_coerced = try sema.coerce(block, Type.usize, operand_res, operand_src);
const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const type_res = try sema.resolveType(block, src, extra.lhs);
@@ -9010,7 +9047,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.data = ptr_align - 1,
};
const align_minus_1 = try sema.addConstant(
- Type.initTag(.usize),
+ Type.usize,
Value.initPayload(&val_payload.base),
);
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
@@ -9295,6 +9332,48 @@ fn checkAtomicOperandType(
}
}
+fn checkPtrIsNotComptimeMutable(
+ sema: *Sema,
+ block: *Block,
+ ptr_val: Value,
+ ptr_src: LazySrcLoc,
+ operand_src: LazySrcLoc,
+) CompileError!void {
+ _ = operand_src;
+ if (ptr_val.isComptimeMutablePtr()) {
+ return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{});
+ }
+}
+
+fn checkComptimeVarStore(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ decl_ref_mut: Value.Payload.DeclRefMut.Data,
+) CompileError!void {
+ if (decl_ref_mut.runtime_index < block.runtime_index) {
+ if (block.runtime_cond) |cond_src| {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(block, cond_src, msg, "runtime condition here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+ if (block.runtime_loop) |loop_src| {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(block, loop_src, msg, "non-inline loop here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+ unreachable;
+ }
+}
+
fn resolveExportOptions(
sema: *Sema,
block: *Block,
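A hypothetical program (not part of the commit, and intentionally not compiling) of the kind rejected by `checkComptimeVarStore` above, matching the error and note strings it emits:

    fn rejected(runtime_cond: bool) void {
        comptime var x: u32 = 0;
        if (runtime_cond) {
            // error: store to comptime variable depends on runtime condition
            // note: runtime condition here
            x += 1;
        }
    }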
@@ -9313,8 +9392,9 @@ fn resolveExportOptions(
if (!fields[section_index].isNull()) {
return sema.fail(block, src, "TODO: implement exporting with linksection", .{});
}
+ const name_ty = Type.initTag(.const_slice_u8);
return std.builtin.ExportOptions{
- .name = try fields[name_index].toAllocatedBytes(sema.arena),
+ .name = try fields[name_index].toAllocatedBytes(name_ty, sema.arena),
.linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage),
.section = null, // TODO
};
@@ -9547,7 +9627,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
- if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| {
+ const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand);
+ const operand_val = maybe_operand_val orelse {
+ try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
+ break :rs operand_src;
+ };
+ if (ptr_val.isComptimeMutablePtr()) {
const target = sema.mod.getTarget();
const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
const new_val = switch (op) {
@@ -9565,7 +9650,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty);
return sema.addConstant(operand_ty, stored_val);
- } else break :rs operand_src;
+ } else break :rs ptr_src;
} else ptr_src;
const flags: u32 = @as(u32, @enumToInt(order)) | (@as(u32, @enumToInt(op)) << 3);
@@ -9682,7 +9767,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
.size = .Many,
});
const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src);
- const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src);
+ const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src);
const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
const maybe_src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr);
@@ -9729,7 +9814,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
}
const elem_ty = dest_ptr_ty.elemType2();
const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src);
- const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src);
+ const len = try sema.coerce(block, Type.usize, sema.resolveInst(extra.byte_count), len_src);
const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr);
const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len);
@@ -10270,7 +10355,7 @@ fn fieldVal(
try sema.requireRuntimeBlock(block, src);
return block.addTyOp(.slice_ptr, result_ty, object);
} else if (mem.eql(u8, field_name, "len")) {
- const result_ty = Type.initTag(.usize);
+ const result_ty = Type.usize;
if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| {
if (val.isUndef()) return sema.addConstUndef(result_ty);
return sema.addConstant(
@@ -10955,7 +11040,7 @@ fn elemVal(
return block.addBinOp(.ptr_elem_val, array_maybe_ptr, elem_index);
},
.One => {
- const indexable_ty = maybe_ptr_ty.elemType();
+ const indexable_ty = maybe_ptr_ty.childType();
switch (indexable_ty.zigTypeTag()) {
.Pointer => switch (indexable_ty.ptrSize()) {
.Slice => {
@@ -10986,12 +11071,22 @@ fn elemVal(
try sema.requireRuntimeBlock(block, src);
return block.addBinOp(.ptr_ptr_elem_val, array_maybe_ptr, elem_index);
},
- .One => return sema.fail(
- block,
- array_ptr_src,
- "expected pointer, found '{}'",
- .{indexable_ty.elemType()},
- ),
+ .One => {
+ const array_ty = indexable_ty.childType();
+ if (array_ty.zigTypeTag() == .Array) {
+ // We have a double pointer to an array, and we want an element
+ // value. This can happen with this code for example:
+ // var a: *[1]u8 = undefined; _ = a[0];
+ const array_ptr = try sema.analyzeLoad(block, src, array_maybe_ptr, array_ptr_src);
+ const ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src);
+ return sema.analyzeLoad(block, src, ptr, elem_index_src);
+ } else return sema.fail(
+ block,
+ array_ptr_src,
+ "expected pointer, found '{}'",
+ .{array_ty},
+ );
+ },
},
.Array => {
const ptr = try sema.elemPtr(block, src, array_maybe_ptr, elem_index, elem_index_src);
@@ -11463,13 +11558,15 @@ fn storePtr2(
return;
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
- const operand_val = (try sema.resolveMaybeUndefVal(block, operand_src, operand)) orelse
- return sema.fail(block, src, "cannot store runtime value in compile time variable", .{});
- if (ptr_val.tag() == .decl_ref_mut) {
+ const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand);
+ const operand_val = maybe_operand_val orelse {
+ try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
+ break :rs operand_src;
+ };
+ if (ptr_val.isComptimeMutablePtr()) {
try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
return;
- }
- break :rs operand_src;
+ } else break :rs ptr_src;
} else ptr_src;
// TODO handle if the element type requires comptime
@@ -11489,40 +11586,166 @@ fn storePtrVal(
operand_val: Value,
operand_ty: Type,
) !void {
- if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| {
- if (decl_ref_mut.data.runtime_index < block.runtime_index) {
- if (block.runtime_cond) |cond_src| {
- const msg = msg: {
- const msg = try sema.errMsg(block, src, "store to comptime variable depends on runtime condition", .{});
- errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, cond_src, msg, "runtime condition here", .{});
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
+ var kit = try beginComptimePtrMutation(sema, block, src, ptr_val);
+ try sema.checkComptimeVarStore(block, src, kit.decl_ref_mut);
+
+ const target = sema.mod.getTarget();
+ const bitcasted_val = try operand_val.bitCast(operand_ty, kit.ty, target, sema.gpa, sema.arena);
+
+ const arena = kit.beginArena(sema.gpa);
+ defer kit.finishArena();
+
+ kit.val.* = try bitcasted_val.copy(arena);
+}
+
+const ComptimePtrMutationKit = struct {
+ decl_ref_mut: Value.Payload.DeclRefMut.Data,
+ val: *Value,
+ ty: Type,
+ decl_arena: std.heap.ArenaAllocator = undefined,
+
+ fn beginArena(self: *ComptimePtrMutationKit, gpa: *Allocator) *Allocator {
+ self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
+ return &self.decl_arena.allocator;
+ }
+
+ fn finishArena(self: *ComptimePtrMutationKit) void {
+ self.decl_ref_mut.decl.value_arena.?.* = self.decl_arena.state;
+ self.decl_arena = undefined;
+ }
+};
+
+fn beginComptimePtrMutation(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ptr_val: Value,
+) CompileError!ComptimePtrMutationKit {
+ switch (ptr_val.tag()) {
+ .decl_ref_mut => {
+ const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data;
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = decl_ref_mut,
+ .val = &decl_ref_mut.decl.val,
+ .ty = decl_ref_mut.decl.ty,
+ };
+ },
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ var parent = try beginComptimePtrMutation(sema, block, src, elem_ptr.array_ptr);
+ const elem_ty = parent.ty.childType();
+ switch (parent.val.tag()) {
+ .undef => {
+ // An array has been initialized to undefined at comptime and now we
+ // are for the first time setting an element. We must change the representation
+ // of the array from `undef` to `array`.
+ const arena = parent.beginArena(sema.gpa);
+ defer parent.finishArena();
+
+ const elems = try arena.alloc(Value, parent.ty.arrayLenIncludingSentinel());
+ mem.set(Value, elems, Value.undef);
+
+ parent.val.* = try Value.Tag.array.create(arena, elems);
+
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &elems[elem_ptr.index],
+ .ty = elem_ty,
+ };
+ },
+ .bytes => {
+ // An array is memory-optimized to store a slice of bytes, but we are about
+ // to modify an individual field and the representation has to change.
+ // If we wanted to avoid this, there would need to be special detection
+ // elsewhere to identify when writing a value to an array element that is stored
+ // using the `bytes` tag, and handle it without making a call to this function.
+ const arena = parent.beginArena(sema.gpa);
+ defer parent.finishArena();
+
+ const bytes = parent.val.castTag(.bytes).?.data;
+ assert(bytes.len == parent.ty.arrayLenIncludingSentinel());
+ const elems = try arena.alloc(Value, bytes.len);
+ for (elems) |*elem, i| {
+ elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
+ }
+
+ parent.val.* = try Value.Tag.array.create(arena, elems);
+
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &elems[elem_ptr.index],
+ .ty = elem_ty,
+ };
+ },
+ .repeated => {
+ // An array is memory-optimized to store only a single element value, and
+ // that value is understood to be the same for the entire length of the array.
+ // However, now we want to modify an individual field and so the
+ // representation has to change. If we wanted to avoid this, there would
+ // need to be special detection elsewhere to identify when writing a value to an
+ // array element that is stored using the `repeated` tag, and handle it
+ // without making a call to this function.
+ const arena = parent.beginArena(sema.gpa);
+ defer parent.finishArena();
+
+ const repeated_val = try parent.val.castTag(.repeated).?.data.copy(arena);
+ const elems = try arena.alloc(Value, parent.ty.arrayLenIncludingSentinel());
+ mem.set(Value, elems, repeated_val);
+
+ parent.val.* = try Value.Tag.array.create(arena, elems);
+
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &elems[elem_ptr.index],
+ .ty = elem_ty,
+ };
+ },
+
+ .array => return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &parent.val.castTag(.array).?.data[elem_ptr.index],
+ .ty = elem_ty,
+ },
+
+ else => unreachable,
}
- if (block.runtime_loop) |loop_src| {
- const msg = msg: {
- const msg = try sema.errMsg(block, src, "cannot store to comptime variable in non-inline loop", .{});
- errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, loop_src, msg, "non-inline loop here", .{});
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ var parent = try beginComptimePtrMutation(sema, block, src, field_ptr.container_ptr);
+ const field_ty = parent.ty.structFieldType(field_ptr.field_index);
+ switch (parent.val.tag()) {
+ .undef => {
+ // A struct has been initialized to undefined at comptime and now we
+ // are for the first time setting a field. We must change the representation
+ // of the struct from `undef` to `struct`.
+ const arena = parent.beginArena(sema.gpa);
+ defer parent.finishArena();
+
+ const fields = try arena.alloc(Value, parent.ty.structFieldCount());
+ mem.set(Value, fields, Value.undef);
+
+ parent.val.* = try Value.Tag.@"struct".create(arena, fields);
+
+ return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &fields[field_ptr.field_index],
+ .ty = field_ty,
+ };
+ },
+ .@"struct" => return ComptimePtrMutationKit{
+ .decl_ref_mut = parent.decl_ref_mut,
+ .val = &parent.val.castTag(.@"struct").?.data[field_ptr.field_index],
+ .ty = field_ty,
+ },
+
+ else => unreachable,
}
- unreachable;
- }
- var new_arena = std.heap.ArenaAllocator.init(sema.gpa);
- errdefer new_arena.deinit();
- const new_ty = try operand_ty.copy(&new_arena.allocator);
- const new_val = try operand_val.copy(&new_arena.allocator);
- const decl = decl_ref_mut.data.decl;
- var old_arena = decl.value_arena.?.promote(sema.gpa);
- decl.value_arena = null;
- try decl.finalizeNewArena(&new_arena);
- decl.ty = new_ty;
- decl.val = new_val;
- old_arena.deinit();
- return;
+ },
+ .eu_payload_ptr => return sema.fail(block, src, "TODO comptime store to eu_payload_ptr", .{}),
+ .opt_payload_ptr => return sema.fail(block, src, "TODO comptime store opt_payload_ptr", .{}),
+ .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already
+ else => unreachable,
}
}
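A user-level sketch (not from the commit) of a comptime store through an `elem_ptr` that `beginComptimePtrMutation` now handles; whether the initial representation is the `bytes` tag is an assumption, but either way the write forces a per-element `array` representation as described in the comments above:

    const std = @import("std");

    test "comptime store through an element pointer" {
        comptime {
            var msg = "hello".*; // [5:0]u8
            msg[0] = 'j';        // elem_ptr store; representation becomes per-element values
            try std.testing.expect(msg[0] == 'j');
        }
    }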
@@ -11672,7 +11895,7 @@ fn analyzeLoad(
) CompileError!Air.Inst.Ref {
const ptr_ty = sema.typeOf(ptr);
const elem_ty = switch (ptr_ty.zigTypeTag()) {
- .Pointer => ptr_ty.elemType(),
+ .Pointer => ptr_ty.childType(),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}),
};
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
@@ -11693,12 +11916,12 @@ fn analyzeSliceLen(
) CompileError!Air.Inst.Ref {
if (try sema.resolveMaybeUndefVal(block, src, slice_inst)) |slice_val| {
if (slice_val.isUndef()) {
- return sema.addConstUndef(Type.initTag(.usize));
+ return sema.addConstUndef(Type.usize);
}
return sema.addIntUnsigned(Type.usize, slice_val.sliceLen());
}
try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.slice_len, Type.initTag(.usize), slice_inst);
+ return block.addTyOp(.slice_len, Type.usize, slice_inst);
}
fn analyzeIsNull(
@@ -11806,7 +12029,7 @@ fn analyzeSlice(
array_type.sentinel()
else
slice_sentinel;
- return_elem_type = try Module.arrayType(sema.arena, len, array_sentinel, elem_type);
+ return_elem_type = try Type.array(sema.arena, len, array_sentinel, elem_type);
return_ptr_size = .One;
}
}
@@ -12396,7 +12619,7 @@ fn semaStructFields(
.code = zir,
.owner_decl = decl,
.func = null,
- .fn_ret_ty = Type.initTag(.void),
+ .fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
@@ -12566,7 +12789,7 @@ fn semaUnionFields(
.code = zir,
.owner_decl = decl,
.func = null,
- .fn_ret_ty = Type.initTag(.void),
+ .fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();
@@ -12677,7 +12900,7 @@ fn semaUnionFields(
}
const field_ty: Type = if (!has_type)
- Type.initTag(.void)
+ Type.void
else if (field_type_ref == .none)
Type.initTag(.noreturn)
else
@@ -12959,7 +13182,7 @@ fn typeHasOnePossibleValue(
},
.empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),
- .void => return Value.initTag(.void_value),
+ .void => return Value.void,
.noreturn => return Value.initTag(.unreachable_value),
.@"null" => return Value.initTag(.null_value),
.@"undefined" => return Value.initTag(.undef),
src/type.zig
@@ -1468,7 +1468,19 @@ pub const Type = extern union {
// TODO lazy types
.array, .vector => self.elemType().hasCodeGenBits() and self.arrayLen() != 0,
.array_u8 => self.arrayLen() != 0,
- .array_sentinel, .single_const_pointer, .single_mut_pointer, .many_const_pointer, .many_mut_pointer, .c_const_pointer, .c_mut_pointer, .const_slice, .mut_slice, .pointer => self.elemType().hasCodeGenBits(),
+
+ .array_sentinel,
+ .single_const_pointer,
+ .single_mut_pointer,
+ .many_const_pointer,
+ .many_mut_pointer,
+ .c_const_pointer,
+ .c_mut_pointer,
+ .const_slice,
+ .mut_slice,
+ .pointer,
+ => self.childType().hasCodeGenBits(),
+
.int_signed, .int_unsigned => self.cast(Payload.Bits).?.data != 0,
.error_union => {
@@ -2560,18 +2572,22 @@ pub const Type = extern union {
}
/// Asserts the type is an array or vector.
- pub fn arrayLen(self: Type) u64 {
- return switch (self.tag()) {
- .vector => self.castTag(.vector).?.data.len,
- .array => self.castTag(.array).?.data.len,
- .array_sentinel => self.castTag(.array_sentinel).?.data.len,
- .array_u8 => self.castTag(.array_u8).?.data,
- .array_u8_sentinel_0 => self.castTag(.array_u8_sentinel_0).?.data,
+ pub fn arrayLen(ty: Type) u64 {
+ return switch (ty.tag()) {
+ .vector => ty.castTag(.vector).?.data.len,
+ .array => ty.castTag(.array).?.data.len,
+ .array_sentinel => ty.castTag(.array_sentinel).?.data.len,
+ .array_u8 => ty.castTag(.array_u8).?.data,
+ .array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data,
else => unreachable,
};
}
+ pub fn arrayLenIncludingSentinel(ty: Type) u64 {
+ return ty.arrayLen() + @boolToInt(ty.sentinel() != null);
+ }
+
/// Asserts the type is an array, pointer or vector.
pub fn sentinel(self: Type) ?Value {
return switch (self.tag()) {
@@ -3882,9 +3898,11 @@ pub const Type = extern union {
};
};
+ pub const @"u8" = initTag(.u8);
pub const @"bool" = initTag(.bool);
pub const @"usize" = initTag(.usize);
pub const @"comptime_int" = initTag(.comptime_int);
+ pub const @"void" = initTag(.void);
pub fn ptr(arena: *Allocator, d: Payload.Pointer.Data) !Type {
assert(d.host_size == 0 or d.bit_offset < d.host_size * 8);
@@ -3917,6 +3935,36 @@ pub const Type = extern union {
return Type.initPayload(&type_payload.base);
}
+ pub fn array(
+ arena: *Allocator,
+ len: u64,
+ sent: ?Value,
+ elem_type: Type,
+ ) Allocator.Error!Type {
+ if (elem_type.eql(Type.u8)) {
+ if (sent) |some| {
+ if (some.eql(Value.initTag(.zero), elem_type)) {
+ return Tag.array_u8_sentinel_0.create(arena, len);
+ }
+ } else {
+ return Tag.array_u8.create(arena, len);
+ }
+ }
+
+ if (sent) |some| {
+ return Tag.array_sentinel.create(arena, .{
+ .len = len,
+ .sentinel = some,
+ .elem_type = elem_type,
+ });
+ }
+
+ return Tag.array.create(arena, .{
+ .len = len,
+ .elem_type = elem_type,
+ });
+ }
+
pub fn smallestUnsignedBits(max: u64) u16 {
if (max == 0) return 0;
const base = std.math.log2(max);
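A compiler-internal usage sketch (hypothetical, assuming this file's existing imports `std`, `Allocator`, and `Value`, plus an arena supplied by the caller) of the relocated `Type.array` factory and the new `arrayLenIncludingSentinel`:

    fn exampleArrayType(arena: *Allocator) !void {
        // `[3:0]u8` takes the memory-optimized .array_u8_sentinel_0 representation.
        const ty = try Type.array(arena, 3, Value.initTag(.zero), Type.u8);
        std.debug.assert(ty.arrayLen() == 3);
        std.debug.assert(ty.arrayLenIncludingSentinel() == 4);
    }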
src/value.zig
@@ -112,7 +112,9 @@ pub const Value = extern union {
/// This Tag will never be seen by machine codegen backends. It is changed into a
/// `decl_ref` when a comptime variable goes out of scope.
decl_ref_mut,
+ /// Pointer to a specific element of an array.
elem_ptr,
+ /// Pointer to a specific field of a struct.
field_ptr,
/// A slice of u8 whose memory is managed externally.
bytes,
@@ -120,7 +122,11 @@ pub const Value = extern union {
/// is stored externally.
repeated,
/// Each element stored as a `Value`.
+ /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
+ /// so the slice length will be one more than the type's array length.
array,
+ /// An array with length 0 but it has a sentinel.
+ empty_array_sentinel,
/// Pointer and length as sub `Value` objects.
slice,
float_16,
@@ -255,6 +261,7 @@ pub const Value = extern union {
.eu_payload_ptr,
.opt_payload,
.opt_payload_ptr,
+ .empty_array_sentinel,
=> Payload.SubValue,
.bytes,
@@ -486,6 +493,7 @@ pub const Value = extern union {
.eu_payload_ptr,
.opt_payload,
.opt_payload_ptr,
+ .empty_array_sentinel,
=> {
const payload = self.cast(Payload.SubValue).?;
const new_payload = try arena.create(Payload.SubValue);
@@ -697,6 +705,7 @@ pub const Value = extern union {
val = val.castTag(.repeated).?.data;
},
.array => return out_stream.writeAll("(array)"),
+ .empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"),
.slice => return out_stream.writeAll("(slice)"),
.float_16 => return out_stream.print("{}", .{val.castTag(.float_16).?.data}),
.float_32 => return out_stream.print("{}", .{val.castTag(.float_32).?.data}),
@@ -731,22 +740,23 @@ pub const Value = extern union {
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
- pub fn toAllocatedBytes(self: Value, allocator: *Allocator) ![]u8 {
- if (self.castTag(.bytes)) |payload| {
- return std.mem.dupe(allocator, u8, payload.data);
- }
- if (self.castTag(.enum_literal)) |payload| {
- return std.mem.dupe(allocator, u8, payload.data);
- }
- if (self.castTag(.repeated)) |payload| {
- _ = payload;
- @panic("TODO implement toAllocatedBytes for this Value tag");
- }
- if (self.castTag(.decl_ref)) |payload| {
- const val = try payload.data.value();
- return val.toAllocatedBytes(allocator);
+ pub fn toAllocatedBytes(val: Value, ty: Type, allocator: *Allocator) ![]u8 {
+ switch (val.tag()) {
+ .bytes => {
+ const bytes = val.castTag(.bytes).?.data;
+ const adjusted_len = bytes.len - @boolToInt(ty.sentinel() != null);
+ const adjusted_bytes = bytes[0..adjusted_len];
+ return std.mem.dupe(allocator, u8, adjusted_bytes);
+ },
+ .enum_literal => return std.mem.dupe(allocator, u8, val.castTag(.enum_literal).?.data),
+ .repeated => @panic("TODO implement toAllocatedBytes for this Value tag"),
+ .decl_ref => {
+ const decl = val.castTag(.decl_ref).?.data;
+ const decl_val = try decl.value();
+ return decl_val.toAllocatedBytes(decl.ty, allocator);
+ },
+ else => unreachable,
}
- unreachable;
}
pub const ToTypeBuffer = Type.Payload.Bits;
@@ -965,6 +975,8 @@ pub const Value = extern union {
gpa: *Allocator,
arena: *Allocator,
) !Value {
+ if (old_ty.eql(new_ty)) return val;
+
// For types with well-defined memory layouts, we serialize them a byte buffer,
// then deserialize to the new type.
const buffer = try gpa.alloc(u8, old_ty.abiSize(target));
@@ -1527,37 +1539,34 @@ pub const Value = extern union {
/// Asserts the value is a pointer and dereferences it.
/// Returns error.AnalysisFail if the pointer points to a Decl that failed semantic analysis.
- pub fn pointerDeref(
- self: Value,
- allocator: *Allocator,
- ) error{ AnalysisFail, OutOfMemory }!?Value {
- const sub_val: Value = switch (self.tag()) {
- .decl_ref_mut => val: {
+ pub fn pointerDeref(val: Value, arena: *Allocator) error{ AnalysisFail, OutOfMemory }!?Value {
+ const sub_val: Value = switch (val.tag()) {
+ .decl_ref_mut => sub_val: {
// The decl whose value we are obtaining here may be overwritten with
// a different value, which would invalidate this memory. So we must
// copy here.
- const val = try self.castTag(.decl_ref_mut).?.data.decl.value();
- break :val try val.copy(allocator);
+ const sub_val = try val.castTag(.decl_ref_mut).?.data.decl.value();
+ break :sub_val try sub_val.copy(arena);
},
- .decl_ref => try self.castTag(.decl_ref).?.data.value(),
+ .decl_ref => try val.castTag(.decl_ref).?.data.value(),
.elem_ptr => blk: {
- const elem_ptr = self.castTag(.elem_ptr).?.data;
- const array_val = (try elem_ptr.array_ptr.pointerDeref(allocator)) orelse return null;
- break :blk try array_val.elemValue(allocator, elem_ptr.index);
+ const elem_ptr = val.castTag(.elem_ptr).?.data;
+ const array_val = (try elem_ptr.array_ptr.pointerDeref(arena)) orelse return null;
+ break :blk try array_val.elemValue(arena, elem_ptr.index);
},
.field_ptr => blk: {
- const field_ptr = self.castTag(.field_ptr).?.data;
- const container_val = (try field_ptr.container_ptr.pointerDeref(allocator)) orelse return null;
- break :blk try container_val.fieldValue(allocator, field_ptr.field_index);
+ const field_ptr = val.castTag(.field_ptr).?.data;
+ const container_val = (try field_ptr.container_ptr.pointerDeref(arena)) orelse return null;
+ break :blk try container_val.fieldValue(arena, field_ptr.field_index);
},
.eu_payload_ptr => blk: {
- const err_union_ptr = self.castTag(.eu_payload_ptr).?.data;
- const err_union_val = (try err_union_ptr.pointerDeref(allocator)) orelse return null;
+ const err_union_ptr = val.castTag(.eu_payload_ptr).?.data;
+ const err_union_val = (try err_union_ptr.pointerDeref(arena)) orelse return null;
break :blk err_union_val.castTag(.eu_payload).?.data;
},
.opt_payload_ptr => blk: {
- const opt_ptr = self.castTag(.opt_payload_ptr).?.data;
- const opt_val = (try opt_ptr.pointerDeref(allocator)) orelse return null;
+ const opt_ptr = val.castTag(.opt_payload_ptr).?.data;
+ const opt_val = (try opt_ptr.pointerDeref(arena)) orelse return null;
break :blk opt_val.castTag(.opt_payload).?.data;
},
@@ -1582,24 +1591,33 @@ pub const Value = extern union {
return sub_val;
}
+ pub fn isComptimeMutablePtr(val: Value) bool {
+ return switch (val.tag()) {
+ .decl_ref_mut => true,
+ .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr),
+ .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr),
+ .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data),
+ .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data),
+
+ else => false,
+ };
+ }
+
/// Gets the decl referenced by this pointer. If the pointer does not point
/// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr),
/// this function returns null.
- pub fn pointerDecl(self: Value) ?*Module.Decl {
- return switch (self.tag()) {
- .decl_ref_mut => self.castTag(.decl_ref_mut).?.data.decl,
- .extern_fn, .decl_ref => self.cast(Payload.Decl).?.data,
- .function => self.castTag(.function).?.data.owner_decl,
- .variable => self.castTag(.variable).?.data.owner_decl,
+ pub fn pointerDecl(val: Value) ?*Module.Decl {
+ return switch (val.tag()) {
+ .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl,
+ .extern_fn, .decl_ref => val.cast(Payload.Decl).?.data,
+ .function => val.castTag(.function).?.data.owner_decl,
+ .variable => val.castTag(.variable).?.data.owner_decl,
else => null,
};
}
pub fn sliceLen(val: Value) u64 {
return switch (val.tag()) {
- .empty_array => 0,
- .bytes => val.castTag(.bytes).?.data.len,
- .array => val.castTag(.array).?.data.len,
.slice => val.castTag(.slice).?.data.len.toUnsignedInt(),
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
@@ -1615,17 +1633,23 @@ pub const Value = extern union {
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
- pub fn elemValue(self: Value, allocator: *Allocator, index: usize) error{OutOfMemory}!Value {
- switch (self.tag()) {
+ pub fn elemValue(val: Value, arena: *Allocator, index: usize) error{OutOfMemory}!Value {
+ switch (val.tag()) {
.empty_array => unreachable, // out of bounds array index
+ .empty_struct_value => unreachable, // out of bounds array index
+
+ .empty_array_sentinel => {
+ assert(index == 0); // The only valid index for an empty array with sentinel.
+ return val.castTag(.empty_array_sentinel).?.data;
+ },
- .bytes => return Tag.int_u64.create(allocator, self.castTag(.bytes).?.data[index]),
+ .bytes => return Tag.int_u64.create(arena, val.castTag(.bytes).?.data[index]),
// No matter the index; all the elements are the same!
- .repeated => return self.castTag(.repeated).?.data,
+ .repeated => return val.castTag(.repeated).?.data,
- .array => return self.castTag(.array).?.data[index],
- .slice => return self.castTag(.slice).?.data.ptr.elemValue(allocator, index),
+ .array => return val.castTag(.array).?.data[index],
+ .slice => return val.castTag(.slice).?.data.ptr.elemValue(arena, index),
else => unreachable,
}
@@ -2556,10 +2580,12 @@ pub const Value = extern union {
pub const base_tag = Tag.decl_ref_mut;
base: Payload = Payload{ .tag = base_tag },
- data: struct {
+ data: Data,
+
+ pub const Data = struct {
decl: *Module.Decl,
runtime_index: u32,
- },
+ };
};
pub const ElemPtr = struct {
@@ -2584,6 +2610,7 @@ pub const Value = extern union {
pub const Bytes = struct {
base: Payload,
+ /// Includes the sentinel, if any.
data: []const u8,
};
@@ -2706,6 +2733,8 @@ pub const Value = extern union {
pub const zero = initTag(.zero);
pub const one = initTag(.one);
pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base };
+ pub const undef = initTag(.undef);
+ pub const @"void" = initTag(.void_value);
};
var negative_one_payload: Value.Payload.I64 = .{
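A hedged, compiler-internal sketch (assuming this file's `Allocator` import and an arena from the caller) of how a `[2:0]u8` value looks under the updated convention, where the `array` payload stores len + 1 sub-values with the sentinel last:

    fn exampleSentinelArrayValue(arena: *Allocator) !Value {
        const elems = try arena.alloc(Value, 3); // 2 elements + 1 sentinel
        elems[0] = try Value.Tag.int_u64.create(arena, 'h');
        elems[1] = try Value.Tag.int_u64.create(arena, 'i');
        elems[2] = Value.zero;                   // the trailing sentinel value
        return Value.Tag.array.create(arena, elems);
    }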
src/Zir.zig
@@ -210,8 +210,8 @@ pub const Inst = struct {
/// `[N]T` syntax. No source location provided.
/// Uses the `bin` union field. lhs is length, rhs is element type.
array_type,
- /// `[N:S]T` syntax. No source location provided.
- /// Uses the `array_type_sentinel` field.
+ /// `[N:S]T` syntax. Source location is the array type expression node.
+ /// Uses the `pl_node` union field. Payload is `ArrayTypeSentinel`.
array_type_sentinel,
/// `@Vector` builtin.
/// Uses the `pl_node` union field with `Bin` payload.
@@ -1256,7 +1256,7 @@ pub const Inst = struct {
.array_cat = .pl_node,
.array_mul = .pl_node,
.array_type = .bin,
- .array_type_sentinel = .array_type_sentinel,
+ .array_type_sentinel = .pl_node,
.vector_type = .pl_node,
.elem_type = .un_node,
.indexable_ptr_len = .un_node,
@@ -2137,11 +2137,6 @@ pub const Inst = struct {
node: i32,
int: u64,
float: f64,
- array_type_sentinel: struct {
- len: Ref,
- /// index into extra, points to an `ArrayTypeSentinel`
- payload_index: u32,
- },
ptr_type_simple: struct {
is_allowzero: bool,
is_mutable: bool,
@@ -2245,7 +2240,6 @@ pub const Inst = struct {
node,
int,
float,
- array_type_sentinel,
ptr_type_simple,
ptr_type,
int_type,
@@ -2427,6 +2421,7 @@ pub const Inst = struct {
};
pub const ArrayTypeSentinel = struct {
+ len: Ref,
sentinel: Ref,
elem_type: Ref,
};
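A hypothetical decoding helper (not in the tree) mirroring how Sema now reads the re-encoded instruction: the `pl_node` data supplies `src_node` for the `[N:S]T` source locations, and the extra payload carries the three operand refs.

    fn decodeArrayTypeSentinel(code: Zir, inst: Zir.Inst.Index) Zir.Inst.ArrayTypeSentinel {
        const inst_data = code.instructions.items(.data)[inst].pl_node;
        return code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
    }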
test/behavior/array.zig
@@ -1,4 +1,5 @@
const std = @import("std");
+const builtin = @import("builtin");
const testing = std.testing;
const mem = std.mem;
const expect = testing.expect;
@@ -76,3 +77,30 @@ test "array len field" {
try expect(ptr.len == 4);
comptime try expect(ptr.len == 4);
}
+
+test "array with sentinels" {
+ const S = struct {
+ fn doTheTest(is_ct: bool) !void {
+ if (is_ct or builtin.zig_is_stage2) {
+ var zero_sized: [0:0xde]u8 = [_:0xde]u8{};
+ // Stage1 test coverage disabled at runtime because of
+ // https://github.com/ziglang/zig/issues/4372
+ try expect(zero_sized[0] == 0xde);
+ var reinterpreted = @ptrCast(*[1]u8, &zero_sized);
+ try expect(reinterpreted[0] == 0xde);
+ }
+ var arr: [3:0x55]u8 = undefined;
+ // Make sure the sentinel pointer is pointing after the last element.
+ if (!is_ct) {
+ const sentinel_ptr = @ptrToInt(&arr[3]);
+ const last_elem_ptr = @ptrToInt(&arr[2]);
+ try expect((sentinel_ptr - last_elem_ptr) == 1);
+ }
+ // Make sure the sentinel is writeable.
+ arr[3] = 0x55;
+ }
+ };
+
+ try S.doTheTest(false);
+ comptime try S.doTheTest(true);
+}
test/behavior/array_stage1.zig
@@ -4,33 +4,6 @@ const mem = std.mem;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
-test "array with sentinels" {
- const S = struct {
- fn doTheTest(is_ct: bool) !void {
- if (is_ct) {
- var zero_sized: [0:0xde]u8 = [_:0xde]u8{};
- // Disabled at runtime because of
- // https://github.com/ziglang/zig/issues/4372
- try expectEqual(@as(u8, 0xde), zero_sized[0]);
- var reinterpreted = @ptrCast(*[1]u8, &zero_sized);
- try expectEqual(@as(u8, 0xde), reinterpreted[0]);
- }
- var arr: [3:0x55]u8 = undefined;
- // Make sure the sentinel pointer is pointing after the last element
- if (!is_ct) {
- const sentinel_ptr = @ptrToInt(&arr[3]);
- const last_elem_ptr = @ptrToInt(&arr[2]);
- try expectEqual(@as(usize, 1), sentinel_ptr - last_elem_ptr);
- }
- // Make sure the sentinel is writeable
- arr[3] = 0x55;
- }
- };
-
- try S.doTheTest(false);
- comptime try S.doTheTest(true);
-}
-
test "void arrays" {
var array: [4]void = undefined;
array[0] = void{};