Commit 0c601965ab
Changed files (5)
lib
std
lib/std/enums.zig
@@ -32,7 +32,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
.fields = fields,
.decls = &[_]std.builtin.TypeInfo.Declaration{},
.is_tuple = false,
- }});
+ } });
}
/// Looks up the supplied fields in the given enum type.
@@ -70,7 +70,7 @@ pub fn values(comptime E: type) []const E {
test "std.enum.values" {
const E = extern enum { a, b, c, d = 0 };
- testing.expectEqualSlices(E, &.{.a, .b, .c, .d}, values(E));
+ testing.expectEqualSlices(E, &.{ .a, .b, .c, .d }, values(E));
}
/// Returns the set of all unique named values in the given enum, in
@@ -82,10 +82,10 @@ pub fn uniqueValues(comptime E: type) []const E {
test "std.enum.uniqueValues" {
const E = extern enum { a, b, c, d = 0, e, f = 3 };
- testing.expectEqualSlices(E, &.{.a, .b, .c, .f}, uniqueValues(E));
+ testing.expectEqualSlices(E, &.{ .a, .b, .c, .f }, uniqueValues(E));
const F = enum { a, b, c };
- testing.expectEqualSlices(F, &.{.a, .b, .c}, uniqueValues(F));
+ testing.expectEqualSlices(F, &.{ .a, .b, .c }, uniqueValues(F));
}
/// Returns the set of all unique field values in the given enum, in
@@ -102,8 +102,7 @@ pub fn uniqueFields(comptime E: type) []const EnumField {
}
var unique_fields: []const EnumField = &[_]EnumField{};
- outer:
- for (raw_fields) |candidate| {
+ outer: for (raw_fields) |candidate| {
for (unique_fields) |u| {
if (u.value == candidate.value)
continue :outer;
@@ -116,28 +115,25 @@ pub fn uniqueFields(comptime E: type) []const EnumField {
}
/// Determines the length of a direct-mapped enum array, indexed by
-/// @intCast(usize, @enumToInt(enum_value)). The enum must be exhaustive.
+/// @intCast(usize, @enumToInt(enum_value)).
+/// If the enum is non-exhaustive, the resulting length will only be enough
+/// to hold all explicit fields.
/// If the enum contains any fields with values that cannot be represented
/// by usize, a compile error is issued. The max_unused_slots parameter limits
/// the total number of items which have no matching enum key (holes in the enum
/// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots
/// must be at least 3, to allow unused slots 0, 3, and 4.
fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) comptime_int {
- const info = @typeInfo(E).Enum;
- if (!info.is_exhaustive) {
- @compileError("Cannot create direct array of non-exhaustive enum "++@typeName(E));
- }
-
var max_value: comptime_int = -1;
const max_usize: comptime_int = ~@as(usize, 0);
const fields = uniqueFields(E);
for (fields) |f| {
if (f.value < 0) {
- @compileError("Cannot create a direct enum array for "++@typeName(E)++", field ."++f.name++" has a negative value.");
+ @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " has a negative value.");
}
if (f.value > max_value) {
if (f.value > max_usize) {
- @compileError("Cannot create a direct enum array for "++@typeName(E)++", field ."++f.name++" is larger than the max value of usize.");
+ @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " is larger than the max value of usize.");
}
max_value = f.value;
}
@@ -147,14 +143,16 @@ fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int)
if (unused_slots > max_unused_slots) {
const unused_str = std.fmt.comptimePrint("{d}", .{unused_slots});
const allowed_str = std.fmt.comptimePrint("{d}", .{max_unused_slots});
- @compileError("Cannot create a direct enum array for "++@typeName(E)++". It would have "++unused_str++" unused slots, but only "++allowed_str++" are allowed.");
+ @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ". It would have " ++ unused_str ++ " unused slots, but only " ++ allowed_str ++ " are allowed.");
}
return max_value + 1;
}
/// Initializes an array of Data which can be indexed by
-/// @intCast(usize, @enumToInt(enum_value)). The enum must be exhaustive.
+/// @intCast(usize, @enumToInt(enum_value)).
+/// If the enum is non-exhaustive, the resulting array will only be large enough
+/// to hold all explicit fields.
/// If the enum contains any fields with values that cannot be represented
/// by usize, a compile error is issued. The max_unused_slots parameter limits
/// the total number of items which have no matching enum key (holes in the enum
@@ -243,9 +241,9 @@ pub fn nameCast(comptime E: type, comptime value: anytype) E {
if (@hasField(E, n)) {
return @field(E, n);
}
- @compileError("Enum "++@typeName(E)++" has no field named "++n);
+ @compileError("Enum " ++ @typeName(E) ++ " has no field named " ++ n);
}
- @compileError("Cannot cast from "++@typeName(@TypeOf(value))++" to "++@typeName(E));
+ @compileError("Cannot cast from " ++ @typeName(@TypeOf(value)) ++ " to " ++ @typeName(E));
}
}
@@ -256,7 +254,7 @@ test "std.enums.nameCast" {
testing.expectEqual(A.a, nameCast(A, A.a));
testing.expectEqual(A.a, nameCast(A, B.a));
testing.expectEqual(A.a, nameCast(A, "a"));
- testing.expectEqual(A.a, nameCast(A, @as(*const[1]u8, "a")));
+ testing.expectEqual(A.a, nameCast(A, @as(*const [1]u8, "a")));
testing.expectEqual(A.a, nameCast(A, @as([:0]const u8, "a")));
testing.expectEqual(A.a, nameCast(A, @as([]const u8, "a")));
@@ -398,12 +396,12 @@ pub fn EnumArray(comptime E: type, comptime V: type) type {
pub fn NoExtension(comptime Self: type) type {
return NoExt;
}
-const NoExt = struct{};
+const NoExt = struct {};
/// A set type with an Indexer mapping from keys to indices.
/// Presence or absence is stored as a dense bitfield. This
/// type does no allocation and can be copied by value.
-pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
+pub fn IndexedSet(comptime I: type, comptime Ext: fn (type) type) type {
comptime ensureIndexer(I);
return struct {
const Self = @This();
@@ -422,7 +420,7 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
bits: BitSet = BitSet.initEmpty(),
- /// Returns a set containing all possible keys.
+ /// Returns a set containing all possible keys.
pub fn initFull() Self {
return .{ .bits = BitSet.initFull() };
}
@@ -492,7 +490,8 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
pub fn next(self: *Iterator) ?Key {
return if (self.inner.next()) |index|
Indexer.keyForIndex(index)
- else null;
+ else
+ null;
}
};
};
@@ -501,7 +500,7 @@ pub fn IndexedSet(comptime I: type, comptime Ext: fn(type)type) type {
/// A map from keys to values, using an index lookup. Uses a
/// bitfield to track presence and a dense array of values.
/// This type does no allocation and can be copied by value.
-pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type) type {
+pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type {
comptime ensureIndexer(I);
return struct {
const Self = @This();
@@ -652,7 +651,8 @@ pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type
.key = Indexer.keyForIndex(index),
.value = &self.values[index],
}
- else null;
+ else
+ null;
}
};
};
@@ -660,7 +660,7 @@ pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: fn(type)type
/// A dense array of values, using an indexed lookup.
/// This type does no allocation and can be copied by value.
-pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn(type)type) type {
+pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: fn (type) type) type {
comptime ensureIndexer(I);
return struct {
const Self = @This();
@@ -769,9 +769,9 @@ pub fn ensureIndexer(comptime T: type) void {
if (!@hasDecl(T, "count")) @compileError("Indexer must have decl count: usize.");
if (@TypeOf(T.count) != usize) @compileError("Indexer.count must be a usize.");
if (!@hasDecl(T, "indexOf")) @compileError("Indexer.indexOf must be a fn(Key)usize.");
- if (@TypeOf(T.indexOf) != fn(T.Key)usize) @compileError("Indexer must have decl indexOf: fn(Key)usize.");
+ if (@TypeOf(T.indexOf) != fn (T.Key) usize) @compileError("Indexer must have decl indexOf: fn(Key)usize.");
if (!@hasDecl(T, "keyForIndex")) @compileError("Indexer must have decl keyForIndex: fn(usize)Key.");
- if (@TypeOf(T.keyForIndex) != fn(usize)T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key.");
+ if (@TypeOf(T.keyForIndex) != fn (usize) T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key.");
}
}
@@ -802,14 +802,18 @@ pub fn EnumIndexer(comptime E: type) type {
return struct {
pub const Key = E;
pub const count: usize = 0;
- pub fn indexOf(e: E) usize { unreachable; }
- pub fn keyForIndex(i: usize) E { unreachable; }
+ pub fn indexOf(e: E) usize {
+ unreachable;
+ }
+ pub fn keyForIndex(i: usize) E {
+ unreachable;
+ }
};
}
std.sort.sort(EnumField, &fields, {}, ascByValue);
const min = fields[0].value;
- const max = fields[fields.len-1].value;
- if (max - min == fields.len-1) {
+ const max = fields[fields.len - 1].value;
+ if (max - min == fields.len - 1) {
return struct {
pub const Key = E;
pub const count = fields.len;
@@ -844,7 +848,7 @@ pub fn EnumIndexer(comptime E: type) type {
}
test "std.enums.EnumIndexer dense zeroed" {
- const E = enum{ b = 1, a = 0, c = 2 };
+ const E = enum { b = 1, a = 0, c = 2 };
const Indexer = EnumIndexer(E);
ensureIndexer(Indexer);
testing.expectEqual(E, Indexer.Key);
@@ -908,7 +912,7 @@ test "std.enums.EnumIndexer sparse" {
}
test "std.enums.EnumIndexer repeats" {
- const E = extern enum{ a = -2, c = 6, b = 4, b2 = 4 };
+ const E = extern enum { a = -2, c = 6, b = 4, b2 = 4 };
const Indexer = EnumIndexer(E);
ensureIndexer(Indexer);
testing.expectEqual(E, Indexer.Key);
@@ -957,7 +961,8 @@ test "std.enums.EnumSet" {
}
var mut = Set.init(.{
- .a=true, .c=true,
+ .a = true,
+ .c = true,
});
testing.expectEqual(@as(usize, 2), mut.count());
testing.expectEqual(true, mut.contains(.a));
@@ -986,7 +991,7 @@ test "std.enums.EnumSet" {
testing.expectEqual(@as(?E, null), it.next());
}
- mut.toggleSet(Set.init(.{ .a=true, .b=true }));
+ mut.toggleSet(Set.init(.{ .a = true, .b = true }));
testing.expectEqual(@as(usize, 2), mut.count());
testing.expectEqual(true, mut.contains(.a));
testing.expectEqual(false, mut.contains(.b));
@@ -994,7 +999,7 @@ test "std.enums.EnumSet" {
testing.expectEqual(true, mut.contains(.d));
testing.expectEqual(true, mut.contains(.e)); // aliases a
- mut.setUnion(Set.init(.{ .a=true, .b=true }));
+ mut.setUnion(Set.init(.{ .a = true, .b = true }));
testing.expectEqual(@as(usize, 3), mut.count());
testing.expectEqual(true, mut.contains(.a));
testing.expectEqual(true, mut.contains(.b));
@@ -1009,7 +1014,7 @@ test "std.enums.EnumSet" {
testing.expectEqual(false, mut.contains(.c));
testing.expectEqual(true, mut.contains(.d));
- mut.setIntersection(Set.init(.{ .a=true, .b=true }));
+ mut.setIntersection(Set.init(.{ .a = true, .b = true }));
testing.expectEqual(@as(usize, 1), mut.count());
testing.expectEqual(true, mut.contains(.a));
testing.expectEqual(false, mut.contains(.b));
@@ -1072,7 +1077,7 @@ test "std.enums.EnumArray sized" {
const undef = Array.initUndefined();
var inst = Array.initFill(5);
const inst2 = Array.init(.{ .a = 1, .b = 2, .c = 3, .d = 4 });
- const inst3 = Array.initDefault(6, .{.b = 4, .c = 2});
+ const inst3 = Array.initDefault(6, .{ .b = 4, .c = 2 });
testing.expectEqual(@as(usize, 5), inst.get(.a));
testing.expectEqual(@as(usize, 5), inst.get(.b));
@@ -1272,10 +1277,12 @@ test "std.enums.EnumMap sized" {
var iter = a.iterator();
const Entry = Map.Entry;
testing.expectEqual(@as(?Entry, Entry{
- .key = .b, .value = &a.values[1],
+ .key = .b,
+ .value = &a.values[1],
}), iter.next());
testing.expectEqual(@as(?Entry, Entry{
- .key = .d, .value = &a.values[3],
+ .key = .d,
+ .value = &a.values[3],
}), iter.next());
testing.expectEqual(@as(?Entry, null), iter.next());
}
src/astgen.zig
@@ -58,11 +58,8 @@ pub const ResultLoc = union(enum) {
};
};
-const void_inst: zir.Inst.Ref = @enumToInt(zir.Const.void_value);
-
pub fn typeExpr(mod: *Module, scope: *Scope, type_node: ast.Node.Index) InnerError!zir.Inst.Ref {
- const type_rl: ResultLoc = .{ .ty = @enumToInt(zir.Const.type_type) };
- return expr(mod, scope, type_rl, type_node);
+ return expr(mod, scope, .{ .ty = .type_type }, type_node);
}
fn lvalExpr(mod: *Module, scope: *Scope, node: ast.Node.Index) InnerError!zir.Inst.Ref {
@@ -291,59 +288,59 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) In
.assign => {
try assign(mod, scope, node);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_bit_and => {
try assignOp(mod, scope, node, .bit_and);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_bit_or => {
try assignOp(mod, scope, node, .bit_or);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_bit_shift_left => {
try assignOp(mod, scope, node, .shl);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_bit_shift_right => {
try assignOp(mod, scope, node, .shr);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_bit_xor => {
try assignOp(mod, scope, node, .xor);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_div => {
try assignOp(mod, scope, node, .div);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_sub => {
try assignOp(mod, scope, node, .sub);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_sub_wrap => {
try assignOp(mod, scope, node, .subwrap);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_mod => {
try assignOp(mod, scope, node, .mod_rem);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_add => {
try assignOp(mod, scope, node, .add);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_add_wrap => {
try assignOp(mod, scope, node, .addwrap);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_mul => {
try assignOp(mod, scope, node, .mul);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.assign_mul_wrap => {
try assignOp(mod, scope, node, .mulwrap);
- return rvalue(mod, scope, rl, void_inst, node);
+ return rvalue(mod, scope, rl, .void_value, node);
},
.add => return simpleBinOp(mod, scope, rl, node, .add),
@@ -450,22 +447,10 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) In
const result = try expr(mod, scope, .ref, node_datas[node].lhs);
return rvalue(mod, scope, rl, result, node);
},
- .undefined_literal => {
- const result = @enumToInt(zir.Const.undef);
- return rvalue(mod, scope, rl, result, node);
- },
- .true_literal => {
- const result = @enumToInt(zir.Const.bool_true);
- return rvalue(mod, scope, rl, result, node);
- },
- .false_literal => {
- const result = @enumToInt(zir.Const.bool_false);
- return rvalue(mod, scope, rl, result, node);
- },
- .null_literal => {
- const result = @enumToInt(zir.Const.null_value);
- return rvalue(mod, scope, rl, result, node);
- },
+ .undefined_literal => return rvalue(mod, scope, rl, .undef, node),
+ .true_literal => return rvalue(mod, scope, rl, .bool_true, node),
+ .false_literal => return rvalue(mod, scope, rl, .bool_false, node),
+ .null_literal => return rvalue(mod, scope, rl, .null_value, node),
.optional_type => {
const operand = try typeExpr(mod, scope, node_datas[node].lhs);
const result = try gz.addUnNode(.optional_type, operand, node);
@@ -830,7 +815,7 @@ pub fn blockExpr(
}
try blockExprStmts(mod, scope, block_node, statements);
- return rvalue(mod, scope, rl, void_inst, block_node);
+ return rvalue(mod, scope, rl, .void_value, block_node);
}
fn checkLabelRedefinition(mod: *Module, parent_scope: *Scope, label: ast.TokenIndex) !void {
@@ -935,13 +920,13 @@ fn labeledBlockExpr(
// The code took advantage of the result location as a pointer.
// Turn the break instruction operands into void.
for (block_scope.labeled_breaks.items) |br| {
- zir_datas[br].@"break".operand = @enumToInt(zir.Const.void_value);
+ zir_datas[br].@"break".operand = .void_value;
}
// TODO technically not needed since we changed the tag to break_void but
// would be better still to elide the ones that are in this list.
try block_scope.setBlockBody(block_inst);
- return gz.zir_code.ref_start_index + block_inst;
+ return zir.Inst.Ref.fromIndex(block_inst, gz.zir_code.param_count);
},
.break_operand => {
// All break operands are values that did not use the result location pointer.
@@ -954,7 +939,7 @@ fn labeledBlockExpr(
// would be better still to elide the ones that are in this list.
}
try block_scope.setBlockBody(block_inst);
- const block_ref = gz.zir_code.ref_start_index + block_inst;
+ const block_ref = zir.Inst.Ref.fromIndex(block_inst, gz.zir_code.param_count);
switch (rl) {
.ref => return block_ref,
else => return rvalue(mod, parent_scope, rl, block_ref, block_node),
@@ -1006,8 +991,7 @@ fn blockExprStmts(
// We need to emit an error if the result is not `noreturn` or `void`, but
// we want to avoid adding the ZIR instruction if possible for performance.
const maybe_unused_result = try expr(mod, scope, .none, statement);
- const elide_check = if (maybe_unused_result >= gz.zir_code.ref_start_index) b: {
- const inst = maybe_unused_result - gz.zir_code.ref_start_index;
+ const elide_check = if (maybe_unused_result.toIndex(gz.zir_code.param_count)) |inst| b: {
// Note that this array becomes invalid after appending more items to it
// in the above while loop.
const zir_tags = gz.zir_code.instructions.items(.tag);
@@ -1167,10 +1151,10 @@ fn blockExprStmts(
=> break :b true,
}
} else switch (maybe_unused_result) {
- @enumToInt(zir.Const.unused) => unreachable,
+ .none => unreachable,
- @enumToInt(zir.Const.void_value),
- @enumToInt(zir.Const.unreachable_value),
+ .void_value,
+ .unreachable_value,
=> true,
else => false,
@@ -1283,8 +1267,8 @@ fn varDecl(
};
defer init_scope.instructions.deinit(mod.gpa);
- var resolve_inferred_alloc: zir.Inst.Ref = 0;
- var opt_type_inst: zir.Inst.Ref = 0;
+ var resolve_inferred_alloc: zir.Inst.Ref = .none;
+ var opt_type_inst: zir.Inst.Ref = .none;
if (var_decl.ast.type_node != 0) {
const type_inst = try typeExpr(mod, &init_scope.base, var_decl.ast.type_node);
opt_type_inst = type_inst;
@@ -1308,14 +1292,14 @@ fn varDecl(
const expected_len = parent_zir.items.len + init_scope.instructions.items.len - 2;
try parent_zir.ensureCapacity(mod.gpa, expected_len);
for (init_scope.instructions.items) |src_inst| {
- if (wzc.ref_start_index + src_inst == init_scope.rl_ptr) continue;
+ if (zir.Inst.Ref.fromIndex(src_inst, wzc.param_count) == init_scope.rl_ptr) continue;
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) continue;
}
parent_zir.appendAssumeCapacity(src_inst);
}
assert(parent_zir.items.len == expected_len);
- const casted_init = if (opt_type_inst != 0)
+ const casted_init = if (opt_type_inst != .none)
try gz.addPlNode(.as_node, var_decl.ast.type_node, zir.Inst.As{
.dest_type = opt_type_inst,
.operand = init_inst,
@@ -1348,7 +1332,7 @@ fn varDecl(
parent_zir.appendAssumeCapacity(src_inst);
}
assert(parent_zir.items.len == expected_len);
- if (resolve_inferred_alloc != 0) {
+ if (resolve_inferred_alloc != .none) {
_ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node);
}
const sub_scope = try block_arena.create(Scope.LocalPtr);
@@ -1362,7 +1346,7 @@ fn varDecl(
return &sub_scope.base;
},
.keyword_var => {
- var resolve_inferred_alloc: zir.Inst.Ref = 0;
+ var resolve_inferred_alloc: zir.Inst.Ref = .none;
const var_data: struct {
result_loc: ResultLoc,
alloc: zir.Inst.Ref,
@@ -1377,7 +1361,7 @@ fn varDecl(
break :a .{ .alloc = alloc, .result_loc = .{ .inferred_ptr = alloc } };
};
const init_inst = try expr(mod, scope, var_data.result_loc, var_decl.ast.init_node);
- if (resolve_inferred_alloc != 0) {
+ if (resolve_inferred_alloc != .none) {
_ = try gz.addUnNode(.resolve_inferred_alloc, resolve_inferred_alloc, node);
}
const sub_scope = try block_arena.create(Scope.LocalPtr);
@@ -1440,7 +1424,7 @@ fn boolNot(mod: *Module, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) Inn
const tree = scope.tree();
const node_datas = tree.nodes.items(.data);
- const operand = try expr(mod, scope, .{ .ty = @enumToInt(zir.Const.bool_type) }, node_datas[node].lhs);
+ const operand = try expr(mod, scope, .{ .ty = .bool_type }, node_datas[node].lhs);
const gz = scope.getGenZir();
const result = try gz.addUnNode(.bool_not, operand, node);
return rvalue(mod, scope, rl, result, node);
@@ -1501,10 +1485,10 @@ fn ptrType(
return rvalue(mod, scope, rl, result, node);
}
- var sentinel_ref: zir.Inst.Ref = 0;
- var align_ref: zir.Inst.Ref = 0;
- var bit_start_ref: zir.Inst.Ref = 0;
- var bit_end_ref: zir.Inst.Ref = 0;
+ var sentinel_ref: zir.Inst.Ref = .none;
+ var align_ref: zir.Inst.Ref = .none;
+ var bit_start_ref: zir.Inst.Ref = .none;
+ var bit_end_ref: zir.Inst.Ref = .none;
var trailing_count: u32 = 0;
if (ptr_info.ast.sentinel != 0) {
@@ -1529,24 +1513,28 @@ fn ptrType(
@typeInfo(zir.Inst.PtrType).Struct.fields.len + trailing_count);
const payload_index = gz.zir_code.addExtraAssumeCapacity(zir.Inst.PtrType{ .elem_type = elem_type });
- if (sentinel_ref != 0) gz.zir_code.extra.appendAssumeCapacity(sentinel_ref);
- if (align_ref != 0) gz.zir_code.extra.appendAssumeCapacity(align_ref);
- if (bit_start_ref != 0) {
- gz.zir_code.extra.appendAssumeCapacity(bit_start_ref);
- gz.zir_code.extra.appendAssumeCapacity(bit_end_ref);
+ if (sentinel_ref != .none) {
+ gz.zir_code.extra.appendAssumeCapacity(@enumToInt(sentinel_ref));
+ }
+ if (align_ref != .none) {
+ gz.zir_code.extra.appendAssumeCapacity(@enumToInt(align_ref));
+ }
+ if (bit_start_ref != .none) {
+ gz.zir_code.extra.appendAssumeCapacity(@enumToInt(bit_start_ref));
+ gz.zir_code.extra.appendAssumeCapacity(@enumToInt(bit_end_ref));
}
const new_index = @intCast(zir.Inst.Index, gz.zir_code.instructions.len);
- const result = new_index + gz.zir_code.ref_start_index;
+ const result = zir.Inst.Ref.fromIndex(new_index, gz.zir_code.param_count);
gz.zir_code.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{
.ptr_type = .{
.flags = .{
.is_allowzero = ptr_info.allowzero_token != null,
.is_mutable = ptr_info.const_token == null,
.is_volatile = ptr_info.volatile_token != null,
- .has_sentinel = sentinel_ref != 0,
- .has_align = align_ref != 0,
- .has_bit_range = bit_start_ref != 0,
+ .has_sentinel = sentinel_ref != .none,
+ .has_align = align_ref != .none,
+ .has_bit_range = bit_start_ref != .none,
},
.size = ptr_info.size,
.payload_index = payload_index,
@@ -1561,10 +1549,9 @@ fn arrayType(mod: *Module, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) !
const tree = scope.tree();
const node_datas = tree.nodes.items(.data);
const gz = scope.getGenZir();
- const usize_type = @enumToInt(zir.Const.usize_type);
// TODO check for [_]T
- const len = try expr(mod, scope, .{ .ty = usize_type }, node_datas[node].lhs);
+ const len = try expr(mod, scope, .{ .ty = .usize_type }, node_datas[node].lhs);
const elem_type = try typeExpr(mod, scope, node_datas[node].rhs);
const result = try gz.addBin(.array_type, len, elem_type);
@@ -1576,10 +1563,9 @@ fn arrayTypeSentinel(mod: *Module, scope: *Scope, rl: ResultLoc, node: ast.Node.
const node_datas = tree.nodes.items(.data);
const extra = tree.extraData(node_datas[node].rhs, ast.Node.ArrayTypeSentinel);
const gz = scope.getGenZir();
- const usize_type = @enumToInt(zir.Const.usize_type);
// TODO check for [_]T
- const len = try expr(mod, scope, .{ .ty = usize_type }, node_datas[node].lhs);
+ const len = try expr(mod, scope, .{ .ty = .usize_type }, node_datas[node].lhs);
const elem_type = try typeExpr(mod, scope, extra.elem_type);
const sentinel = try expr(mod, scope, .{ .ty = elem_type }, extra.sentinel);
@@ -1784,7 +1770,7 @@ fn finishThenElseBlock(
} },
});
}
- const elide_else = if (else_result != 0) wzc.refIsNoReturn(else_result) else false;
+ const elide_else = if (else_result != .none) wzc.refIsNoReturn(else_result) else false;
if (!elide_else) {
_ = try else_scope.add(.{
.tag = .break_void_node,
@@ -1796,7 +1782,7 @@ fn finishThenElseBlock(
}
assert(!strat.elide_store_to_block_ptr_instructions);
try setCondBrPayload(condbr, cond, then_scope, else_scope);
- return wzc.ref_start_index + main_block;
+ return zir.Inst.Ref.fromIndex(main_block, wzc.param_count);
},
.break_operand => {
if (!wzc.refIsNoReturn(then_result)) {
@@ -1808,7 +1794,7 @@ fn finishThenElseBlock(
} },
});
}
- if (else_result != 0) {
+ if (else_result != .none) {
if (!wzc.refIsNoReturn(else_result)) {
_ = try else_scope.add(.{
.tag = .@"break",
@@ -1832,7 +1818,7 @@ fn finishThenElseBlock(
} else {
try setCondBrPayload(condbr, cond, then_scope, else_scope);
}
- const block_ref = wzc.ref_start_index + main_block;
+ const block_ref = zir.Inst.Ref.fromIndex(main_block, wzc.param_count);
switch (rl) {
.ref => return block_ref,
else => return rvalue(mod, parent_scope, rl, block_ref, node),
@@ -1981,9 +1967,8 @@ fn boolBinOp(
) InnerError!zir.Inst.Ref {
const gz = scope.getGenZir();
const node_datas = gz.tree().nodes.items(.data);
- const bool_type = @enumToInt(zir.Const.bool_type);
- const lhs = try expr(mod, scope, .{ .ty = bool_type }, node_datas[node].lhs);
+ const lhs = try expr(mod, scope, .{ .ty = .bool_type }, node_datas[node].lhs);
const bool_br = try gz.addBoolBr(zir_tag, lhs);
var rhs_scope: Scope.GenZir = .{
@@ -1992,11 +1977,11 @@ fn boolBinOp(
.force_comptime = gz.force_comptime,
};
defer rhs_scope.instructions.deinit(mod.gpa);
- const rhs = try expr(mod, &rhs_scope.base, .{ .ty = bool_type }, node_datas[node].rhs);
+ const rhs = try expr(mod, &rhs_scope.base, .{ .ty = .bool_type }, node_datas[node].rhs);
_ = try rhs_scope.addUnNode(.break_flat, rhs, node);
try rhs_scope.setBoolBrBody(bool_br);
- const block_ref = gz.zir_code.ref_start_index + bool_br;
+ const block_ref = zir.Inst.Ref.fromIndex(bool_br, gz.zir_code.param_count);
return rvalue(mod, scope, rl, block_ref, node);
}
@@ -2024,8 +2009,7 @@ fn ifExpr(
} else if (if_full.payload_token) |payload_token| {
return mod.failTok(scope, payload_token, "TODO implement if optional", .{});
} else {
- const bool_rl: ResultLoc = .{ .ty = @enumToInt(zir.Const.bool_type) };
- break :c try expr(mod, &block_scope.base, bool_rl, if_full.ast.cond_expr);
+ break :c try expr(mod, &block_scope.base, .{ .ty = .bool_type }, if_full.ast.cond_expr);
}
};
@@ -2073,7 +2057,7 @@ fn ifExpr(
};
} else .{
.src = if_full.ast.then_expr,
- .result = 0,
+ .result = .none,
};
return finishThenElseBlock(
@@ -2185,7 +2169,7 @@ fn whileExpr(
} else if (while_full.payload_token) |payload_token| {
return mod.failTok(scope, payload_token, "TODO implement while optional", .{});
} else {
- const bool_type_rl: ResultLoc = .{ .ty = @enumToInt(zir.Const.bool_type) };
+ const bool_type_rl: ResultLoc = .{ .ty = .bool_type };
break :c try expr(mod, &continue_scope.base, bool_type_rl, while_full.ast.cond_expr);
}
};
@@ -2200,8 +2184,7 @@ fn whileExpr(
// and there are no `continue` statements.
// The "repeat" at the end of a loop body is implied.
if (while_full.ast.cont_expr != 0) {
- const void_type_rl: ResultLoc = .{ .ty = @enumToInt(zir.Const.void_type) };
- _ = try expr(mod, &loop_scope.base, void_type_rl, while_full.ast.cont_expr);
+ _ = try expr(mod, &loop_scope.base, .{ .ty = .void_type }, while_full.ast.cont_expr);
}
const is_inline = while_full.inline_token != null;
const repeat_tag: zir.Inst.Tag = if (is_inline) .repeat_inline else .repeat;
@@ -2251,7 +2234,7 @@ fn whileExpr(
};
} else .{
.src = while_full.ast.then_expr,
- .result = 0,
+ .result = .none,
};
if (loop_scope.label) |some| {
@@ -2838,7 +2821,7 @@ fn ret(mod: *Module, scope: *Scope, node: ast.Node.Index) InnerError!zir.Inst.Re
.ty = try gz.addNode(.ret_type, node),
};
break :operand try expr(mod, scope, rl, operand_node);
- } else void_inst;
+ } else .void_value;
return gz.addUnNode(.ret_node, operand, node);
}
@@ -2862,8 +2845,8 @@ fn identifier(
return mod.failNode(scope, ident, "TODO implement '_' identifier", .{});
}
- if (simple_types.get(ident_name)) |zir_const_tag| {
- return rvalue(mod, scope, rl, @enumToInt(zir_const_tag), ident);
+ if (simple_types.get(ident_name)) |zir_const_ref| {
+ return rvalue(mod, scope, rl, zir_const_ref, ident);
}
if (ident_name.len >= 2) integer: {
@@ -3028,9 +3011,9 @@ fn integerLiteral(
const prefixed_bytes = tree.tokenSlice(int_token);
const gz = scope.getGenZir();
if (std.fmt.parseInt(u64, prefixed_bytes, 0)) |small_int| {
- const result: zir.Inst.Index = switch (small_int) {
- 0 => @enumToInt(zir.Const.zero),
- 1 => @enumToInt(zir.Const.one),
+ const result: zir.Inst.Ref = switch (small_int) {
+ 0 => .zero,
+ 1 => .one,
else => try gz.addInt(small_int),
};
return rvalue(mod, scope, rl, result, node);
@@ -3078,14 +3061,11 @@ fn asmExpr(
const node_datas = tree.nodes.items(.data);
const gz = scope.getGenZir();
- const str_type = @enumToInt(zir.Const.const_slice_u8_type);
- const str_type_rl: ResultLoc = .{ .ty = str_type };
- const asm_source = try expr(mod, scope, str_type_rl, full.ast.template);
+ const asm_source = try expr(mod, scope, .{ .ty = .const_slice_u8_type }, full.ast.template);
if (full.outputs.len != 0) {
return mod.failTok(scope, full.ast.asm_token, "TODO implement asm with an output", .{});
}
- const return_type = @enumToInt(zir.Const.void_type);
const constraints = try arena.alloc(u32, full.inputs.len);
const args = try arena.alloc(zir.Inst.Ref, full.inputs.len);
@@ -3098,22 +3078,21 @@ fn asmExpr(
try mod.parseStrLit(scope, constraint_token, string_bytes, token_bytes, 0);
try string_bytes.append(mod.gpa, 0);
- const usize_rl: ResultLoc = .{ .ty = @enumToInt(zir.Const.usize_type) };
- args[i] = try expr(mod, scope, usize_rl, node_datas[input].lhs);
+ args[i] = try expr(mod, scope, .{ .ty = .usize_type }, node_datas[input].lhs);
}
const tag: zir.Inst.Tag = if (full.volatile_token != null) .asm_volatile else .@"asm";
const result = try gz.addPlNode(tag, node, zir.Inst.Asm{
.asm_source = asm_source,
- .return_type = return_type,
- .output = 0,
+ .return_type = .void_type,
+ .output = .none,
.args_len = @intCast(u32, full.inputs.len),
.clobbers_len = 0, // TODO implement asm clobbers
});
try gz.zir_code.extra.ensureCapacity(mod.gpa, gz.zir_code.extra.items.len +
args.len + constraints.len);
- gz.zir_code.extra.appendSliceAssumeCapacity(args);
+ gz.zir_code.extra.appendSliceAssumeCapacity(mem.bytesAsSlice(u32, mem.sliceAsBytes(args)));
gz.zir_code.extra.appendSliceAssumeCapacity(constraints);
return rvalue(mod, scope, rl, result, node);
@@ -3185,7 +3164,7 @@ fn asRlPtr(
const expected_len = parent_zir.items.len + as_scope.instructions.items.len - 2;
try parent_zir.ensureCapacity(mod.gpa, expected_len);
for (as_scope.instructions.items) |src_inst| {
- if (wzc.ref_start_index + src_inst == as_scope.rl_ptr) continue;
+ if (zir.Inst.Ref.fromIndex(src_inst, wzc.param_count) == as_scope.rl_ptr) continue;
if (zir_tags[src_inst] == .store_to_block_ptr) {
if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue;
}
@@ -3272,11 +3251,12 @@ fn typeOf(
}
const arena = scope.arena();
var items = try arena.alloc(zir.Inst.Ref, params.len);
- for (params) |param, param_i|
+ for (params) |param, param_i| {
items[param_i] = try expr(mod, scope, .none, param);
+ }
const result = try gz.addPlNode(.typeof_peer, node, zir.Inst.MultiOp{ .operands_len = @intCast(u32, params.len) });
- try gz.zir_code.extra.appendSlice(gz.zir_code.gpa, items);
+ try gz.zir_code.extra.appendSlice(gz.zir_code.gpa, mem.bytesAsSlice(u32, mem.sliceAsBytes(items)));
return rvalue(mod, scope, rl, result, node);
}
@@ -3351,8 +3331,7 @@ fn builtinCall(
return rvalue(mod, scope, rl, result, node);
},
.set_eval_branch_quota => {
- const u32_rl: ResultLoc = .{ .ty = @enumToInt(zir.Const.u32_type) };
- const quota = try expr(mod, scope, u32_rl, params[0]);
+ const quota = try expr(mod, scope, .{ .ty = .u32_type }, params[0]);
const result = try gz.addUnNode(.set_eval_branch_quota, quota, node);
return rvalue(mod, scope, rl, result, node);
},
@@ -3498,7 +3477,7 @@ fn callExpr(
}
const lhs = try expr(mod, scope, .none, call.ast.fn_expr);
- const args = try mod.gpa.alloc(zir.Inst.Index, call.ast.params.len);
+ const args = try mod.gpa.alloc(zir.Inst.Ref, call.ast.params.len);
defer mod.gpa.free(args);
const gz = scope.getGenZir();
@@ -3517,7 +3496,7 @@ fn callExpr(
true => .async_kw,
false => .auto,
};
- const result: zir.Inst.Index = res: {
+ const result: zir.Inst.Ref = res: {
const tag: zir.Inst.Tag = switch (modifier) {
.auto => switch (args.len == 0) {
true => break :res try gz.addUnNode(.call_none, lhs, node),
@@ -3536,7 +3515,7 @@ fn callExpr(
return rvalue(mod, scope, rl, result, node); // TODO function call with result location
}
-pub const simple_types = std.ComptimeStringMap(zir.Const, .{
+pub const simple_types = std.ComptimeStringMap(zir.Inst.Ref, .{
.{ "u8", .u8_type },
.{ "i8", .i8_type },
.{ "u16", .u16_type },
src/Module.zig
@@ -914,16 +914,16 @@ pub const Scope = struct {
parent: *Scope,
/// All `GenZir` scopes for the same ZIR share this.
zir_code: *WipZirCode,
- /// Keeps track of the list of instructions in this scope only. References
+ /// Keeps track of the list of instructions in this scope only. Indexes
/// to instructions in `zir_code`.
- instructions: std.ArrayListUnmanaged(zir.Inst.Ref) = .{},
+ instructions: std.ArrayListUnmanaged(zir.Inst.Index) = .{},
label: ?Label = null,
break_block: zir.Inst.Index = 0,
continue_block: zir.Inst.Index = 0,
/// Only valid when setBlockResultLoc is called.
break_result_loc: astgen.ResultLoc = undefined,
/// When a block has a pointer result location, here it is.
- rl_ptr: zir.Inst.Ref = 0,
+ rl_ptr: zir.Inst.Ref = .none,
/// Keeps track of how many branches of a block did not actually
/// consume the result location. astgen uses this to figure out
/// whether to rely on break instructions or writing to the result
@@ -1001,8 +1001,8 @@ pub const Scope = struct {
ret_ty: zir.Inst.Ref,
cc: zir.Inst.Ref,
}) !zir.Inst.Ref {
- assert(args.ret_ty != 0);
- assert(args.cc != 0);
+ assert(args.ret_ty != .none);
+ assert(args.cc != .none);
const gpa = gz.zir_code.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.zir_code.instructions.ensureCapacity(gpa, gz.zir_code.instructions.len + 1);
@@ -1013,7 +1013,7 @@ pub const Scope = struct {
.cc = args.cc,
.param_types_len = @intCast(u32, args.param_types.len),
});
- gz.zir_code.extra.appendSliceAssumeCapacity(args.param_types);
+ gz.zir_code.extra.appendSliceAssumeCapacity(mem.bytesAsSlice(u32, mem.sliceAsBytes(args.param_types)));
const new_index = @intCast(zir.Inst.Index, gz.zir_code.instructions.len);
gz.zir_code.instructions.appendAssumeCapacity(.{
@@ -1024,7 +1024,7 @@ pub const Scope = struct {
} },
});
gz.instructions.appendAssumeCapacity(new_index);
- return new_index + gz.zir_code.ref_start_index;
+ return zir.Inst.Ref.fromIndex(new_index, gz.zir_code.param_count);
}
pub fn addFnType(
@@ -1033,7 +1033,7 @@ pub const Scope = struct {
ret_ty: zir.Inst.Ref,
param_types: []const zir.Inst.Ref,
) !zir.Inst.Ref {
- assert(ret_ty != 0);
+ assert(ret_ty != .none);
const gpa = gz.zir_code.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.zir_code.instructions.ensureCapacity(gpa, gz.zir_code.instructions.len + 1);
@@ -1043,7 +1043,7 @@ pub const Scope = struct {
const payload_index = gz.zir_code.addExtraAssumeCapacity(zir.Inst.FnType{
.param_types_len = @intCast(u32, param_types.len),
});
- gz.zir_code.extra.appendSliceAssumeCapacity(param_types);
+ gz.zir_code.extra.appendSliceAssumeCapacity(mem.bytesAsSlice(u32, mem.sliceAsBytes(param_types)));
const new_index = @intCast(zir.Inst.Index, gz.zir_code.instructions.len);
gz.zir_code.instructions.appendAssumeCapacity(.{
@@ -1054,7 +1054,7 @@ pub const Scope = struct {
} },
});
gz.instructions.appendAssumeCapacity(new_index);
- return new_index + gz.zir_code.ref_start_index;
+ return zir.Inst.Ref.fromIndex(new_index, gz.zir_code.param_count);
}
pub fn addCall(
@@ -1065,7 +1065,7 @@ pub const Scope = struct {
/// Absolute node index. This function does the conversion to offset from Decl.
src_node: ast.Node.Index,
) !zir.Inst.Ref {
- assert(callee != 0);
+ assert(callee != .none);
assert(src_node != 0);
const gpa = gz.zir_code.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
@@ -1077,7 +1077,7 @@ pub const Scope = struct {
.callee = callee,
.args_len = @intCast(u32, args.len),
});
- gz.zir_code.extra.appendSliceAssumeCapacity(args);
+ gz.zir_code.extra.appendSliceAssumeCapacity(mem.bytesAsSlice(u32, mem.sliceAsBytes(args)));
const new_index = @intCast(zir.Inst.Index, gz.zir_code.instructions.len);
gz.zir_code.instructions.appendAssumeCapacity(.{
@@ -1088,7 +1088,7 @@ pub const Scope = struct {
} },
});
gz.instructions.appendAssumeCapacity(new_index);
- return new_index + gz.zir_code.ref_start_index;
+ return zir.Inst.Ref.fromIndex(new_index, gz.zir_code.param_count);
}
/// Note that this returns a `zir.Inst.Index` not a ref.
@@ -1098,7 +1098,7 @@ pub const Scope = struct {
tag: zir.Inst.Tag,
lhs: zir.Inst.Ref,
) !zir.Inst.Index {
- assert(lhs != 0);
+ assert(lhs != .none);
const gpa = gz.zir_code.gpa;
try gz.instructions.ensureCapacity(gpa, gz.instructions.items.len + 1);
try gz.zir_code.instructions.ensureCapacity(gpa, gz.zir_code.instructions.len + 1);
@@ -1129,7 +1129,7 @@ pub const Scope = struct {
/// Absolute node index. This function does the conversion to offset from Decl.
src_node: ast.Node.Index,
) !zir.Inst.Ref {
- assert(operand != 0);
+ assert(operand != .none);
return gz.add(.{
.tag = tag,
.data = .{ .un_node = .{
@@ -1160,7 +1160,7 @@ pub const Scope = struct {
} },
});
gz.instructions.appendAssumeCapacity(new_index);
- return new_index + gz.zir_code.ref_start_index;
+ return zir.Inst.Ref.fromIndex(new_index, gz.zir_code.param_count);
}
pub fn addArrayTypeSentinel(
@@ -1186,7 +1186,7 @@ pub const Scope = struct {
} },
});
gz.instructions.appendAssumeCapacity(new_index);
- return new_index + gz.zir_code.ref_start_index;
+ return zir.Inst.Ref.fromIndex(new_index, gz.zir_code.param_count);
}
pub fn addUnTok(
@@ -1196,7 +1196,7 @@ pub const Scope = struct {
/// Absolute token index. This function does the conversion to Decl offset.
abs_tok_index: ast.TokenIndex,
) !zir.Inst.Ref {
- assert(operand != 0);
+ assert(operand != .none);
return gz.add(.{
.tag = tag,
.data = .{ .un_tok = .{
@@ -1228,8 +1228,8 @@ pub const Scope = struct {
lhs: zir.Inst.Ref,
rhs: zir.Inst.Ref,
) !zir.Inst.Ref {
- assert(lhs != 0);
- assert(rhs != 0);
+ assert(lhs != .none);
+ assert(rhs != .none);
return gz.add(.{
.tag = tag,
.data = .{ .bin = .{
@@ -1317,7 +1317,7 @@ pub const Scope = struct {
const new_index = @intCast(zir.Inst.Index, gz.zir_code.instructions.len);
gz.zir_code.instructions.appendAssumeCapacity(inst);
gz.instructions.appendAssumeCapacity(new_index);
- return gz.zir_code.ref_start_index + new_index;
+ return zir.Inst.Ref.fromIndex(new_index, gz.zir_code.param_count);
}
};
@@ -1331,7 +1331,7 @@ pub const Scope = struct {
parent: *Scope,
gen_zir: *GenZir,
name: []const u8,
- inst: zir.Inst.Index,
+ inst: zir.Inst.Ref,
/// Source location of the corresponding variable declaration.
src: LazySrcLoc,
};
@@ -1346,7 +1346,7 @@ pub const Scope = struct {
parent: *Scope,
gen_zir: *GenZir,
name: []const u8,
- ptr: zir.Inst.Index,
+ ptr: zir.Inst.Ref,
/// Source location of the corresponding variable declaration.
src: LazySrcLoc,
};
@@ -1366,9 +1366,9 @@ pub const WipZirCode = struct {
instructions: std.MultiArrayList(zir.Inst) = .{},
string_bytes: std.ArrayListUnmanaged(u8) = .{},
extra: std.ArrayListUnmanaged(u32) = .{},
- /// The end of special indexes. `zir.Inst.Ref` subtracts against this number to convert
- /// to `zir.Inst.Index`. The default here is correct if there are 0 parameters.
- ref_start_index: u32 = zir.const_inst_list.len,
+ /// We need to keep track of this count in order to convert between
+ /// `zir.Inst.Ref` and `zir.Inst.Index` types.
+ param_count: u32 = 0,
decl: *Decl,
gpa: *Allocator,
arena: *Allocator,
@@ -1383,15 +1383,18 @@ pub const WipZirCode = struct {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, wzc.extra.items.len);
inline for (fields) |field| {
- comptime assert(field.field_type == u32);
- wzc.extra.appendAssumeCapacity(@field(extra, field.name));
+ wzc.extra.appendAssumeCapacity(switch (field.field_type) {
+ u32 => @field(extra, field.name),
+ zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
+ else => unreachable,
+ });
}
return result;
}
pub fn refIsNoReturn(wzc: WipZirCode, zir_inst_ref: zir.Inst.Ref) bool {
- if (zir_inst_ref >= wzc.ref_start_index) {
- const zir_inst = zir_inst_ref - wzc.ref_start_index;
+ if (zir_inst_ref == .unreachable_value) return true;
+ if (zir_inst_ref.toIndex(wzc.param_count)) |zir_inst| {
return wzc.instructions.items(.tag)[zir_inst].isNoReturn();
}
return false;
@@ -2072,7 +2075,7 @@ fn astgenAndSemaFn(
// The AST params array does not contain anytype and ... parameters.
// We must iterate to count how many param types to allocate.
const param_count = blk: {
- var count: usize = 0;
+ var count: u32 = 0;
var it = fn_proto.iterate(tree);
while (it.next()) |param| {
if (param.anytype_ellipsis3) |some| if (token_tags[some] == .ellipsis3) break;
@@ -2081,7 +2084,6 @@ fn astgenAndSemaFn(
break :blk count;
};
const param_types = try fn_type_scope_arena.allocator.alloc(zir.Inst.Ref, param_count);
- const type_type_rl: astgen.ResultLoc = .{ .ty = @enumToInt(zir.Const.type_type) };
var is_var_args = false;
{
@@ -2106,7 +2108,7 @@ fn astgenAndSemaFn(
const param_type_node = param.type_expr;
assert(param_type_node != 0);
param_types[param_type_i] =
- try astgen.expr(mod, &fn_type_scope.base, type_type_rl, param_type_node);
+ try astgen.expr(mod, &fn_type_scope.base, .{ .ty = .type_type }, param_type_node);
}
assert(param_type_i == param_count);
}
@@ -2178,7 +2180,7 @@ fn astgenAndSemaFn(
const return_type_inst = try astgen.expr(
mod,
&fn_type_scope.base,
- type_type_rl,
+ .{ .ty = .type_type },
fn_proto.ast.return_type,
);
@@ -2187,19 +2189,22 @@ fn astgenAndSemaFn(
else
false;
- const cc: zir.Inst.Index = if (fn_proto.ast.callconv_expr != 0)
+ const cc: zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
// TODO instead of enum literal type, this needs to be the
// std.builtin.CallingConvention enum. We need to implement importing other files
// and enums in order to fix this.
- try astgen.comptimeExpr(mod, &fn_type_scope.base, .{
- .ty = @enumToInt(zir.Const.enum_literal_type),
- }, fn_proto.ast.callconv_expr)
+ try astgen.comptimeExpr(
+ mod,
+ &fn_type_scope.base,
+ .{ .ty = .enum_literal_type },
+ fn_proto.ast.callconv_expr,
+ )
else if (is_extern) // note: https://github.com/ziglang/zig/issues/5269
try fn_type_scope.addSmallStr(.enum_literal_small, "C")
else
- 0;
+ .none;
- const fn_type_inst: zir.Inst.Ref = if (cc != 0) fn_type: {
+ const fn_type_inst: zir.Inst.Ref = if (cc != .none) fn_type: {
const tag: zir.Inst.Tag = if (is_var_args) .fn_type_cc_var_args else .fn_type_cc;
break :fn_type try fn_type_scope.addFnTypeCc(tag, .{
.ret_ty = return_type_inst,
@@ -2292,7 +2297,7 @@ fn astgenAndSemaFn(
.decl = decl,
.arena = &decl_arena.allocator,
.gpa = mod.gpa,
- .ref_start_index = @intCast(u32, zir.const_inst_list.len + param_count),
+ .param_count = param_count,
};
defer wip_zir_code.deinit();
@@ -2309,7 +2314,7 @@ fn astgenAndSemaFn(
try wip_zir_code.extra.ensureCapacity(mod.gpa, param_count);
var params_scope = &gen_scope.base;
- var i: usize = 0;
+ var i: u32 = 0;
var it = fn_proto.iterate(tree);
while (it.next()) |param| : (i += 1) {
const name_token = param.name_token.?;
@@ -2320,7 +2325,7 @@ fn astgenAndSemaFn(
.gen_zir = &gen_scope,
.name = param_name,
// Implicit const list first, then implicit arg list.
- .inst = @intCast(u32, zir.const_inst_list.len + i),
+ .inst = zir.Inst.Ref.fromParam(i),
.src = decl.tokSrcLoc(name_token),
};
params_scope = &sub_scope.base;
@@ -2344,8 +2349,7 @@ fn astgenAndSemaFn(
// astgen uses result location semantics to coerce return operands.
// Since we are adding the return instruction here, we must handle the coercion.
// We do this by using the `ret_coerce` instruction.
- const void_inst: zir.Inst.Ref = @enumToInt(zir.Const.void_value);
- _ = try gen_scope.addUnTok(.ret_coerce, void_inst, tree.lastToken(body_node));
+ _ = try gen_scope.addUnTok(.ret_coerce, .void_value, tree.lastToken(body_node));
}
const code = try gen_scope.finish();
@@ -2514,9 +2518,7 @@ fn astgenAndSemaVarDecl(
defer gen_scope.instructions.deinit(mod.gpa);
const init_result_loc: astgen.ResultLoc = if (var_decl.ast.type_node != 0) .{
- .ty = try astgen.expr(mod, &gen_scope.base, .{
- .ty = @enumToInt(zir.Const.type_type),
- }, var_decl.ast.type_node),
+ .ty = try astgen.expr(mod, &gen_scope.base, .{ .ty = .type_type }, var_decl.ast.type_node),
} else .none;
const init_inst = try astgen.comptimeExpr(
src/Sema.zig
@@ -78,7 +78,7 @@ pub fn rootAsType(sema: *Sema, root_block: *Scope.Block) !Type {
/// return type of `analyzeBody` so that we can tail call them.
/// Only appropriate to return when the instruction is known to be NoReturn
/// solely based on the ZIR tag.
-const always_noreturn: InnerError!zir.Inst.Ref = @as(zir.Inst.Index, 0);
+const always_noreturn: InnerError!zir.Inst.Ref = .none;
/// This function is the main loop of `Sema` and it can be used in two different ways:
/// * The traditional way where there are N breaks out of the block and peer type
@@ -88,7 +88,7 @@ const always_noreturn: InnerError!zir.Inst.Ref = @as(zir.Inst.Index, 0);
/// * The "flat" way. There is only 1 break out of the block, and it is with a `break_flat`
/// instruction. In this case, the `zir.Inst.Index` part of the return value will be
/// the block result value. No block scope needs to be created for this strategy.
-pub fn analyzeBody(sema: *Sema, block: *Scope.Block, body: []const zir.Inst.Index) !zir.Inst.Index {
+pub fn analyzeBody(sema: *Sema, block: *Scope.Block, body: []const zir.Inst.Index) !zir.Inst.Ref {
// No tracy calls here, to avoid interfering with the tail call mechanism.
const map = block.sema.inst_map;
@@ -300,28 +300,18 @@ pub fn analyzeBody(sema: *Sema, block: *Scope.Block, body: []const zir.Inst.Inde
}
/// TODO when we rework TZIR memory layout, this function will no longer have a possible error.
+/// Until then we allocate memory for a new, mutable `ir.Inst` to match what TZIR expects.
pub fn resolveInst(sema: *Sema, zir_ref: zir.Inst.Ref) error{OutOfMemory}!*ir.Inst {
- var i: usize = zir_ref;
-
- // First section of indexes correspond to a set number of constant values.
- if (i < zir.const_inst_list.len) {
- // TODO when we rework TZIR memory layout, this function can be as simple as:
- // if (zir_ref < zir.const_inst_list.len + sema.param_count)
- // return zir_ref;
- // Until then we allocate memory for a new, mutable `ir.Inst` to match what
- // TZIR expects.
- return sema.mod.constInst(sema.arena, .unneeded, zir.const_inst_list[i]);
+ if (zir_ref.toTypedValue()) |typed_value| {
+ return sema.mod.constInst(sema.arena, .unneeded, typed_value);
}
- i -= zir.const_inst_list.len;
- // Next section of indexes correspond to function parameters, if any.
- if (i < sema.param_inst_list.len) {
- return sema.param_inst_list[i];
+ const param_count = @intCast(u32, sema.param_inst_list.len);
+ if (zir_ref.toParam(param_count)) |param| {
+ return sema.param_inst_list[param];
}
- i -= sema.param_inst_list.len;
- // Finally, the last section of indexes refers to the map of ZIR=>TZIR.
- return sema.inst_map[i];
+ return sema.inst_map[zir_ref.toIndex(param_count).?];
}
fn resolveConstString(
@@ -745,7 +735,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!*In
return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int);
}
-fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -763,7 +753,10 @@ fn zirCompileLog(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(zir.Inst.MultiOp, inst_data.payload_index);
- for (sema.code.extra[extra.end..][0..extra.data.operands_len]) |arg_ref, i| {
+ const raw_args = sema.code.extra[extra.end..][0..extra.data.operands_len];
+ const args = mem.bytesAsSlice(zir.Inst.Ref, mem.sliceAsBytes(raw_args));
+
+ for (args) |arg_ref, i| {
if (i != 0) try writer.print(", ", .{});
const arg = try sema.resolveInst(arg_ref);
@@ -998,7 +991,7 @@ fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
_ = try block.addNoOp(src, Type.initTag(.void), .breakpoint);
}
-fn zirBreak(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirBreak(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -1007,7 +1000,7 @@ fn zirBreak(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!z
return sema.analyzeBreak(block, sema.src, inst_data.block_inst, operand);
}
-fn zirBreakVoidNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Index {
+fn zirBreakVoidNode(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError!zir.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -1112,7 +1105,8 @@ fn zirCall(
const func_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
const extra = sema.code.extraData(zir.Inst.Call, inst_data.payload_index);
- const args = sema.code.extra[extra.end..][0..extra.data.args_len];
+ const raw_args = sema.code.extra[extra.end..][0..extra.data.args_len];
+ const args = mem.bytesAsSlice(zir.Inst.Ref, mem.sliceAsBytes(raw_args));
return sema.analyzeCall(block, extra.data.callee, func_src, call_src, modifier, ensure_result_used, args);
}
@@ -1739,7 +1733,8 @@ fn zirFnType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index, var_args: b
const inst_data = sema.code.instructions.items(.data)[inst].fn_type;
const extra = sema.code.extraData(zir.Inst.FnType, inst_data.payload_index);
- const param_types = sema.code.extra[extra.end..][0..extra.data.param_types_len];
+ const raw_param_types = sema.code.extra[extra.end..][0..extra.data.param_types_len];
+ const param_types = mem.bytesAsSlice(zir.Inst.Ref, mem.sliceAsBytes(raw_param_types));
return sema.fnTypeCommon(
block,
@@ -1757,7 +1752,8 @@ fn zirFnTypeCc(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index, var_args:
const inst_data = sema.code.instructions.items(.data)[inst].fn_type;
const extra = sema.code.extraData(zir.Inst.FnTypeCc, inst_data.payload_index);
- const param_types = sema.code.extra[extra.end..][0..extra.data.param_types_len];
+ const raw_param_types = sema.code.extra[extra.end..][0..extra.data.param_types_len];
+ const param_types = mem.bytesAsSlice(zir.Inst.Ref, mem.sliceAsBytes(raw_param_types));
const cc_tv = try sema.resolveInstConst(block, .todo, extra.data.cc);
// TODO once we're capable of importing and analyzing decls from
@@ -2487,7 +2483,7 @@ fn zirNegate(
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
- const lhs = try sema.resolveInst(@enumToInt(zir.Const.zero));
+ const lhs = try sema.resolveInst(.zero);
const rhs = try sema.resolveInst(inst_data.operand);
return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src);
@@ -2641,7 +2637,7 @@ fn zirAsm(
var extra_i = extra.end;
const Output = struct { name: []const u8, inst: *Inst };
- const output: ?Output = if (extra.data.output != 0) blk: {
+ const output: ?Output = if (extra.data.output != .none) blk: {
const name = sema.code.nullTerminatedString(sema.code.extra[extra_i]);
extra_i += 1;
break :blk Output{
@@ -2655,7 +2651,7 @@ fn zirAsm(
const clobbers = try sema.arena.alloc([]const u8, extra.data.clobbers_len);
for (args) |*arg| {
- arg.* = try sema.resolveInst(sema.code.extra[extra_i]);
+ arg.* = try sema.resolveInst(@intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]));
extra_i += 1;
}
for (inputs) |*name| {
@@ -2772,11 +2768,13 @@ fn zirTypeofPeer(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerEr
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(zir.Inst.MultiOp, inst_data.payload_index);
+ const raw_args = sema.code.extra[extra.end..][0..extra.data.operands_len];
+ const args = mem.bytesAsSlice(zir.Inst.Ref, mem.sliceAsBytes(raw_args));
const inst_list = try sema.gpa.alloc(*ir.Inst, extra.data.operands_len);
defer sema.gpa.free(inst_list);
- for (sema.code.extra[extra.end..][0..extra.data.operands_len]) |arg_ref, i| {
+ for (args) |arg_ref, i| {
inst_list[i] = try sema.resolveInst(arg_ref);
}
@@ -3115,25 +3113,25 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: zir.Inst.Index) InnerError
var extra_i = extra.end;
const sentinel = if (inst_data.flags.has_sentinel) blk: {
- const ref = sema.code.extra[extra_i];
+ const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk (try sema.resolveInstConst(block, .unneeded, ref)).val;
} else null;
const abi_align = if (inst_data.flags.has_align) blk: {
- const ref = sema.code.extra[extra_i];
+ const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u32);
} else 0;
const bit_start = if (inst_data.flags.has_bit_range) blk: {
- const ref = sema.code.extra[extra_i];
+ const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
} else 0;
const bit_end = if (inst_data.flags.has_bit_range) blk: {
- const ref = sema.code.extra[extra_i];
+ const ref = @intToEnum(zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u16);
} else 0;
src/zir.zig
@@ -48,8 +48,11 @@ pub const Code = struct {
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
- comptime assert(field.field_type == u32);
- @field(result, field.name) = code.extra[i];
+ @field(result, field.name) = switch (field.field_type) {
+ u32 => code.extra[i],
+ Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
+ else => unreachable,
+ };
i += 1;
}
return .{
@@ -105,284 +108,6 @@ pub const Code = struct {
}
};
-/// These correspond to the first N tags of Value.
-/// A ZIR instruction refers to another one by index. However the first N indexes
-/// correspond to this enum, and the next M indexes correspond to the parameters
-/// of the current function. After that, they refer to other instructions in the
-/// instructions array for the function.
-/// When adding to this, consider adding a corresponding entry o `simple_types`
-/// in astgen.
-pub const Const = enum {
- /// The 0 value is reserved so that ZIR instruction indexes can use it to
- /// mean "null".
- unused,
-
- u8_type,
- i8_type,
- u16_type,
- i16_type,
- u32_type,
- i32_type,
- u64_type,
- i64_type,
- u128_type,
- i128_type,
- usize_type,
- isize_type,
- c_short_type,
- c_ushort_type,
- c_int_type,
- c_uint_type,
- c_long_type,
- c_ulong_type,
- c_longlong_type,
- c_ulonglong_type,
- c_longdouble_type,
- f16_type,
- f32_type,
- f64_type,
- f128_type,
- c_void_type,
- bool_type,
- void_type,
- type_type,
- anyerror_type,
- comptime_int_type,
- comptime_float_type,
- noreturn_type,
- null_type,
- undefined_type,
- fn_noreturn_no_args_type,
- fn_void_no_args_type,
- fn_naked_noreturn_no_args_type,
- fn_ccc_void_no_args_type,
- single_const_pointer_to_comptime_int_type,
- const_slice_u8_type,
- enum_literal_type,
-
- /// `undefined` (untyped)
- undef,
- /// `0` (comptime_int)
- zero,
- /// `1` (comptime_int)
- one,
- /// `{}`
- void_value,
- /// `unreachable` (noreturn type)
- unreachable_value,
- /// `null` (untyped)
- null_value,
- /// `true`
- bool_true,
- /// `false`
- bool_false,
-};
-
-pub const const_inst_list = std.enums.directEnumArray(Const, TypedValue, 0, .{
- .unused = undefined,
- .u8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u8_type),
- },
- .i8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i8_type),
- },
- .u16_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u16_type),
- },
- .i16_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i16_type),
- },
- .u32_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u32_type),
- },
- .i32_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i32_type),
- },
- .u64_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u64_type),
- },
- .i64_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i64_type),
- },
- .u128_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u128_type),
- },
- .i128_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i128_type),
- },
- .usize_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.usize_type),
- },
- .isize_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.isize_type),
- },
- .c_short_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_short_type),
- },
- .c_ushort_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_ushort_type),
- },
- .c_int_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_int_type),
- },
- .c_uint_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_uint_type),
- },
- .c_long_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_long_type),
- },
- .c_ulong_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_ulong_type),
- },
- .c_longlong_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_longlong_type),
- },
- .c_ulonglong_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_ulonglong_type),
- },
- .c_longdouble_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_longdouble_type),
- },
- .f16_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f16_type),
- },
- .f32_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f32_type),
- },
- .f64_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f64_type),
- },
- .f128_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f128_type),
- },
- .c_void_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_void_type),
- },
- .bool_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.bool_type),
- },
- .void_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.void_type),
- },
- .type_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.type_type),
- },
- .anyerror_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.anyerror_type),
- },
- .comptime_int_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.comptime_int_type),
- },
- .comptime_float_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.comptime_float_type),
- },
- .noreturn_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.noreturn_type),
- },
- .null_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.null_type),
- },
- .undefined_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.undefined_type),
- },
- .fn_noreturn_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_noreturn_no_args_type),
- },
- .fn_void_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_void_no_args_type),
- },
- .fn_naked_noreturn_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_naked_noreturn_no_args_type),
- },
- .fn_ccc_void_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_ccc_void_no_args_type),
- },
- .single_const_pointer_to_comptime_int_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.single_const_pointer_to_comptime_int_type),
- },
- .const_slice_u8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.const_slice_u8_type),
- },
- .enum_literal_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.enum_literal_type),
- },
-
- .undef = .{
- .ty = Type.initTag(.@"undefined"),
- .val = Value.initTag(.undef),
- },
- .zero = .{
- .ty = Type.initTag(.comptime_int),
- .val = Value.initTag(.zero),
- },
- .one = .{
- .ty = Type.initTag(.comptime_int),
- .val = Value.initTag(.one),
- },
- .void_value = .{
- .ty = Type.initTag(.void),
- .val = Value.initTag(.void_value),
- },
- .unreachable_value = .{
- .ty = Type.initTag(.noreturn),
- .val = Value.initTag(.unreachable_value),
- },
- .null_value = .{
- .ty = Type.initTag(.@"null"),
- .val = Value.initTag(.null_value),
- },
- .bool_true = .{
- .ty = Type.initTag(.bool),
- .val = Value.initTag(.bool_true),
- },
- .bool_false = .{
- .ty = Type.initTag(.bool),
- .val = Value.initTag(.bool_false),
- },
-});
-
/// These are untyped instructions generated from an Abstract Syntax Tree.
/// The data here is immutable because it is possible to have multiple
/// analyses on the same ZIR happening at the same time.
@@ -1032,14 +757,319 @@ pub const Inst = struct {
/// The position of a ZIR instruction within the `Code` instructions array.
pub const Index = u32;
- /// A reference to another ZIR instruction. If this value is below a certain
- /// threshold, it implicitly refers to a constant-known value from the `Const` enum.
- /// Below a second threshold, it implicitly refers to a parameter of the current
- /// function.
- /// Finally, after subtracting that offset, it refers to another instruction in
- /// the instruction array.
- /// This logic is implemented in `Sema.resolveRef`.
- pub const Ref = u32;
+ /// A reference to a TypedValue, parameter of the current function,
+ /// or ZIR instruction.
+ ///
+ /// If the Ref has a tag in this enum, it refers to a TypedValue which may be
+ /// retrieved with Ref.toTypedValue().
+ ///
+ /// If the value of a Ref does not have a tag, it refers to either a parameter
+ /// of the current function or a ZIR instruction.
+ ///
+ /// The first values after the last tag refer to parameters which may be
+ /// derived by subtracting typed_value_count.
+ ///
+ /// All further values refer to ZIR instructions which may be derived by
+ /// subtracting typed_value_count and the number of parameters.
+ ///
+ /// When adding a tag to this enum, consider adding a corresponding entry to
+ /// `simple_types` in astgen.
+ ///
+ /// This is packed so that it is safe to cast between `[]u32` and `[]Ref`.
+ pub const Ref = packed enum(u32) {
+ /// This Ref does not correspond to any ZIR instruction or constant
+ /// value and may instead be used as a sentinel to indicate null.
+ none,
+
+ u8_type,
+ i8_type,
+ u16_type,
+ i16_type,
+ u32_type,
+ i32_type,
+ u64_type,
+ i64_type,
+ usize_type,
+ isize_type,
+ c_short_type,
+ c_ushort_type,
+ c_int_type,
+ c_uint_type,
+ c_long_type,
+ c_ulong_type,
+ c_longlong_type,
+ c_ulonglong_type,
+ c_longdouble_type,
+ f16_type,
+ f32_type,
+ f64_type,
+ f128_type,
+ c_void_type,
+ bool_type,
+ void_type,
+ type_type,
+ anyerror_type,
+ comptime_int_type,
+ comptime_float_type,
+ noreturn_type,
+ null_type,
+ undefined_type,
+ fn_noreturn_no_args_type,
+ fn_void_no_args_type,
+ fn_naked_noreturn_no_args_type,
+ fn_ccc_void_no_args_type,
+ single_const_pointer_to_comptime_int_type,
+ const_slice_u8_type,
+ enum_literal_type,
+
+ /// `undefined` (untyped)
+ undef,
+ /// `0` (comptime_int)
+ zero,
+ /// `1` (comptime_int)
+ one,
+ /// `{}`
+ void_value,
+ /// `unreachable` (noreturn type)
+ unreachable_value,
+ /// `null` (untyped)
+ null_value,
+ /// `true`
+ bool_true,
+ /// `false`
+ bool_false,
+
+ _,
+
+ pub const typed_value_count = @as(u32, typed_value_map.len);
+ const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{
+ .none = undefined,
+
+ .u8_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.u8_type),
+ },
+ .i8_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.i8_type),
+ },
+ .u16_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.u16_type),
+ },
+ .i16_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.i16_type),
+ },
+ .u32_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.u32_type),
+ },
+ .i32_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.i32_type),
+ },
+ .u64_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.u64_type),
+ },
+ .i64_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.i64_type),
+ },
+ .usize_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.usize_type),
+ },
+ .isize_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.isize_type),
+ },
+ .c_short_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_short_type),
+ },
+ .c_ushort_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_ushort_type),
+ },
+ .c_int_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_int_type),
+ },
+ .c_uint_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_uint_type),
+ },
+ .c_long_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_long_type),
+ },
+ .c_ulong_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_ulong_type),
+ },
+ .c_longlong_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_longlong_type),
+ },
+ .c_ulonglong_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_ulonglong_type),
+ },
+ .c_longdouble_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_longdouble_type),
+ },
+ .f16_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.f16_type),
+ },
+ .f32_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.f32_type),
+ },
+ .f64_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.f64_type),
+ },
+ .f128_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.f128_type),
+ },
+ .c_void_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.c_void_type),
+ },
+ .bool_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.bool_type),
+ },
+ .void_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.void_type),
+ },
+ .type_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.type_type),
+ },
+ .anyerror_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.anyerror_type),
+ },
+ .comptime_int_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.comptime_int_type),
+ },
+ .comptime_float_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.comptime_float_type),
+ },
+ .noreturn_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.noreturn_type),
+ },
+ .null_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.null_type),
+ },
+ .undefined_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.undefined_type),
+ },
+ .fn_noreturn_no_args_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.fn_noreturn_no_args_type),
+ },
+ .fn_void_no_args_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.fn_void_no_args_type),
+ },
+ .fn_naked_noreturn_no_args_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.fn_naked_noreturn_no_args_type),
+ },
+ .fn_ccc_void_no_args_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.fn_ccc_void_no_args_type),
+ },
+ .single_const_pointer_to_comptime_int_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.single_const_pointer_to_comptime_int_type),
+ },
+ .const_slice_u8_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.const_slice_u8_type),
+ },
+ .enum_literal_type = .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.enum_literal_type),
+ },
+
+ .undef = .{
+ .ty = Type.initTag(.@"undefined"),
+ .val = Value.initTag(.undef),
+ },
+ .zero = .{
+ .ty = Type.initTag(.comptime_int),
+ .val = Value.initTag(.zero),
+ },
+ .one = .{
+ .ty = Type.initTag(.comptime_int),
+ .val = Value.initTag(.one),
+ },
+ .void_value = .{
+ .ty = Type.initTag(.void),
+ .val = Value.initTag(.void_value),
+ },
+ .unreachable_value = .{
+ .ty = Type.initTag(.noreturn),
+ .val = Value.initTag(.unreachable_value),
+ },
+ .null_value = .{
+ .ty = Type.initTag(.@"null"),
+ .val = Value.initTag(.null_value),
+ },
+ .bool_true = .{
+ .ty = Type.initTag(.bool),
+ .val = Value.initTag(.bool_true),
+ },
+ .bool_false = .{
+ .ty = Type.initTag(.bool),
+ .val = Value.initTag(.bool_false),
+ },
+ });
+
+ pub fn fromParam(param: u32) Ref {
+ return @intToEnum(Ref, typed_value_count + param);
+ }
+
+ pub fn fromIndex(index: Index, param_count: u32) Ref {
+ return @intToEnum(Ref, typed_value_count + param_count + index);
+ }
+
+ pub fn toTypedValue(ref: Ref) ?TypedValue {
+ assert(ref != .none);
+ if (@enumToInt(ref) >= typed_value_count) return null;
+ return typed_value_map[@enumToInt(ref)];
+ }
+
+ pub fn toParam(ref: Ref, param_count: u32) ?u32 {
+ assert(ref != .none);
+ if (@enumToInt(ref) < typed_value_count or
+ @enumToInt(ref) >= typed_value_count + param_count)
+ {
+ return null;
+ }
+ return @enumToInt(ref) - typed_value_count;
+ }
+
+ pub fn toIndex(ref: Ref, param_count: u32) ?Index {
+ assert(ref != .none);
+ if (@enumToInt(ref) < typed_value_count + param_count) return null;
+ return @enumToInt(ref) - typed_value_count - param_count;
+ }
+ };
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
@@ -1642,7 +1672,9 @@ const Writer = struct {
fn writePlNodeCall(self: *Writer, stream: anytype, inst: Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Inst.Call, inst_data.payload_index);
- const args = self.code.extra[extra.end..][0..extra.data.args_len];
+ const raw_args = self.code.extra[extra.end..][0..extra.data.args_len];
+ const args = mem.bytesAsSlice(Inst.Ref, mem.sliceAsBytes(raw_args));
+
try self.writeInstRef(stream, extra.data.callee);
try stream.writeAll(", [");
for (args) |arg, i| {
@@ -1735,9 +1767,9 @@ const Writer = struct {
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
const inst_data = self.code.instructions.items(.data)[inst].fn_type;
const extra = self.code.extraData(Inst.FnType, inst_data.payload_index);
- const param_types = self.code.extra[extra.end..][0..extra.data.param_types_len];
- const cc: Inst.Ref = 0;
- return self.writeFnTypeCommon(stream, param_types, inst_data.return_type, var_args, cc);
+ const raw_param_types = self.code.extra[extra.end..][0..extra.data.param_types_len];
+ const param_types = mem.bytesAsSlice(Inst.Ref, mem.sliceAsBytes(raw_param_types));
+ return self.writeFnTypeCommon(stream, param_types, inst_data.return_type, var_args, .none);
}
fn writeBoolBr(self: *Writer, stream: anytype, inst: Inst.Index) !void {
@@ -1761,7 +1793,8 @@ const Writer = struct {
) (@TypeOf(stream).Error || error{OutOfMemory})!void {
const inst_data = self.code.instructions.items(.data)[inst].fn_type;
const extra = self.code.extraData(Inst.FnTypeCc, inst_data.payload_index);
- const param_types = self.code.extra[extra.end..][0..extra.data.param_types_len];
+ const raw_param_types = self.code.extra[extra.end..][0..extra.data.param_types_len];
+ const param_types = mem.bytesAsSlice(Inst.Ref, mem.sliceAsBytes(raw_param_types));
const cc = extra.data.cc;
return self.writeFnTypeCommon(stream, param_types, inst_data.return_type, var_args, cc);
}
@@ -1828,13 +1861,13 @@ const Writer = struct {
try stream.print("\"{}\")", .{std.zig.fmtEscapes(str)});
}
- fn writeInstRef(self: *Writer, stream: anytype, inst: Inst.Ref) !void {
- var i: usize = inst;
+ fn writeInstRef(self: *Writer, stream: anytype, ref: Inst.Ref) !void {
+ var i: usize = @enumToInt(ref);
- if (i < const_inst_list.len) {
- return stream.print("@{d}", .{i});
+ if (i < Inst.Ref.typed_value_count) {
+ return stream.print("@{}", .{ref});
}
- i -= const_inst_list.len;
+ i -= Inst.Ref.typed_value_count;
if (i < self.param_count) {
return stream.print("${d}", .{i});
@@ -1852,9 +1885,9 @@ const Writer = struct {
self: *Writer,
stream: anytype,
prefix: []const u8,
- inst: Inst.Index,
+ inst: Inst.Ref,
) !void {
- if (inst == 0) return;
+ if (inst == .none) return;
try stream.writeAll(prefix);
try self.writeInstRef(stream, inst);
}