Commit 6e0de1d116
Changed files (34)
lib
src
arch
aarch64
arm
riscv64
sparc64
wasm
x86_64
lib/std/array_list.zig
@@ -459,6 +459,28 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return self.items[prev_len..][0..n];
}
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is a slice pointing to the newly allocated elements.
+ /// The returned pointer becomes invalid when the list is resized.
+ /// Resizes list if `self.capacity` is not large enough.
+ pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T {
+ const prev_len = self.items.len;
+ try self.resize(self.items.len + n);
+ return self.items[prev_len..][0..n];
+ }
+
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is a slice pointing to the newly allocated elements.
+ /// Asserts that there is already space for the new items without allocating more.
+ /// **Does not** invalidate element pointers.
+ /// The returned pointer becomes invalid when the list is resized.
+ pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
+ assert(self.items.len + n <= self.capacity);
+ const prev_len = self.items.len;
+ self.items.len += n;
+ return self.items[prev_len..][0..n];
+ }
+
/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
/// Invalidates pointers to the removed element.
@@ -949,6 +971,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
return self.items[prev_len..][0..n];
}
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is a slice pointing to the newly allocated elements.
+ /// The returned pointer becomes invalid when the list is resized.
+ /// Resizes list if `self.capacity` is not large enough.
+ pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T {
+ const prev_len = self.items.len;
+ try self.resize(allocator, self.items.len + n);
+ return self.items[prev_len..][0..n];
+ }
+
+ /// Resize the array, adding `n` new elements, which have `undefined` values.
+ /// The return value is a slice pointing to the newly allocated elements.
+ /// Asserts that there is already space for the new items without allocating more.
+ /// **Does not** invalidate element pointers.
+ /// The returned pointer becomes invalid when the list is resized.
+ pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
+ assert(self.items.len + n <= self.capacity);
+ const prev_len = self.items.len;
+ self.items.len += n;
+ return self.items[prev_len..][0..n];
+ }
+
/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
/// Invalidates pointers to last element.
src/arch/aarch64/CodeGen.zig
@@ -328,7 +328,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn: *Module.Fn,
+ module_fn_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -339,6 +339,7 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
+ const module_fn = mod.funcPtr(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -4311,9 +4312,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (try self.air.value(callee, mod)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
-
+ if (func_value.getFunction(mod)) |func| {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
@@ -4353,10 +4352,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.tag = .blr,
.data = .{ .reg = .x30 },
});
- } else if (func_value.castTag(.extern_fn)) |func_payload| {
- const extern_fn = func_payload.data;
- const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0);
- const lib_name = mem.sliceTo(extern_fn.lib_name, 0);
+ } else if (func_value.getExternFunc(mod)) |extern_func| {
+ const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0);
+ const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name);
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
@@ -4627,7 +4625,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ const mod = self.bin_file.options.module.?;
+ const function = self.air.values[ty_pl.payload].getFunction(mod).?;
// TODO emit debug info for function change
_ = function;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
src/arch/arm/CodeGen.zig
@@ -334,7 +334,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn: *Module.Fn,
+ module_fn_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -345,6 +345,7 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
+ const module_fn = mod.funcPtr(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -4291,9 +4292,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (try self.air.value(callee, mod)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
-
+ if (func_value.getFunction(mod)) |func| {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
@@ -4308,7 +4307,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
@tagName(self.target.cpu.arch),
});
}
- } else if (func_value.castTag(.extern_fn)) |_| {
+ } else if (func_value.getExternFunc(mod)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@@ -4573,7 +4572,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ const mod = self.bin_file.options.module.?;
+ const function = self.air.values[ty_pl.payload].getFunction(mod).?;
// TODO emit debug info for function change
_ = function;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
src/arch/riscv64/CodeGen.zig
@@ -217,7 +217,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn: *Module.Fn,
+ module_fn_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -228,6 +228,7 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
+ const module_fn = mod.funcPtr(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -1745,8 +1746,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
if (try self.air.value(callee, mod)) |func_value| {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
+ if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
_ = try atom.getOrCreateOffsetTableEntry(elf_file);
@@ -1760,7 +1760,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.imm12 = 0,
} },
});
- } else if (func_value.castTag(.extern_fn)) |_| {
+ } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@@ -1879,7 +1879,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ const mod = self.bin_file.options.module.?;
+ const function = self.air.values[ty_pl.payload].getFunction(mod).?;
// TODO emit debug info for function change
_ = function;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
src/arch/sparc64/CodeGen.zig
@@ -260,7 +260,7 @@ const BigTomb = struct {
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn: *Module.Fn,
+ module_fn_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -271,6 +271,7 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
+ const module_fn = mod.funcPtr(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -1346,8 +1347,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// on linking.
if (try self.air.value(callee, mod)) |func_value| {
if (self.bin_file.tag == link.File.Elf.base_tag) {
- if (func_value.castTag(.function)) |func_payload| {
- const func = func_payload.data;
+ if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| {
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
@@ -1374,7 +1374,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.tag = .nop,
.data = .{ .nop = {} },
});
- } else if (func_value.castTag(.extern_fn)) |_| {
+ } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@@ -1663,7 +1663,8 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ const mod = self.bin_file.options.module.?;
+ const function = self.air.values[ty_pl.payload].getFunction(mod).?;
// TODO emit debug info for function change
_ = function;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
src/arch/wasm/CodeGen.zig
@@ -1203,20 +1203,22 @@ fn genFunctype(
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- func: *Module.Fn,
+ func_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput,
) codegen.CodeGenError!codegen.Result {
_ = src_loc;
+ const mod = bin_file.options.module.?;
+ const func = mod.funcPtr(func_index);
var code_gen: CodeGen = .{
.gpa = bin_file.allocator,
.air = air,
.liveness = liveness,
.code = code,
.decl_index = func.owner_decl,
- .decl = bin_file.options.module.?.declPtr(func.owner_decl),
+ .decl = mod.declPtr(func.owner_decl),
.err_msg = undefined,
.locals = .{},
.target = bin_file.options.target,
@@ -2196,27 +2198,33 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const callee: ?Decl.Index = blk: {
const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null;
- if (func_val.castTag(.function)) |function| {
- _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
- break :blk function.data.owner_decl;
- } else if (func_val.castTag(.extern_fn)) |extern_fn| {
- const ext_decl = mod.declPtr(extern_fn.data.owner_decl);
+ if (func_val.getFunction(mod)) |function| {
+ _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl);
+ break :blk function.owner_decl;
+ } else if (func_val.getExternFunc(mod)) |extern_func| {
+ const ext_decl = mod.declPtr(extern_func.decl);
const ext_info = mod.typeToFunc(ext_decl.ty).?;
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod);
defer func_type.deinit(func.gpa);
- const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl);
const atom = func.bin_file.getAtomPtr(atom_index);
- const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type);
+ const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
atom.getSymbolIndex().?,
- ext_decl.getExternFn().?.lib_name,
+ mod.intern_pool.stringToSliceUnwrap(ext_decl.getExternFunc(mod).?.lib_name),
type_index,
);
- break :blk extern_fn.data.owner_decl;
- } else if (func_val.castTag(.decl_ref)) |decl_ref| {
- _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
- break :blk decl_ref.data;
+ break :blk extern_func.decl;
+ } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| {
+ _ = try func.bin_file.getOrCreateAtomForDecl(decl);
+ break :blk decl;
+ },
+ else => {},
+ },
+ else => {},
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@@ -2932,29 +2940,41 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
return WValue{ .stack = {} };
}
-fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue {
+fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
- switch (ptr_val.tag()) {
- .decl_ref_mut => {
- const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
- return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
+ const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr;
+ switch (ptr.addr) {
+ .decl => |decl_index| {
+ return func.lowerParentPtrDecl(ptr_val, decl_index, 0);
+ },
+ .mut_decl => |mut_decl| {
+ const decl_index = mut_decl.decl;
+ return func.lowerParentPtrDecl(ptr_val, decl_index, 0);
},
- .decl_ref => {
- const decl_index = ptr_val.castTag(.decl_ref).?.data;
- return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
+ .int, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}),
+ .opt_payload => |base_ptr| {
+ return func.lowerParentPtr(base_ptr.toValue());
},
- .variable => {
- const decl_index = ptr_val.castTag(.variable).?.data.owner_decl;
- return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
+ .comptime_field => unreachable,
+ .elem => |elem| {
+ const index = elem.index;
+ const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod);
+ const offset = index * elem_type.abiSize(mod);
+ const array_ptr = try func.lowerParentPtr(elem.base.toValue());
+
+ return WValue{ .memory_offset = .{
+ .pointer = array_ptr.memory,
+ .offset = @intCast(u32, offset),
+ } };
},
- .field_ptr => {
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
- const parent_ty = field_ptr.container_ty;
+ .field => |field| {
+ const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
+ const parent_ptr = try func.lowerParentPtr(field.base.toValue());
- const field_offset = switch (parent_ty.zigTypeTag(mod)) {
+ const offset = switch (parent_ty.zigTypeTag(mod)) {
.Struct => switch (parent_ty.containerLayout(mod)) {
- .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod),
- else => parent_ty.structFieldOffset(field_ptr.field_index, mod),
+ .Packed => parent_ty.packedStructFieldByteOffset(field.index, mod),
+ else => parent_ty.structFieldOffset(field.index, mod),
},
.Union => switch (parent_ty.containerLayout(mod)) {
.Packed => 0,
@@ -2964,12 +2984,12 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
if (layout.payload_align > layout.tag_align) break :blk 0;
// tag is stored first so calculate offset from where payload starts
- const field_offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align));
- break :blk field_offset;
+ const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align));
+ break :blk offset;
},
},
.Pointer => switch (parent_ty.ptrSize(mod)) {
- .Slice => switch (field_ptr.field_index) {
+ .Slice => switch (field.index) {
0 => 0,
1 => func.ptrSize(),
else => unreachable,
@@ -2978,19 +2998,23 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
},
else => unreachable,
};
- return func.lowerParentPtr(field_ptr.container_ptr, offset + @intCast(u32, field_offset));
- },
- .elem_ptr => {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- const index = elem_ptr.index;
- const elem_offset = index * elem_ptr.elem_ty.abiSize(mod);
- return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset));
- },
- .opt_payload_ptr => {
- const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
- return func.lowerParentPtr(payload_ptr.container_ptr, offset);
+
+ return switch (parent_ptr) {
+ .memory => |ptr_| WValue{
+ .memory_offset = .{
+ .pointer = ptr_,
+ .offset = @intCast(u32, offset),
+ },
+ },
+ .memory_offset => |mem_off| WValue{
+ .memory_offset = .{
+ .pointer = mem_off.pointer,
+ .offset = @intCast(u32, offset) + mem_off.offset,
+ },
+ },
+ else => unreachable,
+ };
},
- else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}),
}
}
@@ -3045,21 +3069,97 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
var val = arg_val;
- if (val.castTag(.runtime_value)) |rt| {
- val = rt.data;
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .runtime_value => |rt| val = rt.val.toValue(),
+ else => {},
}
if (val.isUndefDeep(mod)) return func.emitUndefined(ty);
- if (val.castTag(.decl_ref)) |decl_ref| {
- const decl_index = decl_ref.data;
- return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0);
- }
- if (val.castTag(.decl_ref_mut)) |decl_ref_mut| {
- const decl_index = decl_ref_mut.data.decl_index;
- return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0);
- }
- switch (ty.zigTypeTag(mod)) {
- .Void => return WValue{ .none = {} },
- .Int => {
+
+ if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) {
+ .Array => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
+ .Struct => {
+ const struct_obj = mod.typeToStruct(ty).?;
+ assert(struct_obj.layout == .Packed);
+ var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
+ val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
+ const int_val = try mod.intValue(
+ struct_obj.backing_int_ty,
+ std.mem.readIntLittle(u64, &buf),
+ );
+ return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+ },
+ .Vector => {
+ assert(determineSimdStoreStrategy(ty, mod) == .direct);
+ var buf: [16]u8 = undefined;
+ val.writeToMemory(ty, mod, &buf) catch unreachable;
+ return func.storeSimdImmd(buf);
+ },
+ .Frame,
+ .AnyFrame,
+ => return func.fail("Wasm TODO: LowerConstant for type {}", .{ty.fmt(mod)}),
+ .Float,
+ .Union,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Int,
+ .Enum,
+ .Bool,
+ .Pointer,
+ => unreachable, // handled below
+ .Type,
+ .Void,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .Opaque,
+ .EnumLiteral,
+ .Fn,
+ => unreachable, // comptime-only types
+ };
+
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => return WValue{ .imm32 = switch (simple_value) {
+ .false => 0,
+ .true => 1,
+ else => unreachable,
+ } },
+ },
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ => unreachable, // non-runtime values
+ .int => {
const int_info = ty.intInfo(mod);
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
@@ -3080,86 +3180,71 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
},
}
},
- .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
- .Float => switch (ty.floatBits(func.target)) {
- 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16, mod)) },
- 32 => return WValue{ .float32 = val.toFloat(f32, mod) },
- 64 => return WValue{ .float64 = val.toFloat(f64, mod) },
- else => unreachable,
- },
- .Pointer => return switch (val.ip_index) {
- .null_value => WValue{ .imm32 = 0 },
- .none => switch (val.tag()) {
- .field_ptr, .elem_ptr, .opt_payload_ptr => func.lowerParentPtr(val, 0),
- else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}),
- },
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| WValue{ .imm32 = @intCast(u32, int.storage.u64) },
- else => unreachable,
- },
- },
- .Enum => {
- const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag;
- const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
- return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType());
- },
- .ErrorSet => switch (val.tag()) {
- .@"error" => {
- const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?);
- return WValue{ .imm32 = kv.value };
- },
- else => return WValue{ .imm32 = 0 },
+ .err => |err| {
+ const name = mod.intern_pool.stringToSlice(err.name);
+ const kv = try mod.getErrorValue(name);
+ return WValue{ .imm32 = kv.value };
},
- .ErrorUnion => {
+ .error_union => {
const error_type = ty.errorUnionSet(mod);
const payload_type = ty.errorUnionPayload(mod);
if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
- const is_pl = val.errorUnionIsPayload();
+ const is_pl = val.errorUnionIsPayload(mod);
const err_val = if (!is_pl) val else try mod.intValue(error_type, 0);
return func.lowerConstant(err_val, error_type);
}
return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
},
- .Optional => if (ty.optionalReprIsPayload(mod)) {
+ .enum_tag => |enum_tag| {
+ const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+ return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType());
+ },
+ .float => |float| switch (float.storage) {
+ .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) },
+ .f32 => |f32_val| return WValue{ .float32 = f32_val },
+ .f64 => |f64_val| return WValue{ .float64 = f64_val },
+ else => unreachable,
+ },
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0),
+ .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0),
+ .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()),
+ .opt_payload, .elem, .field => return func.lowerParentPtr(val),
+ else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}),
+ },
+ .opt => if (ty.optionalReprIsPayload(mod)) {
const pl_ty = ty.optionalChild(mod);
- if (val.castTag(.opt_payload)) |payload| {
- return func.lowerConstant(payload.data, pl_ty);
- } else if (val.isNull(mod)) {
- return WValue{ .imm32 = 0 };
+ if (val.optionalValue(mod)) |payload| {
+ return func.lowerConstant(payload, pl_ty);
} else {
- return func.lowerConstant(val, pl_ty);
+ return WValue{ .imm32 = 0 };
}
} else {
- const is_pl = val.tag() == .opt_payload;
- return WValue{ .imm32 = @boolToInt(is_pl) };
- },
- .Struct => {
- const struct_obj = mod.typeToStruct(ty).?;
- assert(struct_obj.layout == .Packed);
- var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
- val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
- const int_val = try mod.intValue(
- struct_obj.backing_int_ty,
- std.mem.readIntLittle(u64, &buf),
- );
- return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+ return WValue{ .imm32 = @boolToInt(!val.isNull(mod)) };
},
- .Vector => {
- assert(determineSimdStoreStrategy(ty, mod) == .direct);
- var buf: [16]u8 = undefined;
- val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable;
- return func.storeSimdImmd(buf);
- },
- .Union => {
- // in this case we have a packed union which will not be passed by reference.
- const union_ty = mod.typeToUnion(ty).?;
- const union_obj = val.castTag(.@"union").?.data;
- const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?;
- const field_ty = union_ty.fields.values()[field_index].ty;
- return func.lowerConstant(union_obj.val, field_ty);
+ .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}),
+ .vector_type => {
+ assert(determineSimdStoreStrategy(ty, mod) == .direct);
+ var buf: [16]u8 = undefined;
+ val.writeToMemory(ty, mod, &buf) catch unreachable;
+ return func.storeSimdImmd(buf);
+ },
+ .struct_type, .anon_struct_type => {
+ const struct_obj = mod.typeToStruct(ty).?;
+ assert(struct_obj.layout == .Packed);
+ var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
+ val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
+ const int_val = try mod.intValue(
+ struct_obj.backing_int_ty,
+ std.mem.readIntLittle(u64, &buf),
+ );
+ return func.lowerConstant(int_val, struct_obj.backing_int_ty);
+ },
+ else => unreachable,
},
- else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
+ .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}),
}
}
@@ -3221,31 +3306,33 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
.bool_true => return 1,
.bool_false => return 0,
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int),
- .int => |int| intStorageAsI32(int.storage),
- .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int),
+ .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
+ .int => |int| intStorageAsI32(int.storage, mod),
+ .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod),
else => unreachable,
},
}
switch (ty.zigTypeTag(mod)) {
.ErrorSet => {
- const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
+ const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError(mod).?) catch unreachable; // passed invalid `Value` to function
return @bitCast(i32, kv.value);
},
else => unreachable, // Programmer called this function for an illegal type
}
}
-fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index) i32 {
- return intStorageAsI32(ip.indexToKey(int).int.storage);
+fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 {
+ return intStorageAsI32(ip.indexToKey(int).int.storage, mod);
}
-fn intStorageAsI32(storage: InternPool.Key.Int.Storage) i32 {
+fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
return switch (storage) {
.i64 => |x| @intCast(i32, x),
.u64 => |x| @bitCast(i32, @intCast(u32, x)),
.big_int => unreachable,
+ .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)),
+ .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))),
};
}
@@ -5514,7 +5601,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// As the names are global and the slice elements are constant, we do not have
// to make a copy of the ptr+value but can point towards them directly.
const error_table_symbol = try func.bin_file.getErrorTableSymbol();
- const name_ty = Type.const_slice_u8_sentinel_0;
+ const name_ty = Type.slice_const_u8_sentinel_0;
const mod = func.bin_file.base.options.module.?;
const abi_size = name_ty.abiSize(mod);
@@ -6935,7 +7022,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// finish function body
try writer.writeByte(std.wasm.opcode(.end));
- const slice_ty = Type.const_slice_u8_sentinel_0;
+ const slice_ty = Type.slice_const_u8_sentinel_0;
const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod);
return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
}
src/arch/x86_64/CodeGen.zig
@@ -632,7 +632,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- module_fn: *Module.Fn,
+ module_fn_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -643,6 +643,7 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
+ const module_fn = mod.funcPtr(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@@ -687,7 +688,7 @@ pub fn generate(
@enumToInt(FrameIndex.stack_frame),
FrameAlloc.init(.{
.size = 0,
- .alignment = if (mod.align_stack_fns.get(module_fn)) |set_align_stack|
+ .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack|
set_align_stack.alignment
else
1,
@@ -2760,19 +2761,18 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const elem_ty = src_ty.childType(mod);
const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits));
- var splat_pl = Value.Payload.SubValue{
- .base = .{ .tag = .repeated },
- .data = mask_val,
- };
- const splat_val = Value.initPayload(&splat_pl.base);
-
- const full_ty = try mod.vectorType(.{
+ const splat_ty = try mod.vectorType(.{
.len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
.child = elem_ty.ip_index,
});
- const full_abi_size = @intCast(u32, full_ty.abiSize(mod));
+ const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod));
+
+ const splat_val = try mod.intern(.{ .aggregate = .{
+ .ty = splat_ty.ip_index,
+ .storage = .{ .repeated_elem = mask_val.ip_index },
+ } });
- const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val });
+ const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = splat_val.toValue() });
const splat_addr_mcv: MCValue = switch (splat_mcv) {
.memory, .indirect, .load_frame => splat_mcv.address(),
else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) },
@@ -2784,14 +2784,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
.{ .vp_, .@"and" },
dst_reg,
dst_reg,
- splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)),
+ splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)),
);
try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg);
} else {
try self.asmRegisterMemory(
.{ .p_, .@"and" },
dst_reg,
- splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)),
+ splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)),
);
try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg);
}
@@ -4893,23 +4893,14 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- var arena = std.heap.ArenaAllocator.init(self.gpa);
- defer arena.deinit();
-
- const ExpectedContents = struct {
- repeated: Value.Payload.SubValue,
- };
- var stack align(@alignOf(ExpectedContents)) =
- std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
const vec_ty = try mod.vectorType(.{
.len = @divExact(abi_size * 8, scalar_bits),
.child = (try mod.intType(.signed, scalar_bits)).ip_index,
});
const sign_val = switch (tag) {
- .neg => try vec_ty.minInt(stack.get(), mod),
- .fabs => try vec_ty.maxInt(stack.get(), mod, vec_ty),
+ .neg => try vec_ty.minInt(mod),
+ .fabs => try vec_ty.maxInt(mod, vec_ty),
else => unreachable,
};
@@ -8106,13 +8097,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (try self.air.value(callee, mod)) |func_value| {
- if (if (func_value.castTag(.function)) |func_payload|
- func_payload.data.owner_decl
- else if (func_value.castTag(.decl_ref)) |decl_ref_payload|
- decl_ref_payload.data
- else
- null) |owner_decl|
- {
+ const func_key = mod.intern_pool.indexToKey(func_value.ip_index);
+ if (switch (func_key) {
+ .func => |func| mod.funcPtr(func.index).owner_decl,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| decl,
+ else => null,
+ },
+ else => null,
+ }) |owner_decl| {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(owner_decl);
const atom = elf_file.getAtom(atom_index);
@@ -8145,10 +8138,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.disp = @intCast(i32, fn_got_addr),
}));
} else unreachable;
- } else if (func_value.castTag(.extern_fn)) |func_payload| {
- const extern_fn = func_payload.data;
- const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0);
- const lib_name = mem.sliceTo(extern_fn.lib_name, 0);
+ } else if (func_value.getExternFunc(mod)) |extern_func| {
+ const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0);
+ const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom_index = try self.owner.getSymbolIndex(self);
const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name);
@@ -8554,7 +8546,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ const mod = self.bin_file.options.module.?;
+ const function = self.air.values[ty_pl.payload].getFunction(mod).?;
// TODO emit debug info for function change
_ = function;
return self.finishAir(inst, .unreach, .{ .none, .none, .none });
src/codegen/c.zig
@@ -257,7 +257,7 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) {
return .{ .data = ident };
}
-/// This data is available when outputting .c code for a `*Module.Fn`.
+/// This data is available when outputting .c code for a `Module.Fn.Index`.
/// It is not available when generating .h file.
pub const Function = struct {
air: Air,
@@ -268,7 +268,7 @@ pub const Function = struct {
next_block_index: usize = 0,
object: Object,
lazy_fns: LazyFnMap,
- func: *Module.Fn,
+ func_index: Module.Fn.Index,
/// All the locals, to be emitted at the top of the function.
locals: std.ArrayListUnmanaged(Local) = .{},
/// Which locals are available for reuse, based on Type.
@@ -549,33 +549,12 @@ pub const DeclGen = struct {
}
// Chase function values in order to be able to reference the original function.
- inline for (.{ .function, .extern_fn }) |tag|
- if (decl.val.castTag(tag)) |func|
- if (func.data.owner_decl != decl_index)
- return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location);
+ if (decl.getFunction(mod)) |func| if (func.owner_decl != decl_index)
+ return dg.renderDeclValue(writer, ty, val, func.owner_decl, location);
+ if (decl.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index)
+ return dg.renderDeclValue(writer, ty, val, extern_func.decl, location);
- if (decl.val.castTag(.variable)) |var_payload|
- try dg.renderFwdDecl(decl_index, var_payload.data);
-
- if (ty.isSlice(mod)) {
- if (location == .StaticInitializer) {
- try writer.writeByte('{');
- } else {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeAll("){ .ptr = ");
- }
-
- try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(mod), .Initializer);
-
- const len_val = try mod.intValue(Type.usize, val.sliceLen(mod));
-
- if (location == .StaticInitializer) {
- return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
- } else {
- return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
- }
- }
+ if (decl.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable);
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
@@ -594,125 +573,77 @@ pub const DeclGen = struct {
/// Renders a "parent" pointer by recursing to the root decl/variable
/// that its contents are defined with respect to.
- ///
- /// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr
fn renderParentPtr(
dg: *DeclGen,
writer: anytype,
- ptr_val: Value,
- ptr_ty: Type,
+ ptr_val: InternPool.Index,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
const mod = dg.module;
-
- if (!ptr_ty.isSlice(mod)) {
- try writer.writeByte('(');
- try dg.renderType(writer, ptr_ty);
- try writer.writeByte(')');
- }
- if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
- .int => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
- else => unreachable,
- };
- switch (ptr_val.tag()) {
- .decl_ref_mut, .decl_ref, .variable => {
- const decl_index = switch (ptr_val.tag()) {
- .decl_ref => ptr_val.castTag(.decl_ref).?.data,
- .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
- .variable => ptr_val.castTag(.variable).?.data.owner_decl,
+ const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType();
+ const ptr = mod.intern_pool.indexToKey(ptr_val).ptr;
+ switch (ptr.addr) {
+ .decl, .mut_decl => try dg.renderDeclValue(
+ writer,
+ ptr_ty,
+ ptr_val.toValue(),
+ switch (ptr.addr) {
+ .decl => |decl| decl,
+ .mut_decl => |mut_decl| mut_decl.decl,
else => unreachable,
- };
- try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location);
+ },
+ location,
+ ),
+ .int => |int| try writer.print("{x}", .{
+ try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other),
+ }),
+ .eu_payload, .opt_payload => |base| {
+ const base_ty = mod.intern_pool.typeOf(base).toType().childType(mod);
+ // Ensure complete type definition is visible before accessing fields.
+ _ = try dg.typeToIndex(base_ty, .complete);
+ try writer.writeAll("&(");
+ try dg.renderParentPtr(writer, base, location);
+ try writer.writeAll(")->payload");
},
- .field_ptr => {
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
-
+ .elem => |elem| {
+ try writer.writeAll("&(");
+ try dg.renderParentPtr(writer, elem.base, location);
+ try writer.print(")[{d}]", .{elem.index});
+ },
+ .field => |field| {
+ const base_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
// Ensure complete type definition is visible before accessing fields.
- _ = try dg.typeToIndex(field_ptr.container_ty, .complete);
-
- const container_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, field_ptr.container_ty);
-
- switch (fieldLocation(
- field_ptr.container_ty,
- ptr_ty,
- @intCast(u32, field_ptr.field_index),
- mod,
- )) {
- .begin => try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- ),
- .field => |field| {
+ _ = try dg.typeToIndex(base_ty, .complete);
+ switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) {
+ .begin => try dg.renderParentPtr(writer, field.base, location),
+ .field => |name| {
try writer.writeAll("&(");
- try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- );
+ try dg.renderParentPtr(writer, field.base, location);
try writer.writeAll(")->");
- try dg.writeCValue(writer, field);
+ try dg.writeCValue(writer, name);
},
.byte_offset => |byte_offset| {
const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8);
-
const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
try writer.writeAll("((");
try dg.renderType(writer, u8_ptr_ty);
try writer.writeByte(')');
- try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- );
+ try dg.renderParentPtr(writer, field.base, location);
try writer.print(" + {})", .{
try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other),
});
},
.end => {
try writer.writeAll("((");
- try dg.renderParentPtr(
- writer,
- field_ptr.container_ptr,
- container_ptr_ty,
- location,
- );
+ try dg.renderParentPtr(writer, field.base, location);
try writer.print(") + {})", .{
try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other),
});
},
}
},
- .elem_ptr => {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- const elem_ptr_ty = try mod.ptrType(.{
- .size = .C,
- .elem_type = elem_ptr.elem_ty.ip_index,
- });
-
- try writer.writeAll("&(");
- try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location);
- try writer.print(")[{d}]", .{elem_ptr.index});
- },
- .opt_payload_ptr, .eu_payload_ptr => {
- const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
- const container_ptr_ty = try mod.ptrType(.{
- .elem_type = payload_ptr.container_ty.ip_index,
- .size = .C,
- });
-
- // Ensure complete type definition is visible before accessing fields.
- _ = try dg.typeToIndex(payload_ptr.container_ty, .complete);
-
- try writer.writeAll("&(");
- try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location);
- try writer.writeAll(")->payload");
- },
- else => unreachable,
+ .comptime_field => unreachable,
}
}
@@ -723,11 +654,12 @@ pub const DeclGen = struct {
arg_val: Value,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
+ const mod = dg.module;
var val = arg_val;
- if (val.castTag(.runtime_value)) |rt| {
- val = rt.data;
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .runtime_value => |rt| val = rt.val.toValue(),
+ else => {},
}
- const mod = dg.module;
const target = mod.getTarget();
const initializer_type: ValueRenderLocation = switch (location) {
.StaticInitializer => .StaticInitializer,
@@ -928,175 +860,8 @@ pub const DeclGen = struct {
}
unreachable;
}
- switch (ty.zigTypeTag(mod)) {
- .Int => switch (val.tag()) {
- .field_ptr,
- .elem_ptr,
- .opt_payload_ptr,
- .eu_payload_ptr,
- .decl_ref_mut,
- .decl_ref,
- => try dg.renderParentPtr(writer, val, ty, location),
- else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
- },
- .Float => {
- const bits = ty.floatBits(target);
- const f128_val = val.toFloat(f128, mod);
-
- // All unsigned ints matching float types are pre-allocated.
- const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
-
- assert(bits <= 128);
- var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
- var repr_val_big = BigInt.Mutable{
- .limbs = &repr_val_limbs,
- .len = undefined,
- .positive = undefined,
- };
- switch (bits) {
- 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
- 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
- 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
- 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
- 128 => repr_val_big.set(@bitCast(u128, f128_val)),
- else => unreachable,
- }
-
- const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst());
-
- try writer.writeAll("zig_cast_");
- try dg.renderTypeForBuiltinFnName(writer, ty);
- try writer.writeByte(' ');
- var empty = true;
- if (std.math.isFinite(f128_val)) {
- try writer.writeAll("zig_make_");
- try dg.renderTypeForBuiltinFnName(writer, ty);
- try writer.writeByte('(');
- switch (bits) {
- 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}),
- 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}),
- 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}),
- 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}),
- 128 => try writer.print("{x}", .{f128_val}),
- else => unreachable,
- }
- try writer.writeAll(", ");
- empty = false;
- } else {
- // isSignalNan is equivalent to isNan currently, and MSVC doens't have nans, so prefer nan
- const operation = if (std.math.isNan(f128_val))
- "nan"
- else if (std.math.isSignalNan(f128_val))
- "nans"
- else if (std.math.isInf(f128_val))
- "inf"
- else
- unreachable;
-
- if (location == .StaticInitializer) {
- if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val))
- return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{});
-
- // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression
-
- // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly
- // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128)
- // return dg.fail("Only quiet nans are supported in global variable initializers", .{});
- }
-
- try writer.writeAll("zig_");
- try writer.writeAll(if (location == .StaticInitializer) "init" else "make");
- try writer.writeAll("_special_");
- try dg.renderTypeForBuiltinFnName(writer, ty);
- try writer.writeByte('(');
- if (std.math.signbit(f128_val)) try writer.writeByte('-');
- try writer.writeAll(", ");
- try writer.writeAll(operation);
- try writer.writeAll(", ");
- if (std.math.isNan(f128_val)) switch (bits) {
- // We only actually need to pass the significand, but it will get
- // properly masked anyway, so just pass the whole value.
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}),
- 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}),
- 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}),
- 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}),
- 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
- else => unreachable,
- };
- try writer.writeAll(", ");
- empty = false;
- }
- try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)});
- if (!empty) try writer.writeByte(')');
- return;
- },
- .Pointer => switch (val.ip_index) {
- .null_value => if (ty.isSlice(mod)) {
- var slice_pl = Value.Payload.Slice{
- .base = .{ .tag = .slice },
- .data = .{ .ptr = val, .len = Value.undef },
- };
- const slice_val = Value.initPayload(&slice_pl.base);
-
- return dg.renderValue(writer, ty, slice_val, location);
- } else {
- try writer.writeAll("((");
- try dg.renderType(writer, ty);
- try writer.writeAll(")NULL)");
- },
- .none => switch (val.tag()) {
- .variable => {
- const decl = val.castTag(.variable).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl, location);
- },
- .slice => {
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- const slice = val.castTag(.slice).?.data;
-
- try writer.writeByte('{');
- try dg.renderValue(writer, ty.slicePtrFieldType(mod), slice.ptr, initializer_type);
- try writer.writeAll(", ");
- try dg.renderValue(writer, Type.usize, slice.len, initializer_type);
- try writer.writeByte('}');
- },
- .function => {
- const func = val.castTag(.function).?.data;
- try dg.renderDeclName(writer, func.owner_decl, 0);
- },
- .extern_fn => {
- const extern_fn = val.castTag(.extern_fn).?.data;
- try dg.renderDeclName(writer, extern_fn.owner_decl, 0);
- },
- .lazy_align, .lazy_size => {
- try writer.writeAll("((");
- try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
- },
- .field_ptr,
- .elem_ptr,
- .opt_payload_ptr,
- .eu_payload_ptr,
- .decl_ref_mut,
- .decl_ref,
- => try dg.renderParentPtr(writer, val, ty, location),
-
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => {
- try writer.writeAll("((");
- try dg.renderType(writer, ty);
- return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
- },
- else => unreachable,
- },
- },
+ if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) {
.Array, .Vector => {
if (location == .FunctionArgument) {
try writer.writeByte('(');
@@ -1129,17 +894,6 @@ pub const DeclGen = struct {
return;
},
.none => switch (val.tag()) {
- .empty_array => {
- const ai = ty.arrayInfo(mod);
- try writer.writeByte('{');
- if (ai.sentinel) |s| {
- try dg.renderValue(writer, ai.elem_type, s, initializer_type);
- } else {
- try writer.writeByte('0');
- }
- try writer.writeByte('}');
- return;
- },
.bytes, .str_lit => |t| {
const bytes = switch (t) {
.bytes => val.castTag(.bytes).?.data,
@@ -1210,91 +964,6 @@ pub const DeclGen = struct {
try writer.writeByte('}');
}
},
- .Bool => {
- if (val.toBool(mod)) {
- return writer.writeAll("true");
- } else {
- return writer.writeAll("false");
- }
- },
- .Optional => {
- const payload_ty = ty.optionalChild(mod);
-
- const is_null_val = Value.makeBool(val.ip_index == .null_value);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
- return dg.renderValue(writer, Type.bool, is_null_val, location);
-
- if (ty.optionalReprIsPayload(mod)) {
- const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val;
- return dg.renderValue(writer, payload_ty, payload_val, location);
- }
-
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef;
-
- try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
- try writer.writeAll(", .is_null = ");
- try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
- try writer.writeAll(" }");
- },
- .ErrorSet => {
- if (val.castTag(.@"error")) |error_pl| {
- // Error values are already defined by genErrDecls.
- try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)});
- } else {
- try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)});
- }
- },
- .ErrorUnion => {
- const payload_ty = ty.errorUnionPayload(mod);
- const error_ty = ty.errorUnionSet(mod);
- const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val;
-
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return dg.renderValue(writer, error_ty, error_val, location);
- }
-
- if (!location.isInitializer()) {
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
- }
-
- const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
- try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
- try writer.writeAll(", .error = ");
- try dg.renderValue(writer, error_ty, error_val, initializer_type);
- try writer.writeAll(" }");
- },
- .Enum => switch (val.ip_index) {
- .none => {
- const int_tag_ty = try ty.intTagType(mod);
- return dg.renderValue(writer, int_tag_ty, val, location);
- },
- else => {
- const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag;
- const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
- return dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location);
- },
- },
- .Fn => switch (val.tag()) {
- .function => {
- const decl = val.castTag(.function).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl, location);
- },
- .extern_fn => {
- const decl = val.castTag(.extern_fn).?.data.owner_decl;
- return dg.renderDeclValue(writer, ty, val, decl, location);
- },
- else => unreachable,
- },
.Struct => switch (ty.containerLayout(mod)) {
.Auto, .Extern => {
const field_vals = val.castTag(.aggregate).?.data;
@@ -1408,7 +1077,448 @@ pub const DeclGen = struct {
}
},
},
- .Union => {
+
+ .Frame,
+ .AnyFrame,
+ => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
+ @tagName(tag),
+ }),
+
+ .Float,
+ .Union,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Int,
+ .Enum,
+ .Bool,
+ .Pointer,
+ => unreachable, // handled below
+ .Type,
+ .Void,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .Opaque,
+ .EnumLiteral,
+ .Fn,
+ => unreachable, // comptime-only types
+ };
+
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => try writer.writeAll(@tagName(simple_value)),
+ },
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ => unreachable, // non-runtime values
+ .int => |int| switch (int.storage) {
+ .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
+ .lazy_align, .lazy_size => {
+ try writer.writeAll("((");
+ try dg.renderType(writer, ty);
+ return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
+ },
+ },
+ .err => |err| try writer.print("zig_error_{}", .{
+ fmtIdent(mod.intern_pool.stringToSlice(err.name)),
+ }),
+ .error_union => |error_union| {
+ const payload_ty = ty.errorUnionPayload(mod);
+ const error_ty = ty.errorUnionSet(mod);
+ const error_val = if (val.errorUnionIsPayload(mod)) try mod.intValue(Type.anyerror, 0) else val;
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ return dg.renderValue(writer, error_ty, error_val, location);
+ }
+
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ const payload_val = switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ .payload => |payload| payload,
+ }.toValue();
+
+ try writer.writeAll("{ .payload = ");
+ try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
+ try writer.writeAll(", .error = ");
+ try dg.renderValue(writer, error_ty, error_val, initializer_type);
+ try writer.writeAll(" }");
+ },
+ .enum_tag => {
+ const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag;
+ const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+ try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location);
+ },
+ .float => {
+ const bits = ty.floatBits(target);
+ const f128_val = val.toFloat(f128, mod);
+
+ // All unsigned ints matching float types are pre-allocated.
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
+
+ assert(bits <= 128);
+ var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
+ var repr_val_big = BigInt.Mutable{
+ .limbs = &repr_val_limbs,
+ .len = undefined,
+ .positive = undefined,
+ };
+
+ switch (bits) {
+ 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
+ 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
+ 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
+ 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
+ 128 => repr_val_big.set(@bitCast(u128, f128_val)),
+ else => unreachable,
+ }
+
+ const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst());
+
+ try writer.writeAll("zig_cast_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte(' ');
+ var empty = true;
+ if (std.math.isFinite(f128_val)) {
+ try writer.writeAll("zig_make_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ switch (bits) {
+ 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}),
+ 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}),
+ 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}),
+ 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}),
+ 128 => try writer.print("{x}", .{f128_val}),
+ else => unreachable,
+ }
+ try writer.writeAll(", ");
+ empty = false;
+ } else {
+ // isSignalNan is equivalent to isNan currently, and MSVC doesn't have nans, so prefer nan
+ const operation = if (std.math.isNan(f128_val))
+ "nan"
+ else if (std.math.isSignalNan(f128_val))
+ "nans"
+ else if (std.math.isInf(f128_val))
+ "inf"
+ else
+ unreachable;
+
+ if (location == .StaticInitializer) {
+ if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val))
+ return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{});
+
+ // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression
+
+ // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly
+ // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128)
+ // return dg.fail("Only quiet nans are supported in global variable initializers", .{});
+ }
+
+ try writer.writeAll("zig_");
+ try writer.writeAll(if (location == .StaticInitializer) "init" else "make");
+ try writer.writeAll("_special_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ if (std.math.signbit(f128_val)) try writer.writeByte('-');
+ try writer.writeAll(", ");
+ try writer.writeAll(operation);
+ try writer.writeAll(", ");
+ if (std.math.isNan(f128_val)) switch (bits) {
+ // We only actually need to pass the significand, but it will get
+ // properly masked anyway, so just pass the whole value.
+ 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}),
+ 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}),
+ 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}),
+ 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}),
+ 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
+ else => unreachable,
+ };
+ try writer.writeAll(", ");
+ empty = false;
+ }
+ try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)});
+ if (!empty) try writer.writeByte(')');
+ },
+ .ptr => |ptr| {
+ if (ptr.len != .none) {
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+ try writer.writeByte('{');
+ }
+ switch (ptr.addr) {
+ .decl, .mut_decl => try dg.renderDeclValue(
+ writer,
+ ty,
+ val,
+ switch (ptr.addr) {
+ .decl => |decl| decl,
+ .mut_decl => |mut_decl| mut_decl.decl,
+ else => unreachable,
+ },
+ location,
+ ),
+ .int => |int| {
+ try writer.writeAll("((");
+ try dg.renderType(writer, ty);
+ try writer.print("){x})", .{
+ try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other),
+ });
+ },
+ .eu_payload,
+ .opt_payload,
+ .elem,
+ .field,
+ => try dg.renderParentPtr(writer, val.ip_index, location),
+ .comptime_field => unreachable,
+ }
+ if (ptr.len != .none) {
+ try writer.writeAll(", ");
+ try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type);
+ try writer.writeByte('}');
+ }
+ },
+ .opt => |opt| {
+ const payload_ty = ty.optionalChild(mod);
+
+ const is_null_val = Value.makeBool(opt.val == .none);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+ return dg.renderValue(writer, Type.bool, is_null_val, location);
+
+ if (ty.optionalReprIsPayload(mod)) {
+ return dg.renderValue(writer, payload_ty, switch (opt.val) {
+ .none => try mod.intValue(payload_ty, 0),
+ else => opt.val.toValue(),
+ }, location);
+ }
+
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeAll("{ .payload = ");
+ try dg.renderValue(writer, payload_ty, switch (opt.val) {
+ .none => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ else => opt.val,
+ }.toValue(), initializer_type);
+ try writer.writeAll(", .is_null = ");
+ try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
+ try writer.writeAll(" }");
+ },
+ .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .array_type, .vector_type => {
+ if (location == .FunctionArgument) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+ // Fall back to generic implementation.
+
+ // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
+ const max_string_initializer_len = 65535;
+
+ const ai = ty.arrayInfo(mod);
+ if (ai.elem_type.eql(Type.u8, mod)) {
+ if (ai.len <= max_string_initializer_len) {
+ var literal = stringLiteral(writer);
+ try literal.start();
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ const elem_val = try val.elemValue(mod, index);
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ try literal.writeChar(elem_val_u8);
+ }
+ if (ai.sentinel) |s| {
+ const s_u8 = @intCast(u8, s.toUnsignedInt(mod));
+ if (s_u8 != 0) try literal.writeChar(s_u8);
+ }
+ try literal.end();
+ } else {
+ try writer.writeByte('{');
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ if (index != 0) try writer.writeByte(',');
+ const elem_val = try val.elemValue(mod, index);
+ const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+ try writer.print("'\\x{x}'", .{elem_val_u8});
+ }
+ if (ai.sentinel) |s| {
+ if (index != 0) try writer.writeByte(',');
+ try dg.renderValue(writer, ai.elem_type, s, initializer_type);
+ }
+ try writer.writeByte('}');
+ }
+ } else {
+ try writer.writeByte('{');
+ var index: usize = 0;
+ while (index < ai.len) : (index += 1) {
+ if (index != 0) try writer.writeByte(',');
+ const elem_val = try val.elemValue(mod, index);
+ try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type);
+ }
+ if (ai.sentinel) |s| {
+ if (index != 0) try writer.writeByte(',');
+ try dg.renderValue(writer, ai.elem_type, s, initializer_type);
+ }
+ try writer.writeByte('}');
+ }
+ },
+ .struct_type, .anon_struct_type => switch (ty.containerLayout(mod)) {
+ .Auto, .Extern => {
+ const field_vals = val.castTag(.aggregate).?.data;
+
+ if (!location.isInitializer()) {
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+ }
+
+ try writer.writeByte('{');
+ var empty = true;
+ for (field_vals, 0..) |field_val, field_i| {
+ if (ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ if (!empty) try writer.writeByte(',');
+ try dg.renderValue(writer, field_ty, field_val, initializer_type);
+
+ empty = false;
+ }
+ try writer.writeByte('}');
+ },
+ .Packed => {
+ const field_vals = val.castTag(.aggregate).?.data;
+ const int_info = ty.intInfo(mod);
+
+ const bits = Type.smallestUnsignedBits(int_info.bits - 1);
+ const bit_offset_ty = try mod.intType(.unsigned, bits);
+
+ var bit_offset: u64 = 0;
+
+ var eff_num_fields: usize = 0;
+ for (0..field_vals.len) |field_i| {
+ if (ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ eff_num_fields += 1;
+ }
+
+ if (eff_num_fields == 0) {
+ try writer.writeByte('(');
+ try dg.renderValue(writer, ty, Value.undef, initializer_type);
+ try writer.writeByte(')');
+ } else if (ty.bitSize(mod) > 64) {
+ // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
+ var num_or = eff_num_fields - 1;
+ while (num_or > 0) : (num_or -= 1) {
+ try writer.writeAll("zig_or_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ }
+
+ var eff_index: usize = 0;
+ var needs_closing_paren = false;
+ for (field_vals, 0..) |field_val, field_i| {
+ if (ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ const cast_context = IntCastContext{ .value = .{ .value = field_val } };
+ if (bit_offset != 0) {
+ try writer.writeAll("zig_shl_");
+ try dg.renderTypeForBuiltinFnName(writer, ty);
+ try writer.writeByte('(');
+ try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
+ try writer.writeAll(", ");
+ const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+ try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ try writer.writeByte(')');
+ } else {
+ try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
+ }
+
+ if (needs_closing_paren) try writer.writeByte(')');
+ if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
+
+ bit_offset += field_ty.bitSize(mod);
+ needs_closing_paren = true;
+ eff_index += 1;
+ }
+ } else {
+ try writer.writeByte('(');
+ // a << a_off | b << b_off | c << c_off
+ var empty = true;
+ for (field_vals, 0..) |field_val, field_i| {
+ if (ty.structFieldIsComptime(field_i, mod)) continue;
+ const field_ty = ty.structFieldType(field_i, mod);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ if (!empty) try writer.writeAll(" | ");
+ try writer.writeByte('(');
+ try dg.renderType(writer, ty);
+ try writer.writeByte(')');
+
+ if (bit_offset != 0) {
+ try dg.renderValue(writer, field_ty, field_val, .Other);
+ try writer.writeAll(" << ");
+ const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
+ try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
+ } else {
+ try dg.renderValue(writer, field_ty, field_val, .Other);
+ }
+
+ bit_offset += field_ty.bitSize(mod);
+ empty = false;
+ }
+ try writer.writeByte(')');
+ }
+ },
+ },
+ else => unreachable,
+ },
+ .un => {
const union_obj = val.castTag(.@"union").?.data;
if (!location.isInitializer()) {
@@ -1461,22 +1571,6 @@ pub const DeclGen = struct {
if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
try writer.writeByte('}');
},
-
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- .Frame,
- .AnyFrame,
- => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
- @tagName(tag),
- }),
}
}
@@ -1504,8 +1598,7 @@ pub const DeclGen = struct {
else => unreachable,
}
}
- if (fn_decl.val.castTag(.function)) |func_payload|
- if (func_payload.data.is_cold) try w.writeAll("zig_cold ");
+ if (fn_decl.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold ");
if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");
const trailing = try renderTypePrefix(
@@ -1747,18 +1840,12 @@ pub const DeclGen = struct {
fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool {
const mod = dg.module;
- switch (tv.val.tag()) {
- .extern_fn => return true,
- .function => {
- const func = tv.val.castTag(.function).?.data;
- return mod.decl_exports.contains(func.owner_decl);
- },
- .variable => {
- const variable = tv.val.castTag(.variable).?.data;
- return mod.decl_exports.contains(variable.owner_decl);
- },
+ return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+ .variable => |variable| mod.decl_exports.contains(variable.decl),
+ .extern_func => true,
+ .func => |func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl),
else => unreachable,
- }
+ };
}
fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void {
@@ -1833,7 +1920,7 @@ pub const DeclGen = struct {
try dg.writeCValue(writer, member);
}
- fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: *Module.Var) !void {
+ fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: InternPool.Key.Variable) !void {
const decl = dg.module.declPtr(decl_index);
const fwd_decl_writer = dg.fwd_decl.writer();
const is_global = dg.declIsGlobal(.{ .ty = decl.ty, .val = decl.val }) or variable.is_extern;
@@ -1844,7 +1931,7 @@ pub const DeclGen = struct {
fwd_decl_writer,
decl.ty,
.{ .decl = decl_index },
- CQualifiers.init(.{ .@"const" = !variable.is_mutable }),
+ CQualifiers.init(.{ .@"const" = variable.is_const }),
decl.@"align",
.complete,
);
@@ -1858,7 +1945,7 @@ pub const DeclGen = struct {
if (mod.decl_exports.get(decl_index)) |exports| {
try writer.writeAll(exports.items[export_index].options.name);
- } else if (decl.isExtern()) {
+ } else if (decl.isExtern(mod)) {
try writer.writeAll(mem.span(decl.name));
} else {
// MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case),
@@ -2416,8 +2503,11 @@ pub fn genErrDecls(o: *Object) !void {
var max_name_len: usize = 0;
for (mod.error_name_list.items, 0..) |name, value| {
max_name_len = std.math.max(name.len, max_name_len);
- var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
- try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
+ const err_val = try mod.intern(.{ .err = .{
+ .ty = .anyerror_type,
+ .name = mod.intern_pool.getString(name).unwrap().?,
+ } });
+ try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other);
try writer.print(" = {d}u,\n", .{value});
}
o.indent_writer.popIndent();
@@ -2451,7 +2541,7 @@ pub fn genErrDecls(o: *Object) !void {
const name_array_ty = try mod.arrayType(.{
.len = mod.error_name_list.items.len,
- .child = .const_slice_u8_sentinel_0_type,
+ .child = .slice_const_u8_sentinel_0_type,
.sentinel = .zero_u8,
});
@@ -2497,7 +2587,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
.tag_name => {
const enum_ty = val.data.tag_name;
- const name_slice_ty = Type.const_slice_u8_sentinel_0;
+ const name_slice_ty = Type.slice_const_u8_sentinel_0;
try w.writeAll("static ");
try o.dg.renderType(w, name_slice_ty);
@@ -2668,14 +2758,13 @@ pub fn genDecl(o: *Object) !void {
const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val };
if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return;
- if (tv.val.tag() == .extern_fn) {
+ if (decl.getExternFunc(mod)) |_| {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.writeAll("zig_extern ");
try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 });
try fwd_decl_writer.writeAll(";\n");
try genExports(o);
- } else if (tv.val.castTag(.variable)) |var_payload| {
- const variable: *Module.Var = var_payload.data;
+ } else if (decl.getVariable(mod)) |variable| {
try o.dg.renderFwdDecl(decl_c_value.decl, variable);
try genExports(o);
@@ -2690,7 +2779,7 @@ pub fn genDecl(o: *Object) !void {
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete);
if (decl.@"linksection" != null) try w.writeAll(", read, write)");
try w.writeAll(" = ");
- try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer);
+ try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer);
try w.writeByte(';');
try o.indent_writer.insertNewline();
} else {
@@ -4157,10 +4246,13 @@ fn airCall(
known: {
const fn_decl = fn_decl: {
const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known;
- break :fn_decl switch (callee_val.tag()) {
- .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl,
- .function => callee_val.castTag(.function).?.data.owner_decl,
- .decl_ref => callee_val.castTag(.decl_ref).?.data,
+ break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) {
+ .extern_func => |extern_func| extern_func.decl,
+ .func => |func| mod.funcPtr(func.index).owner_decl,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| decl,
+ else => break :known,
+ },
else => break :known,
};
};
@@ -4231,9 +4323,9 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue {
fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
- const writer = f.object.writer();
- const function = f.air.values[ty_pl.payload].castTag(.function).?.data;
const mod = f.object.dg.module;
+ const writer = f.object.writer();
+ const function = f.air.values[ty_pl.payload].getFunction(mod).?;
try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name});
return .none;
}
@@ -6634,9 +6726,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, accum, .Other);
try writer.writeAll(" = ");
- var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
- defer arena.deinit();
-
try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
.Or, .Xor, .Add => try mod.intValue(scalar_ty, 0),
.And => switch (scalar_ty.zigTypeTag(mod)) {
@@ -6654,7 +6743,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
},
.Max => switch (scalar_ty.zigTypeTag(mod)) {
.Bool => try mod.intValue(scalar_ty, 0),
- .Int => try scalar_ty.minInt(arena.allocator(), mod),
+ .Int => try scalar_ty.minInt(mod),
.Float => try mod.floatValue(scalar_ty, std.math.nan_f128),
else => unreachable,
},
src/codegen/llvm.zig
@@ -582,7 +582,7 @@ pub const Object = struct {
llvm_usize_ty,
};
const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False);
- const slice_ty = Type.const_slice_u8_sentinel_0;
+ const slice_ty = Type.slice_const_u8_sentinel_0;
const slice_alignment = slice_ty.abiAlignment(mod);
const error_name_list = mod.error_name_list.items;
@@ -866,10 +866,11 @@ pub const Object = struct {
pub fn updateFunc(
o: *Object,
mod: *Module,
- func: *Module.Fn,
+ func_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
) !void {
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const target = mod.getTarget();
@@ -886,7 +887,7 @@ pub const Object = struct {
const llvm_func = try dg.resolveLlvmFunction(decl_index);
- if (mod.align_stack_fns.get(func)) |align_info| {
+ if (mod.align_stack_fns.get(func_index)) |align_info| {
dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
dg.addFnAttr(llvm_func, "noinline");
} else {
@@ -1164,7 +1165,7 @@ pub const Object = struct {
di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope);
const line_number = decl.src_line + 1;
- const is_internal_linkage = decl.val.tag() != .extern_fn and
+ const is_internal_linkage = decl.getExternFunc(mod) == null and
!mod.decl_exports.contains(decl_index);
const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type)
llvm.DIFlags.NoReturn
@@ -1269,18 +1270,20 @@ pub const Object = struct {
// because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
const llvm_global = self.decl_map.get(decl_index) orelse return;
const decl = mod.declPtr(decl_index);
- if (decl.isExtern()) {
- const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod);
- const mangle_name = is_wasm_fn and
- decl.getExternFn().?.lib_name != null and
- !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c");
- const decl_name = if (mangle_name) name: {
- const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{
- decl.name, decl.getExternFn().?.lib_name.?,
- });
- break :name tmp.ptr;
- } else decl.name;
- defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0));
+ if (decl.isExtern(mod)) {
+ var free_decl_name = false;
+ const decl_name = decl_name: {
+ if (mod.getTarget().isWasm() and try decl.isFunction(mod)) {
+ if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| {
+ if (!std.mem.eql(u8, lib_name, "c")) {
+ free_decl_name = true;
+ break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name });
+ }
+ }
+ }
+ break :decl_name std.mem.span(decl.name);
+ };
+ defer if (free_decl_name) gpa.free(decl_name);
llvm_global.setValueName(decl_name);
if (self.getLlvmGlobal(decl_name)) |other_global| {
@@ -1303,13 +1306,13 @@ pub const Object = struct {
di_global.replaceLinkageName(linkage_name);
}
}
- if (decl.val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal) {
+ if (decl.getVariable(mod)) |variable| {
+ if (variable.is_threadlocal) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
llvm_global.setThreadLocalMode(.NotThreadLocal);
}
- if (variable.data.is_weak_linkage) {
+ if (variable.is_weak_linkage) {
llvm_global.setLinkage(.ExternalWeak);
}
}
@@ -1345,8 +1348,8 @@ pub const Object = struct {
defer gpa.free(section_z);
llvm_global.setSection(section_z);
}
- if (decl.val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal) {
+ if (decl.getVariable(mod)) |variable| {
+ if (variable.is_threadlocal) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
}
}
@@ -1379,9 +1382,9 @@ pub const Object = struct {
llvm_global.setLinkage(.Internal);
if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
llvm_global.setUnnamedAddr(.True);
- if (decl.val.castTag(.variable)) |variable| {
+ if (decl.getVariable(mod)) |variable| {
const single_threaded = mod.comp.bin_file.options.single_threaded;
- if (variable.data.is_threadlocal and !single_threaded) {
+ if (variable.is_threadlocal and !single_threaded) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
llvm_global.setThreadLocalMode(.NotThreadLocal);
@@ -1510,12 +1513,11 @@ pub const Object = struct {
for (enum_type.names, 0..) |field_name_ip, i| {
const field_name_z = ip.stringToSlice(field_name_ip);
- var bigint_space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
- const storage = if (enum_type.values.len != 0)
- ip.indexToKey(enum_type.values[i]).int.storage
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = if (enum_type.values.len != 0)
+ enum_type.values[i].toValue().toBigInt(&bigint_space, mod)
else
- InternPool.Key.Int.Storage{ .u64 = i };
- const bigint = storage.toBigInt(&bigint_space);
+ std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
if (bigint.limbs.len == 1) {
enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned);
@@ -2442,6 +2444,7 @@ pub const DeclGen = struct {
}
fn genDecl(dg: *DeclGen) !void {
+ const mod = dg.module;
const decl = dg.decl;
const decl_index = dg.decl_index;
assert(decl.has_tv);
@@ -2449,19 +2452,16 @@ pub const DeclGen = struct {
log.debug("gen: {s} type: {}, value: {}", .{
decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(),
});
- assert(decl.val.ip_index != .none or decl.val.tag() != .function);
- if (decl.val.castTag(.extern_fn)) |extern_fn| {
- _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl);
+ if (decl.getExternFunc(mod)) |extern_func| {
+ _ = try dg.resolveLlvmFunction(extern_func.decl);
} else {
- const mod = dg.module;
const target = mod.getTarget();
var global = try dg.resolveGlobalDecl(decl_index);
global.setAlignment(decl.getAlignment(mod));
if (decl.@"linksection") |section| global.setSection(section);
assert(decl.has_tv);
- const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
- const variable = payload.data;
- break :init_val variable.init;
+ const init_val = if (decl.getVariable(mod)) |variable| init_val: {
+ break :init_val variable.init.toValue();
} else init_val: {
global.setGlobalConstant(.True);
break :init_val decl.val;
@@ -2519,7 +2519,7 @@ pub const DeclGen = struct {
);
try dg.object.di_map.put(dg.gpa, dg.decl, di_global.getVariable().toNode());
- if (!is_internal_linkage or decl.isExtern()) global.attachMetaData(di_global);
+ if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global);
}
}
}
@@ -2548,17 +2548,16 @@ pub const DeclGen = struct {
const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace);
gop.value_ptr.* = llvm_fn;
- const is_extern = decl.isExtern();
+ const is_extern = decl.isExtern(mod);
if (!is_extern) {
llvm_fn.setLinkage(.Internal);
llvm_fn.setUnnamedAddr(.True);
} else {
if (target.isWasm()) {
dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0));
- if (decl.getExternFn().?.lib_name) |lib_name| {
- const module_name = std.mem.sliceTo(lib_name, 0);
- if (!std.mem.eql(u8, module_name, "c")) {
- dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name);
+ if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| {
+ if (!std.mem.eql(u8, lib_name, "c")) {
+ dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name);
}
}
}
@@ -2695,11 +2694,12 @@ pub const DeclGen = struct {
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(dg.object.decl_map.remove(decl_index));
- const decl = dg.module.declPtr(decl_index);
- const fqn = try decl.getFullyQualifiedName(dg.module);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
+ const fqn = try decl.getFullyQualifiedName(mod);
defer dg.gpa.free(fqn);
- const target = dg.module.getTarget();
+ const target = mod.getTarget();
const llvm_type = try dg.lowerType(decl.ty);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
@@ -2712,18 +2712,18 @@ pub const DeclGen = struct {
gop.value_ptr.* = llvm_global;
// This is needed for declarations created by `@extern`.
- if (decl.isExtern()) {
+ if (decl.isExtern(mod)) {
llvm_global.setValueName(decl.name);
llvm_global.setUnnamedAddr(.False);
llvm_global.setLinkage(.External);
- if (decl.val.castTag(.variable)) |variable| {
- const single_threaded = dg.module.comp.bin_file.options.single_threaded;
- if (variable.data.is_threadlocal and !single_threaded) {
+ if (decl.getVariable(mod)) |variable| {
+ const single_threaded = mod.comp.bin_file.options.single_threaded;
+ if (variable.is_threadlocal and !single_threaded) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
llvm_global.setThreadLocalMode(.NotThreadLocal);
}
- if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
+ if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
}
} else {
llvm_global.setLinkage(.Internal);
@@ -3199,468 +3199,344 @@ pub const DeclGen = struct {
const mod = dg.module;
const target = mod.getTarget();
var tv = arg_tv;
- if (tv.val.castTag(.runtime_value)) |rt| {
- tv.val = rt.data;
+ switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+ .runtime_value => |rt| tv.val = rt.val.toValue(),
+ else => {},
}
- if (tv.val.isUndef(mod)) {
+ if (tv.val.isUndefDeep(mod)) {
const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.getUndef();
}
- switch (tv.ty.zigTypeTag(mod)) {
- .Bool => {
- const llvm_type = try dg.lowerType(tv.ty);
- return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull();
- },
- .Int => switch (tv.val.ip_index) {
- .none => switch (tv.val.tag()) {
- .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index),
- .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
- else => {
- var bigint_space: Value.BigIntSpace = undefined;
- const bigint = tv.val.toBigInt(&bigint_space, mod);
- return lowerBigInt(dg, tv.ty, bigint);
- },
- },
- else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
- .int => |int| {
- var bigint_space: Value.BigIntSpace = undefined;
- const bigint = int.storage.toBigInt(&bigint_space);
- return lowerBigInt(dg, tv.ty, bigint);
- },
- else => unreachable,
- },
- },
- .Enum => {
- const int_val = try tv.enumToInt(mod);
- var bigint_space: Value.BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_space, mod);
-
- const int_info = tv.ty.intInfo(mod);
- const llvm_type = dg.context.intType(int_info.bits);
-
- const unsigned_val = v: {
- if (bigint.limbs.len == 1) {
- break :v llvm_type.constInt(bigint.limbs[0], .False);
- }
- if (@sizeOf(usize) == @sizeOf(u64)) {
- break :v llvm_type.constIntOfArbitraryPrecision(
- @intCast(c_uint, bigint.limbs.len),
- bigint.limbs.ptr,
- );
- }
- @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
- };
- if (!bigint.positive) {
- return llvm.constNeg(unsigned_val);
- }
- return unsigned_val;
- },
- .Float => {
- const llvm_ty = try dg.lowerType(tv.ty);
- switch (tv.ty.floatBits(target)) {
- 16 => {
- const repr = @bitCast(u16, tv.val.toFloat(f16, mod));
- const llvm_i16 = dg.context.intType(16);
- const int = llvm_i16.constInt(repr, .False);
- return int.constBitCast(llvm_ty);
- },
- 32 => {
- const repr = @bitCast(u32, tv.val.toFloat(f32, mod));
- const llvm_i32 = dg.context.intType(32);
- const int = llvm_i32.constInt(repr, .False);
- return int.constBitCast(llvm_ty);
- },
- 64 => {
- const repr = @bitCast(u64, tv.val.toFloat(f64, mod));
- const llvm_i64 = dg.context.intType(64);
- const int = llvm_i64.constInt(repr, .False);
- return int.constBitCast(llvm_ty);
- },
- 80 => {
- const float = tv.val.toFloat(f80, mod);
- const repr = std.math.break_f80(float);
- const llvm_i80 = dg.context.intType(80);
- var x = llvm_i80.constInt(repr.exp, .False);
- x = x.constShl(llvm_i80.constInt(64, .False));
- x = x.constOr(llvm_i80.constInt(repr.fraction, .False));
- if (backendSupportsF80(target)) {
- return x.constBitCast(llvm_ty);
- } else {
- return x;
- }
- },
- 128 => {
- var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod));
- // LLVM seems to require that the lower half of the f128 be placed first
- // in the buffer.
- if (native_endian == .Big) {
- std.mem.swap(u64, &buf[0], &buf[1]);
- }
- const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
- return int.constBitCast(llvm_ty);
- },
- else => unreachable,
- }
- },
- .Pointer => switch (tv.val.ip_index) {
- .null_value => {
- const llvm_type = try dg.lowerType(tv.ty);
- return llvm_type.constNull();
- },
- .none => switch (tv.val.tag()) {
- .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index),
- .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
- .variable => {
- const decl_index = tv.val.castTag(.variable).?.data.owner_decl;
- const decl = dg.module.declPtr(decl_index);
- dg.module.markDeclAlive(decl);
-
- const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
- const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
-
- const val = try dg.resolveGlobalDecl(decl_index);
- const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
- val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace))
- else
- val;
- return addrspace_casted_ptr;
- },
- .slice => {
- const slice = tv.val.castTag(.slice).?.data;
- const fields: [2]*llvm.Value = .{
- try dg.lowerValue(.{
- .ty = tv.ty.slicePtrFieldType(mod),
- .val = slice.ptr,
- }),
- try dg.lowerValue(.{
- .ty = Type.usize,
- .val = slice.len,
- }),
- };
- return dg.context.constStruct(&fields, fields.len, .False);
- },
- .lazy_align, .lazy_size => {
- const llvm_usize = try dg.lowerType(Type.usize);
- const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False);
- return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
- },
- .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
- return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0);
- },
- .opt_payload => {
- const payload = tv.val.castTag(.opt_payload).?.data;
- return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0);
- },
- else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{
- tv.ty.fmtDebug(), tag,
- }),
- },
- else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
- .int => |int| return dg.lowerIntAsPtr(int),
- .ptr => |ptr| {
- const ptr_tv: TypedValue = switch (ptr.len) {
- .none => tv,
- else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) },
- };
- const llvm_ptr_val = switch (ptr.addr) {
- .@"var" => |@"var"| ptr: {
- const decl = dg.module.declPtr(@"var".owner_decl);
- dg.module.markDeclAlive(decl);
-
- const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
- const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
-
- const val = try dg.resolveGlobalDecl(@"var".owner_decl);
- const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
- val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace))
- else
- val;
- break :ptr addrspace_casted_ptr;
- },
- .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl),
- .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl),
- .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int),
- .eu_payload,
- .opt_payload,
- .elem,
- .field,
- => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0),
- .comptime_field => unreachable,
- };
- switch (ptr.len) {
- .none => return llvm_ptr_val,
- else => {
- const fields: [2]*llvm.Value = .{
- llvm_ptr_val,
- try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }),
- };
- return dg.context.constStruct(&fields, fields.len, .False);
- },
- }
- },
- else => unreachable,
+ if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) {
+ .Array => switch (tv.val.tag()) {
+ .bytes => {
+ const bytes = tv.val.castTag(.bytes).?.data;
+ return dg.context.constString(
+ bytes.ptr,
+ @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
+ .True, // Don't null terminate. Bytes has the sentinel, if any.
+ );
},
- },
- .Array => switch (tv.val.ip_index) {
- .none => switch (tv.val.tag()) {
- .bytes => {
- const bytes = tv.val.castTag(.bytes).?.data;
- return dg.context.constString(
- bytes.ptr,
- @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
- .True, // Don't null terminate. Bytes has the sentinel, if any.
- );
- },
- .str_lit => {
- const str_lit = tv.val.castTag(.str_lit).?.data;
- const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- if (tv.ty.sentinel(mod)) |sent_val| {
- const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
- if (byte == 0 and bytes.len > 0) {
- return dg.context.constString(
- bytes.ptr,
- @intCast(c_uint, bytes.len),
- .False, // Yes, null terminate.
- );
- }
- var array = std.ArrayList(u8).init(dg.gpa);
- defer array.deinit();
- try array.ensureUnusedCapacity(bytes.len + 1);
- array.appendSliceAssumeCapacity(bytes);
- array.appendAssumeCapacity(byte);
- return dg.context.constString(
- array.items.ptr,
- @intCast(c_uint, array.items.len),
- .True, // Don't null terminate.
- );
- } else {
+ .str_lit => {
+ const str_lit = tv.val.castTag(.str_lit).?.data;
+ const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+ if (tv.ty.sentinel(mod)) |sent_val| {
+ const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
+ if (byte == 0 and bytes.len > 0) {
return dg.context.constString(
bytes.ptr,
@intCast(c_uint, bytes.len),
- .True, // Don't null terminate. `bytes` has the sentinel, if any.
- );
- }
- },
- .aggregate => {
- const elem_vals = tv.val.castTag(.aggregate).?.data;
- const elem_ty = tv.ty.childType(mod);
- const gpa = dg.gpa;
- const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod));
- const llvm_elems = try gpa.alloc(*llvm.Value, len);
- defer gpa.free(llvm_elems);
- var need_unnamed = false;
- for (elem_vals[0..len], 0..) |elem_val, i| {
- llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
- }
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- .True,
- );
- } else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
+ .False, // Yes, null terminate.
);
}
- },
- .repeated => {
- const val = tv.val.castTag(.repeated).?.data;
- const elem_ty = tv.ty.childType(mod);
- const sentinel = tv.ty.sentinel(mod);
- const len = @intCast(usize, tv.ty.arrayLen(mod));
- const len_including_sent = len + @boolToInt(sentinel != null);
- const gpa = dg.gpa;
- const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
- defer gpa.free(llvm_elems);
+ var array = std.ArrayList(u8).init(dg.gpa);
+ defer array.deinit();
+ try array.ensureUnusedCapacity(bytes.len + 1);
+ array.appendSliceAssumeCapacity(bytes);
+ array.appendAssumeCapacity(byte);
+ return dg.context.constString(
+ array.items.ptr,
+ @intCast(c_uint, array.items.len),
+ .True, // Don't null terminate.
+ );
+ } else {
+ return dg.context.constString(
+ bytes.ptr,
+ @intCast(c_uint, bytes.len),
+ .True, // Don't null terminate. `bytes` has the sentinel, if any.
+ );
+ }
+ },
+ else => unreachable,
+ },
+ .Struct => {
+ const llvm_struct_ty = try dg.lowerType(tv.ty);
+ const gpa = dg.gpa;
+
+ const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
+ .anon_struct_type => |tuple| {
+ var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
+ defer llvm_fields.deinit(gpa);
+ try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
var need_unnamed = false;
- if (len != 0) {
- for (llvm_elems[0..len]) |*elem| {
- elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
+
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none) continue;
+ if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // TODO make this and all other padding elsewhere in debug
+ // builds be 0xaa not undef.
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
- }
- if (sentinel) |sent| {
- llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
+ const field_llvm_val = try dg.lowerValue(.{
+ .ty = field_ty.toType(),
+ .val = try tv.val.fieldValue(mod, i),
+ });
+
+ need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
+
+ llvm_fields.appendAssumeCapacity(field_llvm_val);
+
+ offset += field_ty.toType().abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
}
if (need_unnamed) {
return dg.context.constStruct(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- .True,
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ .False,
);
} else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
);
}
},
- .empty_array_sentinel => {
- const elem_ty = tv.ty.childType(mod);
- const sent_val = tv.ty.sentinel(mod).?;
- const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val });
- const llvm_elems: [1]*llvm.Value = .{sentinel};
- const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
- if (need_unnamed) {
- return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True);
- } else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
- }
- },
+ .struct_type => |struct_type| struct_type,
else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
- .aggregate => |aggregate| switch (aggregate.storage) {
- .elems => |elem_vals| {
- const elem_ty = tv.ty.childType(mod);
- const gpa = dg.gpa;
- const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len);
- defer gpa.free(llvm_elems);
- var need_unnamed = false;
- for (elem_vals, 0..) |elem_val, i| {
- llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() });
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
- }
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- .True,
- );
- } else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- }
- },
- .repeated_elem => |val| {
- const elem_ty = tv.ty.childType(mod);
- const sentinel = tv.ty.sentinel(mod);
- const len = @intCast(usize, tv.ty.arrayLen(mod));
- const len_including_sent = len + @boolToInt(sentinel != null);
- const gpa = dg.gpa;
- const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
- defer gpa.free(llvm_elems);
+ };
- var need_unnamed = false;
- if (len != 0) {
- for (llvm_elems[0..len]) |*elem| {
- elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() });
- }
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
- }
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- if (sentinel) |sent| {
- llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
- need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
- }
+ if (struct_obj.layout == .Packed) {
+ assert(struct_obj.haveLayout());
+ const big_bits = struct_obj.backing_int_ty.bitSize(mod);
+ const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
+ const fields = struct_obj.fields.values();
+ comptime assert(Type.packed_struct_layout_version == 2);
+ var running_int: *llvm.Value = int_llvm_ty.constNull();
+ var running_bits: u16 = 0;
+ for (fields, 0..) |field, i| {
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- .True,
- );
- } else {
- const llvm_elem_ty = try dg.lowerType(elem_ty);
- return llvm_elem_ty.constArray(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- }
- },
- },
- else => unreachable,
- },
- },
- .Optional => {
- comptime assert(optional_layout_version == 3);
- const payload_ty = tv.ty.optionalChild(mod);
+ const non_int_val = try dg.lowerValue(.{
+ .ty = field.ty,
+ .val = try tv.val.fieldValue(mod, i),
+ });
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const small_int_ty = dg.context.intType(ty_bit_size);
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
+ non_int_val.constPtrToInt(small_int_ty)
+ else
+ non_int_val.constBitCast(small_int_ty);
+ const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
+ // If the field is as large as the entire packed struct, this
+ // zext would go from, e.g. i16 to i16. This is legal with
+ // constZExtOrBitCast but not legal with constZExt.
+ const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty);
+ const shifted = extended_int_val.constShl(shift_rhs);
+ running_int = running_int.constOr(shifted);
+ running_bits += ty_bit_size;
+ }
+ return running_int;
+ }
- const llvm_i8 = dg.context.intType(8);
- const is_pl = !tv.val.isNull(mod);
- const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- return non_null_bit;
+ const llvm_field_count = llvm_struct_ty.countStructElementTypes();
+ var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
+ defer llvm_fields.deinit(gpa);
+
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ var need_unnamed = false;
+
+ var it = struct_obj.runtimeFieldIterator(mod);
+ while (it.next()) |field_and_index| {
+ const field = field_and_index.field;
+ const field_align = field.alignment(mod, struct_obj.layout);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // TODO make this and all other padding elsewhere in debug
+ // builds be 0xaa not undef.
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
+
+ const field_llvm_val = try dg.lowerValue(.{
+ .ty = field.ty,
+ .val = try tv.val.fieldValue(mod, field_and_index.index),
+ });
+
+ need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
+
+ llvm_fields.appendAssumeCapacity(field_llvm_val);
+
+ offset += field.ty.abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
}
- const llvm_ty = try dg.lowerType(tv.ty);
- if (tv.ty.optionalReprIsPayload(mod)) return switch (tv.val.ip_index) {
- .none => if (tv.val.castTag(.opt_payload)) |payload|
- try dg.lowerValue(.{ .ty = payload_ty, .val = payload.data })
- else if (is_pl)
- try dg.lowerValue(.{ .ty = payload_ty, .val = tv.val })
- else
- llvm_ty.constNull(),
- .null_value => llvm_ty.constNull(),
- else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
- .opt => |opt| switch (opt.val) {
- .none => llvm_ty.constNull(),
- else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }),
- },
- else => unreachable,
- },
- };
- assert(payload_ty.zigTypeTag(mod) != .Fn);
- const llvm_field_count = llvm_ty.countStructElementTypes();
- var fields_buf: [3]*llvm.Value = undefined;
- fields_buf[0] = try dg.lowerValue(.{
- .ty = payload_ty,
- .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.undef,
- });
- fields_buf[1] = non_null_bit;
- if (llvm_field_count > 2) {
- assert(llvm_field_count == 3);
- fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef();
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ .False,
+ );
+ } else {
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ );
}
- return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
},
- .Fn => {
- const fn_decl_index = switch (tv.val.tag()) {
- .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl,
- .function => tv.val.castTag(.function).?.data.owner_decl,
- else => unreachable,
- };
- const fn_decl = dg.module.declPtr(fn_decl_index);
- dg.module.markDeclAlive(fn_decl);
- return dg.resolveLlvmFunction(fn_decl_index);
+ .Vector => switch (tv.val.tag()) {
+ .bytes => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const bytes = tv.val.castTag(.bytes).?.data;
+ const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
+ assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
+
+ const elem_ty = tv.ty.childType(mod);
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
+ defer dg.gpa.free(llvm_elems);
+ for (llvm_elems, 0..) |*elem, i| {
+ elem.* = try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = try mod.intValue(elem_ty, bytes[i]),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .str_lit => {
+ // Note, sentinel is not stored
+ const str_lit = tv.val.castTag(.str_lit).?.data;
+ const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+ const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
+ assert(vector_len == bytes.len);
+
+ const elem_ty = tv.ty.childType(mod);
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
+ defer dg.gpa.free(llvm_elems);
+ for (llvm_elems, 0..) |*elem, i| {
+ elem.* = try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = try mod.intValue(elem_ty, bytes[i]),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ else => unreachable,
},
- .ErrorSet => {
+ .Float,
+ .Union,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Int,
+ .Enum,
+ .Bool,
+ .Pointer,
+ => unreachable, // handled below
+ .Frame,
+ .AnyFrame,
+ => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}),
+ .Type,
+ .Void,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .Opaque,
+ .EnumLiteral,
+ .Fn,
+ => unreachable, // comptime-only types
+ };
+
+ switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => {
+ const llvm_type = try dg.lowerType(tv.ty);
+ return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull();
+ },
+ },
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ => unreachable, // non-runtime values
+ .int => |int| {
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = int.storage.toBigInt(&bigint_space);
+ return lowerBigInt(dg, tv.ty, bigint);
+ },
+ .err => |err| {
const llvm_ty = try dg.lowerType(Type.anyerror);
- switch (tv.val.ip_index) {
- .none => switch (tv.val.tag()) {
- .@"error" => {
- const err_name = tv.val.castTag(.@"error").?.data.name;
- const kv = try dg.module.getErrorValue(err_name);
- return llvm_ty.constInt(kv.value, .False);
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return llvm_ty.constNull();
- },
- },
- else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
- .int => |int| return llvm_ty.constInt(int.storage.u64, .False),
- else => unreachable,
- },
- }
+ const name = mod.intern_pool.stringToSlice(err.name);
+ const kv = try mod.getErrorValue(name);
+ return llvm_ty.constInt(kv.value, .False);
},
- .ErrorUnion => {
+ .error_union => |error_union| {
const payload_type = tv.ty.errorUnionPayload(mod);
- const is_pl = tv.val.errorUnionIsPayload();
+ const is_pl = tv.val.errorUnionIsPayload(mod);
if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
@@ -3676,7 +3552,10 @@ pub const DeclGen = struct {
});
const llvm_payload_value = try dg.lowerValue(.{
.ty = payload_type,
- .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.undef,
+ .val = switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_type.ip_index }),
+ .payload => |payload| payload,
+ }.toValue(),
});
var fields_buf: [3]*llvm.Value = undefined;
@@ -3697,172 +3576,396 @@ pub const DeclGen = struct {
return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
}
},
- .Struct => {
- const llvm_struct_ty = try dg.lowerType(tv.ty);
- const gpa = dg.gpa;
+ .enum_tag => {
+ const int_val = try tv.enumToInt(mod);
- const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
- .anon_struct_type => |tuple| {
- var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
- defer llvm_fields.deinit(gpa);
+ var bigint_space: Value.BigIntSpace = undefined;
+ const bigint = int_val.toBigInt(&bigint_space, mod);
- try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+ const int_info = tv.ty.intInfo(mod);
+ const llvm_type = dg.context.intType(int_info.bits);
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 0;
- var need_unnamed = false;
+ const unsigned_val = v: {
+ if (bigint.limbs.len == 1) {
+ break :v llvm_type.constInt(bigint.limbs[0], .False);
+ }
+ if (@sizeOf(usize) == @sizeOf(u64)) {
+ break :v llvm_type.constIntOfArbitraryPrecision(
+ @intCast(c_uint, bigint.limbs.len),
+ bigint.limbs.ptr,
+ );
+ }
+ @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
+ };
+ if (!bigint.positive) {
+ return llvm.constNeg(unsigned_val);
+ }
+ return unsigned_val;
+ },
+ .float => {
+ const llvm_ty = try dg.lowerType(tv.ty);
+ switch (tv.ty.floatBits(target)) {
+ 16 => {
+ const repr = @bitCast(u16, tv.val.toFloat(f16, mod));
+ const llvm_i16 = dg.context.intType(16);
+ const int = llvm_i16.constInt(repr, .False);
+ return int.constBitCast(llvm_ty);
+ },
+ 32 => {
+ const repr = @bitCast(u32, tv.val.toFloat(f32, mod));
+ const llvm_i32 = dg.context.intType(32);
+ const int = llvm_i32.constInt(repr, .False);
+ return int.constBitCast(llvm_ty);
+ },
+ 64 => {
+ const repr = @bitCast(u64, tv.val.toFloat(f64, mod));
+ const llvm_i64 = dg.context.intType(64);
+ const int = llvm_i64.constInt(repr, .False);
+ return int.constBitCast(llvm_ty);
+ },
+ 80 => {
+ const float = tv.val.toFloat(f80, mod);
+ const repr = std.math.break_f80(float);
+ const llvm_i80 = dg.context.intType(80);
+ var x = llvm_i80.constInt(repr.exp, .False);
+ x = x.constShl(llvm_i80.constInt(64, .False));
+ x = x.constOr(llvm_i80.constInt(repr.fraction, .False));
+ if (backendSupportsF80(target)) {
+ return x.constBitCast(llvm_ty);
+ } else {
+ return x;
+ }
+ },
+ 128 => {
+ var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod));
+ // LLVM seems to require that the lower half of the f128 be placed first
+ // in the buffer.
+ if (native_endian == .Big) {
+ std.mem.swap(u64, &buf[0], &buf[1]);
+ }
+ const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
+ return int.constBitCast(llvm_ty);
+ },
+ else => unreachable,
+ }
+ },
+ .ptr => |ptr| {
+ const ptr_tv: TypedValue = switch (ptr.len) {
+ .none => tv,
+ else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) },
+ };
+ const llvm_ptr_val = switch (ptr.addr) {
+ .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl),
+ .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl),
+ .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int),
+ .eu_payload,
+ .opt_payload,
+ .elem,
+ .field,
+ => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0),
+ .comptime_field => unreachable,
+ };
+ switch (ptr.len) {
+ .none => return llvm_ptr_val,
+ else => {
+ const fields: [2]*llvm.Value = .{
+ llvm_ptr_val,
+ try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }),
+ };
+ return dg.context.constStruct(&fields, fields.len, .False);
+ },
+ }
+ },
+ .opt => |opt| {
+ comptime assert(optional_layout_version == 3);
+ const payload_ty = tv.ty.optionalChild(mod);
- for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
- if (field_val != .none) continue;
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ const llvm_i8 = dg.context.intType(8);
+ const non_null_bit = switch (opt.val) {
+ .none => llvm_i8.constNull(),
+ else => llvm_i8.constInt(1, .False),
+ };
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ return non_null_bit;
+ }
+ const llvm_ty = try dg.lowerType(tv.ty);
+ if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) {
+ .none => llvm_ty.constNull(),
+ else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }),
+ };
+ assert(payload_ty.zigTypeTag(mod) != .Fn);
- const field_align = field_ty.toType().abiAlignment(mod);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ const llvm_field_count = llvm_ty.countStructElementTypes();
+ var fields_buf: [3]*llvm.Value = undefined;
+ fields_buf[0] = try dg.lowerValue(.{
+ .ty = payload_ty,
+ .val = switch (opt.val) {
+ .none => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ else => |payload| payload,
+ }.toValue(),
+ });
+ fields_buf[1] = non_null_bit;
+ if (llvm_field_count > 2) {
+ assert(llvm_field_count == 3);
+ fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef();
+ }
+ return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
+ },
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
+ .array_type => switch (aggregate.storage) {
+ .bytes => |bytes| return dg.context.constString(
+ bytes.ptr,
+ @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
+ .True, // Don't null terminate. Bytes has the sentinel, if any.
+ ),
+ .elems => |elem_vals| {
+ const elem_ty = tv.ty.childType(mod);
+ const gpa = dg.gpa;
+ const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len);
+ defer gpa.free(llvm_elems);
+ var need_unnamed = false;
+ for (elem_vals, 0..) |elem_val, i| {
+ llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() });
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
+ }
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ .True,
+ );
+ } else {
+ const llvm_elem_ty = try dg.lowerType(elem_ty);
+ return llvm_elem_ty.constArray(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ }
+ },
+ .repeated_elem => |val| {
+ const elem_ty = tv.ty.childType(mod);
+ const sentinel = tv.ty.sentinel(mod);
+ const len = @intCast(usize, tv.ty.arrayLen(mod));
+ const len_including_sent = len + @boolToInt(sentinel != null);
+ const gpa = dg.gpa;
+ const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
+ defer gpa.free(llvm_elems);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- // TODO make this and all other padding elsewhere in debug
- // builds be 0xaa not undef.
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ var need_unnamed = false;
+ if (len != 0) {
+ for (llvm_elems[0..len]) |*elem| {
+ elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() });
}
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
+ }
- const field_llvm_val = try dg.lowerValue(.{
- .ty = field_ty.toType(),
- .val = try tv.val.fieldValue(field_ty.toType(), mod, i),
- });
+ if (sentinel) |sent| {
+ llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
+ need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
+ }
- need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ .True,
+ );
+ } else {
+ const llvm_elem_ty = try dg.lowerType(elem_ty);
+ return llvm_elem_ty.constArray(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ }
+ },
+ },
+ .vector_type => |vector_type| {
+ const elem_ty = vector_type.child.toType();
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len);
+ defer dg.gpa.free(llvm_elems);
+ for (llvm_elems, 0..) |*llvm_elem, i| {
+ llvm_elem.* = try dg.lowerValue(.{
+ .ty = elem_ty,
+ .val = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[i],
+ .repeated_elem => |elem| elem,
+ }.toValue(),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .struct_type, .anon_struct_type => {
+ const llvm_struct_ty = try dg.lowerType(tv.ty);
+ const gpa = dg.gpa;
- llvm_fields.appendAssumeCapacity(field_llvm_val);
+ const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
+ .anon_struct_type => |tuple| {
+ var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
+ defer llvm_fields.deinit(gpa);
- offset += field_ty.toType().abiSize(mod);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
- }
- }
+ try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ var need_unnamed = false;
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- .False,
- );
- } else {
- return llvm_struct_ty.constNamedStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- );
- }
- },
- .struct_type => |struct_type| struct_type,
- else => unreachable,
- };
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none) continue;
+ if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // TODO make this and all other padding elsewhere in debug
+ // builds be 0xaa not undef.
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ const field_llvm_val = try dg.lowerValue(.{
+ .ty = field_ty.toType(),
+ .val = try tv.val.fieldValue(mod, i),
+ });
- if (struct_obj.layout == .Packed) {
- assert(struct_obj.haveLayout());
- const big_bits = struct_obj.backing_int_ty.bitSize(mod);
- const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
- const fields = struct_obj.fields.values();
- comptime assert(Type.packed_struct_layout_version == 2);
- var running_int: *llvm.Value = int_llvm_ty.constNull();
- var running_bits: u16 = 0;
- for (fields, 0..) |field, i| {
- if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
- const non_int_val = try dg.lowerValue(.{
- .ty = field.ty,
- .val = try tv.val.fieldValue(field.ty, mod, i),
- });
- const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
- const small_int_ty = dg.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime(mod))
- non_int_val.constPtrToInt(small_int_ty)
- else
- non_int_val.constBitCast(small_int_ty);
- const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
- // If the field is as large as the entire packed struct, this
- // zext would go from, e.g. i16 to i16. This is legal with
- // constZExtOrBitCast but not legal with constZExt.
- const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty);
- const shifted = extended_int_val.constShl(shift_rhs);
- running_int = running_int.constOr(shifted);
- running_bits += ty_bit_size;
- }
- return running_int;
- }
+ llvm_fields.appendAssumeCapacity(field_llvm_val);
- const llvm_field_count = llvm_struct_ty.countStructElementTypes();
- var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
- defer llvm_fields.deinit(gpa);
+ offset += field_ty.toType().abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
+ }
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 0;
- var need_unnamed = false;
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ .False,
+ );
+ } else {
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ );
+ }
+ },
+ .struct_type => |struct_type| struct_type,
+ else => unreachable,
+ };
- var it = struct_obj.runtimeFieldIterator(mod);
- while (it.next()) |field_and_index| {
- const field = field_and_index.field;
- const field_align = field.alignment(mod, struct_obj.layout);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- // TODO make this and all other padding elsewhere in debug
- // builds be 0xaa not undef.
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ if (struct_obj.layout == .Packed) {
+ assert(struct_obj.haveLayout());
+ const big_bits = struct_obj.backing_int_ty.bitSize(mod);
+ const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
+ const fields = struct_obj.fields.values();
+ comptime assert(Type.packed_struct_layout_version == 2);
+ var running_int: *llvm.Value = int_llvm_ty.constNull();
+ var running_bits: u16 = 0;
+ for (fields, 0..) |field, i| {
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+
+ const non_int_val = try dg.lowerValue(.{
+ .ty = field.ty,
+ .val = try tv.val.fieldValue(mod, i),
+ });
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const small_int_ty = dg.context.intType(ty_bit_size);
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
+ non_int_val.constPtrToInt(small_int_ty)
+ else
+ non_int_val.constBitCast(small_int_ty);
+ const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
+ // If the field is as large as the entire packed struct, this
+ // zext would go from, e.g. i16 to i16. This is legal with
+ // constZExtOrBitCast but not legal with constZExt.
+ const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty);
+ const shifted = extended_int_val.constShl(shift_rhs);
+ running_int = running_int.constOr(shifted);
+ running_bits += ty_bit_size;
+ }
+ return running_int;
}
- const field_llvm_val = try dg.lowerValue(.{
- .ty = field.ty,
- .val = try tv.val.fieldValue(field.ty, mod, field_and_index.index),
- });
+ const llvm_field_count = llvm_struct_ty.countStructElementTypes();
+ var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
+ defer llvm_fields.deinit(gpa);
- need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ var need_unnamed = false;
+
+ var it = struct_obj.runtimeFieldIterator(mod);
+ while (it.next()) |field_and_index| {
+ const field = field_and_index.field;
+ const field_align = field.alignment(mod, struct_obj.layout);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // TODO make this and all other padding elsewhere in debug
+ // builds be 0xaa not undef.
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
- llvm_fields.appendAssumeCapacity(field_llvm_val);
+ const field_llvm_val = try dg.lowerValue(.{
+ .ty = field.ty,
+ .val = try tv.val.fieldValue(mod, field_and_index.index),
+ });
- offset += field.ty.abiSize(mod);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
+
+ llvm_fields.appendAssumeCapacity(field_llvm_val);
+
+ offset += field.ty.abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
}
- }
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- .False,
- );
- } else {
- return llvm_struct_ty.constNamedStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- );
- }
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ .False,
+ );
+ } else {
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ );
+ }
+ },
+ else => unreachable,
},
- .Union => {
+ .un => {
const llvm_union_ty = try dg.lowerType(tv.ty);
const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) {
.none => tv.val.castTag(.@"union").?.data,
@@ -3950,96 +4053,6 @@ pub const DeclGen = struct {
return llvm_union_ty.constNamedStruct(&fields, fields_len);
}
},
- .Vector => switch (tv.val.tag()) {
- .bytes => {
- // Note, sentinel is not stored even if the type has a sentinel.
- const bytes = tv.val.castTag(.bytes).?.data;
- const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
- assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
-
- const elem_ty = tv.ty.childType(mod);
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems, 0..) |*elem, i| {
- elem.* = try dg.lowerValue(.{
- .ty = elem_ty,
- .val = try mod.intValue(elem_ty, bytes[i]),
- });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- .aggregate => {
- // Note, sentinel is not stored even if the type has a sentinel.
- // The value includes the sentinel in those cases.
- const elem_vals = tv.val.castTag(.aggregate).?.data;
- const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
- assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
- const elem_ty = tv.ty.childType(mod);
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems, 0..) |*elem, i| {
- elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- .repeated => {
- // Note, sentinel is not stored even if the type has a sentinel.
- const val = tv.val.castTag(.repeated).?.data;
- const elem_ty = tv.ty.childType(mod);
- const len = @intCast(usize, tv.ty.arrayLen(mod));
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem| {
- elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- .str_lit => {
- // Note, sentinel is not stored
- const str_lit = tv.val.castTag(.str_lit).?.data;
- const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
- assert(vector_len == bytes.len);
-
- const elem_ty = tv.ty.childType(mod);
- const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
- defer dg.gpa.free(llvm_elems);
- for (llvm_elems, 0..) |*elem, i| {
- elem.* = try dg.lowerValue(.{
- .ty = elem_ty,
- .val = try mod.intValue(elem_ty, bytes[i]),
- });
- }
- return llvm.constVector(
- llvm_elems.ptr,
- @intCast(c_uint, llvm_elems.len),
- );
- },
- else => unreachable,
- },
-
- .ComptimeInt => unreachable,
- .ComptimeFloat => unreachable,
- .Type => unreachable,
- .EnumLiteral => unreachable,
- .Void => unreachable,
- .NoReturn => unreachable,
- .Undefined => unreachable,
- .Null => unreachable,
- .Opaque => unreachable,
-
- .Frame,
- .AnyFrame,
- => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}),
}
}
@@ -4094,10 +4107,9 @@ pub const DeclGen = struct {
fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
const mod = dg.module;
const target = mod.getTarget();
- if (ptr_val.ip_index != .none) return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
.int => |int| dg.lowerIntAsPtr(int),
.ptr => |ptr| switch (ptr.addr) {
- .@"var" => |@"var"| dg.lowerParentPtrDecl(ptr_val, @"var".owner_decl),
.decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl),
.mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl),
.int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int),
@@ -4150,7 +4162,7 @@ pub const DeclGen = struct {
const indices: [1]*llvm.Value = .{
llvm_usize.constInt(elem_ptr.index, .False),
};
- const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().childType(mod));
+ const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().elemType2(mod));
return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
},
.field => |field_ptr| {
@@ -4185,7 +4197,7 @@ pub const DeclGen = struct {
.Struct => {
if (parent_ty.containerLayout(mod) == .Packed) {
if (!byte_aligned) return parent_llvm_ptr;
- const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth());
+ const llvm_usize = dg.context.intType(target.ptrBitWidth());
const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
// count bits of fields before this one
const prev_bits = b: {
@@ -4230,148 +4242,6 @@ pub const DeclGen = struct {
},
else => unreachable,
};
- switch (ptr_val.tag()) {
- .decl_ref_mut => {
- const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
- return dg.lowerParentPtrDecl(ptr_val, decl);
- },
- .decl_ref => {
- const decl = ptr_val.castTag(.decl_ref).?.data;
- return dg.lowerParentPtrDecl(ptr_val, decl);
- },
- .variable => {
- const decl = ptr_val.castTag(.variable).?.data.owner_decl;
- return dg.lowerParentPtrDecl(ptr_val, decl);
- },
- .field_ptr => {
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned);
- const parent_ty = field_ptr.container_ty;
-
- const field_index = @intCast(u32, field_ptr.field_index);
- const llvm_u32 = dg.context.intType(32);
- switch (parent_ty.zigTypeTag(mod)) {
- .Union => {
- if (parent_ty.containerLayout(mod) == .Packed) {
- return parent_llvm_ptr;
- }
-
- const layout = parent_ty.unionGetLayout(mod);
- if (layout.payload_size == 0) {
- // In this case a pointer to the union and a pointer to any
- // (void) payload is the same.
- return parent_llvm_ptr;
- }
- const llvm_pl_index = if (layout.tag_size == 0)
- 0
- else
- @boolToInt(layout.tag_align >= layout.payload_align);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(llvm_pl_index, .False),
- };
- const parent_llvm_ty = try dg.lowerType(parent_ty);
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- .Struct => {
- if (parent_ty.containerLayout(mod) == .Packed) {
- if (!byte_aligned) return parent_llvm_ptr;
- const llvm_usize = dg.context.intType(target.ptrBitWidth());
- const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize);
- // count bits of fields before this one
- const prev_bits = b: {
- var b: usize = 0;
- for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- b += @intCast(usize, field.ty.bitSize(mod));
- }
- break :b b;
- };
- const byte_offset = llvm_usize.constInt(prev_bits / 8, .False);
- const field_addr = base_addr.constAdd(byte_offset);
- const final_llvm_ty = dg.context.pointerType(0);
- return field_addr.constIntToPtr(final_llvm_ty);
- }
-
- const parent_llvm_ty = try dg.lowerType(parent_ty);
- if (llvmField(parent_ty, field_index, mod)) |llvm_field| {
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(llvm_field.index, .False),
- };
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- } else {
- const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
- const indices: [1]*llvm.Value = .{llvm_index};
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- }
- },
- .Pointer => {
- assert(parent_ty.isSlice(mod));
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(field_index, .False),
- };
- const parent_llvm_ty = try dg.lowerType(parent_ty);
- return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- else => unreachable,
- }
- },
- .elem_ptr => {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, true);
-
- const llvm_usize = try dg.lowerType(Type.usize);
- const indices: [1]*llvm.Value = .{
- llvm_usize.constInt(elem_ptr.index, .False),
- };
- const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty);
- return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- .opt_payload_ptr => {
- const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true);
-
- const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
- payload_ty.optionalReprIsPayload(mod))
- {
- // In this case, we represent pointer to optional the same as pointer
- // to the payload.
- return parent_llvm_ptr;
- }
-
- const llvm_u32 = dg.context.intType(32);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(0, .False),
- };
- const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty);
- return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- .eu_payload_ptr => {
- const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
- const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true);
-
- const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- // In this case, we represent pointer to error union the same as pointer
- // to the payload.
- return parent_llvm_ptr;
- }
-
- const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
- const llvm_u32 = dg.context.intType(32);
- const indices: [2]*llvm.Value = .{
- llvm_u32.constInt(0, .False),
- llvm_u32.constInt(payload_offset, .False),
- };
- const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty);
- return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
- },
- else => unreachable,
- }
}
fn lowerDeclRefValue(
@@ -4380,20 +4250,6 @@ pub const DeclGen = struct {
decl_index: Module.Decl.Index,
) Error!*llvm.Value {
const mod = self.module;
- if (tv.ty.isSlice(mod)) {
- const ptr_ty = tv.ty.slicePtrFieldType(mod);
- const fields: [2]*llvm.Value = .{
- try self.lowerValue(.{
- .ty = ptr_ty,
- .val = tv.val,
- }),
- try self.lowerValue(.{
- .ty = Type.usize,
- .val = try mod.intValue(Type.usize, tv.val.sliceLen(mod)),
- }),
- };
- return self.context.constStruct(&fields, fields.len, .False);
- }
// In the case of something like:
// fn foo() void {}
@@ -4401,13 +4257,13 @@ pub const DeclGen = struct {
// ... &bar;
// `bar` is just an alias and we actually want to lower a reference to `foo`.
const decl = mod.declPtr(decl_index);
- if (decl.val.castTag(.function)) |func| {
- if (func.data.owner_decl != decl_index) {
- return self.lowerDeclRefValue(tv, func.data.owner_decl);
+ if (decl.getFunction(mod)) |func| {
+ if (func.owner_decl != decl_index) {
+ return self.lowerDeclRefValue(tv, func.owner_decl);
}
- } else if (decl.val.castTag(.extern_fn)) |func| {
- if (func.data.owner_decl != decl_index) {
- return self.lowerDeclRefValue(tv, func.data.owner_decl);
+ } else if (decl.getExternFunc(mod)) |func| {
+ if (func.decl != decl_index) {
+ return self.lowerDeclRefValue(tv, func.decl);
}
}
@@ -6333,11 +6189,11 @@ pub const FuncGen = struct {
}
fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const mod = self.dg.module;
const dib = self.dg.object.di_builder orelse return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
+ const mod = self.dg.module;
+ const func = self.air.values[ty_pl.payload].getFunction(mod).?;
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
@@ -6395,8 +6251,8 @@ pub const FuncGen = struct {
if (self.dg.object.di_builder == null) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
const mod = self.dg.module;
+ const func = self.air.values[ty_pl.payload].getFunction(mod).?;
const decl = mod.declPtr(func.owner_decl);
const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope);
self.di_file = di_file;
@@ -8349,7 +8205,7 @@ pub const FuncGen = struct {
}
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
- const func = self.dg.decl.getFunction().?;
+ const func = self.dg.decl.getFunction(mod).?;
const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const di_local_var = dib.createParameterVariable(
@@ -9147,7 +9003,7 @@ pub const FuncGen = struct {
defer self.gpa.free(fqn);
const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
- const slice_ty = Type.const_slice_u8_sentinel_0;
+ const slice_ty = Type.slice_const_u8_sentinel_0;
const llvm_ret_ty = try self.dg.lowerType(slice_ty);
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const slice_alignment = slice_ty.abiAlignment(mod);
@@ -9861,7 +9717,7 @@ pub const FuncGen = struct {
}
const mod = self.dg.module;
- const slice_ty = Type.const_slice_u8_sentinel_0;
+ const slice_ty = Type.slice_const_u8_sentinel_0;
const slice_alignment = slice_ty.abiAlignment(mod);
const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space
src/codegen/spirv.zig
@@ -236,9 +236,9 @@ pub const DeclGen = struct {
if (try self.air.value(inst, mod)) |val| {
const ty = self.typeOf(inst);
if (ty.zigTypeTag(mod) == .Fn) {
- const fn_decl_index = switch (val.tag()) {
- .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
- .function => val.castTag(.function).?.data.owner_decl,
+ const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .extern_func => |extern_func| extern_func.decl,
+ .func => |func| mod.funcPtr(func.index).owner_decl,
else => unreachable,
};
const spv_decl_index = try self.resolveDecl(fn_decl_index);
@@ -261,7 +261,7 @@ pub const DeclGen = struct {
const entry = try self.decl_link.getOrPut(decl_index);
if (!entry.found_existing) {
// TODO: Extern fn?
- const kind: SpvModule.DeclKind = if (decl.val.tag() == .function)
+ const kind: SpvModule.DeclKind = if (decl.getFunctionIndex(self.module) != .none)
.func
else
.global;
@@ -573,6 +573,7 @@ pub const DeclGen = struct {
fn addDeclRef(self: *@This(), ty: Type, decl_index: Decl.Index) !void {
const dg = self.dg;
+ const mod = dg.module;
const ty_ref = try self.dg.resolveType(ty, .indirect);
const ty_id = dg.typeId(ty_ref);
@@ -580,8 +581,8 @@ pub const DeclGen = struct {
const decl = dg.module.declPtr(decl_index);
const spv_decl_index = try dg.resolveDecl(decl_index);
- switch (decl.val.tag()) {
- .function => {
+ switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+ .func => {
// TODO: Properly lower function pointers. For now we are going to hack around it and
// just generate an empty pointer. Function pointers are represented by usize for now,
// though.
@@ -589,7 +590,7 @@ pub const DeclGen = struct {
// TODO: Add dependency
return;
},
- .extern_fn => unreachable, // TODO
+ .extern_func => unreachable, // TODO
else => {
const result_id = dg.spv.allocId();
log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name });
@@ -610,39 +611,23 @@ pub const DeclGen = struct {
}
}
- fn lower(self: *@This(), ty: Type, val: Value) !void {
+ fn lower(self: *@This(), ty: Type, arg_val: Value) !void {
const dg = self.dg;
const mod = dg.module;
- if (val.isUndef(mod)) {
+ var val = arg_val;
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .runtime_value => |rt| val = rt.val.toValue(),
+ else => {},
+ }
+
+ if (val.isUndefDeep(mod)) {
const size = ty.abiSize(mod);
return try self.addUndef(size);
}
- switch (ty.zigTypeTag(mod)) {
- .Int => try self.addInt(ty, val),
- .Float => try self.addFloat(ty, val),
- .Bool => try self.addConstBool(val.toBool(mod)),
+ if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) {
.Array => switch (val.tag()) {
- .aggregate => {
- const elem_vals = val.castTag(.aggregate).?.data;
- const elem_ty = ty.childType(mod);
- const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way.
- for (elem_vals[0..len]) |elem_val| {
- try self.lower(elem_ty, elem_val);
- }
- },
- .repeated => {
- const elem_val = val.castTag(.repeated).?.data;
- const elem_ty = ty.childType(mod);
- const len = @intCast(u32, ty.arrayLen(mod));
- for (0..len) |_| {
- try self.lower(elem_ty, elem_val);
- }
- if (ty.sentinel(mod)) |sentinel| {
- try self.lower(elem_ty, sentinel);
- }
- },
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
@@ -657,29 +642,6 @@ pub const DeclGen = struct {
},
else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}),
},
- .Pointer => switch (val.tag()) {
- .decl_ref_mut => {
- const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
- try self.addDeclRef(ty, decl_index);
- },
- .decl_ref => {
- const decl_index = val.castTag(.decl_ref).?.data;
- try self.addDeclRef(ty, decl_index);
- },
- .slice => {
- const slice = val.castTag(.slice).?.data;
-
- const ptr_ty = ty.slicePtrFieldType(mod);
-
- try self.lower(ptr_ty, slice.ptr);
- try self.addInt(Type.usize, slice.len);
- },
- .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)),
- .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
- try self.addInt(Type.usize, val);
- },
- else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}),
- },
.Struct => {
if (ty.isSimpleTupleOrAnonStruct(mod)) {
unreachable; // TODO
@@ -705,20 +667,134 @@ pub const DeclGen = struct {
}
}
},
- .Optional => {
+ .Vector,
+ .Frame,
+ .AnyFrame,
+ => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}),
+ .Float,
+ .Union,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Int,
+ .Enum,
+ .Bool,
+ .Pointer,
+ => unreachable, // handled below
+ .Type,
+ .Void,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .Opaque,
+ .EnumLiteral,
+ .Fn,
+ => unreachable, // comptime-only types
+ };
+
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => try self.addConstBool(val.toBool(mod)),
+ },
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ => unreachable, // non-runtime values
+ .int => try self.addInt(ty, val),
+ .err => |err| {
+ const name = mod.intern_pool.stringToSlice(err.name);
+ const kv = try mod.getErrorValue(name);
+ try self.addConstInt(u16, @intCast(u16, kv.value));
+ },
+ .error_union => |error_union| {
+ const payload_ty = ty.errorUnionPayload(mod);
+ const is_pl = val.errorUnionIsPayload(mod);
+ const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0);
+
+ const eu_layout = dg.errorUnionLayout(payload_ty);
+ if (!eu_layout.payload_has_bits) {
+ return try self.lower(Type.anyerror, error_val);
+ }
+
+ const payload_size = payload_ty.abiSize(mod);
+ const error_size = Type.anyerror.abiAlignment(mod);
+ const ty_size = ty.abiSize(mod);
+ const padding = ty_size - payload_size - error_size;
+
+ const payload_val = switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ .payload => |payload| payload,
+ }.toValue();
+
+ if (eu_layout.error_first) {
+ try self.lower(Type.anyerror, error_val);
+ try self.lower(payload_ty, payload_val);
+ } else {
+ try self.lower(payload_ty, payload_val);
+ try self.lower(Type.anyerror, error_val);
+ }
+
+ try self.addUndef(padding);
+ },
+ .enum_tag => {
+ const int_val = try val.enumToInt(ty, mod);
+
+ const int_ty = try ty.intTagType(mod);
+
+ try self.lower(int_ty, int_val);
+ },
+ .float => try self.addFloat(ty, val),
+ .ptr => |ptr| {
+ switch (ptr.addr) {
+ .decl => |decl| try self.addDeclRef(ty, decl),
+ .mut_decl => |mut_decl| try self.addDeclRef(ty, mut_decl.decl),
+ else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}),
+ }
+ if (ptr.len != .none) {
+ try self.addInt(Type.usize, ptr.len.toValue());
+ }
+ },
+ .opt => {
const payload_ty = ty.optionalChild(mod);
- const has_payload = !val.isNull(mod);
+ const payload_val = val.optionalValue(mod);
const abi_size = ty.abiSize(mod);
if (!payload_ty.hasRuntimeBits(mod)) {
- try self.addConstBool(has_payload);
+ try self.addConstBool(payload_val != null);
return;
} else if (ty.optionalReprIsPayload(mod)) {
// Optional representation is a nullable pointer or slice.
- if (val.castTag(.opt_payload)) |payload| {
- try self.lower(payload_ty, payload.data);
- } else if (has_payload) {
- try self.lower(payload_ty, val);
+ if (payload_val) |pl_val| {
+ try self.lower(payload_ty, pl_val);
} else {
const ptr_ty_ref = try dg.resolveType(ty, .indirect);
try self.addNullPtr(ptr_ty_ref);
@@ -734,27 +810,63 @@ pub const DeclGen = struct {
const payload_size = payload_ty.abiSize(mod);
const padding = abi_size - payload_size - 1;
- if (val.castTag(.opt_payload)) |payload| {
- try self.lower(payload_ty, payload.data);
+ if (payload_val) |pl_val| {
+ try self.lower(payload_ty, pl_val);
} else {
try self.addUndef(payload_size);
}
- try self.addConstBool(has_payload);
+ try self.addConstBool(payload_val != null);
try self.addUndef(padding);
},
- .Enum => {
- const int_val = try val.enumToInt(ty, mod);
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .array_type => |array_type| {
+ const elem_ty = array_type.child.toType();
+ switch (aggregate.storage) {
+ .bytes => |bytes| try self.addBytes(bytes),
+ .elems, .repeated_elem => {
+ for (0..array_type.len) |i| {
+ try self.lower(elem_ty, switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(),
+ .repeated_elem => |elem_val| elem_val.toValue(),
+ });
+ }
+ },
+ }
+ if (array_type.sentinel != .none) {
+ try self.lower(elem_ty, array_type.sentinel.toValue());
+ }
+ },
+ .vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}),
+ .struct_type => {
+ const struct_ty = mod.typeToStruct(ty).?;
- const int_ty = try ty.intTagType(mod);
+ if (struct_ty.layout == .Packed) {
+ return dg.todo("packed struct constants", .{});
+ }
- try self.lower(int_ty, int_val);
+ const struct_begin = self.size;
+ const field_vals = val.castTag(.aggregate).?.data;
+ for (struct_ty.fields.values(), 0..) |field, i| {
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
+ try self.lower(field.ty, field_vals[i]);
+
+ // Add padding if required.
+ // TODO: Add to type generation as well?
+ const unpadded_field_end = self.size - struct_begin;
+ const padded_field_end = ty.structFieldOffset(i + 1, mod);
+ const padding = padded_field_end - unpadded_field_end;
+ try self.addUndef(padding);
+ }
+ },
+ .anon_struct_type => unreachable, // TODO
+ else => unreachable,
},
- .Union => {
- const tag_and_val = val.castTag(.@"union").?.data;
+ .un => |un| {
const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
- return try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag);
+ return try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
}
const union_ty = mod.typeToUnion(ty).?;
@@ -762,18 +874,18 @@ pub const DeclGen = struct {
return dg.todo("packed union constants", .{});
}
- const active_field = ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?;
+ const active_field = ty.unionTagFieldIndex(un.tag.toValue(), dg.module).?;
const active_field_ty = union_ty.fields.values()[active_field].ty;
const has_tag = layout.tag_size != 0;
const tag_first = layout.tag_align >= layout.payload_align;
if (has_tag and tag_first) {
- try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag);
+ try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
}
const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
- try self.lower(active_field_ty, tag_and_val.val);
+ try self.lower(active_field_ty, un.val.toValue());
break :blk active_field_ty.abiSize(mod);
} else 0;
@@ -781,53 +893,11 @@ pub const DeclGen = struct {
try self.addUndef(payload_padding_len);
if (has_tag and !tag_first) {
- try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag);
+ try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
}
try self.addUndef(layout.padding);
},
- .ErrorSet => switch (val.ip_index) {
- .none => switch (val.tag()) {
- .@"error" => {
- const err_name = val.castTag(.@"error").?.data.name;
- const kv = try dg.module.getErrorValue(err_name);
- try self.addConstInt(u16, @intCast(u16, kv.value));
- },
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| try self.addConstInt(u16, @intCast(u16, int.storage.u64)),
- else => unreachable,
- },
- },
- .ErrorUnion => {
- const payload_ty = ty.errorUnionPayload(mod);
- const is_pl = val.errorUnionIsPayload();
- const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0);
-
- const eu_layout = dg.errorUnionLayout(payload_ty);
- if (!eu_layout.payload_has_bits) {
- return try self.lower(Type.anyerror, error_val);
- }
-
- const payload_size = payload_ty.abiSize(mod);
- const error_size = Type.anyerror.abiAlignment(mod);
- const ty_size = ty.abiSize(mod);
- const padding = ty_size - payload_size - error_size;
-
- const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
-
- if (eu_layout.error_first) {
- try self.lower(Type.anyerror, error_val);
- try self.lower(payload_ty, payload_val);
- } else {
- try self.lower(payload_ty, payload_val);
- try self.lower(Type.anyerror, error_val);
- }
-
- try self.addUndef(padding);
- },
- else => |tag| return dg.todo("indirect constant of type {s}", .{@tagName(tag)}),
}
}
};
@@ -1542,7 +1612,7 @@ pub const DeclGen = struct {
const decl_id = self.spv.declPtr(spv_decl_index).result_id;
log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name });
- if (decl.val.castTag(.function)) |_| {
+ if (decl.getFunction(mod)) |_| {
assert(decl.ty.zigTypeTag(mod) == .Fn);
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
@@ -1595,8 +1665,8 @@ pub const DeclGen = struct {
try self.generateTestEntryPoint(fqn, spv_decl_index);
}
} else {
- const init_val = if (decl.val.castTag(.variable)) |payload|
- payload.data.init
+ const init_val = if (decl.getVariable(mod)) |payload|
+ payload.init.toValue()
else
decl.val;
src/link/C.zig
@@ -87,12 +87,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void {
}
}
-pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
+ const func = module.funcPtr(func_index);
const decl_index = func.owner_decl;
const gop = try self.decl_table.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
@@ -111,7 +112,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
.value_map = codegen.CValueMap.init(gpa),
.air = air,
.liveness = liveness,
- .func = func,
+ .func_index = func_index,
.object = .{
.dg = .{
.gpa = gpa,
@@ -555,7 +556,8 @@ fn flushDecl(
export_names: std.StringHashMapUnmanaged(void),
) FlushDeclError!void {
const gpa = self.base.allocator;
- const decl = self.base.options.module.?.declPtr(decl_index);
+ const mod = self.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
// Before flushing any particular Decl we must ensure its
// dependencies are already flushed, so that the order in the .c
// file comes out correctly.
@@ -569,7 +571,7 @@ fn flushDecl(
try self.flushLazyFns(f, decl_block.lazy_fns);
try f.all_buffers.ensureUnusedCapacity(gpa, 1);
- if (!(decl.isExtern() and export_names.contains(mem.span(decl.name))))
+ if (!(decl.isExtern(mod) and export_names.contains(mem.span(decl.name))))
f.appendBufAssumeCapacity(decl_block.fwd_decl.items);
}
src/link/Coff.zig
@@ -1032,18 +1032,19 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
self.getAtomPtr(atom_index).sym_index = 0;
}
-pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| {
- return llvm_object.updateFunc(mod, func, air, liveness);
+ return llvm_object.updateFunc(mod, func_index, air, liveness);
}
}
const tracy = trace(@src());
defer tracy.end();
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -1057,7 +1058,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, livenes
const res = try codegen.generateFunction(
&self.base,
decl.srcLoc(mod),
- func,
+ func_index,
air,
liveness,
&code_buffer,
@@ -1155,11 +1156,10 @@ pub fn updateDecl(
const decl = mod.declPtr(decl_index);
- if (decl.val.tag() == .extern_fn) {
+ if (decl.getExternFunc(mod)) |_| {
return; // TODO Should we do more when front-end analyzed extern decl?
}
- if (decl.val.castTag(.variable)) |payload| {
- const variable = payload.data;
+ if (decl.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@@ -1172,7 +1172,7 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+ const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
@@ -1313,7 +1313,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
// TODO: what if this is a function pointer?
.Fn => break :blk self.text_section_index.?,
else => {
- if (val.castTag(.variable)) |_| {
+ if (decl.getVariable(mod)) |_| {
break :blk self.data_section_index.?;
}
break :blk self.rdata_section_index.?;
@@ -1425,7 +1425,7 @@ pub fn updateDeclExports(
// detect the default subsystem.
for (exports) |exp| {
const exported_decl = mod.declPtr(exp.exported_decl);
- if (exported_decl.getFunction() == null) continue;
+ if (exported_decl.getFunctionIndex(mod) == .none) continue;
const winapi_cc = switch (self.base.options.target.cpu.arch) {
.x86 => std.builtin.CallingConvention.Stdcall,
else => std.builtin.CallingConvention.C,
src/link/Dwarf.zig
@@ -971,7 +971,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureTotalCapacity(26);
- const func = decl.val.castTag(.function).?.data;
+ const func = decl.getFunction(mod).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
func.lbrace_line,
@@ -1514,7 +1514,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons
}
}
-pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1522,8 +1522,8 @@ pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.De
const atom = self.getAtom(.src_fn, atom_index);
if (atom.len == 0) return;
- const decl = module.declPtr(decl_index);
- const func = decl.val.castTag(.function).?.data;
+ const decl = mod.declPtr(decl_index);
+ const func = decl.getFunction(mod).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
func.lbrace_line,
src/link/Elf.zig
@@ -2465,7 +2465,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
// TODO: what if this is a function pointer?
.Fn => break :blk self.text_section_index.?,
else => {
- if (val.castTag(.variable)) |_| {
+ if (decl.getVariable(mod)) |_| {
break :blk self.data_section_index.?;
}
break :blk self.rodata_section_index.?;
@@ -2574,17 +2574,18 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
return local_sym;
}
-pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
- if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -2599,11 +2600,11 @@ pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness
defer if (decl_state) |*ds| ds.deinit();
const res = if (decl_state) |*ds|
- try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{
+ try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{
.dwarf = ds,
})
else
- try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none);
+ try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none);
const code = switch (res) {
.ok => code_buffer.items,
@@ -2646,11 +2647,10 @@ pub fn updateDecl(
const decl = mod.declPtr(decl_index);
- if (decl.val.tag() == .extern_fn) {
+ if (decl.getExternFunc(mod)) |_| {
return; // TODO Should we do more when front-end analyzed extern decl?
}
- if (decl.val.castTag(.variable)) |payload| {
- const variable = payload.data;
+ if (decl.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@@ -2667,7 +2667,7 @@ pub fn updateDecl(
defer if (decl_state) |*ds| ds.deinit();
// TODO implement .debug_info for global variables
- const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+ const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
src/link/MachO.zig
@@ -1847,16 +1847,17 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void {
self.markRelocsDirtyByTarget(target);
}
-pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
- if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness);
+ if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -1874,11 +1875,11 @@ pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, livene
defer if (decl_state) |*ds| ds.deinit();
const res = if (decl_state) |*ds|
- try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{
+ try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{
.dwarf = ds,
})
else
- try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none);
+ try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none);
var code = switch (res) {
.ok => code_buffer.items,
@@ -1983,18 +1984,17 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo
const decl = mod.declPtr(decl_index);
- if (decl.val.tag() == .extern_fn) {
+ if (decl.getExternFunc(mod)) |_| {
return; // TODO Should we do more when front-end analyzed extern decl?
}
- if (decl.val.castTag(.variable)) |payload| {
- const variable = payload.data;
+ if (decl.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
}
- const is_threadlocal = if (decl.val.castTag(.variable)) |payload|
- payload.data.is_threadlocal and !self.base.options.single_threaded
+ const is_threadlocal = if (decl.getVariable(mod)) |variable|
+ variable.is_threadlocal and !self.base.options.single_threaded
else
false;
if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index);
@@ -2012,7 +2012,7 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo
null;
defer if (decl_state) |*ds| ds.deinit();
- const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+ const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
@@ -2177,7 +2177,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D
const decl = module.declPtr(decl_index);
const decl_metadata = self.decls.get(decl_index).?;
- const decl_val = decl.val.castTag(.variable).?.data.init;
+ const decl_val = decl.getVariable(mod).?.init.toValue();
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
@@ -2278,8 +2278,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
}
}
- if (val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal and !single_threaded) {
+ if (decl.getVariable(mod)) |variable| {
+ if (variable.is_threadlocal and !single_threaded) {
break :blk self.thread_data_section_index.?;
}
break :blk self.data_section_index.?;
@@ -2289,7 +2289,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
// TODO: what if this is a function pointer?
.Fn => break :blk self.text_section_index.?,
else => {
- if (val.castTag(.variable)) |_| {
+ if (decl.getVariable(mod)) |_| {
break :blk self.data_section_index.?;
}
break :blk self.data_const_section_index.?;
src/link/NvPtx.zig
@@ -68,9 +68,9 @@ pub fn deinit(self: *NvPtx) void {
self.base.allocator.free(self.ptx_file_name);
}
-pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (!build_options.have_llvm) return;
- try self.llvm_object.updateFunc(module, func, air, liveness);
+ try self.llvm_object.updateFunc(module, func_index, air, liveness);
}
pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void {
src/link/Plan9.zig
@@ -276,11 +276,12 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
}
}
-pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
@@ -299,7 +300,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, air: Air, livene
const res = try codegen.generateFunction(
&self.base,
decl.srcLoc(mod),
- func,
+ func_index,
air,
liveness,
&code_buffer,
@@ -391,11 +392,10 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
const decl = mod.declPtr(decl_index);
- if (decl.val.tag() == .extern_fn) {
+ if (decl.getExternFunc(mod)) |_| {
return; // TODO Should we do more when front-end analyzed extern decl?
}
- if (decl.val.castTag(.variable)) |payload| {
- const variable = payload.data;
+ if (decl.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@@ -407,7 +407,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+ const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
@@ -771,7 +771,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
// in the deleteUnusedDecl function.
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const is_fn = (decl.val.tag() == .function);
+ const is_fn = decl.getFunctionIndex(mod) != .none;
if (is_fn) {
var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?;
var submap = symidx_and_submap.functions;
src/link/SpirV.zig
@@ -103,11 +103,13 @@ pub fn deinit(self: *SpirV) void {
self.decl_link.deinit();
}
-pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
+ const func = module.funcPtr(func_index);
+
var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link);
defer decl_gen.deinit();
@@ -136,7 +138,7 @@ pub fn updateDeclExports(
exports: []const *Module.Export,
) !void {
const decl = mod.declPtr(decl_index);
- if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) {
+ if (decl.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) {
// TODO: Unify with resolveDecl in spirv.zig.
const entry = try self.decl_link.getOrPut(decl_index);
if (!entry.found_existing) {
src/link/Wasm.zig
@@ -1324,17 +1324,18 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 {
return index;
}
-pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
- if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness);
+ if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
@@ -1358,7 +1359,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
const result = try codegen.generateFunction(
&wasm.base,
decl.srcLoc(mod),
- func,
+ func_index,
air,
liveness,
&code_writer,
@@ -1403,9 +1404,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
defer tracy.end();
const decl = mod.declPtr(decl_index);
- if (decl.val.castTag(.function)) |_| {
+ if (decl.getFunction(mod)) |_| {
return;
- } else if (decl.val.castTag(.extern_fn)) |_| {
+ } else if (decl.getExternFunc(mod)) |_| {
return;
}
@@ -1413,12 +1414,13 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
const atom = wasm.getAtomPtr(atom_index);
atom.clear();
- if (decl.isExtern()) {
- const variable = decl.getVariable().?;
+ if (decl.isExtern(mod)) {
+ const variable = decl.getVariable(mod).?;
const name = mem.sliceTo(decl.name, 0);
- return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null);
+ const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name);
+ return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null);
}
- const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
+ const val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
defer code_writer.deinit();
@@ -1791,7 +1793,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
assert(wasm.symbol_atom.remove(local_atom.symbolLoc()));
}
- if (decl.isExtern()) {
+ if (decl.isExtern(mod)) {
_ = wasm.imports.remove(atom.symbolLoc());
}
_ = wasm.resolved_symbols.swapRemove(atom.symbolLoc());
@@ -1852,7 +1854,7 @@ pub fn addOrUpdateImport(
/// Symbol index that is external
symbol_index: u32,
/// Optional library name (i.e. `extern "c" fn foo() void`
- lib_name: ?[*:0]const u8,
+ lib_name: ?[:0]const u8,
/// The index of the type that represents the function signature
/// when the extern is a function. When this is null, a data-symbol
/// is asserted instead.
@@ -1863,7 +1865,7 @@ pub fn addOrUpdateImport(
// Also mangle the name when the lib name is set and not equal to "C" so imports with the same
// name but different module can be resolved correctly.
const mangle_name = lib_name != null and
- !std.mem.eql(u8, std.mem.sliceTo(lib_name.?, 0), "c");
+ !std.mem.eql(u8, lib_name.?, "c");
const full_name = if (mangle_name) full_name: {
break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? });
} else name;
@@ -1889,7 +1891,7 @@ pub fn addOrUpdateImport(
if (type_index) |ty_index| {
const gop = try wasm.imports.getOrPut(wasm.base.allocator, .{ .index = symbol_index, .file = null });
const module_name = if (lib_name) |l_name| blk: {
- break :blk mem.sliceTo(l_name, 0);
+ break :blk l_name;
} else wasm.host_name;
if (!gop.found_existing) {
gop.value_ptr.* = .{
@@ -2931,7 +2933,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
const atom_index = try wasm.createAtom();
const atom = wasm.getAtomPtr(atom_index);
- const slice_ty = Type.const_slice_u8_sentinel_0;
+ const slice_ty = Type.slice_const_u8_sentinel_0;
const mod = wasm.base.options.module.?;
atom.alignment = slice_ty.abiAlignment(mod);
const sym_index = atom.sym_index;
@@ -2988,7 +2990,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
for (mod.error_name_list.items) |error_name| {
const len = @intCast(u32, error_name.len + 1); // names are 0-termianted
- const slice_ty = Type.const_slice_u8_sentinel_0;
+ const slice_ty = Type.slice_const_u8_sentinel_0;
const offset = @intCast(u32, atom.code.items.len);
// first we create the data for the slice of the name
try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated
@@ -3366,15 +3368,15 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
var decl_it = wasm.decls.iterator();
while (decl_it.next()) |entry| {
const decl = mod.declPtr(entry.key_ptr.*);
- if (decl.isExtern()) continue;
+ if (decl.isExtern(mod)) continue;
const atom_index = entry.value_ptr.*;
const atom = wasm.getAtomPtr(atom_index);
if (decl.ty.zigTypeTag(mod) == .Fn) {
try wasm.parseAtom(atom_index, .function);
- } else if (decl.getVariable()) |variable| {
- if (!variable.is_mutable) {
+ } else if (decl.getVariable(mod)) |variable| {
+ if (variable.is_const) {
try wasm.parseAtom(atom_index, .{ .data = .read_only });
- } else if (variable.init.isUndefDeep(mod)) {
+ } else if (variable.init.toValue().isUndefDeep(mod)) {
// for safe build modes, we store the atom in the data segment,
// whereas for unsafe build modes we store it in bss.
const is_initialized = wasm.base.options.optimize_mode == .Debug or
src/Air.zig
@@ -901,8 +901,8 @@ pub const Inst = struct {
manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type),
single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
- const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type),
- const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type),
+ slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type),
+ slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type),
@@ -1382,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
.bool_to_int => return Type.u1,
- .tag_name, .error_name => return Type.const_slice_u8_sentinel_0,
+ .tag_name, .error_name => return Type.slice_const_u8_sentinel_0,
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
src/AstGen.zig
@@ -3934,7 +3934,7 @@ fn fnDecl(
var section_gz = decl_gz.makeSubBlock(params_scope);
defer section_gz.unstack();
const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: {
- const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr);
+ const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr);
if (section_gz.instructionsSlice().len == 0) {
// In this case we will send a len=0 body which can be encoded more efficiently.
break :inst inst;
@@ -4137,7 +4137,7 @@ fn globalVarDecl(
break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node);
};
const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: {
- break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node);
+ break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .slice_const_u8_type } }, var_decl.ast.section_node);
};
const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none;
wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace);
@@ -7878,7 +7878,7 @@ fn unionInit(
params: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const union_type = try typeExpr(gz, scope, params[0]);
- const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]);
+ const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]);
const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{
.container_type = union_type,
.field_name = field_name,
@@ -8100,12 +8100,12 @@ fn builtinCall(
if (ri.rl == .ref) {
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
- .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]),
+ .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]),
});
}
const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
- .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]),
+ .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]),
});
return rvalue(gz, ri, result, node);
},
@@ -8271,11 +8271,11 @@ fn builtinCall(
.align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of),
.ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int),
- .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error),
+ .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error),
.set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota),
.enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int),
.bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int),
- .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file),
+ .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file),
.error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name),
.set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety),
.sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt),
@@ -8334,7 +8334,7 @@ fn builtinCall(
},
.panic => {
try emitDbgNode(gz, node);
- return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .panic);
+ return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic);
},
.trap => {
try emitDbgNode(gz, node);
@@ -8450,7 +8450,7 @@ fn builtinCall(
},
.c_define => {
if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{});
- const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]);
+ const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]);
const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]);
const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
@@ -8546,7 +8546,7 @@ fn builtinCall(
},
.field_parent_ptr => {
const parent_type = try typeExpr(gz, scope, params[0]);
- const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]);
+ const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]);
const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{
.parent_type = parent_type,
.field_name = field_name,
@@ -8701,7 +8701,7 @@ fn hasDeclOrField(
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const container_type = try typeExpr(gz, scope, lhs_node);
- const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node);
+ const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node);
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
.lhs = container_type,
.rhs = name,
@@ -8851,7 +8851,7 @@ fn simpleCBuiltin(
) InnerError!Zir.Inst.Ref {
const name: []const u8 = if (tag == .c_undef) "C undef" else "C include";
if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name});
- const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node);
+ const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node);
_ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = operand,
@@ -8869,7 +8869,7 @@ fn offsetOf(
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const type_inst = try typeExpr(gz, scope, lhs_node);
- const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node);
+ const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node);
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
.lhs = type_inst,
.rhs = field_name,
@@ -10317,8 +10317,8 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type),
as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type),
as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
- as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type),
- as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_sentinel_0_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_type),
+ as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_sentinel_0_type),
as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type),
as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type),
as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type),
src/codegen.zig
@@ -14,6 +14,7 @@ const Air = @import("Air.zig");
const Allocator = mem.Allocator;
const Compilation = @import("Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
+const InternPool = @import("InternPool.zig");
const Liveness = @import("Liveness.zig");
const Module = @import("Module.zig");
const Target = std.Target;
@@ -66,7 +67,7 @@ pub const DebugInfoOutput = union(enum) {
pub fn generateFunction(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- func: *Module.Fn,
+ func_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@@ -75,17 +76,17 @@ pub fn generateFunction(
switch (bin_file.options.target.cpu.arch) {
.arm,
.armeb,
- => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+ => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output),
.aarch64,
.aarch64_be,
.aarch64_32,
- => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
- .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
- .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
- .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+ => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output),
+ .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output),
+ .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output),
+ .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output),
.wasm32,
.wasm64,
- => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
+ => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output),
else => unreachable,
}
}
@@ -182,12 +183,13 @@ pub fn generateSymbol(
const tracy = trace(@src());
defer tracy.end();
+ const mod = bin_file.options.module.?;
var typed_value = arg_tv;
- if (arg_tv.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
+ switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ .runtime_value => |rt| typed_value.val = rt.val.toValue(),
+ else => {},
}
- const mod = bin_file.options.module.?;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
@@ -199,35 +201,10 @@ pub fn generateSymbol(
if (typed_value.val.isUndefDeep(mod)) {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
- return Result.ok;
+ return .ok;
}
- switch (typed_value.ty.zigTypeTag(mod)) {
- .Fn => {
- return Result{
- .fail = try ErrorMsg.create(
- bin_file.allocator,
- src_loc,
- "TODO implement generateSymbol function pointers",
- .{},
- ),
- };
- },
- .Float => {
- switch (typed_value.ty.floatBits(target)) {
- 16 => writeFloat(f16, typed_value.val.toFloat(f16, mod), target, endian, try code.addManyAsArray(2)),
- 32 => writeFloat(f32, typed_value.val.toFloat(f32, mod), target, endian, try code.addManyAsArray(4)),
- 64 => writeFloat(f64, typed_value.val.toFloat(f64, mod), target, endian, try code.addManyAsArray(8)),
- 80 => {
- writeFloat(f80, typed_value.val.toFloat(f80, mod), target, endian, try code.addManyAsArray(10));
- const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
- try code.appendNTimes(0, abi_size - 10);
- },
- 128 => writeFloat(f128, typed_value.val.toFloat(f128, mod), target, endian, try code.addManyAsArray(16)),
- else => unreachable,
- }
- return Result.ok;
- },
+ if (typed_value.val.ip_index == .none) switch (typed_value.ty.zigTypeTag(mod)) {
.Array => switch (typed_value.val.tag()) {
.bytes => {
const bytes = typed_value.val.castTag(.bytes).?.data;
@@ -248,62 +225,6 @@ pub fn generateSymbol(
}
return Result.ok;
},
- .aggregate => {
- const elem_vals = typed_value.val.castTag(.aggregate).?.data;
- const elem_ty = typed_value.ty.childType(mod);
- const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod));
- for (elem_vals[0..len]) |elem_val| {
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = elem_ty,
- .val = elem_val,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- }
- return Result.ok;
- },
- .repeated => {
- const array = typed_value.val.castTag(.repeated).?.data;
- const elem_ty = typed_value.ty.childType(mod);
- const sentinel = typed_value.ty.sentinel(mod);
- const len = typed_value.ty.arrayLen(mod);
-
- var index: u64 = 0;
- while (index < len) : (index += 1) {
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = elem_ty,
- .val = array,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- }
-
- if (sentinel) |sentinel_val| {
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = elem_ty,
- .val = sentinel_val,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- }
-
- return Result.ok;
- },
- .empty_array_sentinel => {
- const elem_ty = typed_value.ty.childType(mod);
- const sentinel_val = typed_value.ty.sentinel(mod).?;
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = elem_ty,
- .val = sentinel_val,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- return Result.ok;
- },
else => return Result{
.fail = try ErrorMsg.create(
bin_file.allocator,
@@ -313,195 +234,6 @@ pub fn generateSymbol(
),
},
},
- .Pointer => switch (typed_value.val.ip_index) {
- .null_value => {
- switch (target.ptrBitWidth()) {
- 32 => {
- mem.writeInt(u32, try code.addManyAsArray(4), 0, endian);
- if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 4);
- },
- 64 => {
- mem.writeInt(u64, try code.addManyAsArray(8), 0, endian);
- if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 8);
- },
- else => unreachable,
- }
- return Result.ok;
- },
- .none => switch (typed_value.val.tag()) {
- .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef(
- bin_file,
- src_loc,
- typed_value,
- switch (tag) {
- .variable => typed_value.val.castTag(.variable).?.data.owner_decl,
- .decl_ref => typed_value.val.castTag(.decl_ref).?.data,
- .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index,
- else => unreachable,
- },
- code,
- debug_output,
- reloc_info,
- ),
- .slice => {
- const slice = typed_value.val.castTag(.slice).?.data;
-
- // generate ptr
- const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod);
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = slice_ptr_field_type,
- .val = slice.ptr,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
-
- // generate length
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = Type.usize,
- .val = slice.len,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
-
- return Result.ok;
- },
- .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr(
- bin_file,
- src_loc,
- typed_value,
- typed_value.val,
- code,
- debug_output,
- reloc_info,
- ),
- else => return Result{
- .fail = try ErrorMsg.create(
- bin_file.allocator,
- src_loc,
- "TODO implement generateSymbol for pointer type value: '{s}'",
- .{@tagName(typed_value.val.tag())},
- ),
- },
- },
- else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
- .int => {
- switch (target.ptrBitWidth()) {
- 32 => {
- const x = typed_value.val.toUnsignedInt(mod);
- mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
- },
- 64 => {
- const x = typed_value.val.toUnsignedInt(mod);
- mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
- },
- else => unreachable,
- }
- return Result.ok;
- },
- else => unreachable,
- },
- },
- .Int => {
- const info = typed_value.ty.intInfo(mod);
- if (info.bits <= 8) {
- const x: u8 = switch (info.signedness) {
- .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)),
- .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))),
- };
- try code.append(x);
- return Result.ok;
- }
- if (info.bits > 64) {
- var bigint_buffer: Value.BigIntSpace = undefined;
- const bigint = typed_value.val.toBigInt(&bigint_buffer, mod);
- const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
- const start = code.items.len;
- try code.resize(start + abi_size);
- bigint.writeTwosComplement(code.items[start..][0..abi_size], endian);
- return Result.ok;
- }
- switch (info.signedness) {
- .unsigned => {
- if (info.bits <= 16) {
- const x = @intCast(u16, typed_value.val.toUnsignedInt(mod));
- mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
- } else if (info.bits <= 32) {
- const x = @intCast(u32, typed_value.val.toUnsignedInt(mod));
- mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
- } else {
- const x = typed_value.val.toUnsignedInt(mod);
- mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
- }
- },
- .signed => {
- if (info.bits <= 16) {
- const x = @intCast(i16, typed_value.val.toSignedInt(mod));
- mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
- } else if (info.bits <= 32) {
- const x = @intCast(i32, typed_value.val.toSignedInt(mod));
- mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
- } else {
- const x = typed_value.val.toSignedInt(mod);
- mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
- }
- },
- }
- return Result.ok;
- },
- .Enum => {
- const int_val = try typed_value.enumToInt(mod);
-
- const info = typed_value.ty.intInfo(mod);
- if (info.bits <= 8) {
- const x = @intCast(u8, int_val.toUnsignedInt(mod));
- try code.append(x);
- return Result.ok;
- }
- if (info.bits > 64) {
- return Result{
- .fail = try ErrorMsg.create(
- bin_file.allocator,
- src_loc,
- "TODO implement generateSymbol for big int enums ('{}')",
- .{typed_value.ty.fmt(mod)},
- ),
- };
- }
- switch (info.signedness) {
- .unsigned => {
- if (info.bits <= 16) {
- const x = @intCast(u16, int_val.toUnsignedInt(mod));
- mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
- } else if (info.bits <= 32) {
- const x = @intCast(u32, int_val.toUnsignedInt(mod));
- mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
- } else {
- const x = int_val.toUnsignedInt(mod);
- mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
- }
- },
- .signed => {
- if (info.bits <= 16) {
- const x = @intCast(i16, int_val.toSignedInt(mod));
- mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
- } else if (info.bits <= 32) {
- const x = @intCast(i32, int_val.toSignedInt(mod));
- mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
- } else {
- const x = int_val.toSignedInt(mod);
- mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
- }
- },
- }
- return Result.ok;
- },
- .Bool => {
- const x: u8 = @boolToInt(typed_value.val.toBool(mod));
- try code.append(x);
- return Result.ok;
- },
.Struct => {
if (typed_value.ty.containerLayout(mod) == .Packed) {
const struct_obj = mod.typeToStruct(typed_value.ty).?;
@@ -562,370 +294,497 @@ pub fn generateSymbol(
return Result.ok;
},
- .Union => {
- const union_obj = typed_value.val.castTag(.@"union").?.data;
- const layout = typed_value.ty.unionGetLayout(mod);
+ .Vector => switch (typed_value.val.tag()) {
+ .bytes => {
+ const bytes = typed_value.val.castTag(.bytes).?.data;
+ const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse
+ return error.Overflow;
+ try code.ensureUnusedCapacity(len + padding);
+ code.appendSliceAssumeCapacity(bytes[0..len]);
+ if (padding > 0) try code.writer().writeByteNTimes(0, padding);
+ return Result.ok;
+ },
+ .str_lit => {
+ const str_lit = typed_value.val.castTag(.str_lit).?.data;
+ const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse
+ return error.Overflow;
+ try code.ensureUnusedCapacity(str_lit.len + padding);
+ code.appendSliceAssumeCapacity(bytes);
+ if (padding > 0) try code.writer().writeByteNTimes(0, padding);
+ return Result.ok;
+ },
+ else => unreachable,
+ },
+ .Frame,
+ .AnyFrame,
+ => return .{ .fail = try ErrorMsg.create(
+ bin_file.allocator,
+ src_loc,
+ "TODO generateSymbol for type {}",
+ .{typed_value.ty.fmt(mod)},
+ ) },
+ .Float,
+ .Union,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Int,
+ .Enum,
+ .Bool,
+ .Pointer,
+ => unreachable, // handled below
+ .Type,
+ .Void,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .Opaque,
+ .EnumLiteral,
+ .Fn,
+ => unreachable, // comptime-only types
+ };
- if (layout.payload_size == 0) {
- return generateSymbol(bin_file, src_loc, .{
- .ty = typed_value.ty.unionTagType(mod).?,
- .val = union_obj.tag,
- }, code, debug_output, reloc_info);
+ switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+ => unreachable, // types, not values
+
+ .undef, .runtime_value => unreachable, // handled above
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined,
+ .void,
+ .null,
+ .empty_struct,
+ .@"unreachable",
+ .generic_poison,
+ => unreachable, // non-runtime values
+ .false, .true => try code.append(switch (simple_value) {
+ .false => 0,
+ .true => 1,
+ else => unreachable,
+ }),
+ },
+ .variable,
+ .extern_func,
+ .func,
+ .enum_literal,
+ => unreachable, // non-runtime values
+ .int => {
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
+ var space: Value.BigIntSpace = undefined;
+ const val = typed_value.val.toBigInt(&space, mod);
+ val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
+ },
+ .err => |err| {
+ const name = mod.intern_pool.stringToSlice(err.name);
+ const kv = try mod.getErrorValue(name);
+ try code.writer().writeInt(u16, @intCast(u16, kv.value), endian);
+ },
+ .error_union => |error_union| {
+ const payload_ty = typed_value.ty.errorUnionPayload(mod);
+
+ const err_val = switch (error_union.val) {
+ .err_name => |err_name| @intCast(u16, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value),
+ .payload => @as(u16, 0),
+ };
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ try code.writer().writeInt(u16, err_val, endian);
+ return .ok;
}
- // Check if we should store the tag first.
- if (layout.tag_align >= layout.payload_align) {
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = typed_value.ty.unionTagType(mod).?,
- .val = union_obj.tag,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
+ const payload_align = payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ const abi_align = typed_value.ty.abiAlignment(mod);
+
+ // error value first when its type is larger than the error union's payload
+ if (error_align > payload_align) {
+ try code.writer().writeInt(u16, err_val, endian);
}
- const union_ty = mod.typeToUnion(typed_value.ty).?;
- const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?;
- assert(union_ty.haveFieldTypes());
- const field_ty = union_ty.fields.values()[field_index].ty;
- if (!field_ty.hasRuntimeBits(mod)) {
- try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
- } else {
+ // emit payload part of the error union
+ {
+ const begin = code.items.len;
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = field_ty,
- .val = union_obj.val,
+ .ty = payload_ty,
+ .val = switch (error_union.val) {
+ .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ .payload => |payload| payload,
+ }.toValue(),
}, code, debug_output, reloc_info)) {
.ok => {},
- .fail => |em| return Result{ .fail = em },
+ .fail => |em| return .{ .fail = em },
}
+ const unpadded_end = code.items.len - begin;
+ const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
+ const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
- const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
}
}
- if (layout.tag_size > 0) {
+ // Payload size is larger than error set, so emit our error set last
+ if (error_align <= payload_align) {
+ const begin = code.items.len;
+ try code.writer().writeInt(u16, err_val, endian);
+ const unpadded_end = code.items.len - begin;
+ const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
+ const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
+
+ if (padding > 0) {
+ try code.writer().writeByteNTimes(0, padding);
+ }
+ }
+ },
+ .enum_tag => |enum_tag| {
+ const int_tag_ty = try typed_value.ty.intTagType(mod);
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = int_tag_ty,
+ .val = (try mod.intern_pool.getCoerced(mod.gpa, enum_tag.int, int_tag_ty.ip_index)).toValue(),
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return .{ .fail = em },
+ }
+ },
+ .float => |float| switch (float.storage) {
+ .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)),
+ .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)),
+ .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
+ .f80 => |f80_val| {
+ writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
+ try code.appendNTimes(0, abi_size - 10);
+ },
+ .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
+ },
+ .ptr => |ptr| {
+ // generate ptr
+ switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) {
+ .none => typed_value.val,
+ else => typed_value.val.slicePtr(mod),
+ }.ip_index, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return .{ .fail = em },
+ }
+ if (ptr.len != .none) {
+ // generate len
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = union_ty.tag_ty,
- .val = union_obj.tag,
+ .ty = Type.usize,
+ .val = ptr.len.toValue(),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
}
-
- if (layout.padding > 0) {
- try code.writer().writeByteNTimes(0, layout.padding);
- }
-
- return Result.ok;
},
- .Optional => {
+ .opt => {
const payload_type = typed_value.ty.optionalChild(mod);
- const is_pl = !typed_value.val.isNull(mod);
+ const payload_val = typed_value.val.optionalValue(mod);
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
- if (!payload_type.hasRuntimeBits(mod)) {
- try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size);
- return Result.ok;
- }
-
if (typed_value.ty.optionalReprIsPayload(mod)) {
- if (typed_value.val.castTag(.opt_payload)) |payload| {
+ if (payload_val) |value| {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
- .val = payload.data,
+ .val = value,
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
- } else if (!typed_value.val.isNull(mod)) {
+ } else {
+ try code.writer().writeByteNTimes(0, abi_size);
+ }
+ } else {
+ const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
+ if (payload_type.hasRuntimeBits(mod)) {
+ const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.ip_index })).toValue();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
- .val = typed_value.val,
+ .val = value,
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
- } else {
- try code.writer().writeByteNTimes(0, abi_size);
}
-
- return Result.ok;
+ try code.writer().writeByte(@boolToInt(payload_val != null));
+ try code.writer().writeByteNTimes(0, padding);
}
+ },
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.ip_index)) {
+ .array_type => |array_type| {
+ var index: u64 = 0;
+ while (index < array_type.len) : (index += 1) {
+ switch (aggregate.storage) {
+ .bytes => |bytes| try code.appendSlice(bytes),
+ .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = array_type.child.toType(),
+ .val = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[@intCast(usize, index)],
+ .repeated_elem => |elem| elem,
+ }.toValue(),
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return .{ .fail = em },
+ },
+ }
+ }
- const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
- const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.undef;
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = payload_type,
- .val = value,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- try code.writer().writeByte(@boolToInt(is_pl));
- try code.writer().writeByteNTimes(0, padding);
+ if (array_type.sentinel != .none) {
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = array_type.child.toType(),
+ .val = array_type.sentinel.toValue(),
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return .{ .fail = em },
+ }
+ }
+ },
+ .vector_type => |vector_type| {
+ var index: u32 = 0;
+ while (index < vector_type.len) : (index += 1) {
+ switch (aggregate.storage) {
+ .bytes => |bytes| try code.appendSlice(bytes),
+ .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = vector_type.child.toType(),
+ .val = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[@intCast(usize, index)],
+ .repeated_elem => |elem| elem,
+ }.toValue(),
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return .{ .fail = em },
+ },
+ }
+ }
- return Result.ok;
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
+ (math.divCeil(u64, vector_type.child.toType().bitSize(mod) * vector_type.len, 8) catch |err| switch (err) {
+ error.DivisionByZero => unreachable,
+ else => |e| return e,
+ })) orelse return error.Overflow;
+ if (padding > 0) try code.writer().writeByteNTimes(0, padding);
+ },
+ .struct_type, .anon_struct_type => {
+ if (typed_value.ty.containerLayout(mod) == .Packed) {
+ const struct_obj = mod.typeToStruct(typed_value.ty).?;
+ const fields = struct_obj.fields.values();
+ const field_vals = typed_value.val.castTag(.aggregate).?.data;
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
+ const current_pos = code.items.len;
+ try code.resize(current_pos + abi_size);
+ var bits: u16 = 0;
+
+ for (field_vals, 0..) |field_val, index| {
+ const field_ty = fields[index].ty;
+ // pointer may point to a decl which must be marked used
+ // but can also result in a relocation. Therefore we handle those seperately.
+ if (field_ty.zigTypeTag(mod) == .Pointer) {
+ const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow;
+ var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
+ defer tmp_list.deinit();
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = field_ty,
+ .val = field_val,
+ }, &tmp_list, debug_output, reloc_info)) {
+ .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
+ .fail => |em| return Result{ .fail = em },
+ }
+ } else {
+ field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
+ }
+ bits += @intCast(u16, field_ty.bitSize(mod));
+ }
+ } else {
+ const struct_begin = code.items.len;
+ const field_vals = typed_value.val.castTag(.aggregate).?.data;
+ for (field_vals, 0..) |field_val, index| {
+ const field_ty = typed_value.ty.structFieldType(index, mod);
+ if (!field_ty.hasRuntimeBits(mod)) continue;
+
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = field_ty,
+ .val = field_val,
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return Result{ .fail = em },
+ }
+ const unpadded_field_end = code.items.len - struct_begin;
+
+ // Pad struct members if required
+ const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod);
+ const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow;
+
+ if (padding > 0) {
+ try code.writer().writeByteNTimes(0, padding);
+ }
+ }
+ }
+ },
+ else => unreachable,
},
- .ErrorUnion => {
- const error_ty = typed_value.ty.errorUnionSet(mod);
- const payload_ty = typed_value.ty.errorUnionPayload(mod);
- const is_payload = typed_value.val.errorUnionIsPayload();
+ .un => |un| {
+ const layout = typed_value.ty.unionGetLayout(mod);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val;
+ if (layout.payload_size == 0) {
return generateSymbol(bin_file, src_loc, .{
- .ty = error_ty,
- .val = err_val,
+ .ty = typed_value.ty.unionTagType(mod).?,
+ .val = un.tag.toValue(),
}, code, debug_output, reloc_info);
}
- const payload_align = payload_ty.abiAlignment(mod);
- const error_align = Type.anyerror.abiAlignment(mod);
- const abi_align = typed_value.ty.abiAlignment(mod);
-
- // error value first when its type is larger than the error union's payload
- if (error_align > payload_align) {
+ // Check if we should store the tag first.
+ if (layout.tag_align >= layout.payload_align) {
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = error_ty,
- .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val,
+ .ty = typed_value.ty.unionTagType(mod).?,
+ .val = un.tag.toValue(),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
}
- // emit payload part of the error union
- {
- const begin = code.items.len;
- const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.undef;
+ const union_ty = mod.typeToUnion(typed_value.ty).?;
+ const field_index = typed_value.ty.unionTagFieldIndex(un.tag.toValue(), mod).?;
+ assert(union_ty.haveFieldTypes());
+ const field_ty = union_ty.fields.values()[field_index].ty;
+ if (!field_ty.hasRuntimeBits(mod)) {
+ try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
+ } else {
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = payload_ty,
- .val = payload_val,
+ .ty = field_ty,
+ .val = un.val.toValue(),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
- const unpadded_end = code.items.len - begin;
- const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
- const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
+ const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
}
}
- // Payload size is larger than error set, so emit our error set last
- if (error_align <= payload_align) {
- const begin = code.items.len;
+ if (layout.tag_size > 0) {
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = error_ty,
- .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val,
+ .ty = union_ty.tag_ty,
+ .val = un.tag.toValue(),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
- const unpadded_end = code.items.len - begin;
- const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
- const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
-
- if (padding > 0) {
- try code.writer().writeByteNTimes(0, padding);
- }
- }
-
- return Result.ok;
- },
- .ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const name = typed_value.val.getError().?;
- const kv = try bin_file.options.module.?.getErrorValue(name);
- try code.writer().writeInt(u32, kv.value, endian);
- },
- else => {
- try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod)));
- },
}
- return Result.ok;
},
- .Vector => switch (typed_value.val.tag()) {
- .bytes => {
- const bytes = typed_value.val.castTag(.bytes).?.data;
- const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
- const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse
- return error.Overflow;
- try code.ensureUnusedCapacity(len + padding);
- code.appendSliceAssumeCapacity(bytes[0..len]);
- if (padding > 0) try code.writer().writeByteNTimes(0, padding);
- return Result.ok;
- },
- .aggregate => {
- const elem_vals = typed_value.val.castTag(.aggregate).?.data;
- const elem_ty = typed_value.ty.childType(mod);
- const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
- const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
- (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
- error.DivisionByZero => unreachable,
- else => |e| return e,
- })) orelse return error.Overflow;
- for (elem_vals[0..len]) |elem_val| {
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = elem_ty,
- .val = elem_val,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- }
- if (padding > 0) try code.writer().writeByteNTimes(0, padding);
- return Result.ok;
- },
- .repeated => {
- const array = typed_value.val.castTag(.repeated).?.data;
- const elem_ty = typed_value.ty.childType(mod);
- const len = typed_value.ty.arrayLen(mod);
- const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
- (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
- error.DivisionByZero => unreachable,
- else => |e| return e,
- })) orelse return error.Overflow;
- var index: u64 = 0;
- while (index < len) : (index += 1) {
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = elem_ty,
- .val = array,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- }
- if (padding > 0) try code.writer().writeByteNTimes(0, padding);
- return Result.ok;
- },
- .str_lit => {
- const str_lit = typed_value.val.castTag(.str_lit).?.data;
- const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse
- return error.Overflow;
- try code.ensureUnusedCapacity(str_lit.len + padding);
- code.appendSliceAssumeCapacity(bytes);
- if (padding > 0) try code.writer().writeByteNTimes(0, padding);
- return Result.ok;
- },
- else => unreachable,
- },
- else => |tag| return Result{ .fail = try ErrorMsg.create(
- bin_file.allocator,
- src_loc,
- "TODO implement generateSymbol for type '{s}'",
- .{@tagName(tag)},
- ) },
}
+ return .ok;
}
fn lowerParentPtr(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- typed_value: TypedValue,
- parent_ptr: Value,
+ parent_ptr: InternPool.Index,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
) CodeGenError!Result {
const mod = bin_file.options.module.?;
- switch (parent_ptr.tag()) {
- .field_ptr => {
- const field_ptr = parent_ptr.castTag(.field_ptr).?.data;
+ const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr;
+ assert(ptr.len == .none);
+ return switch (ptr.addr) {
+ .decl, .mut_decl => try lowerDeclRef(
+ bin_file,
+ src_loc,
+ switch (ptr.addr) {
+ .decl => |decl| decl,
+ .mut_decl => |mut_decl| mut_decl.decl,
+ else => unreachable,
+ },
+ code,
+ debug_output,
+ reloc_info,
+ ),
+ .int => |int| try generateSymbol(bin_file, src_loc, .{
+ .ty = Type.usize,
+ .val = int.toValue(),
+ }, code, debug_output, reloc_info),
+ .eu_payload => |eu_payload| try lowerParentPtr(
+ bin_file,
+ src_loc,
+ eu_payload,
+ code,
+ debug_output,
+ reloc_info.offset(@intCast(u32, errUnionPayloadOffset(
+ mod.intern_pool.typeOf(eu_payload).toType(),
+ mod,
+ ))),
+ ),
+ .opt_payload => |opt_payload| try lowerParentPtr(
+ bin_file,
+ src_loc,
+ opt_payload,
+ code,
+ debug_output,
+ reloc_info,
+ ),
+ .elem => |elem| try lowerParentPtr(
+ bin_file,
+ src_loc,
+ elem.base,
+ code,
+ debug_output,
+ reloc_info.offset(@intCast(u32, elem.index *
+ mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))),
+ ),
+ .field => |field| {
+ const base_type = mod.intern_pool.typeOf(field.base);
return lowerParentPtr(
bin_file,
src_loc,
- typed_value,
- field_ptr.container_ptr,
+ field.base,
code,
debug_output,
- reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) {
- .Pointer => offset: {
- assert(field_ptr.container_ty.isSlice(mod));
- break :offset switch (field_ptr.field_index) {
+ reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) {
+ .ptr_type => |ptr_type| switch (ptr_type.size) {
+ .One, .Many, .C => unreachable,
+ .Slice => switch (field.index) {
0 => 0,
- 1 => field_ptr.container_ty.slicePtrFieldType(mod).abiSize(mod),
+ 1 => @divExact(mod.getTarget().ptrBitWidth(), 8),
else => unreachable,
- };
+ },
},
- .Struct, .Union => field_ptr.container_ty.structFieldOffset(
- field_ptr.field_index,
+ .struct_type,
+ .anon_struct_type,
+ .union_type,
+ => @intCast(u32, base_type.toType().childType(mod).structFieldOffset(
+ @intCast(u32, field.index),
mod,
- ),
- else => return Result{ .fail = try ErrorMsg.create(
- bin_file.allocator,
- src_loc,
- "TODO implement lowerParentPtr for field_ptr with a container of type {}",
- .{field_ptr.container_ty.fmt(bin_file.options.module.?)},
- ) },
- })),
- );
- },
- .elem_ptr => {
- const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data;
- return lowerParentPtr(
- bin_file,
- src_loc,
- typed_value,
- elem_ptr.array_ptr,
- code,
- debug_output,
- reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))),
- );
- },
- .opt_payload_ptr => {
- const opt_payload_ptr = parent_ptr.castTag(.opt_payload_ptr).?.data;
- return lowerParentPtr(
- bin_file,
- src_loc,
- typed_value,
- opt_payload_ptr.container_ptr,
- code,
- debug_output,
- reloc_info,
- );
- },
- .eu_payload_ptr => {
- const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data;
- const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod);
- return lowerParentPtr(
- bin_file,
- src_loc,
- typed_value,
- eu_payload_ptr.container_ptr,
- code,
- debug_output,
- reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, mod))),
+ )),
+ else => unreachable,
+ }),
);
},
- .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef(
- bin_file,
- src_loc,
- typed_value,
- switch (tag) {
- .variable => parent_ptr.castTag(.variable).?.data.owner_decl,
- .decl_ref => parent_ptr.castTag(.decl_ref).?.data,
- .decl_ref_mut => parent_ptr.castTag(.decl_ref_mut).?.data.decl_index,
- else => unreachable,
- },
- code,
- debug_output,
- reloc_info,
- ),
- else => |tag| return Result{ .fail = try ErrorMsg.create(
- bin_file.allocator,
- src_loc,
- "TODO implement lowerParentPtr for type '{s}'",
- .{@tagName(tag)},
- ) },
- }
+ .comptime_field => unreachable,
+ };
}
const RelocInfo = struct {
@@ -940,36 +799,15 @@ const RelocInfo = struct {
fn lowerDeclRef(
bin_file: *link.File,
src_loc: Module.SrcLoc,
- typed_value: TypedValue,
decl_index: Module.Decl.Index,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
) CodeGenError!Result {
+ _ = src_loc;
+ _ = debug_output;
const target = bin_file.options.target;
const mod = bin_file.options.module.?;
- if (typed_value.ty.isSlice(mod)) {
- // generate ptr
- const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod);
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = slice_ptr_field_type,
- .val = typed_value.val,
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
-
- // generate length
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = Type.usize,
- .val = try mod.intValue(Type.usize, typed_value.val.sliceLen(mod)),
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
-
- return Result.ok;
- }
const ptr_width = target.ptrBitWidth();
const decl = mod.declPtr(decl_index);
@@ -1154,12 +992,13 @@ pub fn genTypedValue(
arg_tv: TypedValue,
owner_decl_index: Module.Decl.Index,
) CodeGenError!GenResult {
+ const mod = bin_file.options.module.?;
var typed_value = arg_tv;
- if (typed_value.val.castTag(.runtime_value)) |rt| {
- typed_value.val = rt.data;
+ switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ .runtime_value => |rt| typed_value.val = rt.val.toValue(),
+ else => {},
}
- const mod = bin_file.options.module.?;
log.debug("genTypedValue: ty = {}, val = {}", .{
typed_value.ty.fmt(mod),
typed_value.val.fmtValue(typed_value.ty, mod),
@@ -1171,17 +1010,14 @@ pub fn genTypedValue(
const target = bin_file.options.target;
const ptr_bits = target.ptrBitWidth();
- if (!typed_value.ty.isSlice(mod)) {
- if (typed_value.val.castTag(.variable)) |payload| {
- return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl);
- }
- if (typed_value.val.castTag(.decl_ref)) |payload| {
- return genDeclRef(bin_file, src_loc, typed_value, payload.data);
- }
- if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
- return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index);
- }
- }
+ if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl),
+ .mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl),
+ else => {},
+ },
+ else => {},
+ };
switch (typed_value.ty.zigTypeTag(mod)) {
.Void => return GenResult.mcv(.none),
@@ -1215,11 +1051,9 @@ pub fn genTypedValue(
},
.Optional => {
if (typed_value.ty.isPtrLikeOptional(mod)) {
- if (typed_value.val.ip_index == .null_value) return GenResult.mcv(.{ .immediate = 0 });
-
return genTypedValue(bin_file, src_loc, .{
.ty = typed_value.ty.optionalChild(mod),
- .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val,
+ .val = typed_value.val.optionalValue(mod) orelse return GenResult.mcv(.{ .immediate = 0 }),
}, owner_decl_index);
} else if (typed_value.ty.abiSize(mod) == 1) {
return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) });
@@ -1234,24 +1068,15 @@ pub fn genTypedValue(
}, owner_decl_index);
},
.ErrorSet => {
- switch (typed_value.val.tag()) {
- .@"error" => {
- const err_name = typed_value.val.castTag(.@"error").?.data.name;
- const module = bin_file.options.module.?;
- const global_error_set = module.global_error_set;
- const error_index = global_error_set.get(err_name).?;
- return GenResult.mcv(.{ .immediate = error_index });
- },
- else => {
- // In this case we are rendering an error union which has a 0 bits payload.
- return GenResult.mcv(.{ .immediate = 0 });
- },
- }
+ const err_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(typed_value.val.ip_index).err.name);
+ const global_error_set = mod.global_error_set;
+ const error_index = global_error_set.get(err_name).?;
+ return GenResult.mcv(.{ .immediate = error_index });
},
.ErrorUnion => {
const error_type = typed_value.ty.errorUnionSet(mod);
const payload_type = typed_value.ty.errorUnionPayload(mod);
- const is_pl = typed_value.val.errorUnionIsPayload();
+ const is_pl = typed_value.val.errorUnionIsPayload(mod);
if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
src/Compilation.zig
@@ -226,7 +226,7 @@ const Job = union(enum) {
/// Write the constant value for a Decl to the output file.
codegen_decl: Module.Decl.Index,
/// Write the machine code for a function to the output file.
- codegen_func: *Module.Fn,
+ codegen_func: Module.Fn.Index,
/// Render the .h file snippet for the Decl.
emit_h_decl: Module.Decl.Index,
/// The Decl needs to be analyzed and possibly export itself.
@@ -3208,7 +3208,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
// Tests are always emitted in test binaries. The decl_refs are created by
// Module.populateTestFunctions, but this will not queue body analysis, so do
// that now.
- try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data);
+ const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?;
+ try module.ensureFuncBodyAnalysisQueued(func_index);
}
},
.update_embed_file => |embed_file| {
src/InternPool.zig
@@ -34,6 +34,12 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{},
/// When a Union object is freed from `allocated_unions`, it is pushed into this stack.
unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},
+/// Fn objects are stored in this data structure because:
+/// * They need to be mutated after creation.
+allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{},
+/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack.
+funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{},
+
/// InferredErrorSet objects are stored in this data structure because:
/// * They contain pointers such as the errors map and the set of other inferred error sets.
/// * They need to be mutated after creation.
@@ -66,18 +72,18 @@ const Limb = std.math.big.Limb;
const InternPool = @This();
const Module = @import("Module.zig");
+const Sema = @import("Sema.zig");
const KeyAdapter = struct {
intern_pool: *const InternPool,
pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool {
_ = b_void;
- return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a);
+ return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a, ctx.intern_pool);
}
pub fn hash(ctx: @This(), a: Key) u32 {
- _ = ctx;
- return a.hash32();
+ return a.hash32(ctx.intern_pool);
}
};
@@ -111,10 +117,19 @@ pub const RuntimeIndex = enum(u32) {
}
};
+/// An index into `string_bytes`.
+pub const String = enum(u32) {
+ _,
+};
+
/// An index into `string_bytes`.
pub const NullTerminatedString = enum(u32) {
_,
+ pub fn toString(self: NullTerminatedString) String {
+ return @intToEnum(String, @enumToInt(self));
+ }
+
pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString {
return @intToEnum(OptionalNullTerminatedString, @enumToInt(self));
}
@@ -180,23 +195,20 @@ pub const Key = union(enum) {
/// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
/// via `simple_value` and has a named `Index` tag for it.
undef: Index,
+ runtime_value: TypeValue,
simple_value: SimpleValue,
- extern_func: struct {
- ty: Index,
- /// The Decl that corresponds to the function itself.
- decl: Module.Decl.Index,
- /// Library name if specified.
- /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
- /// Index into the string table bytes.
- lib_name: u32,
- },
+ variable: Key.Variable,
+ extern_func: ExternFunc,
+ func: Func,
int: Key.Int,
+ err: Error,
+ error_union: ErrorUnion,
+ enum_literal: NullTerminatedString,
/// A specific enum tag, indicated by the integer tag value.
enum_tag: Key.EnumTag,
float: Key.Float,
ptr: Ptr,
opt: Opt,
-
/// An instance of a struct, array, or vector.
/// Each element/field stored as an `Index`.
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
@@ -261,7 +273,7 @@ pub const Key = union(enum) {
pub const ArrayType = struct {
len: u64,
child: Index,
- sentinel: Index,
+ sentinel: Index = .none,
};
pub const VectorType = struct {
@@ -369,6 +381,7 @@ pub const Key = union(enum) {
return @intCast(u32, x);
},
.i64, .big_int => return null, // out of range
+ .lazy_align, .lazy_size => unreachable,
}
}
};
@@ -441,6 +454,32 @@ pub const Key = union(enum) {
}
};
+ pub const Variable = struct {
+ ty: Index,
+ init: Index,
+ decl: Module.Decl.Index,
+ lib_name: OptionalNullTerminatedString = .none,
+ is_extern: bool = false,
+ is_const: bool = false,
+ is_threadlocal: bool = false,
+ is_weak_linkage: bool = false,
+ };
+
+ pub const ExternFunc = struct {
+ ty: Index,
+ /// The Decl that corresponds to the function itself.
+ decl: Module.Decl.Index,
+ /// Library name if specified.
+ /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
+ /// Index into the string table bytes.
+ lib_name: OptionalNullTerminatedString,
+ };
+
+ pub const Func = struct {
+ ty: Index,
+ index: Module.Fn.Index,
+ };
+
pub const Int = struct {
ty: Index,
storage: Storage,
@@ -449,6 +488,8 @@ pub const Key = union(enum) {
u64: u64,
i64: i64,
big_int: BigIntConst,
+ lazy_align: Index,
+ lazy_size: Index,
/// Big enough to fit any non-BigInt value
pub const BigIntSpace = struct {
@@ -460,13 +501,26 @@ pub const Key = union(enum) {
pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst {
return switch (storage) {
.big_int => |x| x,
- .u64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
- .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
+ inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
+ .lazy_align, .lazy_size => unreachable,
};
}
};
};
+ pub const Error = struct {
+ ty: Index,
+ name: NullTerminatedString,
+ };
+
+ pub const ErrorUnion = struct {
+ ty: Index,
+ val: union(enum) {
+ err_name: NullTerminatedString,
+ payload: Index,
+ },
+ };
+
pub const EnumTag = struct {
/// The enum type.
ty: Index,
@@ -497,19 +551,8 @@ pub const Key = union(enum) {
len: Index = .none,
pub const Addr = union(enum) {
- @"var": struct {
- init: Index,
- owner_decl: Module.Decl.Index,
- lib_name: OptionalNullTerminatedString,
- is_const: bool,
- is_threadlocal: bool,
- is_weak_linkage: bool,
- },
decl: Module.Decl.Index,
- mut_decl: struct {
- decl: Module.Decl.Index,
- runtime_index: RuntimeIndex,
- },
+ mut_decl: MutDecl,
int: Index,
eu_payload: Index,
opt_payload: Index,
@@ -517,6 +560,10 @@ pub const Key = union(enum) {
elem: BaseIndex,
field: BaseIndex,
+ pub const MutDecl = struct {
+ decl: Module.Decl.Index,
+ runtime_index: RuntimeIndex,
+ };
pub const BaseIndex = struct {
base: Index,
index: u64,
@@ -546,22 +593,31 @@ pub const Key = union(enum) {
storage: Storage,
pub const Storage = union(enum) {
+ bytes: []const u8,
elems: []const Index,
repeated_elem: Index,
+
+ pub fn values(self: *const Storage) []const Index {
+ return switch (self.*) {
+ .bytes => &.{},
+ .elems => |elems| elems,
+ .repeated_elem => |*elem| @as(*const [1]Index, elem),
+ };
+ }
};
};
- pub fn hash32(key: Key) u32 {
- return @truncate(u32, key.hash64());
+ pub fn hash32(key: Key, ip: *const InternPool) u32 {
+ return @truncate(u32, key.hash64(ip));
}
- pub fn hash64(key: Key) u64 {
+ pub fn hash64(key: Key, ip: *const InternPool) u64 {
var hasher = std.hash.Wyhash.init(0);
- key.hashWithHasher(&hasher);
+ key.hashWithHasher(&hasher, ip);
return hasher.final();
}
- pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void {
+ pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void {
const KeyTag = @typeInfo(Key).Union.tag_type.?;
const key_tag: KeyTag = key;
std.hash.autoHash(hasher, key_tag);
@@ -575,27 +631,45 @@ pub const Key = union(enum) {
.error_union_type,
.simple_type,
.simple_value,
- .extern_func,
.opt,
.struct_type,
.union_type,
.un,
.undef,
+ .err,
+ .error_union,
+ .enum_literal,
.enum_tag,
.inferred_error_set_type,
=> |info| std.hash.autoHash(hasher, info),
+ .runtime_value => |runtime_value| std.hash.autoHash(hasher, runtime_value.val),
.opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
.enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl),
+ .variable => |variable| std.hash.autoHash(hasher, variable.decl),
+ .extern_func => |extern_func| std.hash.autoHash(hasher, extern_func.decl),
+ .func => |func| std.hash.autoHash(hasher, func.index),
+
.int => |int| {
// Canonicalize all integers by converting them to BigIntConst.
- var buffer: Key.Int.Storage.BigIntSpace = undefined;
- const big_int = int.storage.toBigInt(&buffer);
-
- std.hash.autoHash(hasher, int.ty);
- std.hash.autoHash(hasher, big_int.positive);
- for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb);
+ switch (int.storage) {
+ .u64, .i64, .big_int => {
+ var buffer: Key.Int.Storage.BigIntSpace = undefined;
+ const big_int = int.storage.toBigInt(&buffer);
+
+ std.hash.autoHash(hasher, int.ty);
+ std.hash.autoHash(hasher, big_int.positive);
+ for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb);
+ },
+ .lazy_align, .lazy_size => |lazy_ty| {
+ std.hash.autoHash(
+ hasher,
+ @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage),
+ );
+ std.hash.autoHash(hasher, lazy_ty);
+ },
+ }
},
.float => |float| {
@@ -615,7 +689,6 @@ pub const Key = union(enum) {
// This is sound due to pointer provenance rules.
std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr));
switch (ptr.addr) {
- .@"var" => |@"var"| std.hash.autoHash(hasher, @"var".owner_decl),
.decl => |decl| std.hash.autoHash(hasher, decl),
.mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl),
.int => |int| std.hash.autoHash(hasher, int),
@@ -629,13 +702,47 @@ pub const Key = union(enum) {
.aggregate => |aggregate| {
std.hash.autoHash(hasher, aggregate.ty);
- std.hash.autoHash(hasher, @as(
- @typeInfo(Key.Aggregate.Storage).Union.tag_type.?,
- aggregate.storage,
- ));
+ switch (ip.indexToKey(aggregate.ty)) {
+ .array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) {
+ .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte),
+ .elems => |elems| {
+ var buffer: Key.Int.Storage.BigIntSpace = undefined;
+ for (elems) |elem| std.hash.autoHash(
+ hasher,
+ ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
+ unreachable,
+ );
+ },
+ .repeated_elem => |elem| {
+ const len = ip.aggregateTypeLen(aggregate.ty);
+ var buffer: Key.Int.Storage.BigIntSpace = undefined;
+ const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
+ unreachable;
+ var i: u64 = 0;
+ while (i < len) : (i += 1) std.hash.autoHash(hasher, byte);
+ },
+ },
+ else => {},
+ }
+
switch (aggregate.storage) {
- .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem),
- .repeated_elem => |elem| std.hash.autoHash(hasher, elem),
+ .bytes => unreachable,
+ .elems => |elems| {
+ var buffer: Key.Int.Storage.BigIntSpace = undefined;
+ for (elems) |elem| std.hash.autoHash(
+ hasher,
+ ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
+ unreachable,
+ );
+ },
+ .repeated_elem => |elem| {
+ const len = ip.aggregateTypeLen(aggregate.ty);
+ var buffer: Key.Int.Storage.BigIntSpace = undefined;
+ const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
+ unreachable;
+ var i: u64 = 0;
+ while (i < len) : (i += 1) std.hash.autoHash(hasher, byte);
+ },
}
},
@@ -663,7 +770,7 @@ pub const Key = union(enum) {
}
}
- pub fn eql(a: Key, b: Key) bool {
+ pub fn eql(a: Key, b: Key, ip: *const InternPool) bool {
const KeyTag = @typeInfo(Key).Union.tag_type.?;
const a_tag: KeyTag = a;
const b_tag: KeyTag = b;
@@ -709,9 +816,9 @@ pub const Key = union(enum) {
const b_info = b.undef;
return a_info == b_info;
},
- .extern_func => |a_info| {
- const b_info = b.extern_func;
- return std.meta.eql(a_info, b_info);
+ .runtime_value => |a_info| {
+ const b_info = b.runtime_value;
+ return a_info.val == b_info.val;
},
.opt => |a_info| {
const b_info = b.opt;
@@ -729,11 +836,36 @@ pub const Key = union(enum) {
const b_info = b.un;
return std.meta.eql(a_info, b_info);
},
+ .err => |a_info| {
+ const b_info = b.err;
+ return std.meta.eql(a_info, b_info);
+ },
+ .error_union => |a_info| {
+ const b_info = b.error_union;
+ return std.meta.eql(a_info, b_info);
+ },
+ .enum_literal => |a_info| {
+ const b_info = b.enum_literal;
+ return a_info == b_info;
+ },
.enum_tag => |a_info| {
const b_info = b.enum_tag;
return std.meta.eql(a_info, b_info);
},
+ .variable => |a_info| {
+ const b_info = b.variable;
+ return a_info.decl == b_info.decl;
+ },
+ .extern_func => |a_info| {
+ const b_info = b.extern_func;
+ return a_info.decl == b_info.decl;
+ },
+ .func => |a_info| {
+ const b_info = b.func;
+ return a_info.index == b_info.index;
+ },
+
.ptr => |a_info| {
const b_info = b.ptr;
if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false;
@@ -742,7 +874,6 @@ pub const Key = union(enum) {
if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false;
return switch (a_info.addr) {
- .@"var" => |a_var| a_var.owner_decl == b_info.addr.@"var".owner_decl,
.decl => |a_decl| a_decl == b_info.addr.decl,
.mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl),
.int => |a_int| a_int == b_info.addr.int,
@@ -765,16 +896,27 @@ pub const Key = union(enum) {
.u64 => |bb| aa == bb,
.i64 => |bb| aa == bb,
.big_int => |bb| bb.orderAgainstScalar(aa) == .eq,
+ .lazy_align, .lazy_size => false,
},
.i64 => |aa| switch (b_info.storage) {
.u64 => |bb| aa == bb,
.i64 => |bb| aa == bb,
.big_int => |bb| bb.orderAgainstScalar(aa) == .eq,
+ .lazy_align, .lazy_size => false,
},
.big_int => |aa| switch (b_info.storage) {
.u64 => |bb| aa.orderAgainstScalar(bb) == .eq,
.i64 => |bb| aa.orderAgainstScalar(bb) == .eq,
.big_int => |bb| aa.eq(bb),
+ .lazy_align, .lazy_size => false,
+ },
+ .lazy_align => |aa| switch (b_info.storage) {
+ .u64, .i64, .big_int, .lazy_size => false,
+ .lazy_align => |bb| aa == bb,
+ },
+ .lazy_size => |aa| switch (b_info.storage) {
+ .u64, .i64, .big_int, .lazy_align => false,
+ .lazy_size => |bb| aa == bb,
},
};
},
@@ -818,12 +960,43 @@ pub const Key = union(enum) {
if (a_info.ty != b_info.ty) return false;
const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?;
- if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) return false;
+ if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) {
+ for (0..@intCast(usize, ip.aggregateTypeLen(a_info.ty))) |elem_index| {
+ const a_elem = switch (a_info.storage) {
+ .bytes => |bytes| ip.getIfExists(.{ .int = .{
+ .ty = .u8_type,
+ .storage = .{ .u64 = bytes[elem_index] },
+ } }) orelse return false,
+ .elems => |elems| elems[elem_index],
+ .repeated_elem => |elem| elem,
+ };
+ const b_elem = switch (b_info.storage) {
+ .bytes => |bytes| ip.getIfExists(.{ .int = .{
+ .ty = .u8_type,
+ .storage = .{ .u64 = bytes[elem_index] },
+ } }) orelse return false,
+ .elems => |elems| elems[elem_index],
+ .repeated_elem => |elem| elem,
+ };
+ if (a_elem != b_elem) return false;
+ }
+ return true;
+ }
- return switch (a_info.storage) {
- .elems => |a_elems| std.mem.eql(Index, a_elems, b_info.storage.elems),
- .repeated_elem => |a_elem| a_elem == b_info.storage.repeated_elem,
- };
+ switch (a_info.storage) {
+ .bytes => |a_bytes| {
+ const b_bytes = b_info.storage.bytes;
+ return std.mem.eql(u8, a_bytes, b_bytes);
+ },
+ .elems => |a_elems| {
+ const b_elems = b_info.storage.elems;
+ return std.mem.eql(Index, a_elems, b_elems);
+ },
+ .repeated_elem => |a_elem| {
+ const b_elem = b_info.storage.repeated_elem;
+ return a_elem == b_elem;
+ },
+ }
},
.anon_struct_type => |a_info| {
const b_info = b.anon_struct_type;
@@ -876,16 +1049,23 @@ pub const Key = union(enum) {
.func_type,
=> .type_type,
- inline .ptr,
+ inline .runtime_value,
+ .ptr,
.int,
.float,
.opt,
+ .variable,
.extern_func,
+ .func,
+ .err,
+ .error_union,
.enum_tag,
.aggregate,
.un,
=> |x| x.ty,
+ .enum_literal => .enum_literal_type,
+
.undef => |x| x,
.simple_value => |s| switch (s) {
@@ -977,8 +1157,8 @@ pub const Index = enum(u32) {
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
single_const_pointer_to_comptime_int_type,
- const_slice_u8_type,
- const_slice_u8_sentinel_0_type,
+ slice_const_u8_type,
+ slice_const_u8_sentinel_0_type,
anyerror_void_error_union_type,
generic_poison_type,
inferred_alloc_const_type,
@@ -1128,11 +1308,11 @@ pub const Index = enum(u32) {
},
undef: DataIsIndex,
+ runtime_value: DataIsIndex,
simple_value: struct { data: SimpleValue },
- ptr_var: struct { data: *PtrVar },
ptr_mut_decl: struct { data: *PtrMutDecl },
ptr_decl: struct { data: *PtrDecl },
- ptr_int: struct { data: *PtrInt },
+ ptr_int: struct { data: *PtrAddr },
ptr_eu_payload: DataIsIndex,
ptr_opt_payload: DataIsIndex,
ptr_comptime_field: struct { data: *PtrComptimeField },
@@ -1151,6 +1331,12 @@ pub const Index = enum(u32) {
int_small: struct { data: *IntSmall },
int_positive: struct { data: u32 },
int_negative: struct { data: u32 },
+ int_lazy_align: struct { data: *IntLazy },
+ int_lazy_size: struct { data: *IntLazy },
+ error_set_error: struct { data: *Key.Error },
+ error_union_error: struct { data: *Key.Error },
+ error_union_payload: struct { data: *TypeValue },
+ enum_literal: struct { data: NullTerminatedString },
enum_tag: struct { data: *Key.EnumTag },
float_f16: struct { data: f16 },
float_f32: struct { data: f32 },
@@ -1160,18 +1346,21 @@ pub const Index = enum(u32) {
float_c_longdouble_f80: struct { data: *Float80 },
float_c_longdouble_f128: struct { data: *Float128 },
float_comptime_float: struct { data: *Float128 },
+ variable: struct { data: *Variable },
extern_func: struct { data: void },
func: struct { data: void },
only_possible_value: DataIsIndex,
union_value: struct { data: *Key.Union },
+ bytes: struct { data: *Bytes },
aggregate: struct { data: *Aggregate },
repeated: struct { data: *Repeated },
}) void {
_ = self;
- @setEvalBranchQuota(10_000);
- inline for (@typeInfo(Tag).Enum.fields) |tag| {
- inline for (@typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields) |entry| {
- if (comptime std.mem.eql(u8, tag.name, entry.name)) break;
+ const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields;
+ @setEvalBranchQuota(2_000);
+ inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| {
+ inline for (0..map_fields.len) |offset| {
+ if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break;
} else {
@compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry");
}
@@ -1318,14 +1507,14 @@ pub const static_keys = [_]Key{
.is_const = true,
} },
- // const_slice_u8_type
+ // slice_const_u8_type
.{ .ptr_type = .{
.elem_type = .u8_type,
.size = .Slice,
.is_const = true,
} },
- // const_slice_u8_sentinel_0_type
+ // slice_const_u8_sentinel_0_type
.{ .ptr_type = .{
.elem_type = .u8_type,
.sentinel = .zero_u8,
@@ -1505,12 +1694,13 @@ pub const Tag = enum(u8) {
/// `data` is `Index` of the type.
/// Untyped `undefined` is stored instead via `simple_value`.
undef,
+ /// A wrapper for values which are comptime-known but should
+ /// semantically be runtime-known.
+ /// `data` is `Index` of the value.
+ runtime_value,
/// A value that can be represented with only an enum tag.
/// data is SimpleValue enum value.
simple_value,
- /// A pointer to a var.
- /// data is extra index of PtrVal, which contains the type and address.
- ptr_var,
/// A pointer to a decl that can be mutated at comptime.
/// data is extra index of PtrMutDecl, which contains the type and address.
ptr_mut_decl,
@@ -1518,7 +1708,7 @@ pub const Tag = enum(u8) {
/// data is extra index of PtrDecl, which contains the type and address.
ptr_decl,
/// A pointer with an integer value.
- /// data is extra index of PtrInt, which contains the type and address.
+ /// data is extra index of PtrAddr, which contains the type and address.
/// Only pointer types are allowed to have this encoding. Optional types must use
/// `opt_payload` or `opt_null`.
ptr_int,
@@ -1585,6 +1775,24 @@ pub const Tag = enum(u8) {
/// A negative integer value.
/// data is a limbs index to `Int`.
int_negative,
+ /// The ABI alignment of a lazy type.
+ /// data is extra index of `IntLazy`.
+ int_lazy_align,
+ /// The ABI size of a lazy type.
+ /// data is extra index of `IntLazy`.
+ int_lazy_size,
+ /// An error value.
+ /// data is extra index of `Key.Error`.
+ error_set_error,
+ /// An error union error.
+ /// data is extra index of `Key.Error`.
+ error_union_error,
+ /// An error union payload.
+ /// data is extra index of `TypeValue`.
+ error_union_payload,
+ /// An enum literal value.
+ /// data is `NullTerminatedString` of the enum literal name.
+ enum_literal,
/// An enum tag value.
/// data is extra index of `Key.EnumTag`.
enum_tag,
@@ -1617,9 +1825,14 @@ pub const Tag = enum(u8) {
/// A comptime_float value.
/// data is extra index to Float128.
float_comptime_float,
+ /// A global variable.
+ /// data is extra index to Variable.
+ variable,
/// An extern function.
+ /// data is extra index to Key.ExternFunc.
extern_func,
/// A regular function.
+ /// data is extra index to Key.Func.
func,
/// This represents the only possible value for *some* types which have
/// only one possible value. Not all only-possible-values are encoded this way;
@@ -1631,6 +1844,9 @@ pub const Tag = enum(u8) {
only_possible_value,
/// data is extra index to Key.Union.
union_value,
+ /// An array of bytes.
+ /// data is extra index to `Bytes`.
+ bytes,
/// An instance of a struct, array, or vector.
/// data is extra index to `Aggregate`.
aggregate,
@@ -1670,6 +1886,13 @@ pub const TypeFunction = struct {
};
};
+pub const Bytes = struct {
+ /// The type of the aggregate
+ ty: Index,
+ /// Index into string_bytes, of len ip.aggregateTypeLen(ty)
+ bytes: String,
+};
+
/// Trailing:
/// 0. element: Index for each len
/// len is determined by the aggregate type.
@@ -1843,6 +2066,11 @@ pub const Array = struct {
}
};
+pub const TypeValue = struct {
+ ty: Index,
+ val: Index,
+};
+
/// Trailing:
/// 0. field name: NullTerminatedString for each fields_len; declaration order
/// 1. tag value: Index for each fields_len; declaration order
@@ -1888,21 +2116,22 @@ pub const PackedU64 = packed struct(u64) {
}
};
-pub const PtrVar = struct {
- ty: Index,
- /// If flags.is_extern == true this is `none`.
+pub const Variable = struct {
+ /// This is a value if has_init is true, otherwise a type.
init: Index,
- owner_decl: Module.Decl.Index,
+ decl: Module.Decl.Index,
/// Library name if specified.
/// For example `extern "c" var stderrp = ...` would have 'c' as library name.
lib_name: OptionalNullTerminatedString,
flags: Flags,
pub const Flags = packed struct(u32) {
+ has_init: bool,
+ is_extern: bool,
is_const: bool,
is_threadlocal: bool,
is_weak_linkage: bool,
- _: u29 = 0,
+ _: u27 = 0,
};
};
@@ -1917,7 +2146,7 @@ pub const PtrMutDecl = struct {
runtime_index: RuntimeIndex,
};
-pub const PtrInt = struct {
+pub const PtrAddr = struct {
ty: Index,
addr: Index,
};
@@ -1949,6 +2178,11 @@ pub const IntSmall = struct {
value: u32,
};
+pub const IntLazy = struct {
+ ty: Index,
+ lazy_ty: Index,
+};
+
/// A f64 value, broken up into 2 u32 parts.
pub const Float64 = struct {
piece0: u32,
@@ -2063,6 +2297,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.unions_free_list.deinit(gpa);
ip.allocated_unions.deinit(gpa);
+ ip.funcs_free_list.deinit(gpa);
+ ip.allocated_funcs.deinit(gpa);
+
ip.inferred_error_sets_free_list.deinit(gpa);
ip.allocated_inferred_error_sets.deinit(gpa);
@@ -2235,6 +2472,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.type_function => .{ .func_type = indexToKeyFuncType(ip, data) },
.undef => .{ .undef = @intToEnum(Index, data) },
+ .runtime_value => {
+ const val = @intToEnum(Index, data);
+ return .{ .runtime_value = .{
+ .ty = ip.typeOf(val),
+ .val = val,
+ } };
+ },
.opt_null => .{ .opt = .{
.ty = @intToEnum(Index, data),
.val = .none,
@@ -2251,18 +2495,11 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.val = payload_val,
} };
},
- .ptr_var => {
- const info = ip.extraData(PtrVar, data);
+ .ptr_decl => {
+ const info = ip.extraData(PtrDecl, data);
return .{ .ptr = .{
.ty = info.ty,
- .addr = .{ .@"var" = .{
- .init = info.init,
- .owner_decl = info.owner_decl,
- .lib_name = info.lib_name,
- .is_const = info.flags.is_const,
- .is_threadlocal = info.flags.is_threadlocal,
- .is_weak_linkage = info.flags.is_weak_linkage,
- } },
+ .addr = .{ .decl = info.decl },
} };
},
.ptr_mut_decl => {
@@ -2275,15 +2512,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
} },
} };
},
- .ptr_decl => {
- const info = ip.extraData(PtrDecl, data);
- return .{ .ptr = .{
- .ty = info.ty,
- .addr = .{ .decl = info.decl },
- } };
- },
.ptr_int => {
- const info = ip.extraData(PtrInt, data);
+ const info = ip.extraData(PtrAddr, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .int = info.addr },
@@ -2383,6 +2613,17 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.storage = .{ .u64 = info.value },
} };
},
+ .int_lazy_align, .int_lazy_size => |tag| {
+ const info = ip.extraData(IntLazy, data);
+ return .{ .int = .{
+ .ty = info.ty,
+ .storage = switch (tag) {
+ .int_lazy_align => .{ .lazy_align = info.lazy_ty },
+ .int_lazy_size => .{ .lazy_size = info.lazy_ty },
+ else => unreachable,
+ },
+ } };
+ },
.float_f16 => .{ .float = .{
.ty = .f16_type,
.storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) },
@@ -2415,8 +2656,21 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.ty = .comptime_float_type,
.storage = .{ .f128 = ip.extraData(Float128, data).get() },
} },
- .extern_func => @panic("TODO"),
- .func => @panic("TODO"),
+ .variable => {
+ const extra = ip.extraData(Variable, data);
+ return .{ .variable = .{
+ .ty = if (extra.flags.has_init) ip.typeOf(extra.init) else extra.init,
+ .init = if (extra.flags.has_init) extra.init else .none,
+ .decl = extra.decl,
+ .lib_name = extra.lib_name,
+ .is_extern = extra.flags.is_extern,
+ .is_const = extra.flags.is_const,
+ .is_threadlocal = extra.flags.is_threadlocal,
+ .is_weak_linkage = extra.flags.is_weak_linkage,
+ } };
+ },
+ .extern_func => .{ .extern_func = ip.extraData(Key.ExternFunc, data) },
+ .func => .{ .func = ip.extraData(Key.Func, data) },
.only_possible_value => {
const ty = @intToEnum(Index, data);
return switch (ip.indexToKey(ty)) {
@@ -2438,6 +2692,14 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
else => unreachable,
};
},
+ .bytes => {
+ const extra = ip.extraData(Bytes, data);
+ const len = @intCast(u32, ip.aggregateTypeLen(extra.ty));
+ return .{ .aggregate = .{
+ .ty = extra.ty,
+ .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] },
+ } };
+ },
.aggregate => {
const extra = ip.extraDataTrail(Aggregate, data);
const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty));
@@ -2455,6 +2717,22 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
} };
},
.union_value => .{ .un = ip.extraData(Key.Union, data) },
+ .error_set_error => .{ .err = ip.extraData(Key.Error, data) },
+ .error_union_error => {
+ const extra = ip.extraData(Key.Error, data);
+ return .{ .error_union = .{
+ .ty = extra.ty,
+ .val = .{ .err_name = extra.name },
+ } };
+ },
+ .error_union_payload => {
+ const extra = ip.extraData(TypeValue, data);
+ return .{ .error_union = .{
+ .ty = extra.ty,
+ .val = .{ .payload = extra.val },
+ } };
+ },
+ .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) },
.enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) },
};
}
@@ -2547,7 +2825,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
_ = ip.map.pop();
var new_key = key;
new_key.ptr_type.size = .Many;
- const ptr_type_index = try get(ip, gpa, new_key);
+ const ptr_type_index = try ip.get(gpa, new_key);
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
try ip.items.ensureUnusedCapacity(gpa, 1);
ip.items.appendAssumeCapacity(.{
@@ -2677,6 +2955,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.data = @enumToInt(ty),
});
},
+ .runtime_value => |runtime_value| {
+ assert(runtime_value.ty == ip.typeOf(runtime_value.val));
+ ip.items.appendAssumeCapacity(.{
+ .tag = .runtime_value,
+ .data = @enumToInt(runtime_value.val),
+ });
+ },
.struct_type => |struct_type| {
ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{
@@ -2809,7 +3094,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types));
},
- .extern_func => @panic("TODO"),
+ .variable => |variable| {
+ const has_init = variable.init != .none;
+ if (has_init) assert(variable.ty == ip.typeOf(variable.init));
+ ip.items.appendAssumeCapacity(.{
+ .tag = .variable,
+ .data = try ip.addExtra(gpa, Variable{
+ .init = if (has_init) variable.init else variable.ty,
+ .decl = variable.decl,
+ .lib_name = variable.lib_name,
+ .flags = .{
+ .has_init = has_init,
+ .is_extern = variable.is_extern,
+ .is_const = variable.is_const,
+ .is_threadlocal = variable.is_threadlocal,
+ .is_weak_linkage = variable.is_weak_linkage,
+ },
+ }),
+ });
+ },
+
+ .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{
+ .tag = .extern_func,
+ .data = try ip.addExtra(gpa, extern_func),
+ }),
+
+ .func => |func| ip.items.appendAssumeCapacity(.{
+ .tag = .func,
+ .data = try ip.addExtra(gpa, func),
+ }),
.ptr => |ptr| {
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
@@ -2817,20 +3130,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.none => {
assert(ptr_type.size != .Slice);
switch (ptr.addr) {
- .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{
- .tag = .ptr_var,
- .data = try ip.addExtra(gpa, PtrVar{
- .ty = ptr.ty,
- .init = @"var".init,
- .owner_decl = @"var".owner_decl,
- .lib_name = @"var".lib_name,
- .flags = .{
- .is_const = @"var".is_const,
- .is_threadlocal = @"var".is_threadlocal,
- .is_weak_linkage = @"var".is_weak_linkage,
- },
- }),
- }),
.decl => |decl| ip.items.appendAssumeCapacity(.{
.tag = .ptr_decl,
.data = try ip.addExtra(gpa, PtrDecl{
@@ -2846,31 +3145,41 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.runtime_index = mut_decl.runtime_index,
}),
}),
- .int => |int| ip.items.appendAssumeCapacity(.{
- .tag = .ptr_int,
- .data = try ip.addExtra(gpa, PtrInt{
- .ty = ptr.ty,
- .addr = int,
- }),
- }),
- .eu_payload, .opt_payload => |data| ip.items.appendAssumeCapacity(.{
- .tag = switch (ptr.addr) {
- .eu_payload => .ptr_eu_payload,
- .opt_payload => .ptr_opt_payload,
- else => unreachable,
- },
- .data = @enumToInt(data),
- }),
- .comptime_field => |field_val| ip.items.appendAssumeCapacity(.{
- .tag = .ptr_comptime_field,
- .data = try ip.addExtra(gpa, PtrComptimeField{
- .ty = ptr.ty,
- .field_val = field_val,
- }),
- }),
+ .int => |int| {
+ assert(int != .none);
+ ip.items.appendAssumeCapacity(.{
+ .tag = .ptr_int,
+ .data = try ip.addExtra(gpa, PtrAddr{
+ .ty = ptr.ty,
+ .addr = int,
+ }),
+ });
+ },
+ .eu_payload, .opt_payload => |data| {
+ assert(data != .none);
+ ip.items.appendAssumeCapacity(.{
+ .tag = switch (ptr.addr) {
+ .eu_payload => .ptr_eu_payload,
+ .opt_payload => .ptr_opt_payload,
+ else => unreachable,
+ },
+ .data = @enumToInt(data),
+ });
+ },
+ .comptime_field => |field_val| {
+ assert(field_val != .none);
+ ip.items.appendAssumeCapacity(.{
+ .tag = .ptr_comptime_field,
+ .data = try ip.addExtra(gpa, PtrComptimeField{
+ .ty = ptr.ty,
+ .field_val = field_val,
+ }),
+ });
+ },
.elem, .field => |base_index| {
+ assert(base_index.base != .none);
_ = ip.map.pop();
- const index_index = try get(ip, gpa, .{ .int = .{
+ const index_index = try ip.get(gpa, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = base_index.index },
} });
@@ -2894,7 +3203,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
new_key.ptr.ty = ip.slicePtrType(ptr.ty);
new_key.ptr.len = .none;
assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many);
- const ptr_index = try get(ip, gpa, new_key);
+ const ptr_index = try ip.get(gpa, new_key);
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
try ip.items.ensureUnusedCapacity(gpa, 1);
ip.items.appendAssumeCapacity(.{
@@ -2921,8 +3230,25 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.int => |int| b: {
+ assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type);
+ switch (int.storage) {
+ .u64, .i64, .big_int => {},
+ .lazy_align, .lazy_size => |lazy_ty| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = switch (int.storage) {
+ else => unreachable,
+ .lazy_align => .int_lazy_align,
+ .lazy_size => .int_lazy_size,
+ },
+ .data = try ip.addExtra(gpa, IntLazy{
+ .ty = int.ty,
+ .lazy_ty = lazy_ty,
+ }),
+ });
+ return @intToEnum(Index, ip.items.len - 1);
+ },
+ }
switch (int.ty) {
- .none => unreachable,
.u8_type => switch (int.storage) {
.big_int => |big_int| {
ip.items.appendAssumeCapacity(.{
@@ -2938,6 +3264,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
break :b;
},
+ .lazy_align, .lazy_size => unreachable,
},
.u16_type => switch (int.storage) {
.big_int => |big_int| {
@@ -2954,6 +3281,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
break :b;
},
+ .lazy_align, .lazy_size => unreachable,
},
.u32_type => switch (int.storage) {
.big_int => |big_int| {
@@ -2970,6 +3298,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
break :b;
},
+ .lazy_align, .lazy_size => unreachable,
},
.i32_type => switch (int.storage) {
.big_int => |big_int| {
@@ -2987,6 +3316,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
break :b;
},
+ .lazy_align, .lazy_size => unreachable,
},
.usize_type => switch (int.storage) {
.big_int => |big_int| {
@@ -3007,6 +3337,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
break :b;
}
},
+ .lazy_align, .lazy_size => unreachable,
},
.comptime_int_type => switch (int.storage) {
.big_int => |big_int| {
@@ -3041,6 +3372,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
break :b;
}
},
+ .lazy_align, .lazy_size => unreachable,
},
else => {},
}
@@ -3077,9 +3409,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
try addInt(ip, gpa, int.ty, tag, big_int.limbs);
},
+ .lazy_align, .lazy_size => unreachable,
}
},
+ .err => |err| ip.items.appendAssumeCapacity(.{
+ .tag = .error_set_error,
+ .data = try ip.addExtra(gpa, err),
+ }),
+
+ .error_union => |error_union| ip.items.appendAssumeCapacity(switch (error_union.val) {
+ .err_name => |err_name| .{
+ .tag = .error_union_error,
+ .data = try ip.addExtra(gpa, Key.Error{
+ .ty = error_union.ty,
+ .name = err_name,
+ }),
+ },
+ .payload => |payload| .{
+ .tag = .error_union_payload,
+ .data = try ip.addExtra(gpa, TypeValue{
+ .ty = error_union.ty,
+ .val = payload,
+ }),
+ },
+ }),
+
+ .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{
+ .tag = .enum_literal,
+ .data = @enumToInt(enum_literal),
+ }),
+
.enum_tag => |enum_tag| {
assert(enum_tag.ty != .none);
assert(enum_tag.int != .none);
@@ -3131,9 +3491,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.aggregate => |aggregate| {
- assert(aggregate.ty != .none);
+ const ty_key = ip.indexToKey(aggregate.ty);
const aggregate_len = ip.aggregateTypeLen(aggregate.ty);
switch (aggregate.storage) {
+ .bytes => {
+ assert(ty_key.array_type.child == .u8_type);
+ },
.elems => |elems| {
assert(elems.len == aggregate_len);
for (elems) |elem| assert(elem != .none);
@@ -3151,9 +3514,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
return @intToEnum(Index, ip.items.len - 1);
}
- switch (ip.indexToKey(aggregate.ty)) {
+ switch (ty_key) {
.anon_struct_type => |anon_struct_type| {
if (switch (aggregate.storage) {
+ .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| {
+ if (value != ip.getIfExists(.{ .int = .{
+ .ty = .u8_type,
+ .storage = .{ .u64 = byte },
+ } })) break false;
+ } else true,
.elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems),
.repeated_elem => |elem| for (anon_struct_type.values) |value| {
if (value != elem) break false;
@@ -3173,34 +3542,80 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
if (switch (aggregate.storage) {
+ .bytes => |bytes| for (bytes[1..]) |byte| {
+ if (byte != bytes[0]) break false;
+ } else true,
.elems => |elems| for (elems[1..]) |elem| {
if (elem != elems[0]) break false;
} else true,
.repeated_elem => true,
}) {
+ const elem = switch (aggregate.storage) {
+ .bytes => |bytes| elem: {
+ _ = ip.map.pop();
+ const elem = try ip.get(gpa, .{ .int = .{
+ .ty = .u8_type,
+ .storage = .{ .u64 = bytes[0] },
+ } });
+ assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
+ try ip.items.ensureUnusedCapacity(gpa, 1);
+ break :elem elem;
+ },
+ .elems => |elems| elems[0],
+ .repeated_elem => |elem| elem,
+ };
+
try ip.extra.ensureUnusedCapacity(
gpa,
@typeInfo(Repeated).Struct.fields.len,
);
-
ip.items.appendAssumeCapacity(.{
.tag = .repeated,
.data = ip.addExtraAssumeCapacity(Repeated{
.ty = aggregate.ty,
- .elem_val = switch (aggregate.storage) {
- .elems => |elems| elems[0],
- .repeated_elem => |elem| elem,
- },
+ .elem_val = elem,
}),
});
return @intToEnum(Index, ip.items.len - 1);
}
+ switch (ty_key) {
+ .array_type => |array_type| if (array_type.child == .u8_type) {
+ const len_including_sentinel = aggregate_len + @boolToInt(array_type.sentinel != .none);
+ try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1);
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
+ var buffer: Key.Int.Storage.BigIntSpace = undefined;
+ switch (aggregate.storage) {
+ .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes),
+ .elems => |elems| for (elems) |elem| ip.string_bytes.appendAssumeCapacity(
+ ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
+ ),
+ .repeated_elem => |elem| @memset(
+ ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len),
+ ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable,
+ ),
+ }
+ if (array_type.sentinel != .none) ip.string_bytes.appendAssumeCapacity(
+ ip.indexToKey(array_type.sentinel).int.storage.toBigInt(&buffer).to(u8) catch
+ unreachable,
+ );
+ const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel);
+ ip.items.appendAssumeCapacity(.{
+ .tag = .bytes,
+ .data = ip.addExtraAssumeCapacity(Bytes{
+ .ty = aggregate.ty,
+ .bytes = bytes.toString(),
+ }),
+ });
+ return @intToEnum(Index, ip.items.len - 1);
+ },
+ else => {},
+ }
+
try ip.extra.ensureUnusedCapacity(
gpa,
@typeInfo(Aggregate).Struct.fields.len + aggregate_len,
);
-
ip.items.appendAssumeCapacity(.{
.tag = .aggregate,
.data = ip.addExtraAssumeCapacity(Aggregate{
@@ -3423,12 +3838,16 @@ pub fn finishGetEnum(
return @intToEnum(Index, ip.items.len - 1);
}
-pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
+pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
- const index = ip.map.getIndexAdapted(key, adapter).?;
+ const index = ip.map.getIndexAdapted(key, adapter) orelse return null;
return @intToEnum(Index, index);
}
+pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
+ return ip.getIfExists(key).?;
+}
+
fn addStringsToMap(
ip: *InternPool,
gpa: Allocator,
@@ -3500,9 +3919,11 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
Module.Decl.Index => @enumToInt(@field(extra, field.name)),
Module.Namespace.Index => @enumToInt(@field(extra, field.name)),
Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)),
+ Module.Fn.Index => @enumToInt(@field(extra, field.name)),
MapIndex => @enumToInt(@field(extra, field.name)),
OptionalMapIndex => @enumToInt(@field(extra, field.name)),
RuntimeIndex => @enumToInt(@field(extra, field.name)),
+ String => @enumToInt(@field(extra, field.name)),
NullTerminatedString => @enumToInt(@field(extra, field.name)),
OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
@@ -3510,7 +3931,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)),
Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
Pointer.VectorIndex => @enumToInt(@field(extra, field.name)),
- PtrVar.Flags => @bitCast(u32, @field(extra, field.name)),
+ Variable.Flags => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type: " ++ @typeName(field.type)),
});
}
@@ -3566,9 +3987,11 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
Module.Decl.Index => @intToEnum(Module.Decl.Index, int32),
Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32),
Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32),
+ Module.Fn.Index => @intToEnum(Module.Fn.Index, int32),
MapIndex => @intToEnum(MapIndex, int32),
OptionalMapIndex => @intToEnum(OptionalMapIndex, int32),
RuntimeIndex => @intToEnum(RuntimeIndex, int32),
+ String => @intToEnum(String, int32),
NullTerminatedString => @intToEnum(NullTerminatedString, int32),
OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32),
i32 => @bitCast(i32, int32),
@@ -3576,7 +3999,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32),
Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32),
- PtrVar.Flags => @bitCast(PtrVar.Flags, int32),
+ Variable.Flags => @bitCast(Variable.Flags, int32),
else => @compileError("bad field type: " ++ @typeName(field.type)),
};
}
@@ -3700,8 +4123,8 @@ pub fn childType(ip: InternPool, i: Index) Index {
/// Given a slice type, returns the type of the ptr field.
pub fn slicePtrType(ip: InternPool, i: Index) Index {
switch (i) {
- .const_slice_u8_type => return .manyptr_const_u8_type,
- .const_slice_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type,
+ .slice_const_u8_type => return .manyptr_const_u8_type,
+ .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type,
else => {},
}
const item = ip.items.get(@enumToInt(i));
@@ -3830,6 +4253,8 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
} },
} });
},
+
+ .lazy_align, .lazy_size => unreachable,
}
}
@@ -3862,6 +4287,14 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType {
}
}
+pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex {
+ assert(val != .none);
+ const tags = ip.items.items(.tag);
+ if (tags[@enumToInt(val)] != .func) return .none;
+ const datas = ip.items.items(.data);
+ return ip.extraData(Key.Func, datas[@enumToInt(val)]).index.toOptional();
+}
+
pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex {
assert(val != .none);
const tags = ip.items.items(.tag);
@@ -3891,6 +4324,15 @@ pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool {
return tags[@enumToInt(ty)] == .type_inferred_error_set;
}
+/// This is only legal because the initializer is not part of the hash.
+pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
+ assert(ip.items.items(.tag)[@enumToInt(index)] == .variable);
+ const field_index = inline for (@typeInfo(Variable).Struct.fields, 0..) |field, field_index| {
+ if (comptime std.mem.eql(u8, field.name, "init")) break field_index;
+ } else unreachable;
+ ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index);
+}
+
pub fn dump(ip: InternPool) void {
dumpFallible(ip, std.heap.page_allocator) catch return;
}
@@ -3903,10 +4345,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
(@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));
const unions_size = ip.allocated_unions.len *
(@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));
+ const funcs_size = ip.allocated_funcs.len *
+ (@sizeOf(Module.Fn) + @sizeOf(Module.Decl));
// TODO: map overhead size is not taken into account
const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size +
- structs_size + unions_size;
+ structs_size + unions_size + funcs_size;
std.debug.print(
\\InternPool size: {d} bytes
@@ -3915,6 +4359,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
\\ {d} limbs: {d} bytes
\\ {d} structs: {d} bytes
\\ {d} unions: {d} bytes
+ \\ {d} funcs: {d} bytes
\\
, .{
total_size,
@@ -3928,6 +4373,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
structs_size,
ip.allocated_unions.len,
unions_size,
+ ip.allocated_funcs.len,
+ funcs_size,
});
const tags = ip.items.items(.tag);
@@ -3982,12 +4429,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
},
.undef => 0,
+ .runtime_value => 0,
.simple_type => 0,
.simple_value => 0,
- .ptr_var => @sizeOf(PtrVar),
.ptr_decl => @sizeOf(PtrDecl),
.ptr_mut_decl => @sizeOf(PtrMutDecl),
- .ptr_int => @sizeOf(PtrInt),
+ .ptr_int => @sizeOf(PtrAddr),
.ptr_eu_payload => 0,
.ptr_opt_payload => 0,
.ptr_comptime_field => @sizeOf(PtrComptimeField),
@@ -4011,8 +4458,20 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
const int = ip.limbData(Int, data);
break :b @sizeOf(Int) + int.limbs_len * 8;
},
+
+ .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy),
+
+ .error_set_error, .error_union_error => @sizeOf(Key.Error),
+ .error_union_payload => @sizeOf(TypeValue),
+ .enum_literal => 0,
.enum_tag => @sizeOf(Key.EnumTag),
+ .bytes => b: {
+ const info = ip.extraData(Bytes, data);
+ const len = @intCast(u32, ip.aggregateTypeLen(info.ty));
+ break :b @sizeOf(Bytes) + len +
+ @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0);
+ },
.aggregate => b: {
const info = ip.extraData(Aggregate, data);
const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty));
@@ -4028,8 +4487,9 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.float_c_longdouble_f80 => @sizeOf(Float80),
.float_c_longdouble_f128 => @sizeOf(Float128),
.float_comptime_float => @sizeOf(Float128),
- .extern_func => @panic("TODO"),
- .func => @panic("TODO"),
+ .variable => @sizeOf(Variable) + @sizeOf(Module.Decl),
+ .extern_func => @sizeOf(Key.ExternFunc) + @sizeOf(Module.Decl),
+ .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl),
.only_possible_value => 0,
.union_value => @sizeOf(Key.Union),
});
@@ -4071,6 +4531,14 @@ pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Un
return ip.allocated_unions.at(@enumToInt(index));
}
+pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn {
+ return ip.allocated_funcs.at(@enumToInt(index));
+}
+
+pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn {
+ return ip.allocated_funcs.at(@enumToInt(index));
+}
+
pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet {
return ip.allocated_inferred_error_sets.at(@enumToInt(index));
}
@@ -4117,6 +4585,25 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index)
};
}
+pub fn createFunc(
+ ip: *InternPool,
+ gpa: Allocator,
+ initialization: Module.Fn,
+) Allocator.Error!Module.Fn.Index {
+ if (ip.funcs_free_list.popOrNull()) |index| return index;
+ const ptr = try ip.allocated_funcs.addOne(gpa);
+ ptr.* = initialization;
+ return @intToEnum(Module.Fn.Index, ip.allocated_funcs.len - 1);
+}
+
+pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void {
+ ip.funcPtr(index).* = undefined;
+ ip.funcs_free_list.append(gpa, index) catch {
+ // In order to keep `destroyFunc` a non-fallible function, we ignore memory
+        // allocation failures here, instead leaking the Fn until garbage collection.
+ };
+}
+
pub fn createInferredErrorSet(
ip: *InternPool,
gpa: Allocator,
@@ -4142,9 +4629,25 @@ pub fn getOrPutString(
s: []const u8,
) Allocator.Error!NullTerminatedString {
const string_bytes = &ip.string_bytes;
- const str_index = @intCast(u32, string_bytes.items.len);
try string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
string_bytes.appendSliceAssumeCapacity(s);
+ string_bytes.appendAssumeCapacity(0);
+ return ip.getOrPutTrailingString(gpa, s.len + 1);
+}
+
+/// Uses the last len bytes of ip.string_bytes as the key.
+pub fn getOrPutTrailingString(
+ ip: *InternPool,
+ gpa: Allocator,
+ len: usize,
+) Allocator.Error!NullTerminatedString {
+ const string_bytes = &ip.string_bytes;
+ const str_index = @intCast(u32, string_bytes.items.len - len);
+ if (len > 0 and string_bytes.getLast() == 0) {
+ _ = string_bytes.pop();
+ } else {
+ try string_bytes.ensureUnusedCapacity(gpa, 1);
+ }
const key: []const u8 = string_bytes.items[str_index..];
const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{
.bytes = string_bytes,
@@ -4179,6 +4682,10 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 {
return string_bytes[start..end :0];
}
+pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 {
+ return ip.stringToSlice(s.unwrap() orelse return null);
+}
+
pub fn typeOf(ip: InternPool, index: Index) Index {
return ip.indexToKey(index).typeOf();
}
@@ -4199,7 +4706,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
};
}
-pub fn isNoReturn(ip: InternPool, ty: InternPool.Index) bool {
+pub fn isNoReturn(ip: InternPool, ty: Index) bool {
return switch (ty) {
.noreturn_type => true,
else => switch (ip.indexToKey(ty)) {
src/link.zig
@@ -564,7 +564,8 @@ pub const File = struct {
}
/// May be called before or after updateDeclExports for any given Decl.
- pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
+ pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void {
+ const func = module.funcPtr(func_index);
const owner_decl = module.declPtr(func.owner_decl);
log.debug("updateFunc {*} ({s}), type={}", .{
owner_decl, owner_decl.name, owner_decl.ty.fmt(module),
@@ -575,14 +576,14 @@ pub const File = struct {
}
switch (base.tag) {
// zig fmt: off
- .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness),
- .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness),
- .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness),
- .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness),
- .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness),
- .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness),
- .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness),
- .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func, air, liveness),
+ .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func_index, air, liveness),
+ .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func_index, air, liveness),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func_index, air, liveness),
+ .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness),
+ .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func_index, air, liveness),
+ .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func_index, air, liveness),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func_index, air, liveness),
+ .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func_index, air, liveness),
// zig fmt: on
}
}
src/Module.zig
@@ -109,7 +109,7 @@ memoized_calls: MemoizedCallSet = .{},
/// Contains the values from `@setAlignStack`. A sparse table is used here
/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
/// functions are many.
-align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
+align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{},
/// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`.
@@ -242,22 +242,23 @@ pub const StringLiteralAdapter = struct {
};
const MonomorphedFuncsSet = std.HashMapUnmanaged(
- *Fn,
+ Fn.Index,
void,
MonomorphedFuncsContext,
std.hash_map.default_max_load_percentage,
);
const MonomorphedFuncsContext = struct {
- pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool {
+ mod: *Module,
+
+ pub fn eql(ctx: @This(), a: Fn.Index, b: Fn.Index) bool {
_ = ctx;
return a == b;
}
/// Must match `Sema.GenericCallAdapter.hash`.
- pub fn hash(ctx: @This(), key: *Fn) u64 {
- _ = ctx;
- return key.hash;
+ pub fn hash(ctx: @This(), key: Fn.Index) u64 {
+ return ctx.mod.funcPtr(key).hash;
}
};
@@ -272,7 +273,7 @@ pub const MemoizedCall = struct {
module: *Module,
pub const Key = struct {
- func: *Fn,
+ func: Fn.Index,
args: []TypedValue,
};
@@ -652,21 +653,12 @@ pub const Decl = struct {
pub fn clearValues(decl: *Decl, mod: *Module) void {
const gpa = mod.gpa;
- if (decl.getExternFn()) |extern_fn| {
- extern_fn.deinit(gpa);
- gpa.destroy(extern_fn);
- }
- if (decl.getFunction()) |func| {
+ if (decl.getFunctionIndex(mod).unwrap()) |func| {
_ = mod.align_stack_fns.remove(func);
- if (func.comptime_args != null) {
- _ = mod.monomorphed_funcs.remove(func);
+ if (mod.funcPtr(func).comptime_args != null) {
+ _ = mod.monomorphed_funcs.removeContext(func, .{ .mod = mod });
}
- func.deinit(gpa);
- gpa.destroy(func);
- }
- if (decl.getVariable()) |variable| {
- variable.deinit(gpa);
- gpa.destroy(variable);
+ mod.destroyFunc(func);
}
if (decl.value_arena) |value_arena| {
if (decl.owns_tv) {
@@ -835,11 +827,11 @@ pub const Decl = struct {
/// If the Decl has a value and it is a struct, return it,
/// otherwise null.
- pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct {
- return mod.structPtrUnwrap(getStructIndex(decl, mod));
+ pub fn getStruct(decl: Decl, mod: *Module) ?*Struct {
+ return mod.structPtrUnwrap(decl.getStructIndex(mod));
}
- pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex {
+ pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
if (!decl.owns_tv) return .none;
if (decl.val.ip_index == .none) return .none;
return mod.intern_pool.indexToStructType(decl.val.ip_index);
@@ -847,7 +839,7 @@ pub const Decl = struct {
/// If the Decl has a value and it is a union, return it,
/// otherwise null.
- pub fn getUnion(decl: *Decl, mod: *Module) ?*Union {
+ pub fn getUnion(decl: Decl, mod: *Module) ?*Union {
if (!decl.owns_tv) return null;
if (decl.val.ip_index == .none) return null;
return mod.typeToUnion(decl.val.toType());
@@ -855,32 +847,30 @@ pub const Decl = struct {
/// If the Decl has a value and it is a function, return it,
/// otherwise null.
- pub fn getFunction(decl: *const Decl) ?*Fn {
- if (!decl.owns_tv) return null;
- const func = (decl.val.castTag(.function) orelse return null).data;
- return func;
+ pub fn getFunction(decl: Decl, mod: *Module) ?*Fn {
+ return mod.funcPtrUnwrap(decl.getFunctionIndex(mod));
+ }
+
+ pub fn getFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex {
+ return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none;
}
/// If the Decl has a value and it is an extern function, returns it,
/// otherwise null.
- pub fn getExternFn(decl: *const Decl) ?*ExternFn {
- if (!decl.owns_tv) return null;
- const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data;
- return extern_fn;
+ pub fn getExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc {
+ return if (decl.owns_tv) decl.val.getExternFunc(mod) else null;
}
/// If the Decl has a value and it is a variable, returns it,
/// otherwise null.
- pub fn getVariable(decl: *const Decl) ?*Var {
- if (!decl.owns_tv) return null;
- const variable = (decl.val.castTag(.variable) orelse return null).data;
- return variable;
+ pub fn getVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable {
+ return if (decl.owns_tv) decl.val.getVariable(mod) else null;
}
/// Gets the namespace that this Decl creates by being a struct, union,
/// enum, or opaque.
/// Only returns it if the Decl is the owner.
- pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex {
+ pub fn getInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex {
if (!decl.owns_tv) return .none;
return switch (decl.val.ip_index) {
.empty_struct_type => .none,
@@ -896,8 +886,8 @@ pub const Decl = struct {
}
/// Same as `getInnerNamespaceIndex` but additionally obtains the pointer.
- pub fn getInnerNamespace(decl: *Decl, mod: *Module) ?*Namespace {
- return if (getInnerNamespaceIndex(decl, mod).unwrap()) |i| mod.namespacePtr(i) else null;
+ pub fn getInnerNamespace(decl: Decl, mod: *Module) ?*Namespace {
+ return if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| mod.namespacePtr(i) else null;
}
pub fn dump(decl: *Decl) void {
@@ -927,14 +917,11 @@ pub const Decl = struct {
assert(decl.dependencies.swapRemove(other));
}
- pub fn isExtern(decl: Decl) bool {
+ pub fn isExtern(decl: Decl, mod: *Module) bool {
assert(decl.has_tv);
- return switch (decl.val.ip_index) {
- .none => switch (decl.val.tag()) {
- .extern_fn => true,
- .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value,
- else => false,
- },
+ return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+ .variable => |variable| variable.is_extern,
+ .extern_func => true,
else => false,
};
}
@@ -1494,6 +1481,28 @@ pub const Fn = struct {
is_noinline: bool,
calls_or_awaits_errorable_fn: bool = false,
+ pub const Index = enum(u32) {
+ _,
+
+ pub fn toOptional(i: Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(i));
+ }
+ };
+
+ pub const OptionalIndex = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+
+ pub fn init(oi: ?Index) OptionalIndex {
+ return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+ }
+
+ pub fn unwrap(oi: OptionalIndex) ?Index {
+ if (oi == .none) return null;
+ return @intToEnum(Index, @enumToInt(oi));
+ }
+ };
+
pub const Analysis = enum {
/// This function has not yet undergone analysis, because we have not
/// seen a potential runtime call. It may be analyzed in future.
@@ -1519,7 +1528,7 @@ pub const Fn = struct {
/// or comptime functions.
pub const InferredErrorSet = struct {
/// The function from which this error set originates.
- func: *Fn,
+ func: Fn.Index,
/// All currently known errors that this error set contains. This includes
/// direct additions via `return error.Foo;`, and possibly also errors that
@@ -1543,8 +1552,8 @@ pub const Fn = struct {
pub const Index = enum(u32) {
_,
- pub fn toOptional(i: Index) OptionalIndex {
- return @intToEnum(OptionalIndex, @enumToInt(i));
+ pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+ return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(i));
}
};
@@ -1552,13 +1561,13 @@ pub const Fn = struct {
none = std.math.maxInt(u32),
_,
- pub fn init(oi: ?Index) OptionalIndex {
- return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+ pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
+ return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(oi orelse return .none));
}
- pub fn unwrap(oi: OptionalIndex) ?Index {
+ pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
if (oi == .none) return null;
- return @intToEnum(Index, @enumToInt(oi));
+ return @intToEnum(InferredErrorSet.Index, @enumToInt(oi));
}
};
@@ -1587,12 +1596,6 @@ pub const Fn = struct {
}
};
- /// TODO: remove this function
- pub fn deinit(func: *Fn, gpa: Allocator) void {
- _ = func;
- _ = gpa;
- }
-
pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool {
const file = mod.declPtr(func.owner_decl).getFileScope(mod);
@@ -1647,28 +1650,6 @@ pub const Fn = struct {
}
};
-pub const Var = struct {
- /// if is_extern == true this is undefined
- init: Value,
- owner_decl: Decl.Index,
-
- /// Library name if specified.
- /// For example `extern "c" var stderrp = ...` would have 'c' as library name.
- /// Allocated with Module's allocator; outlives the ZIR code.
- lib_name: ?[*:0]const u8,
-
- is_extern: bool,
- is_mutable: bool,
- is_threadlocal: bool,
- is_weak_linkage: bool,
-
- pub fn deinit(variable: *Var, gpa: Allocator) void {
- if (variable.lib_name) |lib_name| {
- gpa.free(mem.sliceTo(lib_name, 0));
- }
- }
-};
-
pub const DeclAdapter = struct {
mod: *Module,
@@ -3472,6 +3453,10 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
return mod.intern_pool.structPtr(index);
}
+pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn {
+ return mod.intern_pool.funcPtr(index);
+}
+
pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet {
return mod.intern_pool.inferredErrorSetPtr(index);
}
@@ -3479,7 +3464,11 @@ pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.I
/// This one accepts an index from the InternPool and asserts that it is not
/// the anonymous empty struct type.
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
- return structPtr(mod, index.unwrap() orelse return null);
+ return mod.structPtr(index.unwrap() orelse return null);
+}
+
+pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn {
+ return mod.funcPtr(index.unwrap() orelse return null);
}
/// Returns true if and only if the Decl is the top level struct associated with a File.
@@ -3952,7 +3941,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
};
}
- if (decl.getFunction()) |func| {
+ if (decl.getFunction(mod)) |func| {
func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse {
try file.deleted_decls.append(gpa, decl_index);
continue;
@@ -4139,7 +4128,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered.
- if (decl.getFunction()) |func| {
+ if (decl.getFunctionIndex(mod).unwrap()) |func| {
_ = mod.align_stack_fns.remove(func);
}
@@ -4229,10 +4218,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
}
}
-pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
+pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void {
const tracy = trace(@src());
defer tracy.end();
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -4264,7 +4254,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
defer tmp_arena.deinit();
const sema_arena = tmp_arena.allocator();
- var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) {
+ var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) {
error.AnalysisFail => {
if (func.state == .in_progress) {
// If this decl caused the compile error, the analysis field would
@@ -4333,7 +4323,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
if (no_bin_file and !dump_llvm_ir) return;
- comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) {
+ comp.bin_file.updateFunc(mod, func_index, air, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
decl.analysis = .codegen_failure;
@@ -4363,7 +4353,8 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
/// analyzed, and for ensuring it can exist at runtime (see
/// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body
/// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
-pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void {
+pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void {
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -4401,7 +4392,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void {
// Decl itself is safely analyzed, and body analysis is not yet queued
- try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
+ try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index });
if (mod.emit_h != null) {
// TODO: we ideally only want to do this if the function's type changed
// since the last update
@@ -4532,8 +4523,10 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
};
defer sema.deinit();
@@ -4628,8 +4621,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
};
defer sema.deinit();
@@ -4707,8 +4702,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
return true;
}
- if (decl_tv.val.castTag(.function)) |fn_payload| {
- const func = fn_payload.data;
+ if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| {
+ const func = mod.funcPtr(func_index);
const owns_tv = func.owner_decl == decl_index;
if (owns_tv) {
var prev_type_has_bits = false;
@@ -4718,7 +4713,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
if (decl.has_tv) {
prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod);
type_changed = !decl.ty.eql(decl_tv.ty, mod);
- if (decl.getFunction()) |prev_func| {
+ if (decl.getFunction(mod)) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
}
}
@@ -4757,38 +4752,25 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
switch (decl_tv.val.ip_index) {
.generic_poison => unreachable,
.unreachable_value => unreachable,
-
- .none => switch (decl_tv.val.tag()) {
- .variable => {
- const variable = decl_tv.val.castTag(.variable).?.data;
- if (variable.owner_decl == decl_index) {
- decl.owns_tv = true;
- queue_linker_work = true;
-
- const copied_init = try variable.init.copy(decl_arena_allocator);
- variable.init = copied_init;
- }
+ else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) {
+ .variable => |variable| if (variable.decl == decl_index) {
+ decl.owns_tv = true;
+ queue_linker_work = true;
},
- .extern_fn => {
- const extern_fn = decl_tv.val.castTag(.extern_fn).?.data;
- if (extern_fn.owner_decl == decl_index) {
- decl.owns_tv = true;
- queue_linker_work = true;
- is_extern = true;
- }
+
+ .extern_func => |extern_fn| if (extern_fn.decl == decl_index) {
+ decl.owns_tv = true;
+ queue_linker_work = true;
+ is_extern = true;
},
- .function => {},
+ .func => {},
else => {
log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name });
queue_linker_work = true;
},
},
- else => {
- log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name });
- queue_linker_work = true;
- },
}
decl.ty = decl_tv.ty;
@@ -4810,12 +4792,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr;
};
decl.@"addrspace" = blk: {
- const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.ip_index) {
- .none => switch (decl_tv.val.tag()) {
- .function, .extern_fn => .function,
- .variable => .variable,
- else => .constant,
- },
+ const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) {
+ .variable => .variable,
+ .extern_func, .func => .function,
else => .constant,
};
@@ -5388,7 +5367,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.has_align = has_align;
decl.has_linksection_or_addrspace = has_linksection_or_addrspace;
decl.zir_decl_index = @intCast(u32, decl_sub_index);
- if (decl.getFunction()) |_| {
+ if (decl.getFunctionIndex(mod) != .none) {
switch (comp.bin_file.tag) {
.coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
@@ -5572,11 +5551,12 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void
export_owners.deinit(mod.gpa);
}
-pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air {
const tracy = trace(@src());
defer tracy.end();
const gpa = mod.gpa;
+ const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
@@ -5597,8 +5577,10 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = func,
+ .func_index = func_index.toOptional(),
.fn_ret_ty = fn_ty_info.return_type.toType(),
.owner_func = func,
+ .owner_func_index = func_index.toOptional(),
.branch_quota = @max(func.branch_quota, Sema.default_branch_quota),
};
defer sema.deinit();
@@ -5807,8 +5789,7 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
for (kv.value) |err| err.deinit(mod.gpa);
}
if (decl.has_tv and decl.owns_tv) {
- if (decl.val.castTag(.function)) |payload| {
- const func = payload.data;
+ if (decl.getFunctionIndex(mod).unwrap()) |func| {
_ = mod.align_stack_fns.remove(func);
}
}
@@ -5852,6 +5833,14 @@ pub fn destroyUnion(mod: *Module, index: Union.Index) void {
return mod.intern_pool.destroyUnion(mod.gpa, index);
}
+pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index {
+ return mod.intern_pool.createFunc(mod.gpa, initialization);
+}
+
+pub fn destroyFunc(mod: *Module, index: Fn.Index) void {
+ return mod.intern_pool.destroyFunc(mod.gpa, index);
+}
+
pub fn allocateNewDecl(
mod: *Module,
namespace: Namespace.Index,
@@ -6499,7 +6488,11 @@ pub fn populateTestFunctions(
try mod.ensureDeclAnalyzed(decl_index);
}
const decl = mod.declPtr(decl_index);
- const tmp_test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
+ const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
+ const null_usize = try mod.intern(.{ .opt = .{
+ .ty = try mod.intern(.{ .opt_type = .usize_type }),
+ .val = .none,
+ } });
const array_decl_index = d: {
// Add mod.test_functions to an array decl then make the test_functions
@@ -6512,7 +6505,7 @@ pub fn populateTestFunctions(
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
.ty = try mod.arrayType(.{
.len = test_fn_vals.len,
- .child = tmp_test_fn_ty.ip_index,
+ .child = test_fn_ty.ip_index,
.sentinel = .none,
}),
.val = try Value.Tag.aggregate.create(arena, test_fn_vals),
@@ -6530,7 +6523,7 @@ pub fn populateTestFunctions(
errdefer name_decl_arena.deinit();
const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice);
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
- .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod),
+ .ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }),
.val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes),
});
try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena);
@@ -6540,16 +6533,24 @@ pub fn populateTestFunctions(
array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal);
try mod.linkerUpdateDecl(test_name_decl_index);
- const field_vals = try arena.create([3]Value);
- field_vals.* = .{
- try Value.Tag.slice.create(arena, .{
- .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index),
- .len = try mod.intValue(Type.usize, test_name_slice.len),
- }), // name
- try Value.Tag.decl_ref.create(arena, test_decl_index), // func
- Value.null, // async_frame_size
+ const test_fn_fields = .{
+ // name
+ try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = test_name_decl_index },
+ } }),
+ // func
+ try mod.intern(.{ .ptr = .{
+ .ty = test_decl.ty.ip_index,
+ .addr = .{ .decl = test_decl_index },
+ } }),
+ // async_frame_size
+ null_usize,
};
- test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals);
+ test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{
+ .ty = test_fn_ty.ip_index,
+ .storage = .{ .elems = &test_fn_fields },
+ } })).toValue();
}
try array_decl.finalizeNewArena(&new_decl_arena);
@@ -6558,36 +6559,25 @@ pub fn populateTestFunctions(
try mod.linkerUpdateDecl(array_decl_index);
{
- var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
- errdefer new_decl_arena.deinit();
- const arena = new_decl_arena.allocator();
-
- {
- // This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
- const new_ty = try Type.ptr(arena, mod, .{
- .size = .Slice,
- .pointee_type = tmp_test_fn_ty,
- .mutable = false,
- .@"addrspace" = .generic,
- });
- const new_var = try gpa.create(Var);
- errdefer gpa.destroy(new_var);
- new_var.* = decl.val.castTag(.variable).?.data.*;
- new_var.init = try Value.Tag.slice.create(arena, .{
- .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index),
- .len = try mod.intValue(Type.usize, mod.test_functions.count()),
- });
- const new_val = try Value.Tag.variable.create(arena, new_var);
-
- // Since we are replacing the Decl's value we must perform cleanup on the
- // previous value.
- decl.clearValues(mod);
- decl.ty = new_ty;
- decl.val = new_val;
- decl.has_tv = true;
- }
+ const new_ty = try mod.ptrType(.{
+ .elem_type = test_fn_ty.ip_index,
+ .is_const = true,
+ .size = .Slice,
+ });
+ const new_val = decl.val;
+ const new_init = try mod.intern(.{ .ptr = .{
+ .ty = new_ty.ip_index,
+ .addr = .{ .decl = array_decl_index },
+ .len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index,
+ } });
+ mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init);
- try decl.finalizeNewArena(&new_decl_arena);
+ // Since we are replacing the Decl's value we must perform cleanup on the
+ // previous value.
+ decl.clearValues(mod);
+ decl.ty = new_ty;
+ decl.val = new_val;
+ decl.has_tv = true;
}
try mod.linkerUpdateDecl(decl_index);
}
@@ -6660,50 +6650,47 @@ fn reportRetryableFileError(
}
pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void {
- if (val.ip_index != .none) return;
- switch (val.tag()) {
- .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index),
- .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl),
- .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl),
- .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl),
- .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data),
-
- .repeated,
- .eu_payload,
- .opt_payload,
- .empty_array_sentinel,
- => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data),
-
- .eu_payload_ptr,
- .opt_payload_ptr,
- => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr),
-
- .slice => {
- const slice = val.cast(Value.Payload.Slice).?.data;
- mod.markReferencedDeclsAlive(slice.ptr);
- mod.markReferencedDeclsAlive(slice.len);
- },
-
- .elem_ptr => {
- const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data;
- return mod.markReferencedDeclsAlive(elem_ptr.array_ptr);
- },
- .field_ptr => {
- const field_ptr = val.cast(Value.Payload.FieldPtr).?.data;
- return mod.markReferencedDeclsAlive(field_ptr.container_ptr);
- },
- .aggregate => {
- for (val.castTag(.aggregate).?.data) |field_val| {
- mod.markReferencedDeclsAlive(field_val);
- }
+ switch (val.ip_index) {
+ .none => switch (val.tag()) {
+ .aggregate => {
+ for (val.castTag(.aggregate).?.data) |field_val| {
+ mod.markReferencedDeclsAlive(field_val);
+ }
+ },
+ .@"union" => {
+ const data = val.castTag(.@"union").?.data;
+ mod.markReferencedDeclsAlive(data.tag);
+ mod.markReferencedDeclsAlive(data.val);
+ },
+ else => {},
},
- .@"union" => {
- const data = val.cast(Value.Payload.Union).?.data;
- mod.markReferencedDeclsAlive(data.tag);
- mod.markReferencedDeclsAlive(data.val);
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .variable => |variable| mod.markDeclIndexAlive(variable.decl),
+ .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl),
+ .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
+ .error_union => |error_union| switch (error_union.val) {
+ .err_name => {},
+ .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()),
+ },
+ .ptr => |ptr| {
+ switch (ptr.addr) {
+ .decl => |decl| mod.markDeclIndexAlive(decl),
+ .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl),
+ .int, .comptime_field => {},
+ .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()),
+ .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()),
+ }
+ if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue());
+ },
+ .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()),
+ .aggregate => |aggregate| for (aggregate.storage.values()) |elem|
+ mod.markReferencedDeclsAlive(elem.toValue()),
+ .un => |un| {
+ mod.markReferencedDeclsAlive(un.tag.toValue());
+ mod.markReferencedDeclsAlive(un.val.toValue());
+ },
+ else => {},
},
-
- else => {},
}
}
@@ -7075,6 +7062,12 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
return @intCast(u16, big.bitCountTwosComp());
},
+ .lazy_align => |lazy_ty| {
+ return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @boolToInt(sign);
+ },
+ .lazy_size => |lazy_ty| {
+ return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @boolToInt(sign);
+ },
}
}
src/print_air.zig
@@ -699,8 +699,8 @@ const Writer = struct {
fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
- const function = w.air.values[ty_pl.payload].castTag(.function).?.data;
- const owner_decl = w.module.declPtr(function.owner_decl);
+ const func_index = w.module.intern_pool.indexToFunc(w.air.values[ty_pl.payload].ip_index);
+ const owner_decl = w.module.declPtr(w.module.funcPtrUnwrap(func_index).?.owner_decl);
try s.print("{s}", .{owner_decl.name});
}
src/Sema.zig
@@ -28,10 +28,12 @@ owner_decl_index: Decl.Index,
/// For an inline or comptime function call, this will be the root parent function
/// which contains the callsite. Corresponds to `owner_decl`.
owner_func: ?*Module.Fn,
+owner_func_index: Module.Fn.OptionalIndex,
/// The function this ZIR code is the body of, according to the source code.
/// This starts out the same as `owner_func` and then diverges in the case of
/// an inline or comptime function call.
func: ?*Module.Fn,
+func_index: Module.Fn.OptionalIndex,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// When semantic analysis needs to know the return type of the function whose body
@@ -65,7 +67,7 @@ comptime_args_fn_inst: Zir.Inst.Index = 0,
/// to use this instead of allocating a fresh one. This avoids an unnecessary
/// extra hash table lookup in the `monomorphed_funcs` set.
/// Sema will set this to null when it takes ownership.
-preallocated_new_func: ?*Module.Fn = null,
+preallocated_new_func: Module.Fn.OptionalIndex = .none,
/// The key is types that must be fully resolved prior to machine code
/// generation pass. Types are added to this set when resolving them
/// immediately could cause a dependency loop, but they do need to be resolved
@@ -92,7 +94,7 @@ unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}
const std = @import("std");
const math = std.math;
const mem = std.mem;
-const Allocator = std.mem.Allocator;
+const Allocator = mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.sema);
@@ -1777,7 +1779,7 @@ pub fn resolveConstString(
reason: []const u8,
) ![]u8 {
const air_inst = try sema.resolveInst(zir_ref);
- const wanted_type = Type.const_slice_u8;
+ const wanted_type = Type.slice_const_u8;
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst, reason);
return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod);
@@ -1866,11 +1868,10 @@ fn resolveConstMaybeUndefVal(
if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| {
switch (val.ip_index) {
.generic_poison => return error.GenericPoison,
- .none => switch (val.tag()) {
+ else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
.variable => return sema.failWithNeededComptime(block, src, reason),
else => return val,
},
- else => return val,
}
}
return sema.failWithNeededComptime(block, src, reason);
@@ -1889,11 +1890,11 @@ fn resolveConstValue(
switch (val.ip_index) {
.generic_poison => return error.GenericPoison,
.undef => return sema.failWithUseOfUndef(block, src),
- .none => switch (val.tag()) {
+ else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
+ .undef => return sema.failWithUseOfUndef(block, src),
.variable => return sema.failWithNeededComptime(block, src, reason),
else => return val,
},
- else => return val,
}
}
return sema.failWithNeededComptime(block, src, reason);
@@ -1928,11 +1929,11 @@ fn resolveMaybeUndefVal(
const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null;
switch (val.ip_index) {
.generic_poison => return error.GenericPoison,
- .none => switch (val.tag()) {
+ .none => return val,
+ else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
.variable => return null,
else => return val,
},
- else => return val,
}
}
@@ -1948,21 +1949,20 @@ fn resolveMaybeUndefValIntable(
var check = val;
while (true) switch (check.ip_index) {
.generic_poison => return error.GenericPoison,
- .none => switch (check.tag()) {
- .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null,
- .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr,
- .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr,
- .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
- else => {
- try sema.resolveLazyValue(val);
- return val;
+ .none => break,
+ else => switch (sema.mod.intern_pool.indexToKey(check.ip_index)) {
+ .variable => return null,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl, .mut_decl, .comptime_field => return null,
+ .int => break,
+ .eu_payload, .opt_payload => |base| check = base.toValue(),
+ .elem, .field => |base_index| check = base_index.base.toValue(),
},
- },
- else => {
- try sema.resolveLazyValue(val);
- return val;
+ else => break,
},
};
+ try sema.resolveLazyValue(val);
+ return val;
}
/// Returns all Value tags including `variable` and `undef`.
@@ -1994,7 +1994,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
if (air_tags[i] == .constant) {
const ty_pl = sema.air_instructions.items(.data)[i].ty_pl;
const val = sema.air_values.items[ty_pl.payload];
- if (val.tagIsVariable()) return val;
+ if (val.getVariable(sema.mod) != null) return val;
}
return opv;
}
@@ -2003,7 +2003,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
.constant => {
const ty_pl = air_datas[i].ty_pl;
const val = sema.air_values.items[ty_pl.payload];
- if (val.isRuntimeValue()) make_runtime.* = true;
+ if (val.isRuntimeValue(sema.mod)) make_runtime.* = true;
if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true;
return val;
},
@@ -2489,13 +2489,13 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
.@"addrspace" = addr_space,
});
try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index);
- return sema.addConstant(
- ptr_ty,
- try Value.Tag.decl_ref_mut.create(sema.arena, .{
- .decl_index = iac.data.decl_index,
+ return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{
+ .ty = ptr_ty.ip_index,
+ .addr = .{ .mut_decl = .{
+ .decl = iac.data.decl_index,
.runtime_index = block.runtime_index,
- }),
- );
+ } },
+ } })).toValue());
},
else => {},
}
@@ -2949,12 +2949,18 @@ fn zirEnumDecl(
}
const prev_owner_func = sema.owner_func;
+ const prev_owner_func_index = sema.owner_func_index;
sema.owner_func = null;
+ sema.owner_func_index = .none;
defer sema.owner_func = prev_owner_func;
+ defer sema.owner_func_index = prev_owner_func_index;
const prev_func = sema.func;
+ const prev_func_index = sema.func_index;
sema.func = null;
+ sema.func_index = .none;
defer sema.func = prev_func;
+ defer sema.func_index = prev_func_index;
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope);
defer wip_captures.deinit();
@@ -3735,14 +3741,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst;
try sema.maybeQueueFuncBodyAnalysis(decl_index);
- if (var_is_mut) {
- sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{
- .decl_index = decl_index,
+ sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{
+ .ty = final_ptr_ty.ip_index,
+ .addr = if (var_is_mut) .{ .mut_decl = .{
+ .decl = decl_index,
.runtime_index = block.runtime_index,
- });
- } else {
- sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl_index);
- }
+ } } else .{ .decl = decl_index },
+ } })).toValue();
},
.inferred_alloc => {
assert(sema.unresolved_inferred_allocs.remove(ptr_inst));
@@ -3836,7 +3841,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
// block so that codegen does not see it.
block.instructions.shrinkRetainingCapacity(search_index);
try sema.maybeQueueFuncBodyAnalysis(new_decl_index);
- sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index);
+ sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{
+ .ty = final_elem_ty.ip_index,
+ .addr = .{ .decl = new_decl_index },
+ } })).toValue();
// if bitcast ty ref needs to be made const, make_ptr_const
// ZIR handles it later, so we can just use the ty ref here.
air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty;
@@ -4332,12 +4340,16 @@ fn validateUnionInit(
// instead a single `store` to the result ptr with a comptime union value.
block.instructions.shrinkRetainingCapacity(first_block_index);
- var union_val = try Value.Tag.@"union".create(sema.arena, .{
- .tag = tag_val,
- .val = val,
- });
- if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val);
- const union_init = try sema.addConstant(union_ty, union_val);
+ var union_val = try mod.intern(.{ .un = .{
+ .ty = union_ty.ip_index,
+ .tag = tag_val.ip_index,
+ .val = val.ip_index,
+ } });
+ if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{
+ .ty = union_ty.ip_index,
+ .val = union_val,
+ } });
+ const union_init = try sema.addConstant(union_ty, union_val.toValue());
try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store);
return;
} else if (try sema.typeRequiresComptime(union_ty)) {
@@ -4464,14 +4476,15 @@ fn validateStructInit(
// We collect the comptime field values in case the struct initialization
// ends up being comptime-known.
- const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod));
+ const field_values = try sema.gpa.alloc(InternPool.Index, struct_ty.structFieldCount(mod));
+ defer sema.gpa.free(field_values);
field: for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) {
// Determine whether the value stored to this pointer is comptime-known.
const field_ty = struct_ty.structFieldType(i, mod);
if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
- field_values[i] = opv;
+ field_values[i] = opv.ip_index;
continue;
}
@@ -4536,7 +4549,7 @@ fn validateStructInit(
first_block_index = @min(first_block_index, block_index);
}
if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
- field_values[i] = val;
+ field_values[i] = val.ip_index;
} else if (require_comptime) {
const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node;
return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known");
@@ -4570,7 +4583,7 @@ fn validateStructInit(
}
continue;
}
- field_values[i] = default_val;
+ field_values[i] = default_val.ip_index;
}
if (root_msg) |msg| {
@@ -4593,9 +4606,15 @@ fn validateStructInit(
// instead a single `store` to the struct_ptr with a comptime struct value.
block.instructions.shrinkRetainingCapacity(first_block_index);
- var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values);
- if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val);
- const struct_init = try sema.addConstant(struct_ty, struct_val);
+ var struct_val = try mod.intern(.{ .aggregate = .{
+ .ty = struct_ty.ip_index,
+ .storage = .{ .elems = field_values },
+ } });
+ if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{
+ .ty = struct_ty.ip_index,
+ .val = struct_val,
+ } });
+ const struct_init = try sema.addConstant(struct_ty, struct_val.toValue());
try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
return;
}
@@ -4611,7 +4630,7 @@ fn validateStructInit(
else
try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true);
const field_ty = sema.typeOf(default_field_ptr).childType(mod);
- const init = try sema.addConstant(field_ty, field_values[i]);
+ const init = try sema.addConstant(field_ty, field_values[i].toValue());
try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store);
}
}
@@ -4691,7 +4710,8 @@ fn zirValidateArrayInit(
// Collect the comptime element values in case the array literal ends up
// being comptime-known.
const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod));
- const element_vals = try sema.arena.alloc(Value, array_len_s);
+ const element_vals = try sema.gpa.alloc(InternPool.Index, array_len_s);
+ defer sema.gpa.free(element_vals);
const opt_opv = try sema.typeHasOnePossibleValue(array_ty);
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
@@ -4701,13 +4721,13 @@ fn zirValidateArrayInit(
if (array_ty.isTuple(mod)) {
if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
- element_vals[i] = opv;
+ element_vals[i] = opv.ip_index;
continue;
}
} else {
// Array has one possible value, so value is always comptime-known
if (opt_opv) |opv| {
- element_vals[i] = opv;
+ element_vals[i] = opv.ip_index;
continue;
}
}
@@ -4768,7 +4788,7 @@ fn zirValidateArrayInit(
first_block_index = @min(first_block_index, block_index);
}
if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| {
- element_vals[i] = val;
+ element_vals[i] = val.ip_index;
} else {
array_is_comptime = false;
}
@@ -4780,9 +4800,12 @@ fn zirValidateArrayInit(
if (array_is_comptime) {
if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| {
- if (ptr_val.tag() == .comptime_field_ptr) {
- // This store was validated by the individual elem ptrs.
- return;
+ switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .comptime_field => return, // This store was validated by the individual elem ptrs.
+ else => {},
+ },
+ else => {},
}
}
@@ -4790,14 +4813,20 @@ fn zirValidateArrayInit(
// instead a single `store` to the array_ptr with a comptime struct value.
// Also to populate the sentinel value, if any.
if (array_ty.sentinel(mod)) |sentinel_val| {
- element_vals[instrs.len] = sentinel_val;
+ element_vals[instrs.len] = sentinel_val.ip_index;
}
block.instructions.shrinkRetainingCapacity(first_block_index);
- var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals);
- if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val);
- const array_init = try sema.addConstant(array_ty, array_val);
+ var array_val = try mod.intern(.{ .aggregate = .{
+ .ty = array_ty.ip_index,
+ .storage = .{ .elems = element_vals },
+ } });
+ if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{
+ .ty = array_ty.ip_index,
+ .val = array_val,
+ } });
+ const array_init = try sema.addConstant(array_ty, array_val.toValue());
try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store);
}
}
@@ -5029,7 +5058,7 @@ fn storeToInferredAllocComptime(
// There will be only one store_to_inferred_ptr because we are running at comptime.
// The alloc will turn into a Decl.
if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: {
- if (operand_val.tagIsVariable()) break :store;
+ if (operand_val.getVariable(sema.mod) != null) break :store;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
iac.data.decl_index = try anon_decl.finish(
@@ -5717,8 +5746,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
{
try mod.ensureDeclAnalyzed(decl_index);
const exported_decl = mod.declPtr(decl_index);
- if (exported_decl.val.castTag(.function)) |some| {
- return sema.analyzeExport(block, src, options, some.data.owner_decl);
+ if (exported_decl.getFunction(mod)) |function| {
+ return sema.analyzeExport(block, src, options, function.owner_decl);
}
}
try sema.analyzeExport(block, src, options, decl_index);
@@ -5741,17 +5770,14 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
},
else => |e| return e,
};
- const decl_index = switch (operand.val.tag()) {
- .function => operand.val.castTag(.function).?.data.owner_decl,
- else => blk: {
- var anon_decl = try block.startAnonDecl();
- defer anon_decl.deinit();
- break :blk try anon_decl.finish(
- operand.ty,
- try operand.val.copy(anon_decl.arena()),
- 0,
- );
- },
+ const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ break :blk try anon_decl.finish(
+ operand.ty,
+ try operand.val.copy(anon_decl.arena()),
+ 0,
+ );
};
try sema.analyzeExport(block, src, options, decl_index);
}
@@ -5788,7 +5814,7 @@ pub fn analyzeExport(
}
// TODO: some backends might support re-exporting extern decls
- if (exported_decl.isExtern()) {
+ if (exported_decl.isExtern(mod)) {
return sema.fail(block, src, "export target cannot be extern", .{});
}
@@ -5796,7 +5822,7 @@ pub fn analyzeExport(
mod.markDeclAlive(exported_decl);
try sema.maybeQueueFuncBodyAnalysis(exported_decl_index);
- const gpa = mod.gpa;
+ const gpa = sema.gpa;
try mod.decl_exports.ensureUnusedCapacity(gpa, 1);
try mod.export_owners.ensureUnusedCapacity(gpa, 1);
@@ -5852,8 +5878,9 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
alignment,
});
}
- const func = sema.func orelse
+ const func_index = sema.func_index.unwrap() orelse
return sema.fail(block, src, "@setAlignStack outside function body", .{});
+ const func = mod.funcPtr(func_index);
const fn_owner_decl = mod.declPtr(func.owner_decl);
switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
@@ -5864,7 +5891,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
},
}
- const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func);
+ const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index);
if (gop.found_existing) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
@@ -6191,10 +6218,13 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
const mod = sema.mod;
const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null;
if (func_val.isUndef(mod)) return null;
- const owner_decl_index = switch (func_val.tag()) {
- .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl,
- .function => func_val.castTag(.function).?.data.owner_decl,
- .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl,
+ const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
+ .extern_func => |extern_func| extern_func.decl,
+ .func => |func| mod.funcPtr(func.index).owner_decl,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| decl,
+ else => return null,
+ },
else => return null,
};
return mod.declPtr(owner_decl_index);
@@ -6576,20 +6606,22 @@ const GenericCallAdapter = struct {
is_anytype: bool,
};
- pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool {
+ pub fn eql(ctx: @This(), adapted_key: void, other_key: Module.Fn.Index) bool {
_ = adapted_key;
+ const other_func = ctx.module.funcPtr(other_key);
+
// Checking for equality may happen on an item that has been inserted
// into the map but is not yet fully initialized. In such case, the
// two initialized fields are `hash` and `generic_owner_decl`.
- if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false;
+ if (ctx.generic_fn.owner_decl != other_func.generic_owner_decl.unwrap().?) return false;
- const other_comptime_args = other_key.comptime_args.?;
+ const other_comptime_args = other_func.comptime_args.?;
for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| {
const this_arg = ctx.args[i];
const this_is_comptime = !this_arg.val.isGenericPoison();
const other_is_comptime = !other_arg.val.isGenericPoison();
const this_is_anytype = this_arg.is_anytype;
- const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i));
+ const other_is_anytype = other_func.isAnytypeParam(ctx.module, @intCast(u32, i));
if (other_is_anytype != this_is_anytype) return false;
if (other_is_comptime != this_is_comptime) return false;
@@ -6663,7 +6695,7 @@ fn analyzeCall(
);
errdefer msg.destroy(sema.gpa);
- if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
+ if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -6760,18 +6792,21 @@ fn analyzeCall(
if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err);
return err;
};
- const module_fn = switch (func_val.tag()) {
- .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data,
- .function => func_val.castTag(.function).?.data,
- .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{
+ const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
+ .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
- else => {
- assert(callee_ty.isPtrAtRuntime(mod));
- return sema.fail(block, call_src, "{s} call of function pointer", .{
- @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
- });
+ .func => |function| function.index,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| mod.declPtr(decl).getFunctionIndex(mod).unwrap().?,
+ else => {
+ assert(callee_ty.isPtrAtRuntime(mod));
+ return sema.fail(block, call_src, "{s} call of function pointer", .{
+ @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
+ });
+ },
},
+ else => unreachable,
};
if (func_ty_info.is_var_args) {
return sema.fail(block, call_src, "{s} call of variadic function", .{
@@ -6804,6 +6839,7 @@ fn analyzeCall(
// In order to save a bit of stack space, directly modify Sema rather
// than create a child one.
const parent_zir = sema.code;
+ const module_fn = mod.funcPtr(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
sema.code = fn_owner_decl.getFileScope(mod).zir;
defer sema.code = parent_zir;
@@ -6819,8 +6855,11 @@ fn analyzeCall(
}
const parent_func = sema.func;
+ const parent_func_index = sema.func_index;
sema.func = module_fn;
+ sema.func_index = module_fn_index.toOptional();
defer sema.func = parent_func;
+ defer sema.func_index = parent_func_index;
const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry;
sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index;
@@ -6856,7 +6895,7 @@ fn analyzeCall(
defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args);
if (is_comptime_call) {
memoized_call_key = .{
- .func = module_fn,
+ .func = module_fn_index,
.args = try gpa.alloc(TypedValue, func_ty_info.param_types.len),
};
delete_memoized_call_key = true;
@@ -6889,7 +6928,7 @@ fn analyzeCall(
&child_block,
.unneeded,
inst,
- new_fn_info,
+ &new_fn_info,
&arg_i,
uncasted_args,
is_comptime_call,
@@ -6907,7 +6946,7 @@ fn analyzeCall(
&child_block,
mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src),
inst,
- new_fn_info,
+ &new_fn_info,
&arg_i,
uncasted_args,
is_comptime_call,
@@ -6950,7 +6989,7 @@ fn analyzeCall(
const fn_ret_ty = blk: {
if (module_fn.hasInferredErrorSet(mod)) {
const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
- .func = module_fn,
+ .func = module_fn_index,
});
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
@@ -6982,7 +7021,7 @@ fn analyzeCall(
const new_func_resolved_ty = try mod.funcType(new_fn_info);
if (!is_comptime_call and !block.is_typeof) {
- try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin);
+ try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin);
const zir_tags = sema.code.instructions.items(.tag);
for (fn_info.param_body) |param| switch (zir_tags[param]) {
@@ -7014,7 +7053,7 @@ fn analyzeCall(
error.ComptimeReturn => break :result inlining.comptime_result,
error.AnalysisFail => {
const err_msg = sema.err orelse return err;
- if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err;
+ if (mem.eql(u8, err_msg.msg, recursive_msg)) return err;
try sema.errNote(block, call_src, err_msg, "called from here", .{});
err_msg.clearTrace(sema.gpa);
return err;
@@ -7027,8 +7066,8 @@ fn analyzeCall(
if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) {
try sema.emitDbgInline(
block,
- module_fn,
- parent_func.?,
+ module_fn_index,
+ parent_func_index.unwrap().?,
mod.declPtr(parent_func.?.owner_decl).ty,
.dbg_inline_end,
);
@@ -7120,8 +7159,8 @@ fn analyzeCall(
}
if (try sema.resolveMaybeUndefVal(func)) |func_val| {
- if (func_val.castTag(.function)) |func_obj| {
- try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data);
+ if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| {
+ try sema.mod.ensureFuncBodyAnalysisQueued(func_index);
}
}
@@ -7147,9 +7186,9 @@ fn analyzeCall(
// Function pointers and extern functions aren't guaranteed to
// actually be noreturn so we add a safety check for them.
check: {
- var func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check;
- switch (func_val.tag()) {
- .function, .decl_ref => {
+ const func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check;
+ switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
+ .func, .extern_func, .ptr => {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
},
@@ -7196,7 +7235,7 @@ fn analyzeInlineCallArg(
param_block: *Block,
arg_src: LazySrcLoc,
inst: Zir.Inst.Index,
- new_fn_info: InternPool.Key.FuncType,
+ new_fn_info: *InternPool.Key.FuncType,
arg_i: *usize,
uncasted_args: []const Air.Inst.Ref,
is_comptime_call: bool,
@@ -7263,7 +7302,7 @@ fn analyzeInlineCallArg(
try sema.resolveLazyValue(arg_val);
},
}
- should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState();
+ should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod);
memoized_call_key.args[arg_i.*] = .{
.ty = param_ty.toType(),
.val = arg_val,
@@ -7302,7 +7341,7 @@ fn analyzeInlineCallArg(
try sema.resolveLazyValue(arg_val);
},
}
- should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState();
+ should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod);
memoized_call_key.args[arg_i.*] = .{
.ty = sema.typeOf(uncasted_arg),
.val = arg_val,
@@ -7387,11 +7426,11 @@ fn instantiateGenericCall(
const gpa = sema.gpa;
const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known");
- const module_fn = switch (func_val.tag()) {
- .function => func_val.castTag(.function).?.data,
- .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data,
+ const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.ip_index)) {
+ .func => |function| function.index,
+ .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?,
else => unreachable,
- };
+ });
// Check the Module's generic function map with an adapted context, so that we
// can match against `uncasted_args` rather than doing the work below to create a
// generic Scope only to junk it if it matches an existing instantiation.
@@ -7496,16 +7535,17 @@ fn instantiateGenericCall(
.args = generic_args,
.module = mod,
};
- const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter);
- const callee = if (!gop.found_existing) callee: {
- const new_module_func = try gpa.create(Module.Fn);
+ const gop = try mod.monomorphed_funcs.getOrPutContextAdapted(gpa, {}, adapter, .{ .mod = mod });
+ const callee_index = if (!gop.found_existing) callee: {
+ const new_module_func_index = try mod.createFunc(undefined);
+ const new_module_func = mod.funcPtr(new_module_func_index);
// This ensures that we can operate on the hash map before the Module.Fn
// struct is fully initialized.
new_module_func.hash = precomputed_hash;
new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional();
new_module_func.comptime_args = null;
- gop.key_ptr.* = new_module_func;
+ gop.key_ptr.* = new_module_func_index;
try namespace.anon_decls.ensureUnusedCapacity(gpa, 1);
@@ -7549,7 +7589,7 @@ fn instantiateGenericCall(
new_decl_index,
uncasted_args,
module_fn,
- new_module_func,
+ new_module_func_index,
namespace_index,
func_ty_info,
call_src,
@@ -7565,12 +7605,12 @@ fn instantiateGenericCall(
}
assert(namespace.anon_decls.orderedRemove(new_decl_index));
mod.destroyDecl(new_decl_index);
- assert(mod.monomorphed_funcs.remove(new_module_func));
- gpa.destroy(new_module_func);
+ assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod }));
+ mod.destroyFunc(new_module_func_index);
return err;
},
else => {
- assert(mod.monomorphed_funcs.remove(new_module_func));
+ assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod }));
{
errdefer new_decl_arena.deinit();
try new_decl.finalizeNewArena(&new_decl_arena);
@@ -7590,6 +7630,7 @@ fn instantiateGenericCall(
try new_decl.finalizeNewArena(&new_decl_arena);
break :callee new_func;
} else gop.key_ptr.*;
+ const callee = mod.funcPtr(callee_index);
callee.branch_quota = @max(callee.branch_quota, sema.branch_quota);
@@ -7645,7 +7686,7 @@ fn instantiateGenericCall(
sema.owner_func.?.calls_or_awaits_errorable_fn = true;
}
- try sema.mod.ensureFuncBodyAnalysisQueued(callee);
+ try sema.mod.ensureFuncBodyAnalysisQueued(callee_index);
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
runtime_args_len);
@@ -7682,12 +7723,12 @@ fn resolveGenericInstantiationType(
new_decl_index: Decl.Index,
uncasted_args: []const Air.Inst.Ref,
module_fn: *Module.Fn,
- new_module_func: *Module.Fn,
+ new_module_func: Module.Fn.Index,
namespace: Namespace.Index,
func_ty_info: InternPool.Key.FuncType,
call_src: LazySrcLoc,
bound_arg_src: ?LazySrcLoc,
-) !*Module.Fn {
+) !Module.Fn.Index {
const mod = sema.mod;
const gpa = sema.gpa;
@@ -7707,11 +7748,13 @@ fn resolveGenericInstantiationType(
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
.comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
.comptime_args_fn_inst = module_fn.zir_body_inst,
- .preallocated_new_func = new_module_func,
+ .preallocated_new_func = new_module_func.toOptional(),
.is_generic_instantiation = true,
.branch_quota = sema.branch_quota,
.branch_count = sema.branch_count,
@@ -7802,8 +7845,8 @@ fn resolveGenericInstantiationType(
const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable;
- const new_func = new_func_val.castTag(.function).?.data;
- errdefer new_func.deinit(gpa);
+ const new_func = new_func_val.getFunctionIndex(mod).unwrap().?;
+ errdefer mod.destroyFunc(new_func);
assert(new_func == new_module_func);
arg_i = 0;
@@ -7867,7 +7910,10 @@ fn resolveGenericInstantiationType(
return error.GenericPoison;
}
- new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func);
+ new_decl.val = (try mod.intern(.{ .func = .{
+ .ty = new_decl.ty.ip_index,
+ .index = new_func,
+ } })).toValue();
new_decl.@"align" = 0;
new_decl.has_tv = true;
new_decl.owns_tv = true;
@@ -7900,8 +7946,8 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type)
fn emitDbgInline(
sema: *Sema,
block: *Block,
- old_func: *Module.Fn,
- new_func: *Module.Fn,
+ old_func: Module.Fn.Index,
+ new_func: Module.Fn.Index,
new_func_ty: Type,
tag: Air.Inst.Tag,
) CompileError!void {
@@ -7910,7 +7956,10 @@ fn emitDbgInline(
// Recursive inline call; no dbg_inline needed.
if (old_func == new_func) return;
- try sema.air_values.append(sema.gpa, try Value.Tag.function.create(sema.arena, new_func));
+ try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{
+ .ty = new_func_ty.ip_index,
+ .index = new_func,
+ } })).toValue());
_ = try block.addInst(.{
.tag = tag,
.data = .{ .ty_pl = .{
@@ -8078,12 +8127,11 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const name = inst_data.get(sema.code);
// Create an error set type with only this error value, and return the value.
const kv = try sema.mod.getErrorValue(name);
- return sema.addConstant(
- try mod.singleErrorSetType(kv.key),
- try Value.Tag.@"error".create(sema.arena, .{
- .name = kv.key,
- }),
- );
+ const error_set_type = try mod.singleErrorSetType(kv.key);
+ return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{
+ .ty = error_set_type.ip_index,
+ .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key),
+ } })).toValue());
}
fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
@@ -8101,23 +8149,11 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
if (val.isUndef(mod)) {
return sema.addConstUndef(Type.err_int);
}
- switch (val.tag()) {
- .@"error" => {
- return sema.addConstant(
- Type.err_int,
- try mod.intValue(
- Type.err_int,
- (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value,
- ),
- );
- },
-
- // This is not a valid combination with the type `anyerror`.
- .the_only_possible_value => unreachable,
-
- // Assume it's already encoded as an integer.
- else => return sema.addConstant(Type.err_int, val),
- }
+ const err_name = mod.intern_pool.indexToKey(val.ip_index).err.name;
+ return sema.addConstant(Type.err_int, try mod.intValue(
+ Type.err_int,
+ (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value,
+ ));
}
const op_ty = sema.typeOf(uncasted_operand);
@@ -8142,23 +8178,21 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src);
- const mod = sema.mod;
if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod));
if (int > sema.mod.global_error_set.count() or int == 0)
return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
- const payload = try sema.arena.create(Value.Payload.Error);
- payload.* = .{
- .base = .{ .tag = .@"error" },
- .data = .{ .name = sema.mod.error_name_list.items[int] },
- };
- return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base));
+ return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{
+ .ty = .anyerror_type,
+ .name = mod.intern_pool.getString(sema.mod.error_name_list.items[int]).unwrap().?,
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, src, operand_src);
if (block.wantSafety()) {
@@ -8234,12 +8268,12 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
- const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code));
- return sema.addConstant(
- .{ .ip_index = .enum_literal_type },
- try Value.Tag.enum_literal.create(sema.arena, duped_name),
- );
+ const name = inst_data.get(sema.code);
+ return sema.addConstant(.{ .ip_index = .enum_literal_type }, (try mod.intern(.{
+ .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name),
+ })).toValue());
}
fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -8404,32 +8438,26 @@ fn analyzeOptionalPayloadPtr(
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| {
if (initializing) {
- if (!ptr_val.isComptimeMutablePtr()) {
+ if (!ptr_val.isComptimeMutablePtr(mod)) {
// If the pointer resulting from this function was stored at comptime,
// the optional non-null bit would be set that way. But in this case,
// we need to emit a runtime instruction to do it.
_ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
}
- return sema.addConstant(
- child_pointer,
- try Value.Tag.opt_payload_ptr.create(sema.arena, .{
- .container_ptr = ptr_val,
- .container_ty = optional_ptr_ty.childType(mod),
- }),
- );
+ return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{
+ .ty = child_pointer.ip_index,
+ .addr = .{ .opt_payload = ptr_val.ip_index },
+ } })).toValue());
}
if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
if (val.isNull(mod)) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
// The same Value represents the pointer to the optional and the payload.
- return sema.addConstant(
- child_pointer,
- try Value.Tag.opt_payload_ptr.create(sema.arena, .{
- .container_ptr = ptr_val,
- .container_ty = optional_ptr_ty.childType(mod),
- }),
- );
+ return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{
+ .ty = child_pointer.ip_index,
+ .addr = .{ .opt_payload = ptr_val.ip_index },
+ } })).toValue());
}
}
@@ -8532,11 +8560,13 @@ fn analyzeErrUnionPayload(
const mod = sema.mod;
const payload_ty = err_union_ty.errorUnionPayload(mod);
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
- if (val.getError()) |name| {
+ if (val.getError(mod)) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
- const data = val.castTag(.eu_payload).?.data;
- return sema.addConstant(payload_ty, data);
+ return sema.addConstant(
+ payload_ty,
+ mod.intern_pool.indexToKey(val.ip_index).error_union.val.payload.toValue(),
+ );
}
try sema.requireRuntimeBlock(block, src, null);
@@ -8595,33 +8625,26 @@ fn analyzeErrUnionPayloadPtr(
if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| {
if (initializing) {
- if (!ptr_val.isComptimeMutablePtr()) {
+ if (!ptr_val.isComptimeMutablePtr(mod)) {
// If the pointer resulting from this function was stored at comptime,
// the error union error code would be set that way. But in this case,
// we need to emit a runtime instruction to do it.
try sema.requireRuntimeBlock(block, src, null);
_ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
}
- return sema.addConstant(
- operand_pointer_ty,
- try Value.Tag.eu_payload_ptr.create(sema.arena, .{
- .container_ptr = ptr_val,
- .container_ty = operand_ty.childType(mod),
- }),
- );
+ return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{
+ .ty = operand_pointer_ty.ip_index,
+ .addr = .{ .eu_payload = ptr_val.ip_index },
+ } })).toValue());
}
if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
- if (val.getError()) |name| {
+ if (val.getError(mod)) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
-
- return sema.addConstant(
- operand_pointer_ty,
- try Value.Tag.eu_payload_ptr.create(sema.arena, .{
- .container_ptr = ptr_val,
- .container_ty = operand_ty.childType(mod),
- }),
- );
+ return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{
+ .ty = operand_pointer_ty.ip_index,
+ .addr = .{ .eu_payload = ptr_val.ip_index },
+ } })).toValue());
}
}
@@ -8664,7 +8687,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air
const result_ty = operand_ty.errorUnionSet(mod);
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
- assert(val.getError() != null);
+ assert(val.getError(mod) != null);
return sema.addConstant(result_ty, val);
}
@@ -8694,7 +8717,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
- assert(val.getError() != null);
+ assert(val.getError(mod) != null);
return sema.addConstant(result_ty, val);
}
}
@@ -8931,20 +8954,21 @@ fn funcCommon(
}
var destroy_fn_on_error = false;
- const new_func: *Module.Fn = new_func: {
+ const new_func_index = new_func: {
if (!has_body) break :new_func undefined;
if (sema.comptime_args_fn_inst == func_inst) {
- const new_func = sema.preallocated_new_func.?;
- sema.preallocated_new_func = null; // take ownership
- break :new_func new_func;
+ const new_func_index = sema.preallocated_new_func.unwrap().?;
+ sema.preallocated_new_func = .none; // take ownership
+ break :new_func new_func_index;
}
destroy_fn_on_error = true;
- const new_func = try gpa.create(Module.Fn);
+ var new_func: Module.Fn = undefined;
// Set this here so that the inferred return type can be printed correctly if it appears in an error.
new_func.owner_decl = sema.owner_decl_index;
- break :new_func new_func;
+ const new_func_index = try mod.createFunc(new_func);
+ break :new_func new_func_index;
};
- errdefer if (destroy_fn_on_error) gpa.destroy(new_func);
+ errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index);
const target = sema.mod.getTarget();
const fn_ty: Type = fn_ty: {
@@ -9008,7 +9032,7 @@ fn funcCommon(
else blk: {
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
- .func = new_func,
+ .func = new_func_index,
});
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
@@ -9158,26 +9182,16 @@ fn funcCommon(
sema.owner_decl.@"addrspace" = address_space orelse .generic;
if (is_extern) {
- const new_extern_fn = try gpa.create(Module.ExternFn);
- errdefer gpa.destroy(new_extern_fn);
-
- new_extern_fn.* = Module.ExternFn{
- .owner_decl = sema.owner_decl_index,
- .lib_name = null,
- };
-
- if (opt_lib_name) |lib_name| {
- new_extern_fn.lib_name = try sema.handleExternLibName(block, .{
- .node_offset_lib_name = src_node_offset,
- }, lib_name);
- }
-
- const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn);
- extern_fn_payload.* = .{
- .base = .{ .tag = .extern_fn },
- .data = new_extern_fn,
- };
- return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base));
+ return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{
+ .ty = fn_ty.ip_index,
+ .decl = sema.owner_decl_index,
+ .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString(
+ gpa,
+ try sema.handleExternLibName(block, .{
+ .node_offset_lib_name = src_node_offset,
+ }, lib_name),
+ )).toOptional() else .none,
+ } })).toValue());
}
if (!has_body) {
@@ -9191,9 +9205,9 @@ fn funcCommon(
break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr;
} else null;
+ const new_func = mod.funcPtr(new_func_index);
const hash = new_func.hash;
const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl;
- const fn_payload = try sema.arena.create(Value.Payload.Function);
new_func.* = .{
.state = anal_state,
.zir_body_inst = func_inst,
@@ -9208,11 +9222,10 @@ fn funcCommon(
.branch_quota = default_branch_quota,
.is_noinline = is_noinline,
};
- fn_payload.* = .{
- .base = .{ .tag = .function },
- .data = new_func,
- };
- return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base));
+ return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{
+ .ty = fn_ty.ip_index,
+ .index = new_func_index,
+ } })).toValue());
}
fn analyzeParameter(
@@ -9312,7 +9325,7 @@ fn zirParam(
const prev_preallocated_new_func = sema.preallocated_new_func;
const prev_no_partial_func_type = sema.no_partial_func_ty;
block.params = .{};
- sema.preallocated_new_func = null;
+ sema.preallocated_new_func = .none;
sema.no_partial_func_ty = true;
defer {
block.params.deinit(sema.gpa);
@@ -9369,7 +9382,7 @@ fn zirParam(
else => |e| return e,
} or comptime_syntax;
if (sema.inst_map.get(inst)) |arg| {
- if (is_comptime and sema.preallocated_new_func != null) {
+ if (is_comptime and sema.preallocated_new_func != .none) {
// We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block.
const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
@@ -9392,7 +9405,7 @@ fn zirParam(
assert(sema.inst_map.remove(inst));
}
- if (sema.preallocated_new_func != null) {
+ if (sema.preallocated_new_func != .none) {
if (try sema.typeHasOnePossibleValue(param_ty)) |opv| {
// In this case we are instantiating a generic function call with a non-comptime
// non-anytype parameter that ended up being a one-possible-type.
@@ -9640,8 +9653,8 @@ fn intCast(
if (wanted_bits == 0) {
const ok = if (is_vector) ok: {
- const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0));
- const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros);
+ const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0));
+ const zero_inst = try sema.addConstant(operand_ty, zeros);
const is_in_range = try block.addCmpVector(operand, zero_inst, .eq);
const all_in_range = try block.addInst(.{
.tag = .reduce,
@@ -9649,7 +9662,7 @@ fn intCast(
});
break :ok all_in_range;
} else ok: {
- const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0));
+ const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0));
const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst);
break :ok is_in_range;
};
@@ -9673,10 +9686,7 @@ fn intCast(
// requirement: int value fits into target type
if (wanted_value_bits < actual_value_bits) {
const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty);
- const dest_max_val = if (is_vector)
- try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar)
- else
- dest_max_val_scalar;
+ const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar);
const dest_max = try sema.addConstant(operand_ty, dest_max_val);
const diff = try block.addBinOp(.subwrap, dest_max, operand);
@@ -9732,7 +9742,8 @@ fn intCast(
// no shrinkage, yes sign loss
// requirement: signed to unsigned >= 0
const ok = if (is_vector) ok: {
- const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0));
+ const scalar_zero = try mod.intValue(operand_scalar_ty, 0);
+ const zero_val = try sema.splat(operand_ty, scalar_zero);
const zero_inst = try sema.addConstant(operand_ty, zero_val);
const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
const all_in_range = try block.addInst(.{
@@ -10139,17 +10150,18 @@ fn zirSwitchCapture(
.@"volatile" = operand_ptr_ty.isVolatilePtr(mod),
.@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod),
});
- return sema.addConstant(
- ptr_field_ty,
- try Value.Tag.field_ptr.create(sema.arena, .{
- .container_ptr = union_val,
- .container_ty = operand_ty,
- .field_index = field_index,
- }),
- );
+ return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
+ .ty = ptr_field_ty.ip_index,
+ .addr = .{ .field = .{
+ .base = union_val.ip_index,
+ .index = field_index,
+ } },
+ } })).toValue());
}
- const tag_and_val = union_val.castTag(.@"union").?.data;
- return sema.addConstant(field_ty, tag_and_val.val);
+ return sema.addConstant(
+ field_ty,
+ mod.intern_pool.indexToKey(union_val.ip_index).un.val.toValue(),
+ );
}
if (is_ref) {
const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
@@ -10243,14 +10255,13 @@ fn zirSwitchCapture(
});
if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| {
- return sema.addConstant(
- field_ty_ptr,
- try Value.Tag.field_ptr.create(sema.arena, .{
- .container_ptr = op_ptr_val,
- .container_ty = operand_ty,
- .field_index = first_field_index,
- }),
- );
+ return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{
+ .ty = field_ty_ptr.ip_index,
+ .addr = .{ .field = .{
+ .base = op_ptr_val.ip_index,
+ .index = first_field_index,
+ } },
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, operand_src, null);
return block.addStructFieldPtr(operand_ptr, first_field_index, field_ty_ptr);
@@ -10273,7 +10284,7 @@ fn zirSwitchCapture(
const item_ref = try sema.resolveInst(item);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
- const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError().?);
+ const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError(mod).?);
names.putAssumeCapacityNoClobber(name_ip, {});
}
const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
@@ -10284,7 +10295,7 @@ fn zirSwitchCapture(
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
- const item_ty = try mod.singleErrorSetType(item_val.getError().?);
+ const item_ty = try mod.singleErrorSetType(item_val.getError(mod).?);
return sema.bitCast(block, item_ty, operand, operand_src, null);
}
},
@@ -10809,10 +10820,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
check_range: {
if (operand_ty.zigTypeTag(mod) == .Int) {
- var arena = std.heap.ArenaAllocator.init(gpa);
- defer arena.deinit();
-
- const min_int = try operand_ty.minInt(arena.allocator(), mod);
+ const min_int = try operand_ty.minInt(mod);
const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int);
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
@@ -11493,8 +11501,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (seen_errors.contains(error_name)) continue;
cases_len += 1;
- const item_val = try Value.Tag.@"error".create(sema.arena, .{ .name = error_name });
- const item_ref = try sema.addConstant(operand_ty, item_val);
+ const item_val = try mod.intern(.{ .err = .{
+ .ty = operand_ty.ip_index,
+ .name = error_name_ip,
+ } });
+ const item_ref = try sema.addConstant(operand_ty, item_val.toValue());
case_block.inline_case_capture = item_ref;
case_block.instructions.shrinkRetainingCapacity(0);
@@ -11665,7 +11676,7 @@ const RangeSetUnhandledIterator = struct {
fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
const mod = sema.mod;
- const min = try ty.minInt(sema.arena, mod);
+ const min = try ty.minInt(mod);
const max = try ty.maxIntScalar(mod, Type.comptime_int);
return RangeSetUnhandledIterator{
@@ -11788,9 +11799,10 @@ fn validateSwitchItemError(
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
+ const ip = &sema.mod.intern_pool;
const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
// TODO: Do i need to typecheck here?
- const error_name = item_tv.val.castTag(.@"error").?.data.name;
+ const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.ip_index).err.name);
const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
prev.value
else
@@ -11983,7 +11995,7 @@ fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Ind
}
if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
if (!operand_ty.isError(mod)) return;
- if (val.getError() == null) return;
+ if (val.getError(mod) == null) return;
try sema.maybeErrorUnwrapComptime(block, body, err_operand);
}
}
@@ -12005,7 +12017,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
const src = inst_data.src();
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
- if (val.getError()) |name| {
+ if (val.getError(sema.mod)) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
}
@@ -12172,11 +12184,11 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R
// Return the error code from the function.
const kv = try mod.getErrorValue(err_name);
- const result_inst = try sema.addConstant(
- try mod.singleErrorSetType(kv.key),
- try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
- );
- return result_inst;
+ const error_set_type = try mod.singleErrorSetType(kv.key);
+ return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{
+ .ty = error_set_type.ip_index,
+ .name = mod.intern_pool.getString(kv.key).unwrap().?,
+ } })).toValue());
}
fn zirShl(
@@ -12301,7 +12313,7 @@ fn zirShl(
{
const max_int = try sema.addConstant(
lhs_ty,
- try lhs_ty.maxInt(sema.arena, mod, lhs_ty),
+ try lhs_ty.maxInt(mod, lhs_ty),
);
const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
@@ -12316,7 +12328,7 @@ fn zirShl(
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count);
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
- const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+ const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
@@ -12466,7 +12478,7 @@ fn zirShr(
const bit_count_val = try mod.intValue(scalar_ty, bit_count);
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
- const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+ const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
.tag = .reduce,
@@ -13179,11 +13191,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
}
- const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
- try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0)))
- else
- try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0));
-
+ const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0)));
return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true);
}
@@ -13203,11 +13211,7 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}),
}
- const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
- try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0)))
- else
- try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0));
-
+ const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0)));
return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true);
}
@@ -13254,8 +13258,6 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
-
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -13325,9 +13327,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
}
@@ -13427,8 +13427,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
-
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -13469,9 +13467,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
}
@@ -13555,7 +13551,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else => unreachable,
};
if (resolved_type.zigTypeTag(mod) == .Vector) {
- const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero);
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
const zero = try sema.addConstant(resolved_type, zero_val);
const eql = try block.addCmpVector(remainder, zero, .eq);
break :ok try block.addInst(.{
@@ -13600,8 +13596,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
-
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -13644,9 +13638,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
}
@@ -13721,8 +13713,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
-
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -13765,9 +13755,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
}
@@ -13843,12 +13831,9 @@ fn addDivIntOverflowSafety(
return;
}
- const min_int = try resolved_type.minInt(sema.arena, mod);
+ const min_int = try resolved_type.minInt(mod);
const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1);
- const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector)
- try Value.Tag.repeated.create(sema.arena, neg_one_scalar)
- else
- neg_one_scalar;
+ const neg_one = try sema.splat(resolved_type, neg_one_scalar);
// If the LHS is comptime-known to be not equal to the min int,
// no overflow is possible.
@@ -13924,7 +13909,7 @@ fn addDivByZeroSafety(
else
try mod.floatValue(resolved_type.scalarType(mod), 0);
const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
- const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero);
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
const zero = try sema.addConstant(resolved_type, zero_val);
const ok = try block.addCmpVector(casted_rhs, zero, .neq);
break :ok try block.addInst(.{
@@ -14012,9 +13997,10 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{
+ .ty = resolved_type.ip_index,
+ .storage = .{ .repeated_elem = scalar_zero.ip_index },
+ } })).toValue() else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
} else if (lhs_scalar_ty.isSignedInt(mod)) {
@@ -14399,12 +14385,12 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the argument is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -14425,7 +14411,7 @@ fn zirOverflowArithmetic(
if (rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
@@ -14444,9 +14430,9 @@ fn zirOverflowArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
- } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
+ } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs };
}
}
}
@@ -14454,9 +14440,9 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(mod)) {
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
- } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs };
+ } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
}
}
}
@@ -14478,12 +14464,12 @@ fn zirOverflowArithmetic(
// Oterhwise if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+ break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -14544,10 +14530,14 @@ fn zirOverflowArithmetic(
return block.addAggregateInit(tuple_ty, element_refs);
}
-fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
+fn splat(sema: *Sema, ty: Type, val: Value) !Value {
const mod = sema.mod;
if (ty.zigTypeTag(mod) != .Vector) return val;
- return Value.Tag.repeated.create(sema.arena, val);
+ const repeated = try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .storage = .{ .repeated_elem = val.ip_index },
+ } });
+ return repeated.toValue();
}
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
@@ -14603,8 +14593,6 @@ fn analyzeArithmetic(
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
-
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -14853,9 +14841,7 @@ fn analyzeArithmetic(
} else if (resolved_type.isAnyFloat()) {
break :lz;
}
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
@@ -14886,9 +14872,7 @@ fn analyzeArithmetic(
} else if (resolved_type.isAnyFloat()) {
break :rz;
}
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
@@ -14931,9 +14915,7 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
@@ -14947,9 +14929,7 @@ fn analyzeArithmetic(
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
@@ -14979,9 +14959,7 @@ fn analyzeArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
@@ -14994,9 +14972,7 @@ fn analyzeArithmetic(
return sema.addConstUndef(resolved_type);
}
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
- } else scalar_zero;
+ const zero_val = try sema.splat(resolved_type, scalar_zero);
return sema.addConstant(resolved_type, zero_val);
}
if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) {
@@ -15138,7 +15114,7 @@ fn analyzePtrArithmetic(
if (air_tag == .ptr_sub) {
return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
}
- const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod);
+ const new_ptr_val = try ptr_val.elemPtr(ptr_ty, offset_int, sema.mod);
return sema.addConstant(new_ptr_ty, new_ptr_val);
} else break :rs offset_src;
} else break :rs ptr_src;
@@ -15184,7 +15160,7 @@ fn zirAsm(
const inputs_len = @truncate(u5, extended.small >> 5);
const clobbers_len = @truncate(u5, extended.small >> 10);
const is_volatile = @truncate(u1, extended.small >> 15) != 0;
- const is_global_assembly = sema.func == null;
+ const is_global_assembly = sema.func_index == .none;
const asm_source: []const u8 = if (tmpl_is_expr) blk: {
const tmpl = @intToEnum(Zir.Inst.Ref, extra.data.asm_source);
@@ -15387,12 +15363,7 @@ fn zirCmpEq(
if (lval.isUndef(mod) or rval.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
- // TODO optimisation opportunity: evaluate if mem.eql is faster with the names,
- // or calling to Module.getErrorValue to get the values and then compare them is
- // faster.
- const lhs_name = lval.castTag(.@"error").?.data.name;
- const rhs_name = rval.castTag(.@"error").?.data.name;
- if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) {
+ if (lval.toIntern() == rval.toIntern()) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
@@ -15650,8 +15621,8 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.AnyFrame,
=> {},
}
- const val = try ty.lazyAbiSize(mod, sema.arena);
- if (val.isLazySize()) {
+ const val = try ty.lazyAbiSize(mod);
+ if (val.isLazySize(mod)) {
try sema.queueFullTypeResolution(ty);
}
return sema.addConstant(Type.comptime_int, val);
@@ -15760,11 +15731,11 @@ fn zirClosureGet(
scope = scope.parent.?;
};
- if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) {
+ if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func_index == .none) {
const msg = msg: {
const name = name: {
const file = sema.owner_decl.getFileScope(mod);
- const tree = file.getTree(mod.gpa) catch |err| {
+ const tree = file.getTree(sema.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
file.sub_file_path, @errorName(err),
@@ -15788,11 +15759,11 @@ fn zirClosureGet(
return sema.failWithOwnedErrorMsg(msg);
}
- if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) {
+ if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) {
const msg = msg: {
const name = name: {
const file = sema.owner_decl.getFileScope(mod);
- const tree = file.getTree(mod.gpa) catch |err| {
+ const tree = file.getTree(sema.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
file.sub_file_path, @errorName(err),
@@ -15868,14 +15839,17 @@ fn zirBuiltinSrc(
const func_name_val = blk: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
- const name = std.mem.span(fn_owner_decl.name);
+ const name = mem.span(fn_owner_decl.name);
const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]);
const new_decl = try anon_decl.finish(
try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod),
try Value.Tag.bytes.create(anon_decl.arena(), bytes),
0, // default alignment
);
- break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl);
+ break :blk try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_sentinel_0_type,
+ .addr = .{ .decl = new_decl },
+ } });
};
const file_name_val = blk: {
@@ -15888,27 +15862,35 @@ fn zirBuiltinSrc(
try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]),
0, // default alignment
);
- break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl);
+ break :blk try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_sentinel_0_type,
+ .addr = .{ .decl = new_decl },
+ } });
};
- const field_values = try sema.arena.alloc(Value, 4);
- // file: [:0]const u8,
- field_values[0] = file_name_val;
- // fn_name: [:0]const u8,
- field_values[1] = func_name_val;
- // line: u32
- field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1));
- // column: u32,
- field_values[3] = try mod.intValue(Type.u32, extra.column + 1);
-
- return sema.addConstant(
- try sema.getBuiltinType("SourceLocation"),
- try Value.Tag.aggregate.create(sema.arena, field_values),
- );
+ const src_loc_ty = try sema.getBuiltinType("SourceLocation");
+ const fields = .{
+ // file: [:0]const u8,
+ file_name_val,
+ // fn_name: [:0]const u8,
+ func_name_val,
+ // line: u32,
+ try mod.intern(.{ .runtime_value = .{
+ .ty = .u32_type,
+ .val = (try mod.intValue(Type.u32, extra.line + 1)).ip_index,
+ } }),
+ // column: u32,
+ (try mod.intValue(Type.u32, extra.column + 1)).ip_index,
+ };
+ return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{
+ .ty = src_loc_ty.ip_index,
+ .storage = .{ .elems = &fields },
+ } })).toValue());
}
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const mod = sema.mod;
+ const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ty = try sema.resolveType(block, src, inst_data.operand);
@@ -15916,69 +15898,20 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
switch (ty.zigTypeTag(mod)) {
- .Type => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Type)),
- .val = Value.void,
- }),
- ),
- .Void => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)),
- .val = Value.void,
- }),
- ),
- .Bool => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)),
- .val = Value.void,
- }),
- ),
- .NoReturn => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)),
- .val = Value.void,
- }),
- ),
- .ComptimeFloat => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)),
- .val = Value.void,
- }),
- ),
- .ComptimeInt => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)),
- .val = Value.void,
- }),
- ),
- .Undefined => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)),
- .val = Value.void,
- }),
- ),
- .Null => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)),
- .val = Value.void,
- }),
- ),
- .EnumLiteral => return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)),
- .val = Value.void,
- }),
- ),
+ .Type,
+ .Void,
+ .Bool,
+ .NoReturn,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .Undefined,
+ .Null,
+ .EnumLiteral,
+ => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).ip_index,
+ .val = .void_value,
+ } })).toValue()),
.Fn => {
// TODO: look into memoizing this result.
const info = mod.typeToFunc(ty).?;
@@ -15986,11 +15919,34 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var params_anon_decl = try block.startAnonDecl();
defer params_anon_decl.deinit();
- const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len);
+ const fn_info_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Fn",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
+ try sema.ensureDeclAnalyzed(fn_info_decl_index);
+ const fn_info_decl = mod.declPtr(fn_info_decl_index);
+ const fn_info_ty = fn_info_decl.val.toType();
+
+ const param_info_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ fn_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Param",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
+ try sema.ensureDeclAnalyzed(param_info_decl_index);
+ const param_info_decl = mod.declPtr(param_info_decl_index);
+ const param_info_ty = param_info_decl.val.toType();
+
+ const param_vals = try gpa.alloc(InternPool.Index, info.param_types.len);
+ defer gpa.free(param_vals);
for (param_vals, info.param_types, 0..) |*param_val, param_ty, i| {
const is_generic = param_ty == .generic_poison_type;
- const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{
- .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }),
+ const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{
+ .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }),
.val = if (is_generic) .none else param_ty,
} });
@@ -15999,87 +15955,74 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :blk @truncate(u1, info.noalias_bits >> index) != 0;
};
- const param_fields = try params_anon_decl.arena().create([3]Value);
- param_fields.* = .{
+ const param_fields = .{
// is_generic: bool,
- Value.makeBool(is_generic),
+ Value.makeBool(is_generic).ip_index,
// is_noalias: bool,
- Value.makeBool(is_noalias),
+ Value.makeBool(is_noalias).ip_index,
// type: ?type,
- param_ty_val.toValue(),
+ param_ty_val,
};
- param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields);
+ param_val.* = try mod.intern(.{ .aggregate = .{
+ .ty = param_info_ty.ip_index,
+ .storage = .{ .elems = ¶m_fields },
+ } });
}
const args_val = v: {
- const fn_info_decl_index = (try sema.namespaceLookup(
- block,
- src,
- type_info_ty.getNamespaceIndex(mod).unwrap().?,
- "Fn",
- )).?;
- try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
- try sema.ensureDeclAnalyzed(fn_info_decl_index);
- const fn_info_decl = mod.declPtr(fn_info_decl_index);
- const fn_ty = fn_info_decl.val.toType();
- const param_info_decl_index = (try sema.namespaceLookup(
- block,
- src,
- fn_ty.getNamespaceIndex(mod).unwrap().?,
- "Param",
- )).?;
- try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
- try sema.ensureDeclAnalyzed(param_info_decl_index);
- const param_info_decl = mod.declPtr(param_info_decl_index);
- const param_ty = param_info_decl.val.toType();
+ const args_slice_ty = try mod.ptrType(.{
+ .elem_type = param_info_ty.ip_index,
+ .size = .Slice,
+ .is_const = true,
+ });
const new_decl = try params_anon_decl.finish(
try mod.arrayType(.{
.len = param_vals.len,
- .child = param_ty.ip_index,
+ .child = param_info_ty.ip_index,
.sentinel = .none,
}),
- try Value.Tag.aggregate.create(
- params_anon_decl.arena(),
- param_vals,
- ),
+ (try mod.intern(.{ .aggregate = .{
+ .ty = args_slice_ty.ip_index,
+ .storage = .{ .elems = param_vals },
+ } })).toValue(),
0, // default alignment
);
- break :v try Value.Tag.slice.create(sema.arena, .{
- .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
- .len = try mod.intValue(Type.usize, param_vals.len),
- });
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = args_slice_ty.ip_index,
+ .addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, param_vals.len)).ip_index,
+ } });
};
- const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{
- .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }),
+ const ret_ty_opt = try mod.intern(.{ .opt = .{
+ .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }),
.val = if (info.return_type == .generic_poison_type) .none else info.return_type,
} });
const callconv_ty = try sema.getBuiltinType("CallingConvention");
- const field_values = try sema.arena.create([6]Value);
- field_values.* = .{
+ const field_values = .{
// calling_convention: CallingConvention,
- try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)),
+ (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).ip_index,
// alignment: comptime_int,
- try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)),
+ (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).ip_index,
// is_generic: bool,
- Value.makeBool(info.is_generic),
+ Value.makeBool(info.is_generic).ip_index,
// is_var_args: bool,
- Value.makeBool(info.is_var_args),
+ Value.makeBool(info.is_var_args).ip_index,
// return_type: ?type,
- ret_ty_opt.toValue(),
+ ret_ty_opt,
// args: []const Fn.Param,
args_val,
};
-
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).ip_index,
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = fn_info_ty.ip_index,
+ .storage = .{ .elems = &field_values },
+ } }),
+ } })).toValue());
},
.Int => {
const signedness_ty = try sema.getBuiltinType("Signedness");
@@ -16099,24 +16042,36 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
},
.Float => {
- const field_values = try sema.arena.alloc(Value, 1);
- // bits: u16,
- field_values[0] = try mod.intValue(Type.u16, ty.bitSize(mod));
-
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ const float_info_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Float",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index);
+ try sema.ensureDeclAnalyzed(float_info_decl_index);
+ const float_info_decl = mod.declPtr(float_info_decl_index);
+ const float_ty = float_info_decl.val.toType();
+
+ const field_vals = .{
+ // bits: u16,
+ (try mod.intValue(Type.u16, ty.bitSize(mod))).ip_index,
+ };
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).ip_index,
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = float_ty.ip_index,
+ .storage = .{ .elems = &field_vals },
+ } }),
+ } })).toValue());
},
.Pointer => {
const info = ty.ptrInfo(mod);
const alignment = if (info.@"align" != 0)
try mod.intValue(Type.comptime_int, info.@"align")
else
- try info.pointee_type.lazyAbiAlignment(mod, sema.arena);
+ try info.pointee_type.lazyAbiAlignment(mod);
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
const pointer_ty = t: {
@@ -16245,9 +16200,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// Build our list of Error values
// Optional value is only null if anyerror
// Value can be zero-length slice otherwise
- const error_field_vals: ?[]Value = if (ty.isAnyError(mod)) null else blk: {
+ const error_field_vals = if (ty.isAnyError(mod)) null else blk: {
const names = ty.errorSetNames(mod);
- const vals = try fields_anon_decl.arena().alloc(Value, names.len);
+ const vals = try gpa.alloc(InternPool.Index, names.len);
+ defer gpa.free(vals);
for (vals, names) |*field_val, name_ip| {
const name = mod.intern_pool.stringToSlice(name_ip);
const name_val = v: {
@@ -16259,70 +16215,91 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
- break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl);
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = new_decl },
+ } });
};
- const error_field_fields = try fields_anon_decl.arena().create([1]Value);
- error_field_fields.* = .{
+ const error_field_fields = .{
// name: []const u8,
name_val,
};
-
- field_val.* = try Value.Tag.aggregate.create(
- fields_anon_decl.arena(),
- error_field_fields,
- );
+ field_val.* = try mod.intern(.{ .aggregate = .{
+ .ty = error_field_ty.ip_index,
+ .storage = .{ .elems = &error_field_fields },
+ } });
}
break :blk vals;
};
// Build our ?[]const Error value
- const errors_val = if (error_field_vals) |vals| v: {
+ const slice_errors_ty = try mod.ptrType(.{
+ .elem_type = error_field_ty.ip_index,
+ .size = .Slice,
+ .is_const = true,
+ });
+ const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.ip_index);
+ const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: {
+ const array_errors_ty = try mod.arrayType(.{
+ .len = vals.len,
+ .child = error_field_ty.ip_index,
+ .sentinel = .none,
+ });
const new_decl = try fields_anon_decl.finish(
- try mod.arrayType(.{
- .len = vals.len,
- .child = error_field_ty.ip_index,
- .sentinel = .none,
- }),
- try Value.Tag.aggregate.create(
- fields_anon_decl.arena(),
- vals,
- ),
+ array_errors_ty,
+ (try mod.intern(.{ .aggregate = .{
+ .ty = array_errors_ty.ip_index,
+ .storage = .{ .elems = vals },
+ } })).toValue(),
0, // default alignment
);
-
- const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl);
- const slice_val = try Value.Tag.slice.create(sema.arena, .{
- .ptr = new_decl_val,
- .len = try mod.intValue(Type.usize, vals.len),
- });
- break :v try Value.Tag.opt_payload.create(sema.arena, slice_val);
- } else Value.null;
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = slice_errors_ty.ip_index,
+ .addr = .{ .decl = new_decl },
+ } });
+ } else .none;
+ const errors_val = try mod.intern(.{ .opt = .{
+ .ty = opt_slice_errors_ty.ip_index,
+ .val = errors_payload_val,
+ } });
// Construct Type{ .ErrorSet = errors_val }
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)),
- .val = errors_val,
- }),
- );
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).ip_index,
+ .val = errors_val,
+ } })).toValue());
},
.ErrorUnion => {
- const field_values = try sema.arena.alloc(Value, 2);
- // error_set: type,
- field_values[0] = ty.errorUnionSet(mod).toValue();
- // payload: type,
- field_values[1] = ty.errorUnionPayload(mod).toValue();
+ const error_union_field_ty = t: {
+ const error_union_field_ty_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "ErrorUnion",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index);
+ try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index);
+ const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index);
+ break :t error_union_field_ty_decl.val.toType();
+ };
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ const field_values = .{
+ // error_set: type,
+ ty.errorUnionSet(mod).ip_index,
+ // payload: type,
+ ty.errorUnionPayload(mod).ip_index,
+ };
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).ip_index,
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = error_union_field_ty.ip_index,
+ .storage = .{ .elems = &field_values },
+ } }),
+ } })).toValue());
},
.Enum => {
// TODO: look into memoizing this result.
@@ -16346,7 +16323,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t enum_field_ty_decl.val.toType();
};
- const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len);
+ const enum_field_vals = try gpa.alloc(InternPool.Index, enum_type.names.len);
+ defer gpa.free(enum_field_vals);
for (enum_field_vals, 0..) |*field_val, i| {
const name_ip = enum_type.names[i];
@@ -16360,56 +16338,81 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
- break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl);
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = new_decl },
+ } });
};
- const enum_field_fields = try fields_anon_decl.arena().create([2]Value);
- enum_field_fields.* = .{
+ const enum_field_fields = .{
// name: []const u8,
name_val,
// value: comptime_int,
- try mod.intValue(Type.comptime_int, i),
+ (try mod.intValue(Type.comptime_int, i)).ip_index,
};
- field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields);
+ field_val.* = try mod.intern(.{ .aggregate = .{
+ .ty = enum_field_ty.ip_index,
+ .storage = .{ .elems = &enum_field_fields },
+ } });
}
const fields_val = v: {
+ const fields_array_ty = try mod.arrayType(.{
+ .len = enum_field_vals.len,
+ .child = enum_field_ty.ip_index,
+ .sentinel = .none,
+ });
const new_decl = try fields_anon_decl.finish(
- try mod.arrayType(.{
- .len = enum_field_vals.len,
- .child = enum_field_ty.ip_index,
- .sentinel = .none,
- }),
- try Value.Tag.aggregate.create(
- fields_anon_decl.arena(),
- enum_field_vals,
- ),
+ fields_array_ty,
+ (try mod.intern(.{ .aggregate = .{
+ .ty = fields_array_ty.ip_index,
+ .storage = .{ .elems = enum_field_vals },
+ } })).toValue(),
0, // default alignment
);
- break :v try Value.Tag.decl_ref.create(sema.arena, new_decl);
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = (try mod.ptrType(.{
+ .elem_type = enum_field_ty.ip_index,
+ .size = .Slice,
+ .is_const = true,
+ })).ip_index,
+ .addr = .{ .decl = new_decl },
+ } });
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace);
- const field_values = try sema.arena.create([4]Value);
- field_values.* = .{
+ const type_enum_ty = t: {
+ const type_enum_ty_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Enum",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index);
+ try sema.ensureDeclAnalyzed(type_enum_ty_decl_index);
+ const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index);
+ break :t type_enum_ty_decl.val.toType();
+ };
+
+ const field_values = .{
// tag_type: type,
- enum_type.tag_ty.toValue(),
+ enum_type.tag_ty,
// fields: []const EnumField,
fields_val,
// decls: []const Declaration,
decls_val,
// is_exhaustive: bool,
- is_exhaustive,
+ is_exhaustive.ip_index,
};
-
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).ip_index,
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = type_enum_ty.ip_index,
+ .storage = .{ .elems = &field_values },
+ } }),
+ } })).toValue());
},
.Union => {
// TODO: look into memoizing this result.
@@ -16417,6 +16420,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var fields_anon_decl = try block.startAnonDecl();
defer fields_anon_decl.deinit();
+ const type_union_ty = t: {
+ const type_union_ty_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Union",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index);
+ try sema.ensureDeclAnalyzed(type_union_ty_decl_index);
+ const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index);
+ break :t type_union_ty_decl.val.toType();
+ };
+
const union_field_ty = t: {
const union_field_ty_decl_index = (try sema.namespaceLookup(
block,
@@ -16435,7 +16451,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const layout = union_ty.containerLayout(mod);
const union_fields = union_ty.unionFields(mod);
- const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count());
+ const union_field_vals = try gpa.alloc(InternPool.Index, union_fields.count());
+ defer gpa.free(union_field_vals);
for (union_field_vals, 0..) |*field_val, i| {
const field = union_fields.values()[i];
@@ -16449,51 +16466,62 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
- break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl);
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = new_decl },
+ } });
};
- const union_field_fields = try fields_anon_decl.arena().create([3]Value);
const alignment = switch (layout) {
.Auto, .Extern => try sema.unionFieldAlignment(field),
.Packed => 0,
};
- union_field_fields.* = .{
+ const union_field_fields = .{
// name: []const u8,
name_val,
// type: type,
- field.ty.toValue(),
+ field.ty.ip_index,
// alignment: comptime_int,
- try mod.intValue(Type.comptime_int, alignment),
+ (try mod.intValue(Type.comptime_int, alignment)).ip_index,
};
- field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields);
+ field_val.* = try mod.intern(.{ .aggregate = .{
+ .ty = union_field_ty.ip_index,
+ .storage = .{ .elems = &union_field_fields },
+ } });
}
const fields_val = v: {
+ const array_fields_ty = try mod.arrayType(.{
+ .len = union_field_vals.len,
+ .child = union_field_ty.ip_index,
+ .sentinel = .none,
+ });
const new_decl = try fields_anon_decl.finish(
- try mod.arrayType(.{
- .len = union_field_vals.len,
- .child = union_field_ty.ip_index,
- .sentinel = .none,
- }),
- try Value.Tag.aggregate.create(
- fields_anon_decl.arena(),
- try fields_anon_decl.arena().dupe(Value, union_field_vals),
- ),
+ array_fields_ty,
+ (try mod.intern(.{ .aggregate = .{
+ .ty = array_fields_ty.ip_index,
+ .storage = .{ .elems = union_field_vals },
+ } })).toValue(),
0, // default alignment
);
- break :v try Value.Tag.slice.create(sema.arena, .{
- .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
- .len = try mod.intValue(Type.usize, union_field_vals.len),
- });
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = (try mod.ptrType(.{
+ .elem_type = union_field_ty.ip_index,
+ .size = .Slice,
+ .is_const = true,
+ })).ip_index,
+ .addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, union_field_vals.len)).ip_index,
+ } });
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod));
- const enum_tag_ty_val = if (union_ty.unionTagType(mod)) |tag_ty| v: {
- const ty_val = tag_ty.toValue();
- break :v try Value.Tag.opt_payload.create(sema.arena, ty_val);
- } else Value.null;
+ const enum_tag_ty_val = try mod.intern(.{ .opt = .{
+ .ty = (try mod.optionalType(.type_type)).ip_index,
+ .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.ip_index else .none,
+ } });
const container_layout_ty = t: {
const decl_index = (try sema.namespaceLookup(
@@ -16508,10 +16536,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t decl.val.toType();
};
- const field_values = try sema.arena.create([4]Value);
- field_values.* = .{
+ const field_values = .{
// layout: ContainerLayout,
- try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)),
+ (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index,
// tag_type: ?type,
enum_tag_ty_val,
@@ -16520,14 +16547,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// decls: []const Declaration,
decls_val,
};
-
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).ip_index,
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = type_union_ty.ip_index,
+ .storage = .{ .elems = &field_values },
+ } }),
+ } })).toValue());
},
.Struct => {
// TODO: look into memoizing this result.
@@ -16535,6 +16562,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var fields_anon_decl = try block.startAnonDecl();
defer fields_anon_decl.deinit();
+ const type_struct_ty = t: {
+ const type_struct_ty_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Struct",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index);
+ try sema.ensureDeclAnalyzed(type_struct_ty_decl_index);
+ const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index);
+ break :t type_struct_ty_decl.val.toType();
+ };
+
const struct_field_ty = t: {
const struct_field_ty_decl_index = (try sema.namespaceLookup(
block,
@@ -16547,14 +16587,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index);
break :t struct_field_ty_decl.val.toType();
};
+
const struct_ty = try sema.resolveTypeFields(ty);
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
const layout = struct_ty.containerLayout(mod);
- const struct_field_vals = fv: {
+ var struct_field_vals: []InternPool.Index = &.{};
+ defer gpa.free(struct_field_vals);
+ fv: {
const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.anon_struct_type => |tuple| {
- const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len);
+ struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len);
for (
tuple.types,
tuple.values,
@@ -16574,38 +16617,40 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. bytes.len + 1]),
0, // default alignment
);
- break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
- .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
- .len = try mod.intValue(Type.usize, bytes.len),
- });
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, bytes.len)).ip_index,
+ } });
};
- const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
const is_comptime = field_val != .none;
const opt_default_val = if (is_comptime) field_val.toValue() else null;
const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val);
- struct_field_fields.* = .{
+ const struct_field_fields = .{
// name: []const u8,
name_val,
// type: type,
- field_ty.toValue(),
+ field_ty,
// default_value: ?*const anyopaque,
- try default_val_ptr.copy(fields_anon_decl.arena()),
+ default_val_ptr.ip_index,
// is_comptime: bool,
- Value.makeBool(is_comptime),
+ Value.makeBool(is_comptime).ip_index,
// alignment: comptime_int,
- try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()),
+ (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).ip_index,
};
- struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
+ struct_field_val.* = try mod.intern(.{ .aggregate = .{
+ .ty = struct_field_ty.ip_index,
+ .storage = .{ .elems = &struct_field_fields },
+ } });
}
- break :fv struct_field_vals;
+ break :fv;
},
.struct_type => |s| s,
else => unreachable,
};
- const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
- break :fv &[0]Value{};
- const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count());
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv;
+ struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count());
for (
struct_field_vals,
@@ -16621,13 +16666,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
- break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
- .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
- .len = try mod.intValue(Type.usize, bytes.len),
- });
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, bytes.len)).ip_index,
+ } });
};
- const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
const opt_default_val = if (field.default_val.ip_index == .unreachable_value)
null
else
@@ -16635,55 +16680,61 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val);
const alignment = field.alignment(mod, layout);
- struct_field_fields.* = .{
+ const struct_field_fields = .{
// name: []const u8,
name_val,
// type: type,
- field.ty.toValue(),
+ field.ty.ip_index,
// default_value: ?*const anyopaque,
- try default_val_ptr.copy(fields_anon_decl.arena()),
+ default_val_ptr.ip_index,
// is_comptime: bool,
- Value.makeBool(field.is_comptime),
+ Value.makeBool(field.is_comptime).ip_index,
// alignment: comptime_int,
- try mod.intValue(Type.comptime_int, alignment),
+ (try mod.intValue(Type.comptime_int, alignment)).ip_index,
};
- field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
+ field_val.* = try mod.intern(.{ .aggregate = .{
+ .ty = struct_field_ty.ip_index,
+ .storage = .{ .elems = &struct_field_fields },
+ } });
}
- break :fv struct_field_vals;
- };
+ }
const fields_val = v: {
+ const array_fields_ty = try mod.arrayType(.{
+ .len = struct_field_vals.len,
+ .child = struct_field_ty.ip_index,
+ .sentinel = .none,
+ });
const new_decl = try fields_anon_decl.finish(
- try mod.arrayType(.{
- .len = struct_field_vals.len,
- .child = struct_field_ty.ip_index,
- .sentinel = .none,
- }),
- try Value.Tag.aggregate.create(
- fields_anon_decl.arena(),
- try fields_anon_decl.arena().dupe(Value, struct_field_vals),
- ),
+ array_fields_ty,
+ (try mod.intern(.{ .aggregate = .{
+ .ty = array_fields_ty.ip_index,
+ .storage = .{ .elems = struct_field_vals },
+ } })).toValue(),
0, // default alignment
);
- break :v try Value.Tag.slice.create(sema.arena, .{
- .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
- .len = try mod.intValue(Type.usize, struct_field_vals.len),
- });
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = (try mod.ptrType(.{
+ .elem_type = struct_field_ty.ip_index,
+ .size = .Slice,
+ .is_const = true,
+ })).ip_index,
+ .addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, struct_field_vals.len)).ip_index,
+ } });
};
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod));
- const backing_integer_val = blk: {
- if (layout == .Packed) {
+ const backing_integer_val = try mod.intern(.{ .opt = .{
+ .ty = (try mod.optionalType(.type_type)).ip_index,
+ .val = if (layout == .Packed) val: {
const struct_obj = mod.typeToStruct(struct_ty).?;
assert(struct_obj.haveLayout());
assert(struct_obj.backing_int_ty.isInt(mod));
- const backing_int_ty_val = struct_obj.backing_int_ty.toValue();
- break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
- } else {
- break :blk Value.null;
- }
- };
+ break :val struct_obj.backing_int_ty.ip_index;
+ } else .none,
+ } });
const container_layout_ty = t: {
const decl_index = (try sema.namespaceLookup(
@@ -16698,10 +16749,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t decl.val.toType();
};
- const field_values = try sema.arena.create([5]Value);
- field_values.* = .{
+ const field_values = [_]InternPool.Index{
// layout: ContainerLayout,
- try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)),
+ (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index,
// backing_integer: ?type,
backing_integer_val,
// fields: []const StructField,
@@ -16709,36 +16759,48 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// decls: []const Declaration,
decls_val,
// is_tuple: bool,
- Value.makeBool(struct_ty.isTuple(mod)),
+ Value.makeBool(struct_ty.isTuple(mod)).ip_index,
};
-
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).ip_index,
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = type_struct_ty.ip_index,
+ .storage = .{ .elems = &field_values },
+ } }),
+ } })).toValue());
},
.Opaque => {
// TODO: look into memoizing this result.
+ const type_opaque_ty = t: {
+ const type_opaque_ty_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Opaque",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index);
+ try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index);
+ const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index);
+ break :t type_opaque_ty_decl.val.toType();
+ };
+
const opaque_ty = try sema.resolveTypeFields(ty);
const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod));
- const field_values = try sema.arena.create([1]Value);
- field_values.* = .{
+ const field_values = .{
// decls: []const Declaration,
decls_val,
};
-
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.ip_index,
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).ip_index,
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = type_opaque_ty.ip_index,
+ .storage = .{ .elems = &field_values },
+ } }),
+ } })).toValue());
},
.Frame => return sema.failWithUseOfAsync(block, src),
.AnyFrame => return sema.failWithUseOfAsync(block, src),
@@ -16751,7 +16813,7 @@ fn typeInfoDecls(
src: LazySrcLoc,
type_info_ty: Type,
opt_namespace: Module.Namespace.OptionalIndex,
-) CompileError!Value {
+) CompileError!InternPool.Index {
const mod = sema.mod;
var decls_anon_decl = try block.startAnonDecl();
defer decls_anon_decl.deinit();
@@ -16770,7 +16832,7 @@ fn typeInfoDecls(
};
try sema.queueFullTypeResolution(declaration_ty);
- var decl_vals = std.ArrayList(Value).init(sema.gpa);
+ var decl_vals = std.ArrayList(InternPool.Index).init(sema.gpa);
defer decl_vals.deinit();
var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa);
@@ -16778,33 +16840,39 @@ fn typeInfoDecls(
if (opt_namespace.unwrap()) |namespace_index| {
const namespace = mod.namespacePtr(namespace_index);
- try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), namespace, &decl_vals, &seen_namespaces);
+ try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces);
}
+ const array_decl_ty = try mod.arrayType(.{
+ .len = decl_vals.items.len,
+ .child = declaration_ty.ip_index,
+ .sentinel = .none,
+ });
const new_decl = try decls_anon_decl.finish(
- try mod.arrayType(.{
- .len = decl_vals.items.len,
- .child = declaration_ty.ip_index,
- .sentinel = .none,
- }),
- try Value.Tag.aggregate.create(
- decls_anon_decl.arena(),
- try decls_anon_decl.arena().dupe(Value, decl_vals.items),
- ),
+ array_decl_ty,
+ (try mod.intern(.{ .aggregate = .{
+ .ty = array_decl_ty.ip_index,
+ .storage = .{ .elems = decl_vals.items },
+ } })).toValue(),
0, // default alignment
);
- return try Value.Tag.slice.create(sema.arena, .{
- .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
- .len = try mod.intValue(Type.usize, decl_vals.items.len),
- });
+ return try mod.intern(.{ .ptr = .{
+ .ty = (try mod.ptrType(.{
+ .elem_type = declaration_ty.ip_index,
+ .size = .Slice,
+ .is_const = true,
+ })).ip_index,
+ .addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, decl_vals.items.len)).ip_index,
+ } });
}
fn typeInfoNamespaceDecls(
sema: *Sema,
block: *Block,
- decls_anon_decl: Allocator,
namespace: *Namespace,
- decl_vals: *std.ArrayList(Value),
+ declaration_ty: Type,
+ decl_vals: *std.ArrayList(InternPool.Index),
seen_namespaces: *std.AutoHashMap(*Namespace, void),
) !void {
const mod = sema.mod;
@@ -16817,7 +16885,7 @@ fn typeInfoNamespaceDecls(
if (decl.analysis == .in_progress) continue;
try mod.ensureDeclAnalyzed(decl_index);
const new_ns = decl.val.toType().getNamespace(mod).?;
- try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces);
+ try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces);
continue;
}
if (decl.kind != .named) continue;
@@ -16830,20 +16898,23 @@ fn typeInfoNamespaceDecls(
try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
0, // default alignment
);
- break :v try Value.Tag.slice.create(decls_anon_decl, .{
- .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl),
- .len = try mod.intValue(Type.usize, bytes.len),
- });
+ break :v try mod.intern(.{ .ptr = .{
+ .ty = .slice_const_u8_type,
+ .addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, bytes.len)).ip_index,
+ } });
};
- const fields = try decls_anon_decl.create([2]Value);
- fields.* = .{
+ const fields = .{
//name: []const u8,
name_val,
//is_pub: bool,
- Value.makeBool(decl.is_pub),
+ Value.makeBool(decl.is_pub).ip_index,
};
- try decl_vals.append(try Value.Tag.aggregate.create(decls_anon_decl, fields));
+ try decl_vals.append(try mod.intern(.{ .aggregate = .{
+ .ty = declaration_ty.ip_index,
+ .storage = .{ .elems = &fields },
+ } }));
}
}
@@ -17454,10 +17525,11 @@ fn zirRetErrValue(
// Return the error code from the function.
const kv = try mod.getErrorValue(err_name);
- const result_inst = try sema.addConstant(
- try mod.singleErrorSetType(err_name),
- try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
- );
+ const error_set_type = try mod.singleErrorSetType(err_name);
+ const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{
+ .ty = error_set_type.ip_index,
+ .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key),
+ } })).toValue());
return sema.analyzeRet(block, result_inst, src);
}
@@ -17782,10 +17854,12 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known");
// Check if this happens to be the lazy alignment of our element type, in
// which case we can make this 0 without resolving it.
- if (val.castTag(.lazy_align)) |payload| {
- if (payload.data.eql(elem_ty, sema.mod)) {
- break :blk .none;
- }
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int => |int| switch (int.storage) {
+ .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.ip_index) break :blk .none,
+ else => {},
+ },
+ else => {},
}
const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?);
try sema.validateAlign(block, align_src, abi_align);
@@ -17910,12 +17984,10 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com
return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
}
}
- if (obj_ty.sentinel(mod)) |sentinel| {
- const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel);
- return sema.addConstant(obj_ty, val);
- } else {
- return sema.addConstant(obj_ty, Value.initTag(.empty_array));
- }
+ return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{
+ .ty = obj_ty.ip_index,
+ .storage = .{ .elems = &.{} },
+ } })).toValue());
}
fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -18679,8 +18751,8 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
if (ty.isNoReturn(mod)) {
return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
}
- const val = try ty.lazyAbiAlignment(mod, sema.arena);
- if (val.isLazyAlign()) {
+ const val = try ty.lazyAbiAlignment(mod);
+ if (val.isLazyAlign(mod)) {
try sema.queueFullTypeResolution(ty);
}
return sema.addConstant(Type.comptime_int, val);
@@ -18704,7 +18776,8 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
- const bytes = val.castTag(.@"error").?.data.name;
+ const err_name = sema.mod.intern_pool.indexToKey(val.ip_index).err.name;
+ const bytes = sema.mod.intern_pool.stringToSlice(err_name);
return sema.addStrLit(block, bytes);
}
@@ -18794,7 +18867,8 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
.EnumLiteral => {
const val = try sema.resolveConstValue(block, .unneeded, operand, "");
- const bytes = val.castTag(.enum_literal).?.data;
+ const tag_name = mod.intern_pool.indexToKey(val.ip_index).enum_literal;
+ const bytes = mod.intern_pool.stringToSlice(tag_name);
return sema.addStrLit(block, bytes);
},
.Enum => operand_ty,
@@ -18883,11 +18957,8 @@ fn zirReify(
.EnumLiteral => return Air.Inst.Ref.enum_literal_type,
.Int => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const signedness_index = fields.getIndex("signedness").?;
- const bits_index = fields.getIndex("bits").?;
-
- const signedness_val = try union_val.val.fieldValue(fields.values()[signedness_index].ty, mod, signedness_index);
- const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index);
+ const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?);
+ const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?);
const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
@@ -18896,11 +18967,8 @@ fn zirReify(
},
.Vector => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const len_index = fields.getIndex("len").?;
- const child_index = fields.getIndex("child").?;
-
- const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index);
- const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
+ const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?);
+ const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?);
const len = @intCast(u32, len_val.toUnsignedInt(mod));
const child_ty = child_val.toType();
@@ -18915,9 +18983,7 @@ fn zirReify(
},
.Float => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const bits_index = fields.getIndex("bits").?;
-
- const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index);
+ const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?);
const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
const ty = switch (bits) {
@@ -18932,23 +18998,14 @@ fn zirReify(
},
.Pointer => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const size_index = fields.getIndex("size").?;
- const is_const_index = fields.getIndex("is_const").?;
- const is_volatile_index = fields.getIndex("is_volatile").?;
- const alignment_index = fields.getIndex("alignment").?;
- const address_space_index = fields.getIndex("address_space").?;
- const child_index = fields.getIndex("child").?;
- const is_allowzero_index = fields.getIndex("is_allowzero").?;
- const sentinel_index = fields.getIndex("sentinel").?;
-
- const size_val = try union_val.val.fieldValue(fields.values()[size_index].ty, mod, size_index);
- const is_const_val = try union_val.val.fieldValue(fields.values()[is_const_index].ty, mod, is_const_index);
- const is_volatile_val = try union_val.val.fieldValue(fields.values()[is_volatile_index].ty, mod, is_volatile_index);
- const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index);
- const address_space_val = try union_val.val.fieldValue(fields.values()[address_space_index].ty, mod, address_space_index);
- const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
- const is_allowzero_val = try union_val.val.fieldValue(fields.values()[is_allowzero_index].ty, mod, is_allowzero_index);
- const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index);
+ const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?);
+ const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?);
+ const is_volatile_val = try union_val.val.fieldValue(mod, fields.getIndex("is_volatile").?);
+ const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?);
+ const address_space_val = try union_val.val.fieldValue(mod, fields.getIndex("address_space").?);
+ const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?);
+ const is_allowzero_val = try union_val.val.fieldValue(mod, fields.getIndex("is_allowzero").?);
+ const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?);
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
@@ -19032,22 +19089,18 @@ fn zirReify(
},
.Array => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const len_index = fields.getIndex("len").?;
- const child_index = fields.getIndex("child").?;
- const sentinel_index = fields.getIndex("sentinel").?;
-
- const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index);
- const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
- const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index);
+ const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?);
+ const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?);
+ const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?);
const len = len_val.toUnsignedInt(mod);
const child_ty = child_val.toType();
- const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: {
+ const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: {
const ptr_ty = try Type.ptr(sema.arena, mod, .{
.@"addrspace" = .generic,
.pointee_type = child_ty,
});
- break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?;
+ break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?;
} else null;
const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod);
@@ -19055,9 +19108,7 @@ fn zirReify(
},
.Optional => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const child_index = fields.getIndex("child").?;
-
- const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index);
+ const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?);
const child_ty = child_val.toType();
@@ -19066,11 +19117,8 @@ fn zirReify(
},
.ErrorUnion => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const error_set_index = fields.getIndex("error_set").?;
- const payload_index = fields.getIndex("payload").?;
-
- const error_set_val = try union_val.val.fieldValue(fields.values()[error_set_index].ty, mod, error_set_index);
- const payload_val = try union_val.val.fieldValue(fields.values()[payload_index].ty, mod, payload_index);
+ const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?);
+ const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?);
const error_set_ty = error_set_val.toType();
const payload_ty = payload_val.toType();
@@ -19085,18 +19133,17 @@ fn zirReify(
.ErrorSet => {
const payload_val = union_val.val.optionalValue(mod) orelse
return sema.addType(Type.anyerror);
- const slice_val = payload_val.castTag(.slice).?.data;
- const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod));
+ const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
var names: Module.Fn.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
- const elem_val = try slice_val.ptr.elemValue(mod, i);
+ const elem_val = try payload_val.elemValue(mod, i);
const struct_val = elem_val.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// error_set: type,
const name_val = struct_val[0];
- const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
+ const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str);
const gop = names.getOrPutAssumeCapacity(name_ip);
if (gop.found_existing) {
@@ -19109,17 +19156,11 @@ fn zirReify(
},
.Struct => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const layout_index = fields.getIndex("layout").?;
- const backing_integer_index = fields.getIndex("backing_integer").?;
- const fields_index = fields.getIndex("fields").?;
- const decls_index = fields.getIndex("decls").?;
- const is_tuple_index = fields.getIndex("is_tuple").?;
-
- const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index);
- const backing_integer_val = try union_val.val.fieldValue(fields.values()[backing_integer_index].ty, mod, backing_integer_index);
- const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index);
- const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
- const is_tuple_val = try union_val.val.fieldValue(fields.values()[is_tuple_index].ty, mod, is_tuple_index);
+ const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?);
+ const backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?);
+ const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?);
+ const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?);
+ const is_tuple_val = try union_val.val.fieldValue(mod, fields.getIndex("is_tuple").?);
const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
@@ -19136,15 +19177,10 @@ fn zirReify(
},
.Enum => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const tag_type_index = fields.getIndex("tag_type").?;
- const fields_index = fields.getIndex("fields").?;
- const decls_index = fields.getIndex("decls").?;
- const is_exhaustive_index = fields.getIndex("is_exhaustive").?;
-
- const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index);
- const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index);
- const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
- const is_exhaustive_val = try union_val.val.fieldValue(fields.values()[is_exhaustive_index].ty, mod, is_exhaustive_index);
+ const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?);
+ const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?);
+ const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?);
+ const is_exhaustive_val = try union_val.val.fieldValue(mod, fields.getIndex("is_exhaustive").?);
// Decls
if (decls_val.sliceLen(mod) > 0) {
@@ -19195,7 +19231,7 @@ fn zirReify(
const value_val = field_struct_val[1];
const field_name = try name_val.toAllocatedBytes(
- Type.const_slice_u8,
+ Type.slice_const_u8,
sema.arena,
mod,
);
@@ -19237,9 +19273,7 @@ fn zirReify(
},
.Opaque => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const decls_index = fields.getIndex("decls").?;
-
- const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
+ const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?);
// Decls
if (decls_val.sliceLen(mod) > 0) {
@@ -19283,15 +19317,10 @@ fn zirReify(
},
.Union => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const layout_index = fields.getIndex("layout").?;
- const tag_type_index = fields.getIndex("tag_type").?;
- const fields_index = fields.getIndex("fields").?;
- const decls_index = fields.getIndex("decls").?;
-
- const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index);
- const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index);
- const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index);
- const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index);
+ const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?);
+ const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?);
+ const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?);
+ const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?);
// Decls
if (decls_val.sliceLen(mod) > 0) {
@@ -19386,7 +19415,7 @@ fn zirReify(
const alignment_val = field_struct_val[2];
const field_name = try name_val.toAllocatedBytes(
- Type.const_slice_u8,
+ Type.slice_const_u8,
new_decl_arena_allocator,
mod,
);
@@ -19489,19 +19518,12 @@ fn zirReify(
},
.Fn => {
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod);
- const calling_convention_index = fields.getIndex("calling_convention").?;
- const alignment_index = fields.getIndex("alignment").?;
- const is_generic_index = fields.getIndex("is_generic").?;
- const is_var_args_index = fields.getIndex("is_var_args").?;
- const return_type_index = fields.getIndex("return_type").?;
- const params_index = fields.getIndex("params").?;
-
- const calling_convention_val = try union_val.val.fieldValue(fields.values()[calling_convention_index].ty, mod, calling_convention_index);
- const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index);
- const is_generic_val = try union_val.val.fieldValue(fields.values()[is_generic_index].ty, mod, is_generic_index);
- const is_var_args_val = try union_val.val.fieldValue(fields.values()[is_var_args_index].ty, mod, is_var_args_index);
- const return_type_val = try union_val.val.fieldValue(fields.values()[return_type_index].ty, mod, return_type_index);
- const params_val = try union_val.val.fieldValue(fields.values()[params_index].ty, mod, params_index);
+ const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?);
+ const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?);
+ const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?);
+ const is_var_args_val = try union_val.val.fieldValue(mod, fields.getIndex("is_var_args").?);
+ const return_type_val = try union_val.val.fieldValue(mod, fields.getIndex("return_type").?);
+ const params_val = try union_val.val.fieldValue(mod, fields.getIndex("params").?);
const is_generic = is_generic_val.toBool(mod);
if (is_generic) {
@@ -19528,14 +19550,12 @@ fn zirReify(
const return_type = return_type_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
- const args_slice_val = params_val.castTag(.slice).?.data;
- const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod));
-
+ const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod));
const param_types = try sema.arena.alloc(InternPool.Index, args_len);
var noalias_bits: u32 = 0;
for (param_types, 0..) |*param_type, i| {
- const arg = try args_slice_val.ptr.elemValue(mod, i);
+ const arg = try params_val.elemValue(mod, i);
const arg_val = arg.castTag(.aggregate).?.data;
// TODO use reflection instead of magic numbers here
// is_generic: bool,
@@ -19676,7 +19696,7 @@ fn reifyStruct(
}
const field_name = try name_val.toAllocatedBytes(
- Type.const_slice_u8,
+ Type.slice_const_u8,
new_decl_arena_allocator,
mod,
);
@@ -19707,7 +19727,7 @@ fn reifyStruct(
}
const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: {
- const payload_val = if (opt_val.pointerDecl()) |opt_decl|
+ const payload_val = if (opt_val.pointerDecl(mod)) |opt_decl|
mod.declPtr(opt_decl).val
else
opt_val;
@@ -20137,7 +20157,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
if (maybe_operand_val) |val| {
if (!dest_ty.isAnyError(mod)) {
- const error_name = val.castTag(.@"error").?.data.name;
+ const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.ip_index).err.name);
if (!dest_ty.errorSetHasField(error_name, mod)) {
const msg = msg: {
const msg = try sema.errMsg(
@@ -20279,7 +20299,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
}
if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) {
- return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val));
+ return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{
+ .ty = dest_ty.ip_index,
+ .val = operand_val.toIntern(),
+ } })).toValue());
}
return sema.addConstant(aligned_dest_ty, operand_val);
}
@@ -20944,7 +20967,7 @@ fn checkPtrIsNotComptimeMutable(
operand_src: LazySrcLoc,
) CompileError!void {
_ = operand_src;
- if (ptr_val.isComptimeMutablePtr()) {
+ if (ptr_val.isComptimeMutablePtr(sema.mod)) {
return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{});
}
}
@@ -20953,7 +20976,7 @@ fn checkComptimeVarStore(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- decl_ref_mut: Value.Payload.DeclRefMut.Data,
+ decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl,
) CompileError!void {
if (@enumToInt(decl_ref_mut.runtime_index) < @enumToInt(block.runtime_index)) {
if (block.runtime_cond) |cond_src| {
@@ -21159,7 +21182,7 @@ fn resolveExportOptions(
const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
- const name_ty = Type.const_slice_u8;
+ const name_ty = Type.slice_const_u8;
const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
@@ -21168,7 +21191,7 @@ fn resolveExportOptions(
const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
- const section_ty = Type.const_slice_u8;
+ const section_ty = Type.slice_const_u8;
const section = if (section_opt_val.optionalValue(mod)) |section_val|
try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
else
@@ -21298,12 +21321,14 @@ fn zirCmpxchg(
}
const ptr_ty = sema.typeOf(ptr);
const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
- const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: {
- try sema.storePtr(block, src, ptr, new_value);
- break :blk Value.null;
- } else try Value.Tag.opt_payload.create(sema.arena, stored_val);
-
- return sema.addConstant(result_ty, result_val);
+ const result_val = try mod.intern(.{ .opt = .{
+ .ty = result_ty.ip_index,
+ .val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: {
+ try sema.storePtr(block, src, ptr, new_value);
+ break :blk .none;
+ } else stored_val.toIntern(),
+ } });
+ return sema.addConstant(result_ty, result_val.toValue());
} else break :rs new_value_src;
} else break :rs expected_src;
} else ptr_src;
@@ -21342,11 +21367,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
});
if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty);
-
- return sema.addConstant(
- vector_ty,
- try Value.Tag.repeated.create(sema.arena, scalar_val),
- );
+ return sema.addConstant(vector_ty, try sema.splat(vector_ty, scalar_val));
}
try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src);
@@ -21800,7 +21821,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
break :rs operand_src;
};
- if (ptr_val.isComptimeMutablePtr()) {
+ if (ptr_val.isComptimeMutablePtr(mod)) {
const ptr_ty = sema.typeOf(ptr);
const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const new_val = switch (op) {
@@ -22081,10 +22102,15 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
- const payload = field_ptr_val.castTag(.field_ptr) orelse {
- return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});
- };
- if (payload.data.field_index != field_index) {
+ const field = switch (mod.intern_pool.indexToKey(field_ptr_val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .field => |field| field,
+ else => null,
+ },
+ else => null,
+ } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});
+
+ if (field.index != field_index) {
const msg = msg: {
const msg = try sema.errMsg(
block,
@@ -22093,7 +22119,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
.{
field_name,
field_index,
- payload.data.field_index,
+ field.index,
parent_ty.fmt(sema.mod),
},
);
@@ -22103,7 +22129,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
};
return sema.failWithOwnedErrorMsg(msg);
}
- return sema.addConstant(result_ptr, payload.data.container_ptr);
+ return sema.addConstant(result_ptr, field.base.toValue());
}
try sema.requireRuntimeBlock(block, src, ptr_src);
@@ -22335,13 +22361,13 @@ fn analyzeMinMax(
// Compute the final bounds based on the runtime type and the comptime-known bound type
const min_val = switch (air_tag) {
- .min => try unrefined_elem_ty.minInt(sema.arena, mod),
- .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct
+ .min => try unrefined_elem_ty.minInt(mod),
+ .max => try comptime_elem_ty.minInt(mod), // @max(ct, rt) >= ct
else => unreachable,
};
const max_val = switch (air_tag) {
- .min => try comptime_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), // @min(ct, rt) <= ct
- .max => try unrefined_elem_ty.maxInt(sema.arena, mod, Type.comptime_int),
+ .min => try comptime_elem_ty.maxInt(mod, Type.comptime_int), // @min(ct, rt) <= ct
+ .max => try unrefined_elem_ty.maxInt(mod, Type.comptime_int),
else => unreachable,
};
@@ -22464,7 +22490,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
}
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
- if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src;
+ if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src;
if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
const len = try sema.usizeCast(block, dest_src, len_u64);
@@ -22618,7 +22644,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
return;
}
- if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src;
+ if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src;
if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| {
for (0..len) |i| {
const elem_index = try sema.addIntUnsigned(Type.usize, i);
@@ -22696,6 +22722,7 @@ fn zirVarExtended(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
@@ -22737,32 +22764,17 @@ fn zirVarExtended(
try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
- const new_var = try sema.gpa.create(Module.Var);
- errdefer sema.gpa.destroy(new_var);
-
- log.debug("created variable {*} owner_decl: {*} ({s})", .{
- new_var, sema.owner_decl, sema.owner_decl.name,
- });
-
- new_var.* = .{
- .owner_decl = sema.owner_decl_index,
- .init = init_val,
+ return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{
+ .ty = var_ty.ip_index,
+ .init = init_val.toIntern(),
+ .decl = sema.owner_decl_index,
+ .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString(
+ sema.gpa,
+ try sema.handleExternLibName(block, ty_src, lname),
+ )).toOptional() else .none,
.is_extern = small.is_extern,
- .is_mutable = true,
.is_threadlocal = small.is_threadlocal,
- .is_weak_linkage = false,
- .lib_name = null,
- };
-
- if (lib_name) |lname| {
- new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname);
- }
-
- const result = try sema.addConstant(
- var_ty,
- try Value.Tag.variable.create(sema.arena, new_var),
- );
- return result;
+ } })).toValue());
}
fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -22861,7 +22873,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const body = sema.code.extra[extra_index..][0..body_len];
extra_index += body.len;
- const ty = Type.const_slice_u8;
+ const ty = Type.slice_const_u8;
const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
if (val.isGenericPoison()) {
break :blk FuncLinkSection{ .generic = {} };
@@ -23133,10 +23145,10 @@ fn resolveExternOptions(
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.ExternOptions {
+ const mod = sema.mod;
const options_inst = try sema.resolveInst(zir_ref);
const extern_options_ty = try sema.getBuiltinType("ExternOptions");
const options = try sema.coerce(block, extern_options_ty, options_inst, src);
- const mod = sema.mod;
const name_src = sema.maybeOptionsSrc(block, src, "name");
const library_src = sema.maybeOptionsSrc(block, src, "library");
@@ -23145,7 +23157,7 @@ fn resolveExternOptions(
const name_ref = try sema.fieldVal(block, src, options, "name", name_src);
const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known");
- const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
+ const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src);
const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known");
@@ -23157,9 +23169,8 @@ fn resolveExternOptions(
const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src);
const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known");
- const library_name = if (!library_name_val.isNull(mod)) blk: {
- const payload = library_name_val.castTag(.opt_payload).?.data;
- const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
+ const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: {
+ const library_name = try payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod);
if (library_name.len == 0) {
return sema.fail(block, library_src, "library name cannot be empty", .{});
}
@@ -23227,40 +23238,36 @@ fn zirBuiltinExtern(
new_decl.name = try sema.gpa.dupeZ(u8, options.name);
{
- var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
- errdefer new_decl_arena.deinit();
- const new_decl_arena_allocator = new_decl_arena.allocator();
-
- const new_var = try new_decl_arena_allocator.create(Module.Var);
- new_var.* = .{
- .owner_decl = sema.owner_decl_index,
- .init = Value.@"unreachable",
+ const new_var = try mod.intern(.{ .variable = .{
+ .ty = ty.ip_index,
+ .init = .none,
+ .decl = sema.owner_decl_index,
.is_extern = true,
- .is_mutable = false,
+ .is_const = true,
.is_threadlocal = options.is_thread_local,
.is_weak_linkage = options.linkage == .Weak,
- .lib_name = null,
- };
+ } });
new_decl.src_line = sema.owner_decl.src_line;
// We only access this decl through the decl_ref with the correct type created
// below, so this type doesn't matter
- new_decl.ty = Type.anyopaque;
- new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var);
+ new_decl.ty = ty;
+ new_decl.val = new_var.toValue();
new_decl.@"align" = 0;
new_decl.@"linksection" = null;
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
-
- try new_decl.finalizeNewArena(&new_decl_arena);
}
try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index);
try sema.ensureDeclAnalyzed(new_decl_index);
- const ref = try Value.Tag.decl_ref.create(sema.arena, new_decl_index);
- return sema.addConstant(ty, ref);
+ const ref = try mod.intern(.{ .ptr = .{
+ .ty = (try mod.singleConstPtrType(ty)).ip_index,
+ .addr = .{ .decl = new_decl_index },
+ } });
+ return sema.addConstant(ty, ref.toValue());
}
fn zirWorkItem(
@@ -24117,7 +24124,6 @@ fn fieldVal(
const mod = sema.mod;
const gpa = sema.gpa;
- const arena = sema.arena;
const ip = &mod.intern_pool;
const object_src = src; // TODO better source location
const object_ty = sema.typeOf(object);
@@ -24221,13 +24227,14 @@ fn fieldVal(
else => unreachable,
}
- return sema.addConstant(
- if (!child_type.isAnyError(mod))
- child_type
- else
- try mod.singleErrorSetTypeNts(name),
- try Value.Tag.@"error".create(arena, .{ .name = ip.stringToSlice(name) }),
- );
+ const error_set_type = if (!child_type.isAnyError(mod))
+ child_type
+ else
+ try mod.singleErrorSetTypeNts(name);
+ return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{
+ .ty = error_set_type.ip_index,
+ .name = name,
+ } })).toValue());
},
.Union => {
if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
@@ -24368,14 +24375,13 @@ fn fieldPtr(
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
- return sema.addConstant(
- result_ty,
- try Value.Tag.field_ptr.create(sema.arena, .{
- .container_ptr = val,
- .container_ty = inner_ty,
- .field_index = Value.Payload.Slice.ptr_index,
- }),
- );
+ return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{
+ .ty = result_ty.ip_index,
+ .addr = .{ .field = .{
+ .base = val.ip_index,
+ .index = Value.slice_ptr_index,
+ } },
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -24389,14 +24395,13 @@ fn fieldPtr(
});
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
- return sema.addConstant(
- result_ty,
- try Value.Tag.field_ptr.create(sema.arena, .{
- .container_ptr = val,
- .container_ty = inner_ty,
- .field_index = Value.Payload.Slice.len_index,
- }),
- );
+ return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{
+ .ty = result_ty.ip_index,
+ .addr = .{ .field = .{
+ .base = val.ip_index,
+ .index = Value.slice_len_index,
+ } },
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -24442,14 +24447,16 @@ fn fieldPtr(
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
+ const error_set_type = if (!child_type.isAnyError(mod))
+ child_type
+ else
+ try mod.singleErrorSetTypeNts(name);
return sema.analyzeDeclRef(try anon_decl.finish(
- if (!child_type.isAnyError(mod))
- child_type
- else
- try mod.singleErrorSetTypeNts(name),
- try Value.Tag.@"error".create(anon_decl.arena(), .{
- .name = ip.stringToSlice(name),
- }),
+ error_set_type,
+ (try mod.intern(.{ .err = .{
+ .ty = error_set_type.ip_index,
+ .name = name,
+ } })).toValue(),
0, // default alignment
));
},
@@ -24714,14 +24721,13 @@ fn finishFieldCallBind(
}
if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| {
- const pointer = try sema.addConstant(
- ptr_field_ty,
- try Value.Tag.field_ptr.create(arena, .{
- .container_ptr = struct_ptr_val,
- .container_ty = container_ty,
- .field_index = field_index,
- }),
- );
+ const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
+ .ty = ptr_field_ty.ip_index,
+ .addr = .{ .field = .{
+ .base = struct_ptr_val.ip_index,
+ .index = field_index,
+ } },
+ } })).toValue());
return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) };
}
@@ -24901,22 +24907,22 @@ fn structFieldPtrByIndex(
const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data);
if (field.is_comptime) {
- const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
- .field_ty = field.ty,
- .field_val = try field.default_val.copy(sema.arena),
- });
- return sema.addConstant(ptr_field_ty, val);
+ const val = try mod.intern(.{ .ptr = .{
+ .ty = ptr_field_ty.ip_index,
+ .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) },
+ } });
+ return sema.addConstant(ptr_field_ty, val.toValue());
}
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
- return sema.addConstant(
- ptr_field_ty,
- try Value.Tag.field_ptr.create(sema.arena, .{
- .container_ptr = struct_ptr_val,
- .container_ty = struct_ptr_ty.childType(mod),
- .field_index = field_index,
- }),
- );
+ const val = try mod.intern(.{ .ptr = .{
+ .ty = ptr_field_ty.ip_index,
+ .addr = .{ .field = .{
+ .base = try struct_ptr_val.intern(struct_ptr_ty, mod),
+ .index = field_index,
+ } },
+ } });
+ return sema.addConstant(ptr_field_ty, val.toValue());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -24955,7 +24961,7 @@ fn structFieldVal(
if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
return sema.addConstant(field.ty, opv);
}
- return sema.addConstant(field.ty, try struct_val.fieldValue(field.ty, mod, field_index));
+ return sema.addConstant(field.ty, try struct_val.fieldValue(mod, field_index));
}
try sema.requireRuntimeBlock(block, src, null);
@@ -24999,7 +25005,7 @@ fn tupleFieldIndex(
field_name_src: LazySrcLoc,
) CompileError!u32 {
const mod = sema.mod;
- assert(!std.mem.eql(u8, field_name, "len"));
+ assert(!mem.eql(u8, field_name, "len"));
if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| {
if (field_index < tuple_ty.structFieldCount(mod)) return field_index;
return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{
@@ -25109,14 +25115,13 @@ fn unionFieldPtr(
},
.Packed, .Extern => {},
}
- return sema.addConstant(
- ptr_field_ty,
- try Value.Tag.field_ptr.create(arena, .{
- .container_ptr = union_ptr_val,
- .container_ty = union_ty,
- .field_index = field_index,
- }),
- );
+ return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
+ .ty = ptr_field_ty.ip_index,
+ .addr = .{ .field = .{
+ .base = union_ptr_val.ip_index,
+ .index = field_index,
+ } },
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, src, null);
@@ -25267,7 +25272,7 @@ fn elemPtrOneLayerOnly(
const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt(mod));
- const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, mod);
+ const elem_ptr = try ptr_val.elemPtr(indexable_ty, index, mod);
const result_ty = try sema.elemPtrType(indexable_ty, index);
return sema.addConstant(result_ty, elem_ptr);
};
@@ -25313,7 +25318,7 @@ fn elemVal(
const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt(mod));
- const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod);
+ const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, index, mod);
if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| {
return sema.addConstant(indexable_ty.elemType2(mod), elem_val);
}
@@ -25407,22 +25412,20 @@ fn tupleFieldPtr(
});
if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
- const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
- .field_ty = field_ty,
- .field_val = default_val,
- });
- return sema.addConstant(ptr_field_ty, val);
+ return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
+ .ty = ptr_field_ty.ip_index,
+ .addr = .{ .comptime_field = default_val.ip_index },
+ } })).toValue());
}
if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| {
- return sema.addConstant(
- ptr_field_ty,
- try Value.Tag.field_ptr.create(sema.arena, .{
- .container_ptr = tuple_ptr_val,
- .container_ty = tuple_ty,
- .field_index = field_index,
- }),
- );
+ return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
+ .ty = ptr_field_ty.ip_index,
+ .addr = .{ .field = .{
+ .base = tuple_ptr_val.ip_index,
+ .index = field_index,
+ } },
+ } })).toValue());
}
if (!init) {
@@ -25463,7 +25466,7 @@ fn tupleField(
if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| {
if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty);
- return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index));
+ return sema.addConstant(field_ty, try tuple_val.fieldValue(mod, field_index));
}
try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
@@ -25575,7 +25578,7 @@ fn elemPtrArray(
return sema.addConstUndef(elem_ptr_ty);
}
if (offset) |index| {
- const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, mod);
+ const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, index, mod);
return sema.addConstant(elem_ptr_ty, elem_ptr);
}
}
@@ -25631,7 +25634,7 @@ fn elemValSlice(
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
- const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod);
+ const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod);
if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
@@ -25691,7 +25694,7 @@ fn elemPtrSlice(
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
}
- const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod);
+ const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod);
return sema.addConstant(elem_ptr_ty, elem_ptr_val);
}
}
@@ -25851,7 +25854,7 @@ fn coerceExtra(
// Function body to function pointer.
if (inst_ty.zigTypeTag(mod) == .Fn) {
const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
- const fn_decl = fn_val.pointerDecl().?;
+ const fn_decl = fn_val.pointerDecl(mod).?;
const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
}
@@ -26080,14 +26083,14 @@ fn coerceExtra(
if (inst_child_ty.structFieldCount(mod) == 0) {
// Optional slice is represented with a null pointer so
// we use a dummy pointer value with the required alignment.
- const slice_val = try Value.Tag.slice.create(sema.arena, .{
- .ptr = if (dest_info.@"align" != 0)
+ return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{
+ .ty = dest_ty.ip_index,
+ .addr = .{ .int = (if (dest_info.@"align" != 0)
try mod.intValue(Type.usize, dest_info.@"align")
else
- try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena),
- .len = try mod.intValue(Type.usize, 0),
- });
- return sema.addConstant(dest_ty, slice_val);
+ try dest_info.pointee_type.lazyAbiAlignment(mod)).ip_index },
+ .len = (try mod.intValue(Type.usize, 0)).ip_index,
+ } })).toValue());
}
// pointer to tuple to slice
@@ -26255,7 +26258,8 @@ fn coerceExtra(
.EnumLiteral => {
// enum literal to enum
const val = try sema.resolveConstValue(block, .unneeded, inst, "");
- const bytes = val.castTag(.enum_literal).?.data;
+ const string = mod.intern_pool.indexToKey(val.ip_index).enum_literal;
+ const bytes = mod.intern_pool.stringToSlice(string);
const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse {
const msg = msg: {
const msg = try sema.errMsg(
@@ -26292,26 +26296,30 @@ fn coerceExtra(
if (maybe_inst_val) |inst_val| {
switch (inst_val.ip_index) {
.undef => return sema.addConstUndef(dest_ty),
- .none => switch (inst_val.tag()) {
- .eu_payload => {
- const payload = try sema.addConstant(
- inst_ty.errorUnionPayload(mod),
- inst_val.castTag(.eu_payload).?.data,
- );
- return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) {
- error.NotCoercible => break :eu,
- else => |e| return e,
- };
+ else => switch (mod.intern_pool.indexToKey(inst_val.ip_index)) {
+ .error_union => |error_union| switch (error_union.val) {
+ .err_name => |err_name| {
+ const error_set_ty = inst_ty.errorUnionSet(mod);
+ const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{
+ .ty = error_set_ty.ip_index,
+ .name = err_name,
+ } })).toValue());
+ return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src);
+ },
+ .payload => |payload| {
+ const payload_val = try sema.addConstant(
+ inst_ty.errorUnionPayload(mod),
+ payload.toValue(),
+ );
+ return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) {
+ error.NotCoercible => break :eu,
+ else => |e| return e,
+ };
+ },
},
- else => {},
+ else => unreachable,
},
- else => {},
}
- const error_set = try sema.addConstant(
- inst_ty.errorUnionSet(mod),
- inst_val,
- );
- return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src);
}
},
.ErrorSet => {
@@ -27029,7 +27037,7 @@ fn coerceInMemoryAllowedErrorSets(
},
}
- if (dst_ies.func == sema.owner_func) {
+ if (dst_ies.func == sema.owner_func_index.unwrap()) {
// We are trying to coerce an error set to the current function's
// inferred error set.
try dst_ies.addErrorSet(src_ty, ip, gpa);
@@ -27323,7 +27331,7 @@ fn coerceVarArgParam(
),
.Fn => blk: {
const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
- const fn_decl = fn_val.pointerDecl().?;
+ const fn_decl = fn_val.pointerDecl(mod).?;
break :blk try sema.analyzeDeclRef(fn_decl);
},
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
@@ -27441,7 +27449,7 @@ fn storePtr2(
try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
break :rs operand_src;
};
- if (ptr_val.isComptimeMutablePtr()) {
+ if (ptr_val.isComptimeMutablePtr(mod)) {
try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
return;
} else break :rs ptr_src;
@@ -27593,7 +27601,7 @@ fn storePtrVal(
}
const ComptimePtrMutationKit = struct {
- decl_ref_mut: Value.Payload.DeclRefMut.Data,
+ decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl,
pointee: union(enum) {
/// The pointer type matches the actual comptime Value so a direct
/// modification is possible.
@@ -27619,12 +27627,12 @@ const ComptimePtrMutationKit = struct {
decl_arena: std.heap.ArenaAllocator = undefined,
fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator {
- const decl = mod.declPtr(self.decl_ref_mut.decl_index);
+ const decl = mod.declPtr(self.decl_ref_mut.decl);
return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena);
}
fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void {
- const decl = mod.declPtr(self.decl_ref_mut.decl_index);
+ const decl = mod.declPtr(self.decl_ref_mut.decl);
decl.value_arena.?.release(&self.decl_arena);
self.decl_arena = undefined;
}
@@ -27637,6 +27645,7 @@ fn beginComptimePtrMutation(
ptr_val: Value,
ptr_elem_ty: Type,
) CompileError!ComptimePtrMutationKit {
+ if (true) unreachable;
const mod = sema.mod;
switch (ptr_val.tag()) {
.decl_ref_mut => {
@@ -28169,7 +28178,7 @@ fn beginComptimePtrMutation(
},
}
},
- .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already
+ .decl_ref => unreachable, // isComptimeMutablePtr has been checked already
else => unreachable,
}
}
@@ -28189,7 +28198,7 @@ fn beginComptimePtrMutationInner(
const decl = mod.declPtr(decl_ref_mut.decl_index);
var decl_arena: std.heap.ArenaAllocator = undefined;
- const allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena);
+ const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
decl_val.* = try decl_val.unintern(allocator, mod);
@@ -28273,44 +28282,83 @@ fn beginComptimePtrLoad(
const mod = sema.mod;
const target = mod.getTarget();
- var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) {
- .null_value => {
- return sema.fail(block, src, "attempt to use null value", .{});
- },
-
- .none => switch (ptr_val.tag()) {
- .decl_ref,
- .decl_ref_mut,
- => blk: {
- const decl_index = switch (ptr_val.tag()) {
- .decl_ref => ptr_val.castTag(.decl_ref).?.data,
- .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
+ var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl, .mut_decl => blk: {
+ const decl_index = switch (ptr.addr) {
+ .decl => |decl| decl,
+ .mut_decl => |mut_decl| mut_decl.decl,
else => unreachable,
};
- const is_mutable = ptr_val.tag() == .decl_ref_mut;
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
- if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad;
+ if (decl.getVariable(mod) != null) return error.RuntimeLoad;
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
.pointee = decl_tv,
- .is_mutable = is_mutable,
+ .is_mutable = false,
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
};
},
+ .int => return error.RuntimeLoad,
+ .eu_payload, .opt_payload => |container_ptr| blk: {
+ const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod);
+ const payload_ty = ptr.ty.toType().childType(mod);
+ var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty);
- .elem_ptr => blk: {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- const elem_ty = elem_ptr.elem_ty;
- var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null);
+ // eu_payload and opt_payload never have a well-defined layout
+ if (deref.parent != null) {
+ deref.parent = null;
+ deref.ty_without_well_defined_layout = container_ty;
+ }
+
+ if (deref.pointee) |*tv| {
+ const coerce_in_mem_ok =
+ (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
+ (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
+ if (coerce_in_mem_ok) {
+ const payload_val = switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+ .error_union => |error_union| switch (error_union.val) {
+ .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}),
+ .payload => |payload| payload,
+ },
+ .opt => |opt| switch (opt.val) {
+ .none => return sema.fail(block, src, "attempt to use null value", .{}),
+ else => opt.val,
+ },
+ else => unreachable,
+ };
+ tv.* = TypedValue{ .ty = payload_ty, .val = payload_val.toValue() };
+ break :blk deref;
+ }
+ }
+ deref.pointee = null;
+ break :blk deref;
+ },
+ .comptime_field => |comptime_field| blk: {
+ const field_ty = mod.intern_pool.typeOf(comptime_field).toType();
+ break :blk ComptimePtrLoadKit{
+ .parent = null,
+ .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() },
+ .is_mutable = false,
+ .ty_without_well_defined_layout = field_ty,
+ };
+ },
+ .elem => |elem_ptr| blk: {
+ const elem_ty = ptr.ty.toType().childType(mod);
+ var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null);
// This code assumes that elem_ptrs have been "flattened" in order for direct dereference
// to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
// our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
- if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| {
- assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod)));
+ switch (mod.intern_pool.indexToKey(elem_ptr.base)) {
+ .ptr => |base_ptr| switch (base_ptr.addr) {
+ .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)),
+ else => {},
+ },
+ else => {},
}
if (elem_ptr.index != 0) {
@@ -28327,7 +28375,7 @@ fn beginComptimePtrLoad(
}
}
- // If we're loading an elem_ptr that was derived from a different type
+ // If we're loading an elem that was derived from a different type
// than the true type of the underlying decl, we cannot deref directly
const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
const deref_elem_ty = deref.pointee.?.ty.childType(mod);
@@ -28373,31 +28421,25 @@ fn beginComptimePtrLoad(
};
break :blk deref;
},
+ .field => |field_ptr| blk: {
+ const field_index = @intCast(u32, field_ptr.index);
+ const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
+ var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty);
- .slice => blk: {
- const slice = ptr_val.castTag(.slice).?.data;
- break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null);
- },
-
- .field_ptr => blk: {
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
- const field_index = @intCast(u32, field_ptr.field_index);
- var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty);
-
- if (field_ptr.container_ty.hasWellDefinedLayout(mod)) {
- const struct_obj = mod.typeToStruct(field_ptr.container_ty);
+ if (container_ty.hasWellDefinedLayout(mod)) {
+ const struct_obj = mod.typeToStruct(container_ty);
if (struct_obj != null and struct_obj.?.layout == .Packed) {
// packed structs are not byte addressable
deref.parent = null;
} else if (deref.parent) |*parent| {
// Update the byte offset (in-place)
- try sema.resolveTypeLayout(field_ptr.container_ty);
- const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod);
+ try sema.resolveTypeLayout(container_ty);
+ const field_offset = container_ty.structFieldOffset(field_index, mod);
parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
}
} else {
deref.parent = null;
- deref.ty_without_well_defined_layout = field_ptr.container_ty;
+ deref.ty_without_well_defined_layout = container_ty;
}
const tv = deref.pointee orelse {
@@ -28405,294 +28447,40 @@ fn beginComptimePtrLoad(
break :blk deref;
};
const coerce_in_mem_ok =
- (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
- (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok;
+ (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
+ (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
if (!coerce_in_mem_ok) {
deref.pointee = null;
break :blk deref;
}
- if (field_ptr.container_ty.isSlice(mod)) {
- const slice_val = tv.val.castTag(.slice).?.data;
+ if (container_ty.isSlice(mod)) {
deref.pointee = switch (field_index) {
- Value.Payload.Slice.ptr_index => TypedValue{
- .ty = field_ptr.container_ty.slicePtrFieldType(mod),
- .val = slice_val.ptr,
+ Value.slice_ptr_index => TypedValue{
+ .ty = container_ty.slicePtrFieldType(mod),
+ .val = tv.val.slicePtr(mod),
},
- Value.Payload.Slice.len_index => TypedValue{
+ Value.slice_len_index => TypedValue{
.ty = Type.usize,
- .val = slice_val.len,
+ .val = mod.intern_pool.indexToKey(tv.val.ip_index).ptr.len.toValue(),
},
else => unreachable,
};
} else {
- const field_ty = field_ptr.container_ty.structFieldType(field_index, mod);
+ const field_ty = container_ty.structFieldType(field_index, mod);
deref.pointee = TypedValue{
.ty = field_ty,
- .val = try tv.val.fieldValue(tv.ty, mod, field_index),
+ .val = try tv.val.fieldValue(mod, field_index),
};
}
break :blk deref;
},
-
- .comptime_field_ptr => blk: {
- const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data;
- break :blk ComptimePtrLoadKit{
- .parent = null,
- .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val },
- .is_mutable = false,
- .ty_without_well_defined_layout = comptime_field_ptr.field_ty,
- };
- },
-
- .opt_payload_ptr,
- .eu_payload_ptr,
- => blk: {
- const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
- const payload_ty = switch (ptr_val.tag()) {
- .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(mod),
- .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod),
- else => unreachable,
- };
- var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty);
-
- // eu_payload_ptr and opt_payload_ptr never have a well-defined layout
- if (deref.parent != null) {
- deref.parent = null;
- deref.ty_without_well_defined_layout = payload_ptr.container_ty;
- }
-
- if (deref.pointee) |*tv| {
- const coerce_in_mem_ok =
- (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
- (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok;
- if (coerce_in_mem_ok) {
- const payload_val = switch (ptr_val.tag()) {
- .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else {
- return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name});
- },
- .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: {
- if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{});
- break :opt tv.val;
- },
- else => unreachable,
- };
- tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
- break :blk deref;
- }
- }
- deref.pointee = null;
- break :blk deref;
- },
- .opt_payload => blk: {
- const opt_payload = ptr_val.castTag(.opt_payload).?.data;
- break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null);
- },
-
- .variable,
- .extern_fn,
- .function,
- => return error.RuntimeLoad,
-
- else => unreachable,
},
- else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
- .int => return error.RuntimeLoad,
- .ptr => |ptr| switch (ptr.addr) {
- .@"var", .int => return error.RuntimeLoad,
- .decl, .mut_decl => blk: {
- const decl_index = switch (ptr.addr) {
- .decl => |decl| decl,
- .mut_decl => |mut_decl| mut_decl.decl,
- else => unreachable,
- };
- const decl = mod.declPtr(decl_index);
- const decl_tv = try decl.typedValue();
- if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad;
-
- const layout_defined = decl.ty.hasWellDefinedLayout(mod);
- break :blk ComptimePtrLoadKit{
- .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
- .pointee = decl_tv,
- .is_mutable = false,
- .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
- };
- },
- .eu_payload, .opt_payload => |container_ptr| blk: {
- const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod);
- const payload_ty = ptr.ty.toType().childType(mod);
- var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty);
-
- // eu_payload_ptr and opt_payload_ptr never have a well-defined layout
- if (deref.parent != null) {
- deref.parent = null;
- deref.ty_without_well_defined_layout = container_ty;
- }
-
- if (deref.pointee) |*tv| {
- const coerce_in_mem_ok =
- (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
- (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
- if (coerce_in_mem_ok) {
- const payload_val = switch (ptr_val.tag()) {
- .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else {
- return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name});
- },
- .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: {
- if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{});
- break :opt tv.val;
- },
- else => unreachable,
- };
- tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
- break :blk deref;
- }
- }
- deref.pointee = null;
- break :blk deref;
- },
- .comptime_field => |comptime_field| blk: {
- const field_ty = mod.intern_pool.typeOf(comptime_field).toType();
- break :blk ComptimePtrLoadKit{
- .parent = null,
- .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() },
- .is_mutable = false,
- .ty_without_well_defined_layout = field_ty,
- };
- },
- .elem => |elem_ptr| blk: {
- const elem_ty = ptr.ty.toType().childType(mod);
- var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null);
-
- // This code assumes that elem_ptrs have been "flattened" in order for direct dereference
- // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
- // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
- switch (mod.intern_pool.indexToKey(elem_ptr.base)) {
- .ptr => |base_ptr| switch (base_ptr.addr) {
- .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)),
- else => {},
- },
- else => {},
- }
-
- if (elem_ptr.index != 0) {
- if (elem_ty.hasWellDefinedLayout(mod)) {
- if (deref.parent) |*parent| {
- // Update the byte offset (in-place)
- const elem_size = try sema.typeAbiSize(elem_ty);
- const offset = parent.byte_offset + elem_size * elem_ptr.index;
- parent.byte_offset = try sema.usizeCast(block, src, offset);
- }
- } else {
- deref.parent = null;
- deref.ty_without_well_defined_layout = elem_ty;
- }
- }
-
- // If we're loading an elem that was derived from a different type
- // than the true type of the underlying decl, we cannot deref directly
- const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
- const deref_elem_ty = deref.pointee.?.ty.childType(mod);
- break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
- (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
- } else false;
- if (!ty_matches) {
- deref.pointee = null;
- break :blk deref;
- }
-
- var array_tv = deref.pointee.?;
- const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
- if (maybe_array_ty) |load_ty| {
- // It's possible that we're loading a [N]T, in which case we'd like to slice
- // the pointee array directly from our parent array.
- if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
- const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
- deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
- .ty = try Type.array(sema.arena, N, null, elem_ty, mod),
- .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
- } else null;
- break :blk deref;
- }
- }
-
- if (elem_ptr.index >= check_len) {
- deref.pointee = null;
- break :blk deref;
- }
- if (elem_ptr.index == check_len - 1) {
- if (array_tv.ty.sentinel(mod)) |sent| {
- deref.pointee = TypedValue{
- .ty = elem_ty,
- .val = sent,
- };
- break :blk deref;
- }
- }
- deref.pointee = TypedValue{
- .ty = elem_ty,
- .val = try array_tv.val.elemValue(mod, elem_ptr.index),
- };
- break :blk deref;
- },
- .field => |field_ptr| blk: {
- const field_index = @intCast(u32, field_ptr.index);
- const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
- var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty);
-
- if (container_ty.hasWellDefinedLayout(mod)) {
- const struct_obj = mod.typeToStruct(container_ty);
- if (struct_obj != null and struct_obj.?.layout == .Packed) {
- // packed structs are not byte addressable
- deref.parent = null;
- } else if (deref.parent) |*parent| {
- // Update the byte offset (in-place)
- try sema.resolveTypeLayout(container_ty);
- const field_offset = container_ty.structFieldOffset(field_index, mod);
- parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
- }
- } else {
- deref.parent = null;
- deref.ty_without_well_defined_layout = container_ty;
- }
-
- const tv = deref.pointee orelse {
- deref.pointee = null;
- break :blk deref;
- };
- const coerce_in_mem_ok =
- (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or
- (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok;
- if (!coerce_in_mem_ok) {
- deref.pointee = null;
- break :blk deref;
- }
-
- if (container_ty.isSlice(mod)) {
- const slice_val = tv.val.castTag(.slice).?.data;
- deref.pointee = switch (field_index) {
- Value.Payload.Slice.ptr_index => TypedValue{
- .ty = container_ty.slicePtrFieldType(mod),
- .val = slice_val.ptr,
- },
- Value.Payload.Slice.len_index => TypedValue{
- .ty = Type.usize,
- .val = slice_val.len,
- },
- else => unreachable,
- };
- } else {
- const field_ty = container_ty.structFieldType(field_index, mod);
- deref.pointee = TypedValue{
- .ty = field_ty,
- .val = try tv.val.fieldValue(tv.ty, mod, field_index),
- };
- }
- break :blk deref;
- },
- },
- else => unreachable,
+ .opt => |opt| switch (opt.val) {
+ .none => return sema.fail(block, src, "attempt to use null value", .{}),
+ else => try sema.beginComptimePtrLoad(block, src, opt.val.toValue(), null),
},
+ else => unreachable,
};
if (deref.pointee) |tv| {
@@ -28853,7 +28641,7 @@ fn coerceCompatiblePtrs(
}
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced(
- mod.gpa,
+ sema.gpa,
try val.intern(inst_ty, mod),
dest_ty.ip_index,
)).toValue());
@@ -29538,7 +29326,7 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
};
}
-fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void {
+fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void {
sema.mod.ensureFuncBodyAnalyzed(func) catch |err| {
if (sema.owner_func) |owner_func| {
owner_func.state = .dependency_failure;
@@ -29550,6 +29338,7 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void {
}
fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value {
+ const mod = sema.mod;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const decl = try anon_decl.finish(
@@ -29558,15 +29347,23 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value {
0, // default alignment
);
try sema.maybeQueueFuncBodyAnalysis(decl);
- try sema.mod.declareDeclDependency(sema.owner_decl_index, decl);
- return try Value.Tag.decl_ref.create(sema.arena, decl);
+ try mod.declareDeclDependency(sema.owner_decl_index, decl);
+ const result = try mod.intern(.{ .ptr = .{
+ .ty = (try mod.singleConstPtrType(ty)).ip_index,
+ .addr = .{ .decl = decl },
+ } });
+ return result.toValue();
}
fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value {
+ const mod = sema.mod;
const val = opt_val orelse return Value.null;
const ptr_val = try sema.refValue(block, ty, val);
- const result = try Value.Tag.opt_payload.create(sema.arena, ptr_val);
- return result;
+ const result = try mod.intern(.{ .opt = .{
+ .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).ip_index)).ip_index,
+ .val = ptr_val.ip_index,
+ } });
+ return result.toValue();
}
fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref {
@@ -29587,10 +29384,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo
const ptr_ty = try mod.ptrType(.{
.elem_type = decl_tv.ty.ip_index,
.alignment = InternPool.Alignment.fromByteUnits(decl.@"align"),
- .is_const = if (decl_tv.val.castTag(.variable)) |payload|
- !payload.data.is_mutable
- else
- false,
+ .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false,
.address_space = decl.@"addrspace",
});
if (analyze_fn_body) {
@@ -29608,8 +29402,8 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void {
const tv = try decl.typedValue();
if (tv.ty.zigTypeTag(mod) != .Fn) return;
if (!try sema.fnHasRuntimeBits(tv.ty)) return;
- const func = tv.val.castTag(.function) orelse return; // undef or extern_fn
- try mod.ensureFuncBodyAnalysisQueued(func.data);
+ const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn
+ try mod.ensureFuncBodyAnalysisQueued(func_index);
}
fn analyzeRef(
@@ -29622,14 +29416,12 @@ fn analyzeRef(
if (try sema.resolveMaybeUndefVal(operand)) |val| {
switch (val.ip_index) {
- .none => switch (val.tag()) {
- .extern_fn, .function => {
- const decl_index = val.pointerDecl().?;
- return sema.analyzeDeclRef(decl_index);
- },
+ .none => {},
+ else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
+ .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl),
+ .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl),
else => {},
},
- else => {},
}
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
@@ -29854,7 +29646,7 @@ fn analyzeIsNonErrComptimeOnly(
if (other_ies.errors.count() != 0) break :blk;
}
- if (ies.func == sema.owner_func) {
+ if (ies.func == sema.owner_func_index.unwrap()) {
// We're checking the inferred errorset of the current function and none of
// its child inferred error sets contained any errors meaning that any value
// so far with this type can't contain errors either.
@@ -29873,7 +29665,7 @@ fn analyzeIsNonErrComptimeOnly(
if (err_union.isUndef(mod)) {
return sema.addConstUndef(Type.bool);
}
- if (err_union.getError() == null) {
+ if (err_union.getError(mod) == null) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
@@ -30137,7 +29929,7 @@ fn analyzeSlice(
const end_int = end_val.getUnsignedInt(mod).?;
const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);
- const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sema.arena, sentinel_index, sema.mod);
+ const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sentinel_index, sema.mod);
const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false);
const actual_sentinel = switch (res) {
.runtime_load => break :sentinel_check,
@@ -30233,7 +30025,7 @@ fn analyzeSlice(
if (!new_ptr_val.isUndef(mod)) {
return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced(
- mod.gpa,
+ sema.gpa,
try new_ptr_val.intern(new_ptr_ty, mod),
return_ty.ip_index,
)).toValue());
@@ -30753,7 +30545,10 @@ fn wrapOptional(
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
if (try sema.resolveMaybeUndefVal(inst)) |val| {
- return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val));
+ return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{
+ .ty = dest_ty.ip_index,
+ .val = val.ip_index,
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, inst_src, null);
@@ -30771,7 +30566,10 @@ fn wrapErrorUnionPayload(
const dest_payload_ty = dest_ty.errorUnionPayload(mod);
const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false });
if (try sema.resolveMaybeUndefVal(coerced)) |val| {
- return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val));
+ return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{
+ .ty = dest_ty.ip_index,
+ .val = .{ .payload = val.ip_index },
+ } })).toValue());
}
try sema.requireRuntimeBlock(block, inst_src, null);
try sema.queueFullTypeResolution(dest_payload_ty);
@@ -30794,27 +30592,20 @@ fn wrapErrorUnionSet(
.anyerror_type => {},
else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) {
.error_set_type => |error_set_type| ok: {
- const expected_name = val.castTag(.@"error").?.data.name;
- if (ip.getString(expected_name).unwrap()) |expected_name_interned| {
- if (error_set_type.nameIndex(ip, expected_name_interned) != null)
- break :ok;
- }
+ const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name;
+ if (error_set_type.nameIndex(ip, expected_name) != null) break :ok;
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
.inferred_error_set_type => |ies_index| ok: {
const ies = mod.inferredErrorSetPtr(ies_index);
- const expected_name = val.castTag(.@"error").?.data.name;
+ const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name;
// We carefully do this in an order that avoids unnecessarily
// resolving the destination error set type.
if (ies.is_anyerror) break :ok;
- if (ip.getString(expected_name).unwrap()) |expected_name_interned| {
- if (ies.errors.contains(expected_name_interned)) break :ok;
- }
- if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
- break :ok;
- }
+ if (ies.errors.contains(expected_name)) break :ok;
+ if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok;
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
@@ -31462,43 +31253,33 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro
/// to a type not having its layout resolved.
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void {
switch (val.ip_index) {
- .none => switch (val.tag()) {
- .lazy_align => {
- const ty = val.castTag(.lazy_align).?.data;
- return sema.resolveTypeLayout(ty);
- },
- .lazy_size => {
- const ty = val.castTag(.lazy_size).?.data;
- return sema.resolveTypeLayout(ty);
- },
- .comptime_field_ptr => {
- const field_ptr = val.castTag(.comptime_field_ptr).?.data;
- return sema.resolveLazyValue(field_ptr.field_val);
- },
- .eu_payload,
- .opt_payload,
- => {
- const sub_val = val.cast(Value.Payload.SubValue).?.data;
- return sema.resolveLazyValue(sub_val);
- },
- .@"union" => {
- const union_val = val.castTag(.@"union").?.data;
- return sema.resolveLazyValue(union_val.val);
- },
- .aggregate => {
- const aggregate = val.castTag(.aggregate).?.data;
- for (aggregate) |elem_val| {
- try sema.resolveLazyValue(elem_val);
- }
- },
- .slice => {
- const slice = val.castTag(.slice).?.data;
- try sema.resolveLazyValue(slice.ptr);
- return sema.resolveLazyValue(slice.len);
+ .none => {},
+ else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) {
+ .int => |int| switch (int.storage) {
+ .u64, .i64, .big_int => {},
+ .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()),
+ },
+ .ptr => |ptr| {
+ switch (ptr.addr) {
+ .decl, .mut_decl => {},
+ .int => |int| try sema.resolveLazyValue(int.toValue()),
+ .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()),
+ .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()),
+ .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()),
+ }
+ if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue());
+ },
+ .aggregate => |aggregate| switch (aggregate.storage) {
+ .bytes => {},
+ .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()),
+ .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()),
+ },
+ .un => |un| {
+ try sema.resolveLazyValue(un.tag.toValue());
+ try sema.resolveLazyValue(un.val.toValue());
},
- else => return,
+ else => {},
},
- else => return,
}
}
@@ -31597,7 +31378,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
else blk: {
const decl = mod.declPtr(struct_obj.owner_decl);
var decl_arena: std.heap.ArenaAllocator = undefined;
- const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena);
+ const decl_arena_allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count());
};
@@ -31662,18 +31443,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
- var sema: Sema = .{
- .mod = mod,
- .gpa = gpa,
- .arena = analysis_arena.allocator(),
- .perm_arena = decl_arena_allocator,
- .code = zir,
- .owner_decl = decl,
- .owner_decl_index = decl_index,
- .func = null,
- .fn_ret_ty = Type.void,
- .owner_func = null,
- };
+ var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none };
defer sema.deinit();
var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
@@ -31720,8 +31490,10 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
};
defer sema.deinit();
@@ -31974,16 +31746,23 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()),
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
};
}
@@ -32141,8 +31920,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
.manyptr_const_u8_type,
.manyptr_const_u8_sentinel_0_type,
.single_const_pointer_to_comptime_int_type,
- .const_slice_u8_type,
- .const_slice_u8_sentinel_0_type,
+ .slice_const_u8_type,
+ .slice_const_u8_sentinel_0_type,
.anyerror_void_error_union_type,
.generic_poison_type,
.empty_struct_type,
@@ -32288,18 +32067,19 @@ fn resolveInferredErrorSet(
if (ies.is_resolved) return;
- if (ies.func.state == .in_progress) {
+ const func = mod.funcPtr(ies.func);
+ if (func.state == .in_progress) {
return sema.fail(block, src, "unable to resolve inferred error set", .{});
}
// In order to ensure that all dependencies are properly added to the set, we
// need to ensure the function body is analyzed of the inferred error set.
// However, in the case of comptime/inline function calls with inferred error sets,
- // each call gets a new InferredErrorSet object, which points to the same
- // `*Module.Fn`. Not only is the function not relevant to the inferred error set
+ // each call gets a new InferredErrorSet object, which contains the same
+ // `Module.Fn.Index`. Not only is the function not relevant to the inferred error set
// in this case, it may be a generic function which would cause an assertion failure
// if we called `ensureFuncBodyAnalyzed` on it here.
- const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl);
+ const ies_func_owner_decl = mod.declPtr(func.owner_decl);
const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
// if ies declared by a inline function with generic return type, the return_type should be generic_poison,
// because inline function does not create a new declaration, and the ies has been filled with analyzeCall,
@@ -32414,8 +32194,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
};
defer sema.deinit();
@@ -32754,8 +32536,10 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
+ .func_index = .none,
.fn_ret_ty = Type.void,
.owner_func = null,
+ .owner_func_index = .none,
};
defer sema.deinit();
@@ -33111,7 +32895,7 @@ fn generateUnionTagTypeNumbered(
const name = name: {
const fqn = try union_obj.getFullyQualifiedName(mod);
defer sema.gpa.free(fqn);
- break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn});
+ break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn});
};
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{
.ty = Type.type,
@@ -33160,7 +32944,7 @@ fn generateUnionTagTypeSimple(
const name = name: {
const fqn = try union_obj.getFullyQualifiedName(mod);
defer sema.gpa.free(fqn);
- break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn});
+ break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn});
};
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{
.ty = Type.type,
@@ -33288,19 +33072,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.inferred_error_set_type,
=> null,
- .array_type => |array_type| {
- if (array_type.len == 0)
- return Value.initTag(.empty_array);
- if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) {
- return Value.initTag(.the_only_possible_value);
+ inline .array_type, .vector_type => |seq_type| {
+ if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .storage = .{ .elems = &.{} },
+ } })).toValue();
+ if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| {
+ return (try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .storage = .{ .repeated_elem = opv.ip_index },
+ } })).toValue();
}
return null;
},
- .vector_type => |vector_type| {
- if (vector_type.len == 0) return Value.initTag(.empty_array);
- if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v;
- return null;
- },
.opt_type => |child| {
if (child == .noreturn_type) {
return try mod.nullValue(ty);
@@ -33466,16 +33250,23 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
},
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
};
}
@@ -33625,10 +33416,13 @@ fn analyzeComptimeAlloc(
decl.@"align" = alignment;
try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index);
- return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{
- .runtime_index = block.runtime_index,
- .decl_index = decl_index,
- }));
+ return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{
+ .ty = ptr_type.ip_index,
+ .addr = .{ .mut_decl = .{
+ .decl = decl_index,
+ .runtime_index = block.runtime_index,
+ } },
+ } })).toValue());
}
/// The places where a user can specify an address space attribute
@@ -33969,16 +33763,23 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()),
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
};
}
@@ -34337,8 +34138,9 @@ fn intFitsInType(
ty: Type,
vector_index: ?*usize,
) CompileError!bool {
- if (ty.ip_index == .comptime_int_type) return true;
const mod = sema.mod;
+ if (ty.ip_index == .comptime_int_type) return true;
+ const info = ty.intInfo(mod);
switch (val.ip_index) {
.undef,
.zero,
@@ -34346,40 +34148,8 @@ fn intFitsInType(
.zero_u8,
=> return true,
- .none => switch (val.tag()) {
- .lazy_align => {
- const info = ty.intInfo(mod);
- const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
- // If it is u16 or bigger we know the alignment fits without resolving it.
- if (info.bits >= max_needed_bits) return true;
- const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data);
- if (x == 0) return true;
- const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
- return info.bits >= actual_needed_bits;
- },
- .lazy_size => {
- const info = ty.intInfo(mod);
- const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
- // If it is u64 or bigger we know the size fits without resolving it.
- if (info.bits >= max_needed_bits) return true;
- const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data);
- if (x == 0) return true;
- const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
- return info.bits >= actual_needed_bits;
- },
-
- .the_only_possible_value => {
- assert(ty.intInfo(mod).bits == 0);
- return true;
- },
-
- .decl_ref_mut,
- .extern_fn,
- .decl_ref,
- .function,
- .variable,
- => {
- const info = ty.intInfo(mod);
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .variable, .extern_func, .func, .ptr => {
const target = mod.getTarget();
const ptr_bits = target.ptrBitWidth();
return switch (info.signedness) {
@@ -34387,27 +34157,51 @@ fn intFitsInType(
.unsigned => info.bits >= ptr_bits,
};
},
-
- .aggregate => {
- assert(ty.zigTypeTag(mod) == .Vector);
- for (val.castTag(.aggregate).?.data, 0..) |elem, i| {
- if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) {
- if (vector_index) |some| some.* = i;
- return false;
- }
- }
- return true;
+ .int => |int| switch (int.storage) {
+ .u64, .i64, .big_int => {
+ var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
+ const big_int = int.storage.toBigInt(&buffer);
+ return big_int.fitsInTwosComp(info.signedness, info.bits);
+ },
+ .lazy_align => |lazy_ty| {
+ const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
+ // If it is u16 or bigger we know the alignment fits without resolving it.
+ if (info.bits >= max_needed_bits) return true;
+ const x = try sema.typeAbiAlignment(lazy_ty.toType());
+ if (x == 0) return true;
+ const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+ return info.bits >= actual_needed_bits;
+ },
+ .lazy_size => |lazy_ty| {
+ const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
+ // If it is u64 or bigger we know the size fits without resolving it.
+ if (info.bits >= max_needed_bits) return true;
+ const x = try sema.typeAbiSize(lazy_ty.toType());
+ if (x == 0) return true;
+ const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+ return info.bits >= actual_needed_bits;
+ },
},
-
- else => unreachable,
- },
-
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| {
- const info = ty.intInfo(mod);
- var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
- const big_int = int.storage.toBigInt(&buffer);
- return big_int.fitsInTwosComp(info.signedness, info.bits);
+ .aggregate => |aggregate| {
+ assert(ty.zigTypeTag(mod) == .Vector);
+ return switch (aggregate.storage) {
+ .bytes => |bytes| for (bytes, 0..) |byte, i| {
+ if (byte == 0) continue;
+ const actual_needed_bits = std.math.log2(byte) + 1 + @boolToInt(info.signedness == .signed);
+ if (info.bits >= actual_needed_bits) continue;
+ if (vector_index) |vi| vi.* = i;
+ break false;
+ } else true,
+ .elems, .repeated_elem => for (switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems,
+ .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem),
+ }, 0..) |elem, i| {
+ if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue;
+ if (vector_index) |vi| vi.* = i;
+ break false;
+ } else true,
+ };
},
else => unreachable,
},
src/type.zig
@@ -93,16 +93,23 @@ pub const Type = struct {
},
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .simple_value => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
};
}
@@ -358,7 +365,7 @@ pub const Type = struct {
const func = ies.func;
try writer.writeAll("@typeInfo(@typeInfo(@TypeOf(");
- const owner_decl = mod.declPtr(func.owner_decl);
+ const owner_decl = mod.declPtr(mod.funcPtr(func).owner_decl);
try owner_decl.renderFullyQualifiedName(mod, writer);
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
},
@@ -467,16 +474,23 @@ pub const Type = struct {
},
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
}
}
@@ -675,16 +689,23 @@ pub const Type = struct {
.enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
};
}
@@ -777,16 +798,23 @@ pub const Type = struct {
},
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
};
}
@@ -866,8 +894,8 @@ pub const Type = struct {
/// May capture a reference to `ty`.
/// Returned value has type `comptime_int`.
- pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value {
- switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) {
+ pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
+ switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
.val => |val| return val,
.scalar => |x| return mod.intValue(Type.comptime_int, x),
}
@@ -880,7 +908,7 @@ pub const Type = struct {
pub const AbiAlignmentAdvancedStrat = union(enum) {
eager,
- lazy: Allocator,
+ lazy,
sema: *Sema,
};
@@ -1019,16 +1047,18 @@ pub const Type = struct {
if (!struct_obj.haveFieldTypes()) switch (strat) {
.eager => unreachable, // struct layout not resolved
.sema => unreachable, // handled above
- .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
};
if (struct_obj.layout == .Packed) {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
- .lazy => |arena| {
- if (!struct_obj.haveLayout()) {
- return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
- }
- },
+ .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
.eager => {},
}
assert(struct_obj.haveLayout());
@@ -1039,7 +1069,10 @@ pub const Type = struct {
var big_align: u32 = 0;
for (fields.values()) |field| {
if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
else => |e| return e,
})) continue;
@@ -1050,7 +1083,10 @@ pub const Type = struct {
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
.sema => unreachable, // handled above
- .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
},
};
big_align = @max(big_align, field_align);
@@ -1077,7 +1113,10 @@ pub const Type = struct {
.val => switch (strat) {
.eager => unreachable, // field type alignment not resolved
.sema => unreachable, // passed to abiAlignmentAdvanced above
- .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
},
}
}
@@ -1092,16 +1131,23 @@ pub const Type = struct {
.enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) },
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
}
}
@@ -1118,7 +1164,10 @@ pub const Type = struct {
switch (strat) {
.eager, .sema => {
if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
else => |e| return e,
})) {
return AbiAlignmentAdvanced{ .scalar = code_align };
@@ -1128,7 +1177,7 @@ pub const Type = struct {
(try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar,
) };
},
- .lazy => |arena| {
+ .lazy => {
switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |payload_align| {
return AbiAlignmentAdvanced{
@@ -1137,7 +1186,10 @@ pub const Type = struct {
},
.val => {},
}
- return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
+ return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() };
},
}
}
@@ -1160,16 +1212,22 @@ pub const Type = struct {
switch (strat) {
.eager, .sema => {
if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
else => |e| return e,
})) {
return AbiAlignmentAdvanced{ .scalar = 1 };
}
return child_type.abiAlignmentAdvanced(mod, strat);
},
- .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
+ .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
- .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+ .val => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
},
}
}
@@ -1198,7 +1256,10 @@ pub const Type = struct {
if (!union_obj.haveFieldTypes()) switch (strat) {
.eager => unreachable, // union layout not resolved
.sema => unreachable, // handled above
- .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
};
if (union_obj.fields.count() == 0) {
if (have_tag) {
@@ -1212,7 +1273,10 @@ pub const Type = struct {
if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod);
for (union_obj.fields.values()) |field| {
if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
+ error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
else => |e| return e,
})) continue;
@@ -1223,7 +1287,10 @@ pub const Type = struct {
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
.sema => unreachable, // handled above
- .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_align = ty.ip_index },
+ } })).toValue() },
},
};
max_align = @max(max_align, field_align);
@@ -1232,8 +1299,8 @@ pub const Type = struct {
}
/// May capture a reference to `ty`.
- pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value {
- switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) {
+ pub fn lazyAbiSize(ty: Type, mod: *Module) !Value {
+ switch (try ty.abiSizeAdvanced(mod, .lazy)) {
.val => |val| return val,
.scalar => |x| return mod.intValue(Type.comptime_int, x),
}
@@ -1283,7 +1350,10 @@ pub const Type = struct {
.scalar => |elem_size| return .{ .scalar = len * elem_size },
.val => switch (strat) {
.sema, .eager => unreachable,
- .lazy => |arena| return .{ .val = try Value.Tag.lazy_size.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
},
}
},
@@ -1291,9 +1361,10 @@ pub const Type = struct {
const opt_sema = switch (strat) {
.sema => |sema| sema,
.eager => null,
- .lazy => |arena| return AbiSizeAdvanced{
- .val = try Value.Tag.lazy_size.create(arena, ty),
- },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
};
const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
const elem_bits = @intCast(u32, elem_bits_u64);
@@ -1301,9 +1372,10 @@ pub const Type = struct {
const total_bytes = (total_bits + 7) / 8;
const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| x,
- .val => return AbiSizeAdvanced{
- .val = try Value.Tag.lazy_size.create(strat.lazy, ty),
- },
+ .val => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
};
const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment);
return AbiSizeAdvanced{ .scalar = result };
@@ -1320,7 +1392,10 @@ pub const Type = struct {
// in abiAlignmentAdvanced.
const code_size = abiSize(Type.anyerror, mod);
if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) },
+ error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
else => |e| return e,
})) {
// Same as anyerror.
@@ -1333,7 +1408,10 @@ pub const Type = struct {
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
},
};
@@ -1420,11 +1498,10 @@ pub const Type = struct {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
- .lazy => |arena| {
- if (!struct_obj.haveLayout()) {
- return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
- }
- },
+ .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
.eager => {},
}
assert(struct_obj.haveLayout());
@@ -1433,12 +1510,13 @@ pub const Type = struct {
else => {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
- .lazy => |arena| {
+ .lazy => {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
return AbiSizeAdvanced{ .scalar = 0 };
- if (!struct_obj.haveLayout()) {
- return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
- }
+ if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() };
},
.eager => {},
}
@@ -1469,16 +1547,23 @@ pub const Type = struct {
.enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) },
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
}
}
@@ -1492,11 +1577,10 @@ pub const Type = struct {
) Module.CompileError!AbiSizeAdvanced {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
- .lazy => |arena| {
- if (!union_obj.haveLayout()) {
- return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
- }
- },
+ .lazy => if (!union_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
.eager => {},
}
return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) };
@@ -1514,7 +1598,10 @@ pub const Type = struct {
}
if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) },
+ error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
else => |e| return e,
})) return AbiSizeAdvanced{ .scalar = 1 };
@@ -1527,7 +1614,10 @@ pub const Type = struct {
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) },
+ .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .lazy_size = ty.ip_index },
+ } })).toValue() },
},
};
@@ -1690,16 +1780,23 @@ pub const Type = struct {
.enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema),
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
}
}
@@ -2270,16 +2367,23 @@ pub const Type = struct {
.opaque_type => unreachable,
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
};
}
@@ -2443,16 +2547,17 @@ pub const Type = struct {
.inferred_error_set_type,
=> return null,
- .array_type => |array_type| {
- if (array_type.len == 0)
- return Value.initTag(.empty_array);
- if ((try array_type.child.toType().onePossibleValue(mod)) != null)
- return Value.initTag(.the_only_possible_value);
- return null;
- },
- .vector_type => |vector_type| {
- if (vector_type.len == 0) return Value.initTag(.empty_array);
- if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v;
+ inline .array_type, .vector_type => |seq_type| {
+ if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .storage = .{ .elems = &.{} },
+ } })).toValue();
+ if (try seq_type.child.toType().onePossibleValue(mod)) |opv| {
+ return (try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .storage = .{ .repeated_elem = opv.ip_index },
+ } })).toValue();
+ }
return null;
},
.opt_type => |child| {
@@ -2595,16 +2700,23 @@ pub const Type = struct {
},
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
};
}
@@ -2733,16 +2845,23 @@ pub const Type = struct {
.enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod),
// values, not types
- .undef => unreachable,
- .un => unreachable,
- .simple_value => unreachable,
- .extern_func => unreachable,
- .int => unreachable,
- .float => unreachable,
- .ptr => unreachable,
- .opt => unreachable,
- .enum_tag => unreachable,
- .aggregate => unreachable,
+ .undef,
+ .runtime_value,
+ .simple_value,
+ .variable,
+ .extern_func,
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .float,
+ .ptr,
+ .opt,
+ .aggregate,
+ .un,
+ => unreachable,
},
};
}
@@ -2802,13 +2921,12 @@ pub const Type = struct {
}
// Works for vectors and vectors of integers.
- pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value {
+ pub fn minInt(ty: Type, mod: *Module) !Value {
const scalar = try minIntScalar(ty.scalarType(mod), mod);
- if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
- return Value.Tag.repeated.create(arena, scalar);
- } else {
- return scalar;
- }
+ return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .storage = .{ .repeated_elem = scalar.ip_index },
+ } })).toValue() else scalar;
}
/// Asserts that the type is an integer.
@@ -2832,13 +2950,12 @@ pub const Type = struct {
// Works for vectors and vectors of integers.
/// The returned Value will have type dest_ty.
- pub fn maxInt(ty: Type, arena: Allocator, mod: *Module, dest_ty: Type) !Value {
+ pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty);
- if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
- return Value.Tag.repeated.create(arena, scalar);
- } else {
- return scalar;
- }
+ return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .storage = .{ .repeated_elem = scalar.ip_index },
+ } })).toValue() else scalar;
}
/// The returned Value will have type dest_ty.
@@ -3386,12 +3503,12 @@ pub const Type = struct {
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type };
- pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type };
+ pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type };
pub const single_const_pointer_to_comptime_int: Type = .{
.ip_index = .single_const_pointer_to_comptime_int_type,
};
- pub const const_slice_u8_sentinel_0: Type = .{ .ip_index = .const_slice_u8_sentinel_0_type };
+ pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type };
pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
src/TypedValue.zig
@@ -102,248 +102,15 @@ pub fn print(
return writer.writeAll(" }");
},
- .the_only_possible_value => return writer.writeAll("0"),
- .lazy_align => {
- const sub_ty = val.castTag(.lazy_align).?.data;
- const x = sub_ty.abiAlignment(mod);
- return writer.print("{d}", .{x});
- },
- .lazy_size => {
- const sub_ty = val.castTag(.lazy_size).?.data;
- const x = sub_ty.abiSize(mod);
- return writer.print("{d}", .{x});
- },
- .function => return writer.print("(function '{s}')", .{
- mod.declPtr(val.castTag(.function).?.data.owner_decl).name,
- }),
- .extern_fn => return writer.writeAll("(extern function)"),
- .variable => unreachable,
- .decl_ref_mut => {
- const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
- const decl = mod.declPtr(decl_index);
- if (level == 0) {
- return writer.print("(decl ref mut '{s}')", .{decl.name});
- }
- return print(.{
- .ty = decl.ty,
- .val = decl.val,
- }, writer, level - 1, mod);
- },
- .decl_ref => {
- const decl_index = val.castTag(.decl_ref).?.data;
- const decl = mod.declPtr(decl_index);
- if (level == 0) {
- return writer.print("(decl ref '{s}')", .{decl.name});
- }
- return print(.{
- .ty = decl.ty,
- .val = decl.val,
- }, writer, level - 1, mod);
- },
- .comptime_field_ptr => {
- const payload = val.castTag(.comptime_field_ptr).?.data;
- if (level == 0) {
- return writer.writeAll("(comptime field ptr)");
- }
- return print(.{
- .ty = payload.field_ty,
- .val = payload.field_val,
- }, writer, level - 1, mod);
- },
- .elem_ptr => {
- const elem_ptr = val.castTag(.elem_ptr).?.data;
- try writer.writeAll("&");
- if (level == 0) {
- try writer.writeAll("(ptr)");
- } else {
- try print(.{
- .ty = elem_ptr.elem_ty,
- .val = elem_ptr.array_ptr,
- }, writer, level - 1, mod);
- }
- return writer.print("[{}]", .{elem_ptr.index});
- },
- .field_ptr => {
- const field_ptr = val.castTag(.field_ptr).?.data;
- try writer.writeAll("&");
- if (level == 0) {
- try writer.writeAll("(ptr)");
- } else {
- try print(.{
- .ty = field_ptr.container_ty,
- .val = field_ptr.container_ptr,
- }, writer, level - 1, mod);
- }
-
- if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) {
- switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) {
- .anon_struct_type => |anon_struct| {
- if (anon_struct.names.len == 0) {
- return writer.print(".@\"{d}\"", .{field_ptr.field_index});
- }
- },
- else => {},
- }
- const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod);
- return writer.print(".{s}", .{field_name});
- } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) {
- const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index];
- return writer.print(".{s}", .{field_name});
- } else if (field_ptr.container_ty.isSlice(mod)) {
- switch (field_ptr.field_index) {
- Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"),
- Value.Payload.Slice.len_index => return writer.writeAll(".len"),
- else => unreachable,
- }
- }
- },
- .empty_array => return writer.writeAll(".{}"),
- .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}),
.bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)});
},
- .repeated => {
- if (level == 0) {
- return writer.writeAll(".{ ... }");
- }
- var i: u32 = 0;
- try writer.writeAll(".{ ");
- const elem_tv = TypedValue{
- .ty = ty.elemType2(mod),
- .val = val.castTag(.repeated).?.data,
- };
- const len = ty.arrayLen(mod);
- const max_len = std.math.min(len, max_aggregate_items);
- while (i < max_len) : (i += 1) {
- if (i != 0) try writer.writeAll(", ");
- try print(elem_tv, writer, level - 1, mod);
- }
- if (len > max_aggregate_items) {
- try writer.writeAll(", ...");
- }
- return writer.writeAll(" }");
- },
- .empty_array_sentinel => {
- if (level == 0) {
- return writer.writeAll(".{ (sentinel) }");
- }
- try writer.writeAll(".{ ");
- try print(.{
- .ty = ty.elemType2(mod),
- .val = ty.sentinel(mod).?,
- }, writer, level - 1, mod);
- return writer.writeAll(" }");
- },
- .slice => {
- if (level == 0) {
- return writer.writeAll(".{ ... }");
- }
- const payload = val.castTag(.slice).?.data;
- const elem_ty = ty.elemType2(mod);
- const len = payload.len.toUnsignedInt(mod);
-
- if (elem_ty.eql(Type.u8, mod)) str: {
- const max_len = @intCast(usize, std.math.min(len, max_string_len));
- var buf: [max_string_len]u8 = undefined;
-
- var i: u32 = 0;
- while (i < max_len) : (i += 1) {
- const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
- error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
- };
- if (elem_val.isUndef(mod)) break :str;
- buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
- }
-
- // TODO would be nice if this had a bit of unicode awareness.
- const truncated = if (len > max_string_len) " (truncated)" else "";
- return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
- }
-
- try writer.writeAll(".{ ");
-
- const max_len = std.math.min(len, max_aggregate_items);
- var i: u32 = 0;
- while (i < max_len) : (i += 1) {
- if (i != 0) try writer.writeAll(", ");
- const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
- error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
- };
- try print(.{
- .ty = elem_ty,
- .val = elem_val,
- }, writer, level - 1, mod);
- }
- if (len > max_aggregate_items) {
- try writer.writeAll(", ...");
- }
- return writer.writeAll(" }");
- },
- .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}),
- .eu_payload => {
- val = val.castTag(.eu_payload).?.data;
- ty = ty.errorUnionPayload(mod);
- },
- .opt_payload => {
- val = val.castTag(.opt_payload).?.data;
- ty = ty.optionalChild(mod);
- return print(.{ .ty = ty, .val = val }, writer, level, mod);
- },
- .eu_payload_ptr => {
- try writer.writeAll("&");
- if (level == 0) {
- return writer.writeAll("(ptr)");
- }
-
- const data = val.castTag(.eu_payload_ptr).?.data;
-
- try writer.writeAll("@as(");
- try print(.{
- .ty = Type.type,
- .val = ty.toValue(),
- }, writer, level - 1, mod);
-
- try writer.writeAll(", &(payload of ");
-
- try print(.{
- .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
- .val = data.container_ptr,
- }, writer, level - 1, mod);
-
- try writer.writeAll("))");
- return;
- },
- .opt_payload_ptr => {
- if (level == 0) {
- return writer.writeAll("&(ptr)");
- }
-
- const data = val.castTag(.opt_payload_ptr).?.data;
-
- try writer.writeAll("@as(");
- try print(.{
- .ty = Type.type,
- .val = ty.toValue(),
- }, writer, level - 1, mod);
-
- try writer.writeAll(", &(payload of ");
-
- try print(.{
- .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
- .val = data.container_ptr,
- }, writer, level - 1, mod);
-
- try writer.writeAll("))");
- return;
- },
-
// TODO these should not appear in this function
.inferred_alloc => return writer.writeAll("(inferred allocation value)"),
.inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"),
- .runtime_value => return writer.writeAll("[runtime value]"),
},
else => {
const key = mod.intern_pool.indexToKey(val.ip_index);
@@ -353,6 +120,12 @@ pub fn print(
switch (key) {
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}),
+ .lazy_align => |lazy_ty| return writer.print("{d}", .{
+ lazy_ty.toType().abiAlignment(mod),
+ }),
+ .lazy_size => |lazy_ty| return writer.print("{d}", .{
+ lazy_ty.toType().abiSize(mod),
+ }),
},
.enum_tag => |enum_tag| {
if (level == 0) {
@@ -407,7 +180,7 @@ fn printAggregate(
}
try print(.{
.ty = ty.structFieldType(i, mod),
- .val = try val.fieldValue(ty, mod, i),
+ .val = try val.fieldValue(mod, i),
}, writer, level - 1, mod);
}
if (ty.structFieldCount(mod) > max_aggregate_items) {
@@ -424,7 +197,7 @@ fn printAggregate(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
- const elem = try val.fieldValue(ty, mod, i);
+ const elem = try val.fieldValue(mod, i);
if (elem.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str;
}
@@ -441,7 +214,7 @@ fn printAggregate(
if (i != 0) try writer.writeAll(", ");
try print(.{
.ty = elem_ty,
- .val = try val.fieldValue(ty, mod, i),
+ .val = try val.fieldValue(mod, i),
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
src/value.zig
@@ -33,64 +33,12 @@ pub const Value = struct {
// Keep in sync with tools/stage2_pretty_printers_common.py
pub const Tag = enum(usize) {
// The first section of this enum are tags that require no payload.
- /// The only possible value for a particular type, which is stored externally.
- the_only_possible_value,
-
- empty_array, // See last_no_payload_tag below.
// After this, the tag requires a payload.
- function,
- extern_fn,
- /// A comptime-known pointer can point to the address of a global
- /// variable. The child element value in this case will have this tag.
- variable,
- /// A wrapper for values which are comptime-known but should
- /// semantically be runtime-known.
- runtime_value,
- /// Represents a pointer to a Decl.
- /// When machine codegen backend sees this, it must set the Decl's `alive` field to true.
- decl_ref,
- /// Pointer to a Decl, but allows comptime code to mutate the Decl's Value.
- /// This Tag will never be seen by machine codegen backends. It is changed into a
- /// `decl_ref` when a comptime variable goes out of scope.
- decl_ref_mut,
- /// Behaves like `decl_ref_mut` but validates that the stored value matches the field value.
- comptime_field_ptr,
- /// Pointer to a specific element of an array, vector or slice.
- elem_ptr,
- /// Pointer to a specific field of a struct or union.
- field_ptr,
/// A slice of u8 whose memory is managed externally.
bytes,
/// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`.
str_lit,
- /// This value is repeated some number of times. The amount of times to repeat
- /// is stored externally.
- repeated,
- /// An array with length 0 but it has a sentinel.
- empty_array_sentinel,
- /// Pointer and length as sub `Value` objects.
- slice,
- enum_literal,
- @"error",
- /// When the type is error union:
- /// * If the tag is `.@"error"`, the error union is an error.
- /// * If the tag is `.eu_payload`, the error union is a payload.
- /// * A nested error such as `anyerror!(anyerror!T)` in which the the outer error union
- /// is non-error, but the inner error union is an error, is represented as
- /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`.
- eu_payload,
- /// A pointer to the payload of an error union, based on a pointer to an error union.
- eu_payload_ptr,
- /// When the type is optional:
- /// * If the tag is `.null_value`, the optional is null.
- /// * If the tag is `.opt_payload`, the optional is a payload.
- /// * A nested optional such as `??T` in which the the outer optional
- /// is non-null, but the inner optional is null, is represented as
- /// a tag of `.opt_payload`, with a sub-tag of `.null_value`.
- opt_payload,
- /// A pointer to the payload of an optional, based on a pointer to an optional.
- opt_payload_ptr,
/// An instance of a struct, array, or vector.
/// Each element/field stored as a `Value`.
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
@@ -104,57 +52,19 @@ pub const Value = struct {
/// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc
/// instructions for comptime code.
inferred_alloc_comptime,
- /// The ABI alignment of the payload type.
- lazy_align,
- /// The ABI size of the payload type.
- lazy_size,
- pub const last_no_payload_tag = Tag.empty_array;
- pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
+ pub const no_payload_count = 0;
pub fn Type(comptime t: Tag) type {
return switch (t) {
- .the_only_possible_value,
- .empty_array,
- => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"),
-
- .extern_fn => Payload.ExternFn,
-
- .decl_ref => Payload.Decl,
-
- .repeated,
- .eu_payload,
- .opt_payload,
- .empty_array_sentinel,
- .runtime_value,
- => Payload.SubValue,
-
- .eu_payload_ptr,
- .opt_payload_ptr,
- => Payload.PayloadPtr,
-
- .bytes,
- .enum_literal,
- => Payload.Bytes,
+ .bytes => Payload.Bytes,
.str_lit => Payload.StrLit,
- .slice => Payload.Slice,
-
- .lazy_align,
- .lazy_size,
- => Payload.Ty,
-
- .function => Payload.Function,
- .variable => Payload.Variable,
- .decl_ref_mut => Payload.DeclRefMut,
- .elem_ptr => Payload.ElemPtr,
- .field_ptr => Payload.FieldPtr,
- .@"error" => Payload.Error,
+
.inferred_alloc => Payload.InferredAlloc,
.inferred_alloc_comptime => Payload.InferredAllocComptime,
.aggregate => Payload.Aggregate,
.@"union" => Payload.Union,
- .comptime_field_ptr => Payload.ComptimeFieldPtr,
};
}
@@ -249,91 +159,6 @@ pub const Value = struct {
.legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
};
} else switch (self.legacy.ptr_otherwise.tag) {
- .the_only_possible_value,
- .empty_array,
- => unreachable,
-
- .lazy_align, .lazy_size => {
- const payload = self.cast(Payload.Ty).?;
- const new_payload = try arena.create(Payload.Ty);
- new_payload.* = .{
- .base = payload.base,
- .data = payload.data,
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
- .function => return self.copyPayloadShallow(arena, Payload.Function),
- .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
- .variable => return self.copyPayloadShallow(arena, Payload.Variable),
- .decl_ref => return self.copyPayloadShallow(arena, Payload.Decl),
- .decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut),
- .eu_payload_ptr,
- .opt_payload_ptr,
- => {
- const payload = self.cast(Payload.PayloadPtr).?;
- const new_payload = try arena.create(Payload.PayloadPtr);
- new_payload.* = .{
- .base = payload.base,
- .data = .{
- .container_ptr = try payload.data.container_ptr.copy(arena),
- .container_ty = payload.data.container_ty,
- },
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
- .comptime_field_ptr => {
- const payload = self.cast(Payload.ComptimeFieldPtr).?;
- const new_payload = try arena.create(Payload.ComptimeFieldPtr);
- new_payload.* = .{
- .base = payload.base,
- .data = .{
- .field_val = try payload.data.field_val.copy(arena),
- .field_ty = payload.data.field_ty,
- },
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
- .elem_ptr => {
- const payload = self.castTag(.elem_ptr).?;
- const new_payload = try arena.create(Payload.ElemPtr);
- new_payload.* = .{
- .base = payload.base,
- .data = .{
- .array_ptr = try payload.data.array_ptr.copy(arena),
- .elem_ty = payload.data.elem_ty,
- .index = payload.data.index,
- },
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
- .field_ptr => {
- const payload = self.castTag(.field_ptr).?;
- const new_payload = try arena.create(Payload.FieldPtr);
- new_payload.* = .{
- .base = payload.base,
- .data = .{
- .container_ptr = try payload.data.container_ptr.copy(arena),
- .container_ty = payload.data.container_ty,
- .field_index = payload.data.field_index,
- },
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
.bytes => {
const bytes = self.castTag(.bytes).?.data;
const new_payload = try arena.create(Payload.Bytes);
@@ -347,52 +172,6 @@ pub const Value = struct {
};
},
.str_lit => return self.copyPayloadShallow(arena, Payload.StrLit),
- .repeated,
- .eu_payload,
- .opt_payload,
- .empty_array_sentinel,
- .runtime_value,
- => {
- const payload = self.cast(Payload.SubValue).?;
- const new_payload = try arena.create(Payload.SubValue);
- new_payload.* = .{
- .base = payload.base,
- .data = try payload.data.copy(arena),
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
- .slice => {
- const payload = self.castTag(.slice).?;
- const new_payload = try arena.create(Payload.Slice);
- new_payload.* = .{
- .base = payload.base,
- .data = .{
- .ptr = try payload.data.ptr.copy(arena),
- .len = try payload.data.len.copy(arena),
- },
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
- .enum_literal => {
- const payload = self.castTag(.enum_literal).?;
- const new_payload = try arena.create(Payload.Bytes);
- new_payload.* = .{
- .base = payload.base,
- .data = try arena.dupe(u8, payload.data),
- };
- return Value{
- .ip_index = .none,
- .legacy = .{ .ptr_otherwise = &new_payload.base },
- };
- },
- .@"error" => return self.copyPayloadShallow(arena, Payload.Error),
-
.aggregate => {
const payload = self.castTag(.aggregate).?;
const new_payload = try arena.create(Payload.Aggregate);
@@ -453,7 +232,7 @@ pub const Value = struct {
pub fn dump(
start_val: Value,
comptime fmt: []const u8,
- options: std.fmt.FormatOptions,
+ _: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
comptime assert(fmt.len == 0);
@@ -469,44 +248,6 @@ pub const Value = struct {
.@"union" => {
return out_stream.writeAll("(union value)");
},
- .the_only_possible_value => return out_stream.writeAll("(the only possible value)"),
- .lazy_align => {
- try out_stream.writeAll("@alignOf(");
- try val.castTag(.lazy_align).?.data.dump("", options, out_stream);
- return try out_stream.writeAll(")");
- },
- .lazy_size => {
- try out_stream.writeAll("@sizeOf(");
- try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
- return try out_stream.writeAll(")");
- },
- .runtime_value => return out_stream.writeAll("[runtime value]"),
- .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}),
- .extern_fn => return out_stream.writeAll("(extern function)"),
- .variable => return out_stream.writeAll("(variable)"),
- .decl_ref_mut => {
- const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
- return out_stream.print("(decl_ref_mut {d})", .{decl_index});
- },
- .decl_ref => {
- const decl_index = val.castTag(.decl_ref).?.data;
- return out_stream.print("(decl_ref {d})", .{decl_index});
- },
- .comptime_field_ptr => {
- return out_stream.writeAll("(comptime_field_ptr)");
- },
- .elem_ptr => {
- const elem_ptr = val.castTag(.elem_ptr).?.data;
- try out_stream.print("&[{}] ", .{elem_ptr.index});
- val = elem_ptr.array_ptr;
- },
- .field_ptr => {
- const field_ptr = val.castTag(.field_ptr).?.data;
- try out_stream.print("fieldptr({d}) ", .{field_ptr.field_index});
- val = field_ptr.container_ptr;
- },
- .empty_array => return out_stream.writeAll(".{}"),
- .enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}),
.bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
@@ -514,31 +255,8 @@ pub const Value = struct {
str_lit.index, str_lit.len,
});
},
- .repeated => {
- try out_stream.writeAll("(repeated) ");
- val = val.castTag(.repeated).?.data;
- },
- .empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"),
- .slice => return out_stream.writeAll("(slice)"),
- .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}),
- .eu_payload => {
- try out_stream.writeAll("(eu_payload) ");
- val = val.castTag(.eu_payload).?.data;
- },
- .opt_payload => {
- try out_stream.writeAll("(opt_payload) ");
- val = val.castTag(.opt_payload).?.data;
- },
.inferred_alloc => return out_stream.writeAll("(inferred allocation value)"),
.inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"),
- .eu_payload_ptr => {
- try out_stream.writeAll("(eu_payload_ptr)");
- val = val.castTag(.eu_payload_ptr).?.data.container_ptr;
- },
- .opt_payload_ptr => {
- try out_stream.writeAll("(opt_payload_ptr)");
- val = val.castTag(.opt_payload_ptr).?.data.container_ptr;
- },
};
}
@@ -569,30 +287,23 @@ pub const Value = struct {
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
return allocator.dupe(u8, bytes);
},
- .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
- .repeated => {
- const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod));
- const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod)));
- @memset(result, byte);
- return result;
- },
- .decl_ref => {
- const decl_index = val.castTag(.decl_ref).?.data;
- const decl = mod.declPtr(decl_index);
- const decl_val = try decl.value();
- return decl_val.toAllocatedBytes(decl.ty, allocator, mod);
- },
- .the_only_possible_value => return &[_]u8{},
- .slice => {
- const slice = val.castTag(.slice).?.data;
- return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod);
- },
else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
},
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)),
.ptr => |ptr| switch (ptr.len) {
.none => unreachable,
- else => return arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod),
+ else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod),
+ },
+ .aggregate => |aggregate| switch (aggregate.storage) {
+ .bytes => |bytes| try allocator.dupe(u8, bytes),
+ .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
+ .repeated_elem => |elem| {
+ const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod));
+ const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod)));
+ @memset(result, byte);
+ return result;
+ },
},
else => unreachable,
},
@@ -611,29 +322,6 @@ pub const Value = struct {
pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index);
switch (val.tag()) {
- .elem_ptr => {
- const pl = val.castTag(.elem_ptr).?.data;
- return mod.intern(.{ .ptr = .{
- .ty = ty.ip_index,
- .addr = .{ .elem = .{
- .base = pl.array_ptr.ip_index,
- .index = pl.index,
- } },
- } });
- },
- .slice => {
- const pl = val.castTag(.slice).?.data;
- const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod);
- return mod.intern(.{ .ptr = .{
- .ty = ty.ip_index,
- .addr = mod.intern_pool.indexToKey(ptr).ptr.addr,
- .len = try pl.len.intern(Type.usize, mod),
- } });
- },
- .opt_payload => return mod.intern(.{ .opt = .{
- .ty = ty.ip_index,
- .val = try val.castTag(.opt_payload).?.data.intern(ty.childType(mod), mod),
- } }),
.aggregate => {
const old_elems = val.castTag(.aggregate).?.data;
const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len);
@@ -651,13 +339,6 @@ pub const Value = struct {
.storage = .{ .elems = new_elems },
} });
},
- .repeated => return mod.intern(.{ .aggregate = .{
- .ty = ty.ip_index,
- .storage = .{ .repeated_elem = try val.castTag(.repeated).?.data.intern(
- ty.structFieldType(0, mod),
- mod,
- ) },
- } }),
.@"union" => {
const pl = val.castTag(.@"union").?.data;
return mod.intern(.{ .un = .{
@@ -679,7 +360,6 @@ pub const Value = struct {
for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue();
return Tag.aggregate.create(arena, new_elems);
},
- .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()),
},
else => return val,
}
@@ -698,31 +378,21 @@ pub const Value = struct {
pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
const ip = &mod.intern_pool;
switch (val.ip_index) {
- .none => {
- const field_index = switch (val.tag()) {
- .the_only_possible_value => blk: {
- assert(ty.enumFieldCount(mod) == 1);
- break :blk 0;
- },
- .enum_literal => i: {
- const name = val.castTag(.enum_literal).?.data;
- break :i ty.enumFieldIndex(name, mod).?;
- },
- else => unreachable,
- };
- return switch (ip.indexToKey(ty.ip_index)) {
- // Assume it is already an integer and return it directly.
- .simple_type, .int_type => val,
- .enum_type => |enum_type| if (enum_type.values.len != 0)
- enum_type.values[field_index].toValue()
- else // Field index and integer values are the same.
- mod.intValue(enum_type.tag_ty.toType(), field_index),
- else => unreachable,
- };
- },
else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
+ .enum_literal => |enum_literal| {
+ const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?;
+ return switch (ip.indexToKey(ty.ip_index)) {
+ // Assume it is already an integer and return it directly.
+ .simple_type, .int_type => val,
+ .enum_type => |enum_type| if (enum_type.values.len != 0)
+ enum_type.values[field_index].toValue()
+ else // Field index and integer values are the same.
+ mod.intValue(enum_type.tag_ty.toType(), field_index),
+ else => unreachable,
+ };
+ },
.enum_type => |enum_type| (try ip.getCoerced(
mod.gpa,
val.ip_index,
@@ -733,18 +403,12 @@ pub const Value = struct {
}
}
- pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
- _ = ty; // TODO: remove this parameter now that we use InternPool
-
- if (val.castTag(.enum_literal)) |payload| {
- return payload.data;
- }
-
+ pub fn tagName(val: Value, mod: *Module) []const u8 {
const ip = &mod.intern_pool;
-
const enum_tag = switch (ip.indexToKey(val.ip_index)) {
.un => |un| ip.indexToKey(un.tag).enum_tag,
.enum_tag => |x| x,
+ .enum_literal => |name| return ip.stringToSlice(name),
else => unreachable,
};
const enum_type = ip.indexToKey(enum_tag.ty).enum_type;
@@ -773,49 +437,61 @@ pub const Value = struct {
.bool_true => BigIntMutable.init(&space.limbs, 1).toConst(),
.undef => unreachable,
.null_value => BigIntMutable.init(&space.limbs, 0).toConst(),
- .none => switch (val.tag()) {
- .the_only_possible_value, // i0, u0
- => BigIntMutable.init(&space.limbs, 0).toConst(),
-
- .runtime_value => {
- const sub_val = val.castTag(.runtime_value).?.data;
- return sub_val.toBigIntAdvanced(space, mod, opt_sema);
- },
- .lazy_align => {
- const ty = val.castTag(.lazy_align).?.data;
- if (opt_sema) |sema| {
- try sema.resolveTypeLayout(ty);
- }
- const x = ty.abiAlignment(mod);
- return BigIntMutable.init(&space.limbs, x).toConst();
- },
- .lazy_size => {
- const ty = val.castTag(.lazy_size).?.data;
- if (opt_sema) |sema| {
- try sema.resolveTypeLayout(ty);
- }
- const x = ty.abiSize(mod);
- return BigIntMutable.init(&space.limbs, x).toConst();
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema),
+ .int => |int| switch (int.storage) {
+ .u64, .i64, .big_int => int.storage.toBigInt(space),
+ .lazy_align, .lazy_size => |ty| {
+ if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType());
+ const x = switch (int.storage) {
+ else => unreachable,
+ .lazy_align => ty.toType().abiAlignment(mod),
+ .lazy_size => ty.toType().abiSize(mod),
+ };
+ return BigIntMutable.init(&space.limbs, x).toConst();
+ },
},
-
- .elem_ptr => {
- const elem_ptr = val.castTag(.elem_ptr).?.data;
- const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?;
- const elem_size = elem_ptr.elem_ty.abiSize(mod);
- const new_addr = array_addr + elem_size * elem_ptr.index;
- return BigIntMutable.init(&space.limbs, new_addr).toConst();
+ .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema),
+ .ptr => |ptr| switch (ptr.len) {
+ .none => switch (ptr.addr) {
+ .int => |int| int.toValue().toBigIntAdvanced(space, mod, opt_sema),
+ .elem => |elem| {
+ const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)).?;
+ const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod);
+ const new_addr = base_addr + elem.index * elem_size;
+ return BigIntMutable.init(&space.limbs, new_addr).toConst();
+ },
+ else => unreachable,
+ },
+ else => unreachable,
},
-
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| int.storage.toBigInt(space),
- .enum_tag => |enum_tag| mod.intern_pool.indexToKey(enum_tag.int).int.storage.toBigInt(space),
else => unreachable,
},
};
}
+ pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn {
+ return mod.funcPtrUnwrap(val.getFunctionIndex(mod));
+ }
+
+ pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex {
+ return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.ip_index) else .none;
+ }
+
+ pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc {
+ return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .extern_func => |extern_func| extern_func,
+ else => null,
+ } else null;
+ }
+
+ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable {
+ return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .variable => |variable| variable,
+ else => null,
+ } else null;
+ }
+
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 {
@@ -825,42 +501,27 @@ pub const Value = struct {
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 {
- switch (val.ip_index) {
- .bool_false => return 0,
- .bool_true => return 1,
+ return switch (val.ip_index) {
+ .bool_false => 0,
+ .bool_true => 1,
.undef => unreachable,
- .none => switch (val.tag()) {
- .the_only_possible_value, // i0, u0
- => return 0,
-
- .lazy_align => {
- const ty = val.castTag(.lazy_align).?.data;
- if (opt_sema) |sema| {
- return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
- } else {
- return ty.abiAlignment(mod);
- }
- },
- .lazy_size => {
- const ty = val.castTag(.lazy_size).?.data;
- if (opt_sema) |sema| {
- return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
- } else {
- return ty.abiSize(mod);
- }
- },
-
- else => return null,
- },
- else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| switch (int.storage) {
.big_int => |big_int| big_int.to(u64) catch null,
.u64 => |x| x,
.i64 => |x| std.math.cast(u64, x),
+ .lazy_align => |ty| if (opt_sema) |sema|
+ (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar
+ else
+ ty.toType().abiAlignment(mod),
+ .lazy_size => |ty| if (opt_sema) |sema|
+ (try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar
+ else
+ ty.toType().abiSize(mod),
},
else => null,
},
- }
+ };
}
/// Asserts the value is an integer and it fits in a u64
@@ -870,58 +531,40 @@ pub const Value = struct {
/// Asserts the value is an integer and it fits in a i64
pub fn toSignedInt(val: Value, mod: *Module) i64 {
- switch (val.ip_index) {
- .bool_false => return 0,
- .bool_true => return 1,
+ return switch (val.ip_index) {
+ .bool_false => 0,
+ .bool_true => 1,
.undef => unreachable,
- .none => switch (val.tag()) {
- .the_only_possible_value, // i0, u0
- => return 0,
-
- .lazy_align => {
- const ty = val.castTag(.lazy_align).?.data;
- return @intCast(i64, ty.abiAlignment(mod));
- },
- .lazy_size => {
- const ty = val.castTag(.lazy_size).?.data;
- return @intCast(i64, ty.abiSize(mod));
- },
-
- else => unreachable,
- },
- else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| switch (int.storage) {
.big_int => |big_int| big_int.to(i64) catch unreachable,
.i64 => |x| x,
.u64 => |x| @intCast(i64, x),
+ .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)),
+ .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)),
},
else => unreachable,
},
- }
+ };
}
- pub fn toBool(val: Value, mod: *const Module) bool {
+ pub fn toBool(val: Value, _: *const Module) bool {
return switch (val.ip_index) {
.bool_true => true,
.bool_false => false,
- .none => unreachable,
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| switch (int.storage) {
- .big_int => |big_int| !big_int.eqZero(),
- inline .u64, .i64 => |x| x != 0,
- },
- else => unreachable,
- },
+ else => unreachable,
};
}
- fn isDeclRef(val: Value) bool {
+ fn isDeclRef(val: Value, mod: *Module) bool {
var check = val;
- while (true) switch (check.tag()) {
- .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true,
- .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr,
- .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr,
- .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
+ while (true) switch (mod.intern_pool.indexToKey(check.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl, .mut_decl, .comptime_field => return true,
+ .eu_payload, .opt_payload => |index| check = index.toValue(),
+ .elem, .field => |base_index| check = base_index.base.toValue(),
+ else => return false,
+ },
else => return false,
};
}
@@ -953,24 +596,9 @@ pub const Value = struct {
const bits = int_info.bits;
const byte_count = (bits + 7) / 8;
- const int_val = try val.enumToInt(ty, mod);
-
- if (byte_count <= @sizeOf(u64)) {
- const ip_key = mod.intern_pool.indexToKey(int_val.ip_index);
- const int: u64 = switch (ip_key.int.storage) {
- .u64 => |x| x,
- .i64 => |x| @bitCast(u64, x),
- .big_int => unreachable,
- };
- for (buffer[0..byte_count], 0..) |_, i| switch (endian) {
- .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
- .Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
- };
- } else {
- var bigint_buffer: BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_buffer, mod);
- bigint.writeTwosComplement(buffer[0..byte_count], endian);
- }
+ var bigint_buffer: BigIntSpace = undefined;
+ const bigint = val.toBigInt(&bigint_buffer, mod);
+ bigint.writeTwosComplement(buffer[0..byte_count], endian);
},
.Float => switch (ty.floatBits(target)) {
16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian),
@@ -1016,7 +644,12 @@ pub const Value = struct {
.ErrorSet => {
// TODO revisit this when we have the concept of the error tag type
const Int = u16;
- const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
+ const name = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .err => |err| err.name,
+ .error_union => |error_union| error_union.val.err_name,
+ else => unreachable,
+ };
+ const int = mod.global_error_set.get(mod.intern_pool.stringToSlice(name)).?;
std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
},
.Union => switch (ty.containerLayout(mod)) {
@@ -1029,7 +662,7 @@ pub const Value = struct {
},
.Pointer => {
if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout;
- if (val.isDeclRef()) return error.ReinterpretDeclRef;
+ if (val.isDeclRef(mod)) return error.ReinterpretDeclRef;
return val.writeToMemory(Type.usize, mod, buffer);
},
.Optional => {
@@ -1141,14 +774,14 @@ pub const Value = struct {
.Packed => {
const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod);
const field_type = ty.unionFields(mod).values()[field_index.?].ty;
- const field_val = try val.fieldValue(field_type, mod, field_index.?);
+ const field_val = try val.fieldValue(mod, field_index.?);
return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
},
},
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
- if (val.isDeclRef()) return error.ReinterpretDeclRef;
+ if (val.isDeclRef(mod)) return error.ReinterpretDeclRef;
return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
},
.Optional => {
@@ -1262,13 +895,11 @@ pub const Value = struct {
// TODO revisit this when we have the concept of the error tag type
const Int = u16;
const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian);
-
- const payload = try arena.create(Value.Payload.Error);
- payload.* = .{
- .base = .{ .tag = .@"error" },
- .data = .{ .name = mod.error_name_list.items[@intCast(usize, int)] },
- };
- return Value.initPayload(&payload.base);
+ const name = mod.error_name_list.items[@intCast(usize, int)];
+ return (try mod.intern(.{ .err = .{
+ .ty = ty.ip_index,
+ .name = mod.intern_pool.getString(name).unwrap().?,
+ } })).toValue();
},
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
@@ -1383,7 +1014,7 @@ pub const Value = struct {
}
/// Asserts that the value is a float or an integer.
- pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T {
+ pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| switch (int.storage) {
.big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)),
@@ -1393,6 +1024,8 @@ pub const Value = struct {
}
return @intToFloat(T, x);
},
+ .lazy_align => |ty| @intToFloat(T, ty.toType().abiAlignment(mod)),
+ .lazy_size => |ty| @intToFloat(T, ty.toType().abiSize(mod)),
},
.float => |float| switch (float.storage) {
inline else => |x| @floatCast(T, x),
@@ -1421,89 +1054,24 @@ pub const Value = struct {
}
pub fn clz(val: Value, ty: Type, mod: *Module) u64 {
- const ty_bits = ty.intInfo(mod).bits;
- return switch (val.ip_index) {
- .bool_false => ty_bits,
- .bool_true => ty_bits - 1,
- .none => switch (val.tag()) {
- .the_only_possible_value => {
- assert(ty_bits == 0);
- return ty_bits;
- },
-
- .lazy_align, .lazy_size => {
- var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
- return bigint.clz(ty_bits);
- },
-
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| switch (int.storage) {
- .big_int => |big_int| big_int.clz(ty_bits),
- .u64 => |x| @clz(x) + ty_bits - 64,
- .i64 => @panic("TODO implement i64 Value clz"),
- },
- else => unreachable,
- },
- };
+ var bigint_buf: BigIntSpace = undefined;
+ const bigint = val.toBigInt(&bigint_buf, mod);
+ return bigint.clz(ty.intInfo(mod).bits);
}
- pub fn ctz(val: Value, ty: Type, mod: *Module) u64 {
- const ty_bits = ty.intInfo(mod).bits;
- return switch (val.ip_index) {
- .bool_false => ty_bits,
- .bool_true => 0,
- .none => switch (val.tag()) {
- .the_only_possible_value => {
- assert(ty_bits == 0);
- return ty_bits;
- },
-
- .lazy_align, .lazy_size => {
- var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
- return bigint.ctz();
- },
-
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| switch (int.storage) {
- .big_int => |big_int| big_int.ctz(),
- .u64 => |x| {
- const big = @ctz(x);
- return if (big == 64) ty_bits else big;
- },
- .i64 => @panic("TODO implement i64 Value ctz"),
- },
- else => unreachable,
- },
- };
+ pub fn ctz(val: Value, _: Type, mod: *Module) u64 {
+ var bigint_buf: BigIntSpace = undefined;
+ const bigint = val.toBigInt(&bigint_buf, mod);
+ return bigint.ctz();
}
pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
- assert(!val.isUndef(mod));
- switch (val.ip_index) {
- .bool_false => return 0,
- .bool_true => return 1,
- .none => unreachable,
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| {
- const info = ty.intInfo(mod);
- var buffer: Value.BigIntSpace = undefined;
- const big_int = int.storage.toBigInt(&buffer);
- return @intCast(u64, big_int.popCount(info.bits));
- },
- else => unreachable,
- },
- }
+ var bigint_buf: BigIntSpace = undefined;
+ const bigint = val.toBigInt(&bigint_buf, mod);
+ return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits));
}
pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
- assert(!val.isUndef(mod));
-
const info = ty.intInfo(mod);
var buffer: Value.BigIntSpace = undefined;
@@ -1520,8 +1088,6 @@ pub const Value = struct {
}
pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
- assert(!val.isUndef(mod));
-
const info = ty.intInfo(mod);
// Bit count must be evenly divisible by 8
@@ -1543,41 +1109,9 @@ pub const Value = struct {
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to represent stored in twos complement form.
pub fn intBitCountTwosComp(self: Value, mod: *Module) usize {
- const target = mod.getTarget();
- return switch (self.ip_index) {
- .bool_false => 0,
- .bool_true => 1,
- .none => switch (self.tag()) {
- .the_only_possible_value => 0,
-
- .decl_ref_mut,
- .comptime_field_ptr,
- .extern_fn,
- .decl_ref,
- .function,
- .variable,
- .eu_payload_ptr,
- .opt_payload_ptr,
- => target.ptrBitWidth(),
-
- else => {
- var buffer: BigIntSpace = undefined;
- return self.toBigInt(&buffer, mod).bitCountTwosComp();
- },
- },
- else => switch (mod.intern_pool.indexToKey(self.ip_index)) {
- .int => |int| switch (int.storage) {
- .big_int => |big_int| big_int.bitCountTwosComp(),
- .u64 => |x| if (x == 0) 0 else @intCast(usize, std.math.log2(x) + 1),
- .i64 => {
- var buffer: Value.BigIntSpace = undefined;
- const big_int = int.storage.toBigInt(&buffer);
- return big_int.bitCountTwosComp();
- },
- },
- else => unreachable,
- },
- };
+ var buffer: BigIntSpace = undefined;
+ const big_int = self.toBigInt(&buffer, mod);
+ return big_int.bitCountTwosComp();
}
/// Converts an integer or a float to a float. May result in a loss of information.
@@ -1616,84 +1150,39 @@ pub const Value = struct {
mod: *Module,
opt_sema: ?*Sema,
) Module.CompileError!std.math.Order {
- switch (lhs.ip_index) {
- .bool_false => return .eq,
- .bool_true => return .gt,
- .none => return switch (lhs.tag()) {
- .the_only_possible_value => .eq,
-
- .decl_ref,
- .decl_ref_mut,
- .comptime_field_ptr,
- .extern_fn,
- .function,
- .variable,
- => .gt,
-
- .runtime_value => {
- // This is needed to correctly handle hashing the value.
- // Checks in Sema should prevent direct comparisons from reaching here.
- const val = lhs.castTag(.runtime_value).?.data;
- return val.orderAgainstZeroAdvanced(mod, opt_sema);
- },
-
- .lazy_align => {
- const ty = lhs.castTag(.lazy_align).?.data;
- const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
- if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => unreachable,
- else => |e| return e,
- }) {
- return .gt;
- } else {
- return .eq;
- }
- },
- .lazy_size => {
- const ty = lhs.castTag(.lazy_size).?.data;
- const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
- if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => unreachable,
- else => |e| return e,
- }) {
- return .gt;
- } else {
- return .eq;
- }
- },
-
- .elem_ptr => {
- const elem_ptr = lhs.castTag(.elem_ptr).?.data;
- switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) {
+ return switch (lhs.ip_index) {
+ .bool_false => .eq,
+ .bool_true => .gt,
+ else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl, .mut_decl, .comptime_field => .gt,
+ .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema),
+ .elem => |elem| switch (try elem.base.toValue().orderAgainstZeroAdvanced(mod, opt_sema)) {
.lt => unreachable,
- .gt => return .gt,
- .eq => {
- if (elem_ptr.index == 0) {
- return .eq;
- } else {
- return .gt;
- }
- },
- }
+ .gt => .gt,
+ .eq => if (elem.index == 0) .eq else .gt,
+ },
+ else => unreachable,
},
-
- else => unreachable,
- },
- else => return switch (mod.intern_pool.indexToKey(lhs.ip_index)) {
.int => |int| switch (int.storage) {
.big_int => |big_int| big_int.orderAgainstScalar(0),
inline .u64, .i64 => |x| std.math.order(x, 0),
+ .lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
+ mod,
+ false,
+ if (opt_sema) |sema| .{ .sema = sema } else .eager,
+ ) catch |err| switch (err) {
+ error.NeedLazy => unreachable,
+ else => |e| return e,
+ }) .gt else .eq,
},
- .enum_tag => |enum_tag| switch (mod.intern_pool.indexToKey(enum_tag.int).int.storage) {
- .big_int => |big_int| big_int.orderAgainstScalar(0),
- inline .u64, .i64 => |x| std.math.order(x, 0),
- },
+ .enum_tag => |enum_tag| enum_tag.int.toValue().orderAgainstZeroAdvanced(mod, opt_sema),
.float => |float| switch (float.storage) {
inline else => |x| std.math.order(x, 0),
},
else => unreachable,
},
- }
+ };
}
/// Asserts the value is comparable.
@@ -1760,8 +1249,8 @@ pub const Value = struct {
mod: *Module,
opt_sema: ?*Sema,
) !bool {
- if (lhs.pointerDecl()) |lhs_decl| {
- if (rhs.pointerDecl()) |rhs_decl| {
+ if (lhs.pointerDecl(mod)) |lhs_decl| {
+ if (rhs.pointerDecl(mod)) |rhs_decl| {
switch (op) {
.eq => return lhs_decl == rhs_decl,
.neq => return lhs_decl != rhs_decl,
@@ -1774,7 +1263,7 @@ pub const Value = struct {
else => {},
}
}
- } else if (rhs.pointerDecl()) |_| {
+ } else if (rhs.pointerDecl(mod)) |_| {
switch (op) {
.eq => return false,
.neq => return true,
@@ -1849,7 +1338,6 @@ pub const Value = struct {
switch (lhs.ip_index) {
.none => switch (lhs.tag()) {
- .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
.aggregate => {
for (lhs.castTag(.aggregate).?.data) |elem_val| {
if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false;
@@ -1877,6 +1365,15 @@ pub const Value = struct {
.float => |float| switch (float.storage) {
inline else => |x| if (std.math.isNan(x)) return op == .neq,
},
+ .aggregate => |aggregate| return switch (aggregate.storage) {
+ .bytes => |bytes| for (bytes) |byte| {
+ if (!std.math.order(byte, 0).compare(op)) break false;
+ } else true,
+ .elems => |elems| for (elems) |elem| {
+ if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false;
+ } else true,
+ .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
+ },
else => {},
},
}
@@ -1910,69 +1407,6 @@ pub const Value = struct {
const a_tag = a.tag();
const b_tag = b.tag();
if (a_tag == b_tag) switch (a_tag) {
- .the_only_possible_value => return true,
- .enum_literal => {
- const a_name = a.castTag(.enum_literal).?.data;
- const b_name = b.castTag(.enum_literal).?.data;
- return std.mem.eql(u8, a_name, b_name);
- },
- .opt_payload => {
- const a_payload = a.castTag(.opt_payload).?.data;
- const b_payload = b.castTag(.opt_payload).?.data;
- const payload_ty = ty.optionalChild(mod);
- return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema);
- },
- .slice => {
- const a_payload = a.castTag(.slice).?.data;
- const b_payload = b.castTag(.slice).?.data;
- if (!(try eqlAdvanced(a_payload.len, Type.usize, b_payload.len, Type.usize, mod, opt_sema))) {
- return false;
- }
-
- const ptr_ty = ty.slicePtrFieldType(mod);
-
- return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema);
- },
- .elem_ptr => {
- const a_payload = a.castTag(.elem_ptr).?.data;
- const b_payload = b.castTag(.elem_ptr).?.data;
- if (a_payload.index != b_payload.index) return false;
-
- return eqlAdvanced(a_payload.array_ptr, ty, b_payload.array_ptr, ty, mod, opt_sema);
- },
- .field_ptr => {
- const a_payload = a.castTag(.field_ptr).?.data;
- const b_payload = b.castTag(.field_ptr).?.data;
- if (a_payload.field_index != b_payload.field_index) return false;
-
- return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema);
- },
- .@"error" => {
- const a_name = a.castTag(.@"error").?.data.name;
- const b_name = b.castTag(.@"error").?.data.name;
- return std.mem.eql(u8, a_name, b_name);
- },
- .eu_payload => {
- const a_payload = a.castTag(.eu_payload).?.data;
- const b_payload = b.castTag(.eu_payload).?.data;
- const payload_ty = ty.errorUnionPayload(mod);
- return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema);
- },
- .eu_payload_ptr => {
- const a_payload = a.castTag(.eu_payload_ptr).?.data;
- const b_payload = b.castTag(.eu_payload_ptr).?.data;
- return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema);
- },
- .opt_payload_ptr => {
- const a_payload = a.castTag(.opt_payload_ptr).?.data;
- const b_payload = b.castTag(.opt_payload_ptr).?.data;
- return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema);
- },
- .function => {
- const a_payload = a.castTag(.function).?.data;
- const b_payload = b.castTag(.function).?.data;
- return a_payload == b_payload;
- },
.aggregate => {
const a_field_vals = a.castTag(.aggregate).?.data;
const b_field_vals = b.castTag(.aggregate).?.data;
@@ -2035,17 +1469,15 @@ pub const Value = struct {
return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, opt_sema);
},
else => {},
- } else if (b_tag == .@"error") {
- return false;
- }
+ };
- if (a.pointerDecl()) |a_decl| {
- if (b.pointerDecl()) |b_decl| {
+ if (a.pointerDecl(mod)) |a_decl| {
+ if (b.pointerDecl(mod)) |b_decl| {
return a_decl == b_decl;
} else {
return false;
}
- } else if (b.pointerDecl()) |_| {
+ } else if (b.pointerDecl(mod)) |_| {
return false;
}
@@ -2130,25 +1562,11 @@ pub const Value = struct {
if (a_nan) return true;
return a_float == b_float;
},
- .Optional => if (b_tag == .opt_payload) {
- var sub_pl: Payload.SubValue = .{
- .base = .{ .tag = b.tag() },
- .data = a,
- };
- const sub_val = Value.initPayload(&sub_pl.base);
- return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema);
- },
- .ErrorUnion => if (a_tag != .@"error" and b_tag == .eu_payload) {
- var sub_pl: Payload.SubValue = .{
- .base = .{ .tag = b.tag() },
- .data = a,
- };
- const sub_val = Value.initPayload(&sub_pl.base);
- return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema);
- },
+ .Optional,
+ .ErrorUnion,
+ => unreachable, // handled by InternPool
else => {},
}
- if (a_tag == .@"error") return false;
return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq);
}
@@ -2166,7 +1584,7 @@ pub const Value = struct {
std.hash.autoHash(hasher, zig_ty_tag);
if (val.isUndef(mod)) return;
// The value is runtime-known and shouldn't affect the hash.
- if (val.isRuntimeValue()) return;
+ if (val.isRuntimeValue(mod)) return;
switch (zig_ty_tag) {
.Opaque => unreachable, // Cannot hash opaque types
@@ -2177,38 +1595,20 @@ pub const Value = struct {
.Null,
=> {},
- .Type => unreachable, // handled via ip_index check above
- .Float => {
- // For hash/eql purposes, we treat floats as their IEEE integer representation.
- switch (ty.floatBits(mod.getTarget())) {
- 16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16, mod))),
- 32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32, mod))),
- 64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64, mod))),
- 80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80, mod))),
- 128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
- else => unreachable,
- }
- },
- .ComptimeFloat => {
- const float = val.toFloat(f128, mod);
- const is_nan = std.math.isNan(float);
- std.hash.autoHash(hasher, is_nan);
- if (!is_nan) {
- std.hash.autoHash(hasher, @bitCast(u128, float));
- } else {
- std.hash.autoHash(hasher, std.math.signbit(float));
- }
- },
- .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) {
- .slice => {
- const slice = val.castTag(.slice).?.data;
- const ptr_ty = ty.slicePtrFieldType(mod);
- hash(slice.ptr, ptr_ty, hasher, mod);
- hash(slice.len, Type.usize, hasher, mod);
- },
-
- else => return hashPtr(val, hasher, mod),
- },
+ .Type,
+ .Float,
+ .ComptimeFloat,
+ .Bool,
+ .Int,
+ .ComptimeInt,
+ .Pointer,
+ .Optional,
+ .ErrorUnion,
+ .ErrorSet,
+ .Enum,
+ .EnumLiteral,
+ .Fn,
+ => unreachable, // handled via ip_index check above
.Array, .Vector => {
const len = ty.arrayLen(mod);
const elem_ty = ty.childType(mod);
@@ -2233,42 +1633,6 @@ pub const Value = struct {
else => unreachable,
}
},
- .Optional => {
- if (val.castTag(.opt_payload)) |payload| {
- std.hash.autoHash(hasher, true); // non-null
- const sub_val = payload.data;
- const sub_ty = ty.optionalChild(mod);
- sub_val.hash(sub_ty, hasher, mod);
- } else {
- std.hash.autoHash(hasher, false); // null
- }
- },
- .ErrorUnion => {
- if (val.tag() == .@"error") {
- std.hash.autoHash(hasher, false); // error
- const sub_ty = ty.errorUnionSet(mod);
- val.hash(sub_ty, hasher, mod);
- return;
- }
-
- if (val.castTag(.eu_payload)) |payload| {
- std.hash.autoHash(hasher, true); // payload
- const sub_ty = ty.errorUnionPayload(mod);
- payload.data.hash(sub_ty, hasher, mod);
- return;
- } else unreachable;
- },
- .ErrorSet => {
- // just hash the literal error value. this is the most stable
- // thing between compiler invocations. we can't use the error
- // int cause (1) its not stable and (2) we don't have access to mod.
- hasher.update(val.getError().?);
- },
- .Enum => {
- // This panic will go away when enum values move to be stored in the intern pool.
- const int_val = val.enumToInt(ty, mod) catch @panic("OOM");
- hashInt(int_val, hasher, mod);
- },
.Union => {
const union_obj = val.cast(Payload.Union).?.data;
if (ty.unionTagType(mod)) |tag_ty| {
@@ -2277,27 +1641,12 @@ pub const Value = struct {
const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
union_obj.val.hash(active_field_ty, hasher, mod);
},
- .Fn => {
- // Note that this hashes the *Fn/*ExternFn rather than the *Decl.
- // This is to differentiate function bodies from function pointers.
- // This is currently redundant since we already hash the zig type tag
- // at the top of this function.
- if (val.castTag(.function)) |func| {
- std.hash.autoHash(hasher, func.data);
- } else if (val.castTag(.extern_fn)) |func| {
- std.hash.autoHash(hasher, func.data);
- } else unreachable;
- },
.Frame => {
@panic("TODO implement hashing frame values");
},
.AnyFrame => {
@panic("TODO implement hashing anyframe values");
},
- .EnumLiteral => {
- const bytes = val.castTag(.enum_literal).?.data;
- hasher.update(bytes);
- },
}
}
@@ -2308,7 +1657,7 @@ pub const Value = struct {
pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
if (val.isUndef(mod)) return;
// The value is runtime-known and shouldn't affect the hash.
- if (val.isRuntimeValue()) return;
+ if (val.isRuntimeValue(mod)) return;
if (val.ip_index != .none) {
// The InternPool data structure hashes based on Key to make interned objects
@@ -2326,16 +1675,20 @@ pub const Value = struct {
.Null,
.Struct, // It sure would be nice to do something clever with structs.
=> |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
- .Type => unreachable, // handled above with the ip_index check
- .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
- .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
- .slice => {
- const slice = val.castTag(.slice).?.data;
- const ptr_ty = ty.slicePtrFieldType(mod);
- slice.ptr.hashUncoerced(ptr_ty, hasher, mod);
- },
- else => val.hashPtr(hasher, mod),
- },
+ .Type,
+ .Float,
+ .ComptimeFloat,
+ .Bool,
+ .Int,
+ .ComptimeInt,
+ .Pointer,
+ .Fn,
+ .Optional,
+ .ErrorSet,
+ .ErrorUnion,
+ .Enum,
+ .EnumLiteral,
+ => unreachable, // handled above with the ip_index check
.Array, .Vector => {
const len = ty.arrayLen(mod);
const elem_ty = ty.childType(mod);
@@ -2348,21 +1701,16 @@ pub const Value = struct {
elem_val.hashUncoerced(elem_ty, hasher, mod);
}
},
- .Optional => if (val.castTag(.opt_payload)) |payload| {
- const child_ty = ty.optionalChild(mod);
- payload.data.hashUncoerced(child_ty, hasher, mod);
- } else std.hash.autoHash(hasher, std.builtin.TypeId.Null),
- .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else {
- const pl_ty = ty.errorUnionPayload(mod);
- val.castTag(.eu_payload).?.data.hashUncoerced(pl_ty, hasher, mod);
- },
- .Enum, .EnumLiteral, .Union => {
- hasher.update(val.tagName(ty, mod));
- if (val.cast(Payload.Union)) |union_obj| {
- const active_field_ty = ty.unionFieldType(union_obj.data.tag, mod);
- union_obj.data.val.hashUncoerced(active_field_ty, hasher, mod);
- } else std.hash.autoHash(hasher, std.builtin.TypeId.Void);
- },
+ .Union => {
+ hasher.update(val.tagName(mod));
+ switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .un => |un| {
+ const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod);
+ un.val.toValue().hashUncoerced(active_field_ty, hasher, mod);
+ },
+ else => std.hash.autoHash(hasher, std.builtin.TypeId.Void),
+ }
+ },
.Frame => @panic("TODO implement hashing frame values"),
.AnyFrame => @panic("TODO implement hashing anyframe values"),
}
@@ -2397,57 +1745,53 @@ pub const Value = struct {
}
};
- pub fn isComptimeMutablePtr(val: Value) bool {
- return switch (val.ip_index) {
- .none => switch (val.tag()) {
- .decl_ref_mut, .comptime_field_ptr => true,
- .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr),
- .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr),
- .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr),
- .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr),
- .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr),
-
+ pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool {
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .mut_decl, .comptime_field => true,
+ .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod),
+ .elem, .field => |base_index| base_index.base.toValue().isComptimeMutablePtr(mod),
else => false,
},
else => false,
};
}
- pub fn canMutateComptimeVarState(val: Value) bool {
- if (val.isComptimeMutablePtr()) return true;
- return switch (val.ip_index) {
- .none => switch (val.tag()) {
- .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(),
- .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(),
- .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(),
- .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(),
- .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(),
- .aggregate => {
- const fields = val.castTag(.aggregate).?.data;
- for (fields) |field| {
- if (field.canMutateComptimeVarState()) return true;
- }
- return false;
+ pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool {
+ return val.isComptimeMutablePtr(mod) or switch (val.ip_index) {
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .error_union => |error_union| switch (error_union.val) {
+ .err_name => false,
+ .payload => |payload| payload.toValue().canMutateComptimeVarState(mod),
},
- .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(),
- .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(),
- else => return false,
+ .ptr => |ptr| switch (ptr.addr) {
+ .eu_payload, .opt_payload => |base| base.toValue().canMutateComptimeVarState(mod),
+ else => false,
+ },
+ .opt => |opt| switch (opt.val) {
+ .none => false,
+ else => opt.val.toValue().canMutateComptimeVarState(mod),
+ },
+ .aggregate => |aggregate| for (aggregate.storage.values()) |elem| {
+ if (elem.toValue().canMutateComptimeVarState(mod)) break true;
+ } else false,
+ .un => |un| un.val.toValue().canMutateComptimeVarState(mod),
+ else => false,
},
- else => return false,
};
}
/// Gets the decl referenced by this pointer. If the pointer does not point
/// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr),
/// this function returns null.
- pub fn pointerDecl(val: Value) ?Module.Decl.Index {
- return switch (val.ip_index) {
- .none => switch (val.tag()) {
- .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index,
- .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
- .function => val.castTag(.function).?.data.owner_decl,
- .variable => val.castTag(.variable).?.data.owner_decl,
- .decl_ref => val.cast(Payload.Decl).?.data,
+ pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index {
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .variable => |variable| variable.decl,
+ .extern_func => |extern_func| extern_func.decl,
+ .func => |func| mod.funcPtr(func.index).owner_decl,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| decl,
+ .mut_decl => |mut_decl| mut_decl.decl,
else => null,
},
else => null,
@@ -2463,95 +1807,15 @@ pub const Value = struct {
}
}
- fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void {
- switch (ptr_val.tag()) {
- .decl_ref,
- .decl_ref_mut,
- .extern_fn,
- .function,
- .variable,
- => {
- const decl: Module.Decl.Index = ptr_val.pointerDecl().?;
- std.hash.autoHash(hasher, decl);
- },
- .comptime_field_ptr => {
- std.hash.autoHash(hasher, Value.Tag.comptime_field_ptr);
- },
-
- .elem_ptr => {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- hashPtr(elem_ptr.array_ptr, hasher, mod);
- std.hash.autoHash(hasher, Value.Tag.elem_ptr);
- std.hash.autoHash(hasher, elem_ptr.index);
- },
- .field_ptr => {
- const field_ptr = ptr_val.castTag(.field_ptr).?.data;
- std.hash.autoHash(hasher, Value.Tag.field_ptr);
- hashPtr(field_ptr.container_ptr, hasher, mod);
- std.hash.autoHash(hasher, field_ptr.field_index);
- },
- .eu_payload_ptr => {
- const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
- std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr);
- hashPtr(err_union_ptr.container_ptr, hasher, mod);
- },
- .opt_payload_ptr => {
- const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
- std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr);
- hashPtr(opt_ptr.container_ptr, hasher, mod);
- },
-
- .the_only_possible_value,
- .lazy_align,
- .lazy_size,
- => return hashInt(ptr_val, hasher, mod),
-
- else => unreachable,
- }
- }
+ pub const slice_ptr_index = 0;
+ pub const slice_len_index = 1;
pub fn slicePtr(val: Value, mod: *Module) Value {
- if (val.ip_index != .none) return mod.intern_pool.slicePtr(val.ip_index).toValue();
- return switch (val.tag()) {
- .slice => val.castTag(.slice).?.data.ptr,
- // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc.
- .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr, .comptime_field_ptr => val,
- else => unreachable,
- };
+ return mod.intern_pool.slicePtr(val.ip_index).toValue();
}
pub fn sliceLen(val: Value, mod: *Module) u64 {
- if (val.ip_index != .none) return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod);
- return switch (val.tag()) {
- .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod),
- .decl_ref => {
- const decl_index = val.castTag(.decl_ref).?.data;
- const decl = mod.declPtr(decl_index);
- if (decl.ty.zigTypeTag(mod) == .Array) {
- return decl.ty.arrayLen(mod);
- } else {
- return 1;
- }
- },
- .decl_ref_mut => {
- const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
- const decl = mod.declPtr(decl_index);
- if (decl.ty.zigTypeTag(mod) == .Array) {
- return decl.ty.arrayLen(mod);
- } else {
- return 1;
- }
- },
- .comptime_field_ptr => {
- const payload = val.castTag(.comptime_field_ptr).?.data;
- if (payload.field_ty.zigTypeTag(mod) == .Array) {
- return payload.field_ty.arrayLen(mod);
- } else {
- return 1;
- }
- },
- else => unreachable,
- };
+ return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod);
}
/// Asserts the value is a single-item pointer to an array, or an array,
@@ -2560,14 +1824,6 @@ pub const Value = struct {
switch (val.ip_index) {
.undef => return Value.undef,
.none => switch (val.tag()) {
- // This is the case of accessing an element of an undef array.
- .empty_array => unreachable, // out of bounds array index
-
- .empty_array_sentinel => {
- assert(index == 0); // The only valid index for an empty array with sentinel.
- return val.castTag(.empty_array_sentinel).?.data;
- },
-
.bytes => {
const byte = val.castTag(.bytes).?.data[index];
return mod.intValue(Type.u8, byte);
@@ -2579,128 +1835,101 @@ pub const Value = struct {
return mod.intValue(Type.u8, byte);
},
- // No matter the index; all the elements are the same!
- .repeated => return val.castTag(.repeated).?.data,
-
.aggregate => return val.castTag(.aggregate).?.data[index],
- .slice => return val.castTag(.slice).?.data.ptr.elemValue(mod, index),
-
- .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValue(mod, index),
- .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValue(mod, index),
- .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValue(mod, index),
- .elem_ptr => {
- const data = val.castTag(.elem_ptr).?.data;
- return data.array_ptr.elemValue(mod, index + data.index);
- },
- .field_ptr => {
- const data = val.castTag(.field_ptr).?.data;
- if (data.container_ptr.pointerDecl()) |decl_index| {
- const container_decl = mod.declPtr(decl_index);
- const field_type = data.container_ty.structFieldType(data.field_index, mod);
- const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index);
- return field_val.elemValue(mod, index);
- } else unreachable;
- },
-
- // The child type of arrays which have only one possible value need
- // to have only one possible value itself.
- .the_only_possible_value => return val,
-
- .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValue(mod, index),
- .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValue(mod, index),
-
- .opt_payload => return val.castTag(.opt_payload).?.data.elemValue(mod, index),
- .eu_payload => return val.castTag(.eu_payload).?.data.elemValue(mod, index),
else => unreachable,
},
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.ptr => |ptr| switch (ptr.addr) {
- .@"var" => unreachable,
.decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
.mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index),
.int, .eu_payload, .opt_payload => unreachable,
.comptime_field => |field_val| field_val.toValue().elemValue(mod, index),
.elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index),
- .field => unreachable,
- },
- .aggregate => |aggregate| switch (aggregate.storage) {
- .elems => |elems| elems[index].toValue(),
- .repeated_elem => |elem| elem.toValue(),
+ .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| {
+ const base_decl = mod.declPtr(decl_index);
+ const field_val = try base_decl.val.fieldValue(mod, field.index);
+ return field_val.elemValue(mod, index);
+ } else unreachable,
+ },
+ .aggregate => |aggregate| {
+ const len = mod.intern_pool.aggregateTypeLen(aggregate.ty);
+ if (index < len) return switch (aggregate.storage) {
+ .bytes => |bytes| try mod.intern(.{ .int = .{
+ .ty = .u8_type,
+ .storage = .{ .u64 = bytes[index] },
+ } }),
+ .elems => |elems| elems[index],
+ .repeated_elem => |elem| elem,
+ }.toValue();
+ assert(index == len);
+ return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue();
},
else => unreachable,
},
}
}
- pub fn isLazyAlign(val: Value) bool {
- return val.ip_index == .none and val.tag() == .lazy_align;
- }
-
- pub fn isLazySize(val: Value) bool {
- return val.ip_index == .none and val.tag() == .lazy_size;
+ pub fn isLazyAlign(val: Value, mod: *Module) bool {
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int => |int| int.storage == .lazy_align,
+ else => false,
+ };
}
- pub fn isRuntimeValue(val: Value) bool {
- return val.ip_index == .none and val.tag() == .runtime_value;
+ pub fn isLazySize(val: Value, mod: *Module) bool {
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .int => |int| int.storage == .lazy_size,
+ else => false,
+ };
}
- pub fn tagIsVariable(val: Value) bool {
- return val.ip_index == .none and val.tag() == .variable;
+ pub fn isRuntimeValue(val: Value, mod: *Module) bool {
+ return mod.intern_pool.indexToKey(val.ip_index) == .runtime_value;
}
/// Returns true if a Value is backed by a variable
pub fn isVariable(val: Value, mod: *Module) bool {
- return switch (val.ip_index) {
- .none => switch (val.tag()) {
- .slice => val.castTag(.slice).?.data.ptr.isVariable(mod),
- .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod),
- .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod),
- .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod),
- .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod),
- .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod),
- .decl_ref => {
- const decl = mod.declPtr(val.castTag(.decl_ref).?.data);
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .variable => true,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl_index| {
+ const decl = mod.declPtr(decl_index);
assert(decl.has_tv);
return decl.val.isVariable(mod);
},
- .decl_ref_mut => {
- const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index);
+ .mut_decl => |mut_decl| {
+ const decl = mod.declPtr(mut_decl.decl);
assert(decl.has_tv);
return decl.val.isVariable(mod);
},
-
- .variable => true,
- else => false,
+ .int => false,
+ .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isVariable(mod),
+ .comptime_field => |comptime_field| comptime_field.toValue().isVariable(mod),
+ .elem, .field => |base_index| base_index.base.toValue().isVariable(mod),
},
else => false,
};
}
pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool {
- return switch (val.ip_index) {
- .none => switch (val.tag()) {
- .variable => false,
- else => val.isPtrToThreadLocalInner(mod),
- },
- else => val.isPtrToThreadLocalInner(mod),
- };
- }
-
- fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool {
- return switch (val.ip_index) {
- .none => switch (val.tag()) {
- .slice => val.castTag(.slice).?.data.ptr.isPtrToThreadLocalInner(mod),
- .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isPtrToThreadLocalInner(mod),
- .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isPtrToThreadLocalInner(mod),
- .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod),
- .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod),
- .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod),
- .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isPtrToThreadLocalInner(mod),
- .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isPtrToThreadLocalInner(mod),
-
- .variable => val.castTag(.variable).?.data.is_threadlocal,
- else => false,
+ return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .variable => |variable| variable.is_threadlocal,
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl_index| {
+ const decl = mod.declPtr(decl_index);
+ assert(decl.has_tv);
+ return decl.val.isPtrToThreadLocal(mod);
+ },
+ .mut_decl => |mut_decl| {
+ const decl = mod.declPtr(mut_decl.decl);
+ assert(decl.has_tv);
+ return decl.val.isPtrToThreadLocal(mod);
+ },
+ .int => false,
+ .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod),
+ .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod),
+ .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod),
},
else => false,
};
@@ -2714,39 +1943,42 @@ pub const Value = struct {
start: usize,
end: usize,
) error{OutOfMemory}!Value {
- return switch (val.tag()) {
- .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array),
- .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]),
- .str_lit => {
- const str_lit = val.castTag(.str_lit).?.data;
- return Tag.str_lit.create(arena, .{
- .index = @intCast(u32, str_lit.index + start),
- .len = @intCast(u32, end - start),
- });
+ return switch (val.ip_index) {
+ .none => switch (val.tag()) {
+ .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]),
+ .str_lit => {
+ const str_lit = val.castTag(.str_lit).?.data;
+ return Tag.str_lit.create(arena, .{
+ .index = @intCast(u32, str_lit.index + start),
+ .len = @intCast(u32, end - start),
+ });
+ },
+ else => unreachable,
},
- .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]),
- .slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end),
-
- .decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end),
- .decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end),
- .comptime_field_ptr => sliceArray(val.castTag(.comptime_field_ptr).?.data.field_val, mod, arena, start, end),
- .elem_ptr => blk: {
- const elem_ptr = val.castTag(.elem_ptr).?.data;
- break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index);
+ else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .ptr => |ptr| switch (ptr.addr) {
+ .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end),
+ .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end),
+ .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end),
+ .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index),
+ else => unreachable,
+ },
+ .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{
+ .ty = mod.intern_pool.typeOf(val.ip_index),
+ .storage = switch (aggregate.storage) {
+ .bytes => |bytes| .{ .bytes = bytes[start..end] },
+ .elems => |elems| .{ .elems = elems[start..end] },
+ .repeated_elem => |elem| .{ .repeated_elem = elem },
+ },
+ } })).toValue(),
+ else => unreachable,
},
-
- .repeated,
- .the_only_possible_value,
- => val,
-
- else => unreachable,
};
}
- pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value {
+ pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value {
switch (val.ip_index) {
.undef => return Value.undef,
-
.none => switch (val.tag()) {
.aggregate => {
const field_values = val.castTag(.aggregate).?.data;
@@ -2757,13 +1989,14 @@ pub const Value = struct {
// TODO assert the tag is correct
return payload.val;
},
-
- .the_only_possible_value => return (try ty.onePossibleValue(mod)).?,
-
else => unreachable,
},
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.aggregate => |aggregate| switch (aggregate.storage) {
+ .bytes => |bytes| try mod.intern(.{ .int = .{
+ .ty = .u8_type,
+ .storage = .{ .u64 = bytes[index] },
+ } }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
}.toValue(),
@@ -2785,40 +2018,37 @@ pub const Value = struct {
pub fn elemPtr(
val: Value,
ty: Type,
- arena: Allocator,
index: usize,
mod: *Module,
) Allocator.Error!Value {
const elem_ty = ty.elemType2(mod);
- const ptr_val = switch (val.ip_index) {
- .none => switch (val.tag()) {
- .slice => val.castTag(.slice).?.data.ptr,
- else => val,
- },
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .ptr => |ptr| switch (ptr.len) {
+ const ptr_val = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+ .ptr => |ptr| ptr: {
+ switch (ptr.addr) {
+ .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod))
+ return (try mod.intern(.{ .ptr = .{
+ .ty = ty.ip_index,
+ .addr = .{ .elem = .{
+ .base = elem.base,
+ .index = elem.index + index,
+ } },
+ } })).toValue(),
+ else => {},
+ }
+ break :ptr switch (ptr.len) {
.none => val,
else => val.slicePtr(mod),
- },
- else => val,
+ };
},
+ else => val,
};
-
- if (ptr_val.ip_index == .none and ptr_val.tag() == .elem_ptr) {
- const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- if (elem_ptr.elem_ty.eql(elem_ty, mod)) {
- return Tag.elem_ptr.create(arena, .{
- .array_ptr = elem_ptr.array_ptr,
- .elem_ty = elem_ptr.elem_ty,
- .index = elem_ptr.index + index,
- });
- }
- }
- return Tag.elem_ptr.create(arena, .{
- .array_ptr = ptr_val,
- .elem_ty = elem_ty,
- .index = index,
- });
+ return (try mod.intern(.{ .ptr = .{
+ .ty = ty.ip_index,
+ .addr = .{ .elem = .{
+ .base = ptr_val.ip_index,
+ .index = index,
+ } },
+ } })).toValue();
}
pub fn isUndef(val: Value, mod: *Module) bool {
@@ -2840,69 +2070,44 @@ pub const Value = struct {
/// Returns true if any value contained in `self` is undefined.
pub fn anyUndef(val: Value, mod: *Module) !bool {
if (val.ip_index == .none) return false;
- switch (val.ip_index) {
- .undef => return true,
+ return switch (val.ip_index) {
+ .undef => true,
.none => switch (val.tag()) {
- .slice => {
- const payload = val.castTag(.slice).?;
- const len = payload.data.len.toUnsignedInt(mod);
-
- for (0..len) |i| {
- const elem_val = try payload.data.ptr.elemValue(mod, i);
- if (try elem_val.anyUndef(mod)) return true;
- }
- },
-
- .aggregate => {
- const payload = val.castTag(.aggregate).?;
- for (payload.data) |field| {
- if (try field.anyUndef(mod)) return true;
- }
- },
- else => {},
+ .aggregate => for (val.castTag(.aggregate).?.data) |field| {
+ if (try field.anyUndef(mod)) break true;
+ } else false,
+ else => false,
},
else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .undef => return true,
- .simple_value => |v| if (v == .undefined) return true,
- .aggregate => |aggregate| switch (aggregate.storage) {
- .elems => |elems| for (elems) |elem| {
- if (try anyUndef(elem.toValue(), mod)) return true;
- },
- .repeated_elem => |elem| if (try anyUndef(elem.toValue(), mod)) return true,
- },
- else => {},
+ .undef => true,
+ .simple_value => |v| v == .undefined,
+ .ptr => |ptr| switch (ptr.len) {
+ .none => false,
+ else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| {
+ if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true;
+ } else false,
+ },
+ .aggregate => |aggregate| for (aggregate.storage.values()) |elem| {
+ if (try anyUndef(elem.toValue(), mod)) break true;
+ } else false,
+ else => false,
},
- }
-
- return false;
+ };
}
/// Asserts the value is not undefined and not unreachable.
/// Integer value 0 is considered null because of C pointers.
- pub fn isNull(val: Value, mod: *const Module) bool {
+ pub fn isNull(val: Value, mod: *Module) bool {
return switch (val.ip_index) {
.undef => unreachable,
.unreachable_value => unreachable,
.null_value => true,
- .none => switch (val.tag()) {
- .opt_payload => false,
-
- // If it's not one of those two tags then it must be a C pointer value,
- // in which case the value 0 is null and other values are non-null.
-
- .the_only_possible_value => true,
-
- .inferred_alloc => unreachable,
- .inferred_alloc_comptime => unreachable,
-
- else => false,
- },
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .int => |int| switch (int.storage) {
- .big_int => |big_int| big_int.eqZero(),
- inline .u64, .i64 => |x| x == 0,
+ .int => {
+ var buf: BigIntSpace = undefined;
+ return val.toBigInt(&buf, mod).eqZero();
},
.opt => |opt| opt.val == .none,
else => false,
@@ -2914,53 +2119,28 @@ pub const Value = struct {
/// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether
/// something is an error or not because it works without having to figure out the
/// string.
- pub fn getError(self: Value) ?[]const u8 {
- return switch (self.ip_index) {
- .undef => unreachable,
- .unreachable_value => unreachable,
- .none => switch (self.tag()) {
- .@"error" => self.castTag(.@"error").?.data.name,
- .eu_payload => null,
-
- .inferred_alloc => unreachable,
- .inferred_alloc_comptime => unreachable,
- else => unreachable,
+ pub fn getError(self: Value, mod: *const Module) ?[]const u8 {
+ return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.ip_index)) {
+ .err => |err| err.name.toOptional(),
+ .error_union => |error_union| switch (error_union.val) {
+ .err_name => |err_name| err_name.toOptional(),
+ .payload => .none,
},
else => unreachable,
- };
+ });
}
/// Assumes the type is an error union. Returns true if and only if the value is
/// the error union payload, not an error.
- pub fn errorUnionIsPayload(val: Value) bool {
- return switch (val.ip_index) {
- .undef => unreachable,
- .none => switch (val.tag()) {
- .eu_payload => true,
- else => false,
-
- .inferred_alloc => unreachable,
- .inferred_alloc_comptime => unreachable,
- },
- else => false,
- };
+ pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool {
+ return mod.intern_pool.indexToKey(val.ip_index).error_union.val == .payload;
}
/// Value of the optional, null if optional has no payload.
pub fn optionalValue(val: Value, mod: *const Module) ?Value {
- return switch (val.ip_index) {
- .none => if (val.isNull(mod)) null
- // Valid for optional representation to be the direct value
- // and not use opt_payload.
- else if (val.castTag(.opt_payload)) |p| p.data else val,
- .null_value => null,
- else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
- .opt => |opt| switch (opt.val) {
- .none => null,
- else => opt.val.toValue(),
- },
- else => unreachable,
- },
+ return switch (mod.intern_pool.indexToKey(val.ip_index).opt.val) {
+ .none => null,
+ else => |index| index.toValue(),
};
}
@@ -3001,28 +2181,8 @@ pub const Value = struct {
}
pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
- switch (val.ip_index) {
- .undef => return val,
- .none => switch (val.tag()) {
- .the_only_possible_value => return mod.floatValue(float_ty, 0), // for i0, u0
- .lazy_align => {
- const ty = val.castTag(.lazy_align).?.data;
- if (opt_sema) |sema| {
- return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
- } else {
- return intToFloatInner(ty.abiAlignment(mod), float_ty, mod);
- }
- },
- .lazy_size => {
- const ty = val.castTag(.lazy_size).?.data;
- if (opt_sema) |sema| {
- return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
- } else {
- return intToFloatInner(ty.abiSize(mod), float_ty, mod);
- }
- },
- else => unreachable,
- },
+ return switch (val.ip_index) {
+ .undef => val,
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.int => |int| switch (int.storage) {
.big_int => |big_int| {
@@ -3030,10 +2190,20 @@ pub const Value = struct {
return mod.floatValue(float_ty, float);
},
inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod),
+ .lazy_align => |ty| if (opt_sema) |sema| {
+ return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
+ } else {
+ return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod);
+ },
+ .lazy_size => |ty| if (opt_sema) |sema| {
+ return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
+ } else {
+ return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod);
+ },
},
else => unreachable,
},
- }
+ };
}
fn intToFloatInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
@@ -4768,81 +3938,6 @@ pub const Value = struct {
pub const Payload = struct {
tag: Tag,
- pub const Function = struct {
- base: Payload,
- data: *Module.Fn,
- };
-
- pub const ExternFn = struct {
- base: Payload,
- data: *Module.ExternFn,
- };
-
- pub const Decl = struct {
- base: Payload,
- data: Module.Decl.Index,
- };
-
- pub const Variable = struct {
- base: Payload,
- data: *Module.Var,
- };
-
- pub const SubValue = struct {
- base: Payload,
- data: Value,
- };
-
- pub const DeclRefMut = struct {
- pub const base_tag = Tag.decl_ref_mut;
-
- base: Payload = Payload{ .tag = base_tag },
- data: Data,
-
- pub const Data = struct {
- decl_index: Module.Decl.Index,
- runtime_index: RuntimeIndex,
- };
- };
-
- pub const PayloadPtr = struct {
- base: Payload,
- data: struct {
- container_ptr: Value,
- container_ty: Type,
- },
- };
-
- pub const ComptimeFieldPtr = struct {
- base: Payload,
- data: struct {
- field_val: Value,
- field_ty: Type,
- },
- };
-
- pub const ElemPtr = struct {
- pub const base_tag = Tag.elem_ptr;
-
- base: Payload = Payload{ .tag = base_tag },
- data: struct {
- array_ptr: Value,
- elem_ty: Type,
- index: usize,
- },
- };
-
- pub const FieldPtr = struct {
- pub const base_tag = Tag.field_ptr;
-
- base: Payload = Payload{ .tag = base_tag },
- data: struct {
- container_ptr: Value,
- container_ty: Type,
- field_index: usize,
- },
- };
-
pub const Bytes = struct {
base: Payload,
/// Includes the sentinel, if any.
@@ -4861,32 +3956,6 @@ pub const Value = struct {
data: []Value,
};
- pub const Slice = struct {
- base: Payload,
- data: struct {
- ptr: Value,
- len: Value,
- },
-
- pub const ptr_index = 0;
- pub const len_index = 1;
- };
-
- pub const Ty = struct {
- base: Payload,
- data: Type,
- };
-
- pub const Error = struct {
- base: Payload = .{ .tag = .@"error" },
- data: struct {
- /// `name` is owned by `Module` and will be valid for the entire
- /// duration of the compilation.
- /// TODO revisit this when we have the concept of the error tag type
- name: []const u8,
- },
- };
-
pub const InferredAlloc = struct {
pub const base_tag = Tag.inferred_alloc;
src/Zir.zig
@@ -2108,8 +2108,8 @@ pub const Inst = struct {
manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type),
single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
- const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type),
- const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type),
+ slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type),
+ slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type),
tools/lldb_pretty_printers.py
@@ -533,8 +533,8 @@ type_tag_handlers = {
'empty_struct_literal': lambda payload: '@TypeOf(.{})',
'anyerror_void_error_union': lambda payload: 'anyerror!void',
- 'const_slice_u8': lambda payload: '[]const u8',
- 'const_slice_u8_sentinel_0': lambda payload: '[:0]const u8',
+ 'slice_const_u8': lambda payload: '[]const u8',
+ 'slice_const_u8_sentinel_0': lambda payload: '[:0]const u8',
'fn_noreturn_no_args': lambda payload: 'fn() noreturn',
'fn_void_no_args': lambda payload: 'fn() void',
'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.Naked) noreturn',
@@ -560,7 +560,7 @@ type_tag_handlers = {
'many_mut_pointer': lambda payload: '[*]%s' % type_Type_SummaryProvider(payload),
'c_const_pointer': lambda payload: '[*c]const %s' % type_Type_SummaryProvider(payload),
'c_mut_pointer': lambda payload: '[*c]%s' % type_Type_SummaryProvider(payload),
- 'const_slice': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload),
+ 'slice_const': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload),
'mut_slice': lambda payload: '[]%s' % type_Type_SummaryProvider(payload),
'int_signed': lambda payload: 'i%d' % payload.unsigned,
'int_unsigned': lambda payload: 'u%d' % payload.unsigned,
tools/stage2_gdb_pretty_printers.py
@@ -18,7 +18,7 @@ class TypePrinter:
'many_mut_pointer': 'Type.Payload.ElemType',
'c_const_pointer': 'Type.Payload.ElemType',
'c_mut_pointer': 'Type.Payload.ElemType',
- 'const_slice': 'Type.Payload.ElemType',
+ 'slice_const': 'Type.Payload.ElemType',
'mut_slice': 'Type.Payload.ElemType',
'optional': 'Type.Payload.ElemType',
'optional_single_mut_pointer': 'Type.Payload.ElemType',