Commit 917640810e
Changed files (96)
lib/
    compiler/resinator
    compiler_rt
    std/
        Build
        debug/Dwarf
        Target
        zig
src/
    arch/
        aarch64
        arm
        riscv64
        sparc64
        wasm
        x86_64
    codegen
    Compilation
    link
    Package
test/
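
The unifying change in this commit: APIs that previously took std.Target by value now take *const std.Target. std.Target is a fairly large struct (it embeds the CPU feature set and the dynamic-linker path buffer), so borrowing it through a const pointer avoids copying it at every call site. The mechanical shape of the change, sketched with a hypothetical caller rather than a hunk from this commit:

    // Before: the target struct is copied into the callee.
    const n = std.zig.target.intByteSize(builtin.target, bits);
    // After: the callee borrows the caller's target by const pointer.
    const n = std.zig.target.intByteSize(&builtin.target, bits);

The per-file hunks below apply this pattern to function signatures, struct fields, and call sites.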
lib/compiler/resinator/main.zig
@@ -525,7 +525,7 @@ fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.A
};
const target = std.zig.resolveTargetQueryOrFatal(target_query);
const is_native_abi = target_query.isNativeAbi();
- const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, target, is_native_abi, true, null) catch {
+ const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch {
if (includes == .any) {
// fall back to mingw
includes = .gnu;
@@ -550,7 +550,7 @@ fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.A
};
const target = std.zig.resolveTargetQueryOrFatal(target_query);
const is_native_abi = target_query.isNativeAbi();
- const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, target, is_native_abi, true, null) catch |err| switch (err) {
+ const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => return error.MingwIncludesNotFound,
};
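
Both resinator hunks follow the simplest form of the change: target is a stack-local const returned by resolveTargetQueryOrFatal, so &target yields a *const std.Target that is valid for the duration of the LibCDirs.detect call, and nothing else at the call site moves.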
lib/compiler_rt/divmodei4.zig
@@ -35,7 +35,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []u32 = @ptrCast(@alignCast(v_p[0..byte_size]));
@@ -44,7 +44,7 @@ pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) vo
pub fn __modei4(r_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []u32 = @ptrCast(@alignCast(v_p[0..byte_size]));
lib/compiler_rt/fixdfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixdfei(r: [*]u8, bits: usize, a: f64) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixhfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixhfei(r: [*]u8, bits: usize, a: f16) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixsfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixsfei(r: [*]u8, bits: usize, a: f32) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixtfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixtfei(r: [*]u8, bits: usize, a: f128) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixunsdfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunsdfei(r: [*]u8, bits: usize, a: f64) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixunshfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunshfei(r: [*]u8, bits: usize, a: f16) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixunssfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunssfei(r: [*]u8, bits: usize, a: f32) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixunstfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunstfei(r: [*]u8, bits: usize, a: f128) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixunsxfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunsxfei(r: [*]u8, bits: usize, a: f80) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/fixxfei.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __fixxfei(r: [*]u8, bits: usize, a: f80) callconv(.c) void {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}
lib/compiler_rt/floateidf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floateidf(a: [*]const u8, bits: usize) callconv(.c) f64 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f64, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floateihf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floateihf(a: [*]const u8, bits: usize) callconv(.c) f16 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f16, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floateisf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floateisf(a: [*]const u8, bits: usize) callconv(.c) f32 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f32, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floateitf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floateitf(a: [*]const u8, bits: usize) callconv(.c) f128 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f128, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floateixf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floateixf(a: [*]const u8, bits: usize) callconv(.c) f80 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f80, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floatuneidf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneidf(a: [*]const u8, bits: usize) callconv(.c) f64 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f64, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floatuneihf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneihf(a: [*]const u8, bits: usize) callconv(.c) f16 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f16, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floatuneisf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneisf(a: [*]const u8, bits: usize) callconv(.c) f32 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f32, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floatuneitf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneitf(a: [*]const u8, bits: usize) callconv(.c) f128 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f128, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/floatuneixf.zig
@@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneixf(a: [*]const u8, bits: usize) callconv(.c) f80 {
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f80, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}
lib/compiler_rt/udivmodei4.zig
@@ -114,7 +114,7 @@ pub fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []const u32 = @ptrCast(@alignCast(v_p[0..byte_size]));
@@ -123,7 +123,7 @@ pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
- const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
+ const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []const u32 = @ptrCast(@alignCast(v_p[0..byte_size]));
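
All of the compiler_rt wrappers above share one pattern: compute the operand byte size from the target's integer ABI via std.zig.target.intByteSize, then reinterpret the raw pointer as a limb slice. builtin.target is a comptime-known constant, so &builtin.target is a pointer to static data and can be formed inside these exported callconv(.c) functions at no runtime cost.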
lib/std/Build/Fuzz/WebServer.zig
@@ -198,10 +198,10 @@ fn serveWasm(
const wasm_base_path = try buildWasmBinary(ws, arena, optimize_mode);
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = fuzzer_bin_name,
- .target = std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
+ .target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
.arch_os_abi = fuzzer_arch_os_abi,
.cpu_features = fuzzer_cpu_features,
- }) catch unreachable) catch unreachable,
+ }) catch unreachable) catch unreachable),
.output_mode = .Exe,
});
// std.http.Server does not have a sendfile API yet.
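
Here the resolved target is the result of an expression rather than a named local, so the hunk parenthesizes the whole resolveTargetQuery(...) chain and takes the address of the materialized temporary; that pointer only needs to remain valid while binNameAlloc reads the options during the call.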
lib/std/Build/Step/Compile.zig
@@ -377,7 +377,7 @@ pub fn create(owner: *std.Build, options: Options) *Compile {
const resolved_target = options.root_module.resolved_target orelse
@panic("the root Module of a Compile step must be created with a known 'target' field");
- const target = resolved_target.result;
+ const target = &resolved_target.result;
const step_name = owner.fmt("compile {s} {s} {s}", .{
// Avoid the common case of the step name looking like "compile test test".
@@ -1866,7 +1866,7 @@ fn outputPath(c: *Compile, out_dir: std.Build.Cache.Path, ea: std.zig.EmitArtifa
const arena = c.step.owner.graph.arena;
const name = ea.cacheName(arena, .{
.root_name = c.name,
- .target = c.root_module.resolved_target.?.result,
+ .target = &c.root_module.resolved_target.?.result,
.output_mode = switch (c.kind) {
.lib => .Lib,
.obj, .test_obj => .Obj,
lib/std/Build/Step/Run.zig
@@ -1108,7 +1108,7 @@ fn runCommand(
const need_cross_libc = exe.is_linking_libc and
(root_target.isGnuLibC() or (root_target.isMuslLibC() and exe.linkage == .dynamic));
const other_target = exe.root_module.resolved_target.?.result;
- switch (std.zig.system.getExternalExecutor(b.graph.host.result, &other_target, .{
+ switch (std.zig.system.getExternalExecutor(&b.graph.host.result, &other_target, .{
.qemu_fixes_dl = need_cross_libc and b.libc_runtimes_dir != null,
.link_libc = exe.is_linking_libc,
})) {
lib/std/Build/Module.zig
@@ -655,10 +655,10 @@ fn linkLibraryOrObject(m: *Module, other: *Step.Compile) void {
m.include_dirs.append(allocator, .{ .other_step = other }) catch @panic("OOM");
}
-fn requireKnownTarget(m: *Module) std.Target {
- const resolved_target = m.resolved_target orelse
- @panic("this API requires the Module to be created with a known 'target' field");
- return resolved_target.result;
+fn requireKnownTarget(m: *Module) *const std.Target {
+ const resolved_target = &(m.resolved_target orelse
+ @panic("this API requires the Module to be created with a known 'target' field"));
+ return &resolved_target.result;
}
/// Elements of `modules` and `names` are matched one-to-one.
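
requireKnownTarget now hands back a pointer instead of a copy. A minimal caller sketch (hypothetical, with m: *Module in scope):

    const target = m.requireKnownTarget(); // now *const std.Target
    const is_windows = target.os.tag == .windows;

Field access auto-dereferences single-item pointers, so most readers of the old by-value result need no further changes.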
lib/std/debug/Dwarf/abi.zig
@@ -9,7 +9,7 @@ const Arch = std.Target.Cpu.Arch;
///
/// See also `std.debug.SelfInfo.supportsUnwinding` which tells whether the Zig
/// standard library has a working implementation of unwinding for this target.
-pub fn supportsUnwinding(target: std.Target) bool {
+pub fn supportsUnwinding(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.amdgcn,
.nvptx,
lib/std/debug/SelfInfo.zig
@@ -1795,10 +1795,10 @@ fn spRegNum(reg_context: Dwarf.abi.RegisterContext) u8 {
const ip_reg_num = Dwarf.abi.ipRegNum(native_arch).?;
/// Tells whether unwinding for the host is implemented.
-pub const supports_unwinding = supportsUnwinding(builtin.target);
+pub const supports_unwinding = supportsUnwinding(&builtin.target);
comptime {
- if (supports_unwinding) assert(Dwarf.abi.supportsUnwinding(builtin.target));
+ if (supports_unwinding) assert(Dwarf.abi.supportsUnwinding(&builtin.target));
}
/// Tells whether unwinding for this target is *implemented* here in the Zig
@@ -1806,7 +1806,7 @@ comptime {
///
/// See also `Dwarf.abi.supportsUnwinding` which tells whether Dwarf supports
/// unwinding on that target *in theory*.
-pub fn supportsUnwinding(target: std.Target) bool {
+pub fn supportsUnwinding(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.x86 => switch (target.os.tag) {
.linux, .netbsd, .solaris, .illumos => true,
lib/std/Target/Query.zig
@@ -94,7 +94,7 @@ pub const OsVersion = union(enum) {
pub const SemanticVersion = std.SemanticVersion;
-pub fn fromTarget(target: Target) Query {
+pub fn fromTarget(target: *const Target) Query {
var result: Query = .{
.cpu_arch = target.cpu.arch,
.cpu_model = .{ .explicit = target.cpu.model },
lib/std/zig/llvm/Builder.zig
@@ -66,7 +66,7 @@ pub const Options = struct {
allocator: Allocator,
strip: bool = true,
name: []const u8 = &.{},
- target: std.Target = builtin.target,
+ target: *const std.Target = &builtin.target,
triple: []const u8 = &.{},
};
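
The Options default changes from copying builtin.target into every Options value to pointing at it; since builtin.target is a comptime constant, &builtin.target has static lifetime and is safe as a field default. Callers building for another target presumably override the field with a pointer to their own resolved std.Target.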
lib/std/zig/system/darwin.zig
@@ -34,7 +34,7 @@ pub fn isSdkInstalled(allocator: Allocator) bool {
/// Caller owns the memory.
/// stderr from xcrun is ignored.
/// If error.OutOfMemory occurs in Allocator, this function returns null.
-pub fn getSdk(allocator: Allocator, target: Target) ?[]const u8 {
+pub fn getSdk(allocator: Allocator, target: *const Target) ?[]const u8 {
const is_simulator_abi = target.abi == .simulator;
const sdk = switch (target.os.tag) {
.ios => switch (target.abi) {
lib/std/zig/system/NativePaths.zig
@@ -13,7 +13,7 @@ framework_dirs: std.ArrayListUnmanaged([]const u8) = .empty,
rpaths: std.ArrayListUnmanaged([]const u8) = .empty,
warnings: std.ArrayListUnmanaged([]const u8) = .empty,
-pub fn detect(arena: Allocator, native_target: std.Target) !NativePaths {
+pub fn detect(arena: Allocator, native_target: *const std.Target) !NativePaths {
var self: NativePaths = .{ .arena = arena };
var is_nix = false;
if (process.getEnvVarOwned(arena, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
lib/std/zig/LibCDirs.zig
@@ -15,7 +15,7 @@ pub const DarwinSdkLayout = enum {
pub fn detect(
arena: Allocator,
zig_lib_dir: []const u8,
- target: std.Target,
+ target: *const std.Target,
is_native_abi: bool,
link_libc: bool,
libc_installation: ?*const LibCInstallation,
@@ -88,7 +88,7 @@ pub fn detect(
};
}
-fn detectFromInstallation(arena: Allocator, target: std.Target, lci: *const LibCInstallation) !LibCDirs {
+fn detectFromInstallation(arena: Allocator, target: *const std.Target, lci: *const LibCInstallation) !LibCDirs {
var list = try std.ArrayList([]const u8).initCapacity(arena, 5);
var framework_list = std.ArrayList([]const u8).init(arena);
@@ -146,7 +146,7 @@ fn detectFromInstallation(arena: Allocator, target: std.Target, lci: *const LibC
pub fn detectFromBuilding(
arena: Allocator,
zig_lib_dir: []const u8,
- target: std.Target,
+ target: *const std.Target,
) !LibCDirs {
const s = std.fs.path.sep_str;
@@ -224,7 +224,7 @@ pub fn detectFromBuilding(
};
}
-fn libCGenericName(target: std.Target) [:0]const u8 {
+fn libCGenericName(target: *const std.Target) [:0]const u8 {
switch (target.os.tag) {
.windows => return "mingw",
.macos, .ios, .tvos, .watchos, .visionos => return "darwin",
lib/std/zig/LibCInstallation.zig
@@ -26,7 +26,7 @@ pub const FindError = error{
pub fn parse(
allocator: Allocator,
libc_file: []const u8,
- target: std.Target,
+ target: *const std.Target,
) !LibCInstallation {
var self: LibCInstallation = .{};
@@ -157,7 +157,7 @@ pub fn render(self: LibCInstallation, out: anytype) !void {
pub const FindNativeOptions = struct {
allocator: Allocator,
- target: std.Target,
+ target: *const std.Target,
/// If enabled, will print human-friendly errors to stderr.
verbose: bool = false,
@@ -700,7 +700,7 @@ pub const CrtBasenames = struct {
crtn: ?[]const u8 = null,
pub const GetArgs = struct {
- target: std.Target,
+ target: *const std.Target,
link_libc: bool,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
@@ -965,7 +965,7 @@ pub fn resolveCrtPaths(
lci: LibCInstallation,
arena: Allocator,
crt_basenames: CrtBasenames,
- target: std.Target,
+ target: *const std.Target,
) error{ OutOfMemory, LibCInstallationMissingCrtDir }!CrtPaths {
const crt_dir_path: Path = .{
.root_dir = std.Build.Cache.Directory.cwd(),
lib/std/zig/system.zig
@@ -28,7 +28,7 @@ pub const GetExternalExecutorOptions = struct {
/// Return whether or not the given host is capable of running executables of
/// the other target.
pub fn getExternalExecutor(
- host: std.Target,
+ host: *const std.Target,
candidate: *const std.Target,
options: GetExternalExecutorOptions,
) Executor {
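
With the host now also behind a pointer, the two sides of getExternalExecutor are symmetric (host and candidate are both *const std.Target); the Run.zig hunk above shows the matching call-site update, &b.graph.host.result.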
lib/std/zig/target.zig
@@ -116,7 +116,7 @@ pub const freebsd_libc_version: std.SemanticVersion = .{ .major = 14, .minor = 0
/// The version of Zig's bundled NetBSD libc used when linking libc statically.
pub const netbsd_libc_version: std.SemanticVersion = .{ .major = 10, .minor = 1, .patch = 0 };
-pub fn canBuildLibC(target: std.Target) bool {
+pub fn canBuildLibC(target: *const std.Target) bool {
for (available_libcs) |libc| {
if (target.cpu.arch == libc.arch and target.os.tag == libc.os and target.abi == libc.abi) {
if (libc.os_ver) |libc_os_ver| {
@@ -176,7 +176,7 @@ pub fn muslRuntimeTriple(
return std.Target.linuxTripleSimple(allocator, arch, .linux, abi);
}
-pub fn osArchName(target: std.Target) [:0]const u8 {
+pub fn osArchName(target: *const std.Target) [:0]const u8 {
return switch (target.os.tag) {
.linux => switch (target.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => "arm",
@@ -276,7 +276,7 @@ pub fn netbsdAbiNameHeaders(abi: std.Target.Abi) [:0]const u8 {
};
}
-pub fn isLibCLibName(target: std.Target, name: []const u8) bool {
+pub fn isLibCLibName(target: *const std.Target, name: []const u8) bool {
const ignore_case = target.os.tag.isDarwin() or target.os.tag == .windows;
if (eqlIgnoreCase(ignore_case, name, "c"))
@@ -453,7 +453,7 @@ pub fn isLibCLibName(target: std.Target, name: []const u8) bool {
return false;
}
-pub fn isLibCxxLibName(target: std.Target, name: []const u8) bool {
+pub fn isLibCxxLibName(target: *const std.Target, name: []const u8) bool {
const ignore_case = target.os.tag.isDarwin() or target.os.tag == .windows;
return eqlIgnoreCase(ignore_case, name, "c++") or
@@ -470,11 +470,11 @@ fn eqlIgnoreCase(ignore_case: bool, a: []const u8, b: []const u8) bool {
}
}
-pub fn intByteSize(target: std.Target, bits: u16) u19 {
+pub fn intByteSize(target: *const std.Target, bits: u16) u19 {
return std.mem.alignForward(u19, @intCast((@as(u17, bits) + 7) / 8), intAlignment(target, bits));
}
-pub fn intAlignment(target: std.Target, bits: u16) u16 {
+pub fn intAlignment(target: *const std.Target, bits: u16) u16 {
return switch (target.cpu.arch) {
.x86 => switch (bits) {
0 => 0,
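
For intByteSize, (bits + 7) / 8 rounds the bit count up to whole bytes, and alignForward then rounds that byte count up to a multiple of intAlignment for the same target. Worked example: bits = 100 gives ceil(100 / 8) = 13 bytes, which rounds up to 16 when intAlignment(target, 100) returns 8 (or 16). The @as(u17, bits) widening keeps bits + 7 from overflowing u16.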
lib/std/Target.zig
@@ -1074,7 +1074,7 @@ pub const ObjectFormat = enum {
}
};
-pub fn toElfMachine(target: Target) std.elf.EM {
+pub fn toElfMachine(target: *const Target) std.elf.EM {
return switch (target.cpu.arch) {
.amdgcn => .AMDGPU,
.arc => .ARC_COMPACT,
@@ -1115,7 +1115,7 @@ pub fn toElfMachine(target: Target) std.elf.EM {
};
}
-pub fn toCoffMachine(target: Target) std.coff.MachineType {
+pub fn toCoffMachine(target: *const Target) std.coff.MachineType {
return switch (target.cpu.arch) {
.arm => .ARM,
.thumb => .ARMNT,
@@ -1999,7 +1999,7 @@ pub const Cpu = struct {
}
};
-pub fn zigTriple(target: Target, allocator: Allocator) Allocator.Error![]u8 {
+pub fn zigTriple(target: *const Target, allocator: Allocator) Allocator.Error![]u8 {
return Query.fromTarget(target).zigTriple(allocator);
}
@@ -2007,7 +2007,7 @@ pub fn hurdTupleSimple(allocator: Allocator, arch: Cpu.Arch, abi: Abi) ![]u8 {
return std.fmt.allocPrint(allocator, "{s}-{s}", .{ @tagName(arch), @tagName(abi) });
}
-pub fn hurdTuple(target: Target, allocator: Allocator) ![]u8 {
+pub fn hurdTuple(target: *const Target, allocator: Allocator) ![]u8 {
return hurdTupleSimple(allocator, target.cpu.arch, target.abi);
}
@@ -2015,63 +2015,63 @@ pub fn linuxTripleSimple(allocator: Allocator, arch: Cpu.Arch, os_tag: Os.Tag, a
return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(arch), @tagName(os_tag), @tagName(abi) });
}
-pub fn linuxTriple(target: Target, allocator: Allocator) ![]u8 {
+pub fn linuxTriple(target: *const Target, allocator: Allocator) ![]u8 {
return linuxTripleSimple(allocator, target.cpu.arch, target.os.tag, target.abi);
}
-pub fn exeFileExt(target: Target) [:0]const u8 {
+pub fn exeFileExt(target: *const Target) [:0]const u8 {
return target.os.tag.exeFileExt(target.cpu.arch);
}
-pub fn staticLibSuffix(target: Target) [:0]const u8 {
+pub fn staticLibSuffix(target: *const Target) [:0]const u8 {
return target.os.tag.staticLibSuffix(target.abi);
}
-pub fn dynamicLibSuffix(target: Target) [:0]const u8 {
+pub fn dynamicLibSuffix(target: *const Target) [:0]const u8 {
return target.os.tag.dynamicLibSuffix();
}
-pub fn libPrefix(target: Target) [:0]const u8 {
+pub fn libPrefix(target: *const Target) [:0]const u8 {
return target.os.tag.libPrefix(target.abi);
}
-pub inline fn isMinGW(target: Target) bool {
+pub inline fn isMinGW(target: *const Target) bool {
return target.os.tag == .windows and target.abi.isGnu();
}
-pub inline fn isGnuLibC(target: Target) bool {
+pub inline fn isGnuLibC(target: *const Target) bool {
return switch (target.os.tag) {
.hurd, .linux => target.abi.isGnu(),
else => false,
};
}
-pub inline fn isMuslLibC(target: Target) bool {
+pub inline fn isMuslLibC(target: *const Target) bool {
return target.os.tag == .linux and target.abi.isMusl();
}
-pub inline fn isDarwinLibC(target: Target) bool {
+pub inline fn isDarwinLibC(target: *const Target) bool {
return switch (target.abi) {
.none, .macabi, .simulator => target.os.tag.isDarwin(),
else => false,
};
}
-pub inline fn isFreeBSDLibC(target: Target) bool {
+pub inline fn isFreeBSDLibC(target: *const Target) bool {
return switch (target.abi) {
.none, .eabihf => target.os.tag == .freebsd,
else => false,
};
}
-pub inline fn isNetBSDLibC(target: Target) bool {
+pub inline fn isNetBSDLibC(target: *const Target) bool {
return switch (target.abi) {
.none, .eabi, .eabihf => target.os.tag == .netbsd,
else => false,
};
}
-pub inline fn isWasiLibC(target: Target) bool {
+pub inline fn isWasiLibC(target: *const Target) bool {
return target.os.tag == .wasi and target.abi.isMusl();
}
@@ -2576,7 +2576,7 @@ pub const DynamicLinker = struct {
}
};
-pub fn standardDynamicLinkerPath(target: Target) DynamicLinker {
+pub fn standardDynamicLinkerPath(target: *const Target) DynamicLinker {
return DynamicLinker.standard(target.cpu, target.os, target.abi);
}
@@ -2645,11 +2645,11 @@ pub fn ptrBitWidth_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) u16 {
};
}
-pub fn ptrBitWidth(target: Target) u16 {
+pub fn ptrBitWidth(target: *const Target) u16 {
return ptrBitWidth_cpu_abi(target.cpu, target.abi);
}
-pub fn stackAlignment(target: Target) u16 {
+pub fn stackAlignment(target: *const Target) u16 {
// Overrides for when the stack alignment is not equal to the pointer width.
switch (target.cpu.arch) {
.m68k,
@@ -2697,7 +2697,7 @@ pub fn stackAlignment(target: Target) u16 {
/// Default signedness of `char` for the native C compiler for this target
/// Note that char signedness is implementation-defined and many compilers provide
/// an option to override the default signedness e.g. GCC's -funsigned-char / -fsigned-char
-pub fn cCharSignedness(target: Target) std.builtin.Signedness {
+pub fn cCharSignedness(target: *const Target) std.builtin.Signedness {
if (target.os.tag.isDarwin() or target.os.tag == .windows or target.os.tag == .uefi) return .signed;
return switch (target.cpu.arch) {
@@ -2740,7 +2740,7 @@ pub const CType = enum {
longdouble,
};
-pub fn cTypeByteSize(t: Target, c_type: CType) u16 {
+pub fn cTypeByteSize(t: *const Target, c_type: CType) u16 {
return switch (c_type) {
.char,
.short,
@@ -2766,7 +2766,7 @@ pub fn cTypeByteSize(t: Target, c_type: CType) u16 {
};
}
-pub fn cTypeBitSize(target: Target, c_type: CType) u16 {
+pub fn cTypeBitSize(target: *const Target, c_type: CType) u16 {
switch (target.os.tag) {
.freestanding, .other => switch (target.cpu.arch) {
.msp430 => switch (c_type) {
@@ -3077,7 +3077,7 @@ pub fn cTypeBitSize(target: Target, c_type: CType) u16 {
}
}
-pub fn cTypeAlignment(target: Target, c_type: CType) u16 {
+pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 {
// Overrides for unusual alignments
switch (target.cpu.arch) {
.avr => return 1,
@@ -3172,7 +3172,7 @@ pub fn cTypeAlignment(target: Target, c_type: CType) u16 {
);
}
-pub fn cTypePreferredAlignment(target: Target, c_type: CType) u16 {
+pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 {
// Overrides for unusual alignments
switch (target.cpu.arch) {
.arc => switch (c_type) {
@@ -3265,7 +3265,7 @@ pub fn cTypePreferredAlignment(target: Target, c_type: CType) u16 {
);
}
-pub fn cMaxIntAlignment(target: std.Target) u16 {
+pub fn cMaxIntAlignment(target: *const Target) u16 {
return switch (target.cpu.arch) {
.avr => 1,
@@ -3328,7 +3328,7 @@ pub fn cMaxIntAlignment(target: std.Target) u16 {
};
}
-pub fn cCallingConvention(target: Target) ?std.builtin.CallingConvention {
+pub fn cCallingConvention(target: *const Target) ?std.builtin.CallingConvention {
return switch (target.cpu.arch) {
.x86_64 => switch (target.os.tag) {
.windows, .uefi => .{ .x86_64_win = .{} },
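
A note on why the Target.zig churn is almost entirely signature-only: Zig's method-call syntax automatically takes the address of an addressable operand, so call sites written as target.isGnuLibC() or target.ptrBitWidth() compile unchanged whether target is a std.Target value or already a *const std.Target. Only function-style calls spell the difference out (illustrative, names assumed):

    const triple = try std.Target.zigTriple(&my_target, gpa); // my_target: std.Target
    const same = try my_target.zigTriple(gpa); // method syntax auto-references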
lib/std/zig.zig
@@ -141,7 +141,7 @@ pub fn lineDelta(source: []const u8, start: usize, end: usize) isize {
pub const BinNameOptions = struct {
root_name: []const u8,
- target: std.Target,
+ target: *const std.Target,
output_mode: std.builtin.OutputMode,
link_mode: ?std.builtin.LinkMode = null,
version: ?std.SemanticVersion = null,
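
BinNameOptions.target becoming a pointer is what drove the WebServer hunk earlier. A minimal call sketch (hypothetical names, target resolved beforehand):

    const bin_name = try std.zig.binNameAlloc(arena, .{
        .root_name = "demo",
        .target = &target,
        .output_mode = .Exe,
    });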
src/arch/aarch64/CodeGen.zig
@@ -6175,7 +6175,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
self.pt,
self.src_loc,
val,
- self.target.*,
+ self.target,
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -6379,7 +6379,7 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
},
.stack_pointer => unreachable, // we can't store/load the sp
.floating_point => {
- return switch (ty.floatBits(self.target.*)) {
+ return switch (ty.floatBits(self.target)) {
16 => reg.toH(),
32 => reg.toS(),
64 => reg.toD(),
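
In the self-hosted backends the pattern runs the other way: self.target was already a *const std.Target, and the old code dereferenced it (self.target.*) to build the by-value argument, copying the struct on every call; the new signatures let the pointer pass straight through. The same substitution repeats across the arm, riscv64, sparc64, wasm, and x86_64 backends below.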
src/arch/arm/CodeGen.zig
@@ -6148,7 +6148,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
pt,
self.src_loc,
val,
- self.target.*,
+ self.target,
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
src/arch/riscv64/CodeGen.zig
@@ -1881,7 +1881,7 @@ fn memSize(func: *Func, ty: Type) Memory.Size {
const pt = func.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
- .float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)),
+ .float => Memory.Size.fromBitSize(ty.floatBits(func.target)),
else => Memory.Size.fromByteSize(ty.abiSize(zcu)),
};
}
@@ -2401,7 +2401,7 @@ fn binOp(
const rhs_ty = func.typeOf(rhs_air);
if (lhs_ty.isRuntimeFloat()) libcall: {
- const float_bits = lhs_ty.floatBits(func.target.*);
+ const float_bits = lhs_ty.floatBits(func.target);
const type_needs_libcall = switch (float_bits) {
16 => true,
32, 64 => false,
@@ -5189,7 +5189,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
}
},
.float => {
- const float_bits = lhs_ty.floatBits(func.target.*);
+ const float_bits = lhs_ty.floatBits(func.target);
const float_reg_size: u32 = if (func.hasFeature(.d)) 64 else 32;
if (float_bits > float_reg_size) {
return func.fail("TODO: airCmp float > 64/32 bits", .{});
@@ -8195,7 +8195,7 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
const result = if (val.isUndef(pt.zcu))
try lf.lowerUav(pt, val.toIntern(), .none, src_loc)
else
- try codegen.genTypedValue(lf, pt, src_loc, val, func.target.*);
+ try codegen.genTypedValue(lf, pt, src_loc, val, func.target);
const mcv: MCValue = switch (result) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@@ -8484,7 +8484,7 @@ fn promoteInt(func: *Func, ty: Type) Type {
fn promoteVarArg(func: *Func, ty: Type) Type {
if (!ty.isRuntimeFloat()) return func.promoteInt(ty);
- switch (ty.floatBits(func.target.*)) {
+ switch (ty.floatBits(func.target)) {
32, 64 => return Type.f64,
else => |float_bits| {
assert(float_bits == func.target.cTypeBitSize(.longdouble));
src/arch/sparc64/CodeGen.zig
@@ -4088,7 +4088,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
pt,
self.src_loc,
val,
- self.target.*,
+ self.target,
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
src/arch/wasm/CodeGen.zig
@@ -982,7 +982,7 @@ fn addExtraAssumeCapacity(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
pub fn typeToValtype(ty: Type, zcu: *const Zcu, target: *const std.Target) std.wasm.Valtype {
const ip = &zcu.intern_pool;
return switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(target.*)) {
+ .float => switch (ty.floatBits(target)) {
16 => .i32, // stored/loaded as u16
32 => .f32,
64 => .f64,
@@ -1715,7 +1715,7 @@ fn isByRef(ty: Type, zcu: *const Zcu, target: *const std.Target) bool {
.vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
.int => return ty.intInfo(zcu).bits > 64,
.@"enum" => return ty.intInfo(zcu).bits > 64,
- .float => return ty.floatBits(target.*) > 64,
+ .float => return ty.floatBits(target) > 64,
.error_union => {
const pl_ty = ty.errorUnionPayload(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
@@ -2904,7 +2904,7 @@ fn floatOp(cg: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) Inne
return cg.fail("TODO: Implement floatOps for vectors", .{});
}
- const float_bits = ty.floatBits(cg.target.*);
+ const float_bits = ty.floatBits(cg.target);
if (float_op == .neg) {
return cg.floatNeg(ty, args[0]);
@@ -2931,7 +2931,7 @@ fn floatOp(cg: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) Inne
/// NOTE: The result value remains on top of the stack.
fn floatNeg(cg: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
- const float_bits = ty.floatBits(cg.target.*);
+ const float_bits = ty.floatBits(cg.target);
switch (float_bits) {
16 => {
try cg.emitWValue(arg);
@@ -3300,7 +3300,7 @@ fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
},
- .float => switch (ty.floatBits(cg.target.*)) {
+ .float => switch (ty.floatBits(cg.target)) {
16 => return .{ .imm32 = 0xaaaaaaaa },
32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
@@ -3507,7 +3507,7 @@ fn cmp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOpe
/// Compares two floats.
/// NOTE: Leaves the result of the comparison on top of the stack.
fn cmpFloat(cg: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue {
- const float_bits = ty.floatBits(cg.target.*);
+ const float_bits = ty.floatBits(cg.target);
const op: Op = switch (cmp_op) {
.lt => .lt,
@@ -4919,7 +4919,7 @@ fn airIntFromFloat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try cg.resolveInst(ty_op.operand);
const op_ty = cg.typeOf(ty_op.operand);
- const op_bits = op_ty.floatBits(cg.target.*);
+ const op_bits = op_ty.floatBits(cg.target);
const dest_ty = cg.typeOfIndex(inst);
const dest_info = dest_ty.intInfo(zcu);
@@ -4973,7 +4973,7 @@ fn airFloatFromInt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const op_info = op_ty.intInfo(zcu);
const dest_ty = cg.typeOfIndex(inst);
- const dest_bits = dest_ty.floatBits(cg.target.*);
+ const dest_bits = dest_ty.floatBits(cg.target);
if (op_info.bits > 128) {
return cg.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits});
@@ -5567,8 +5567,8 @@ fn airFpext(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Extends a float from a given `Type` to a larger wanted `Type`, leaving the
/// result on the stack.
fn fpext(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bits = given.floatBits(cg.target.*);
- const wanted_bits = wanted.floatBits(cg.target.*);
+ const given_bits = given.floatBits(cg.target);
+ const wanted_bits = wanted.floatBits(cg.target);
const intrinsic: Mir.Intrinsic = switch (given_bits) {
16 => switch (wanted_bits) {
@@ -5621,8 +5621,8 @@ fn airFptrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Truncates a float from a given `Type` to its wanted `Type`, leaving the
/// result on the stack.
fn fptrunc(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bits = given.floatBits(cg.target.*);
- const wanted_bits = wanted.floatBits(cg.target.*);
+ const given_bits = given.floatBits(cg.target);
+ const wanted_bits = wanted.floatBits(cg.target);
const intrinsic: Mir.Intrinsic = switch (given_bits) {
32 => switch (wanted_bits) {
@@ -6231,7 +6231,7 @@ fn airMaxMin(
if (ty.zigTypeTag(zcu) == .float) {
const intrinsic = switch (op) {
- inline .fmin, .fmax => |ct_op| switch (ty.floatBits(cg.target.*)) {
+ inline .fmin, .fmax => |ct_op| switch (ty.floatBits(cg.target)) {
inline 16, 32, 64, 80, 128 => |bits| @field(
Mir.Intrinsic,
libcFloatPrefix(bits) ++ @tagName(ct_op) ++ libcFloatSuffix(bits),
@@ -6268,7 +6268,7 @@ fn airMulAdd(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try cg.resolveInst(bin_op.lhs);
const rhs = try cg.resolveInst(bin_op.rhs);
- const result = if (ty.floatBits(cg.target.*) == 16) fl_result: {
+ const result = if (ty.floatBits(cg.target) == 16) fl_result: {
const rhs_ext = try cg.fpext(rhs, ty, Type.f32);
const lhs_ext = try cg.fpext(lhs, ty, Type.f32);
const addend_ext = try cg.fpext(addend, ty, Type.f32);
@@ -6667,7 +6667,7 @@ fn airDivFloor(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try cg.wrapOperand(.stack, ty);
}
} else {
- const float_bits = ty.floatBits(cg.target.*);
+ const float_bits = ty.floatBits(cg.target);
if (float_bits > 64) {
return cg.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
}
src/arch/x86_64/abi.zig
@@ -148,7 +148,7 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
result[0] = .integer;
return result;
},
- .float => switch (ty.floatBits(target.*)) {
+ .float => switch (ty.floatBits(target)) {
16 => {
if (ctx == .field) {
result[0] = .memory;
src/arch/x86_64/CodeGen.zig
@@ -163704,7 +163704,7 @@ fn allocRegOrMemAdvanced(self: *CodeGen, ty: Type, inst: ?Air.Inst.Index, reg_ok
if (reg_ok) need_mem: {
if (std.math.isPowerOfTwo(abi_size) and abi_size <= @as(u32, max_abi_size: switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target)) {
16, 32, 64, 128 => 16,
80 => break :need_mem,
else => unreachable,
@@ -163993,9 +163993,9 @@ fn airRetPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
fn airFptrunc(self: *CodeGen, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_bits = dst_ty.floatBits(self.target.*);
+ const dst_bits = dst_ty.floatBits(self.target);
const src_ty = self.typeOf(ty_op.operand);
- const src_bits = src_ty.floatBits(self.target.*);
+ const src_bits = src_ty.floatBits(self.target);
const result = result: {
if (switch (dst_bits) {
@@ -164095,10 +164095,10 @@ fn airFpext(self: *CodeGen, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
const dst_scalar_ty = dst_ty.scalarType(zcu);
- const dst_bits = dst_scalar_ty.floatBits(self.target.*);
+ const dst_bits = dst_scalar_ty.floatBits(self.target);
const src_ty = self.typeOf(ty_op.operand);
const src_scalar_ty = src_ty.scalarType(zcu);
- const src_bits = src_scalar_ty.floatBits(self.target.*);
+ const src_bits = src_scalar_ty.floatBits(self.target);
const result = result: {
if (switch (src_bits) {
@@ -168207,7 +168207,7 @@ fn floatSign(self: *CodeGen, inst: Air.Inst.Index, tag: Air.Inst.Tag, operand: A
const zcu = pt.zcu;
const result = result: {
- const scalar_bits = ty.scalarType(zcu).floatBits(self.target.*);
+ const scalar_bits = ty.scalarType(zcu).floatBits(self.target);
if (scalar_bits == 80) {
if (ty.zigTypeTag(zcu) != .float) return self.fail("TODO implement floatSign for {}", .{
ty.fmt(pt),
@@ -168363,14 +168363,14 @@ fn getRoundTag(self: *CodeGen, ty: Type) ?Mir.Inst.FixedTag {
const pt = self.pt;
const zcu = pt.zcu;
return if (self.hasFeature(.sse4_1)) switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target)) {
32 => switch (ty.vectorLen(zcu)) {
1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
@@ -168670,7 +168670,7 @@ fn airSqrt(self: *CodeGen, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
switch (ty.zigTypeTag(zcu)) {
.float => {
- const float_bits = ty.floatBits(self.target.*);
+ const float_bits = ty.floatBits(self.target);
if (switch (float_bits) {
16 => !self.hasFeature(.f16c),
32, 64 => false,
@@ -168701,7 +168701,7 @@ fn airSqrt(self: *CodeGen, inst: Air.Inst.Index) !void {
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target)) {
16 => {
assert(self.hasFeature(.f16c));
const mat_src_reg = if (src_mcv.isRegister())
@@ -168723,7 +168723,7 @@ fn airSqrt(self: *CodeGen, inst: Air.Inst.Index) !void {
else => unreachable,
},
.vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target)) {
16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(zcu)) {
1 => {
try self.asmRegisterRegister(
@@ -170904,7 +170904,7 @@ fn genBinOp(
const abi_size: u32 = @intCast(lhs_ty.abiSize(zcu));
if (lhs_ty.isRuntimeFloat()) libcall: {
- const float_bits = lhs_ty.floatBits(self.target.*);
+ const float_bits = lhs_ty.floatBits(self.target);
const type_needs_libcall = switch (float_bits) {
16 => !self.hasFeature(.f16c),
32, 64 => false,
@@ -171083,7 +171083,7 @@ fn genBinOp(
},
};
if (sse_op and ((lhs_ty.scalarType(zcu).isRuntimeFloat() and
- lhs_ty.scalarType(zcu).floatBits(self.target.*) == 80) or
+ lhs_ty.scalarType(zcu).floatBits(self.target) == 80) or
lhs_ty.abiSize(zcu) > self.vectorSize(.float)))
return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(pt) });
@@ -171474,7 +171474,7 @@ fn genBinOp(
const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
else => unreachable,
- .float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target)) {
16 => {
assert(self.hasFeature(.f16c));
const lhs_reg = if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size);
@@ -171917,7 +171917,7 @@ fn genBinOp(
},
else => null,
},
- .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target)) {
16 => tag: {
assert(self.hasFeature(.f16c));
const lhs_reg = if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size);
@@ -172336,14 +172336,14 @@ fn genBinOp(
try self.asmRegisterRegisterRegisterImmediate(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target)) {
32 => .{ .v_ss, .cmp },
64 => .{ .v_sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ .v_ss, .cmp },
2...8 => .{ .v_ps, .cmp },
@@ -172370,14 +172370,14 @@ fn genBinOp(
);
try self.asmRegisterRegisterRegisterRegister(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target)) {
32 => .{ .v_ps, .blendv },
64 => .{ .v_pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...8 => .{ .v_ps, .blendv },
else => null,
@@ -172404,14 +172404,14 @@ fn genBinOp(
const has_blend = self.hasFeature(.sse4_1);
try self.asmRegisterRegisterImmediate(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target)) {
32 => .{ ._ss, .cmp },
64 => .{ ._sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1 => .{ ._ss, .cmp },
2...4 => .{ ._ps, .cmp },
@@ -172437,14 +172437,14 @@ fn genBinOp(
);
if (has_blend) try self.asmRegisterRegisterRegister(
@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(zcu)) {
- .float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target)) {
32 => .{ ._ps, .blendv },
64 => .{ ._pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => .{ ._ps, .blendv },
else => null,
@@ -172467,14 +172467,14 @@ fn genBinOp(
mask_reg,
) else {
const mir_fixes = @as(?Mir.Inst.Fixes, switch (lhs_ty.zigTypeTag(zcu)) {
- .float => switch (lhs_ty.floatBits(self.target.*)) {
+ .float => switch (lhs_ty.floatBits(self.target)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (lhs_ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (lhs_ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (lhs_ty.childType(zcu).floatBits(self.target)) {
32 => switch (lhs_ty.vectorLen(zcu)) {
1...4 => ._ps,
else => null,
@@ -173832,7 +173832,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
switch (ty.zigTypeTag(zcu)) {
.float => {
- const float_bits = ty.floatBits(self.target.*);
+ const float_bits = ty.floatBits(self.target);
if (!switch (float_bits) {
16 => self.hasFeature(.f16c),
32 => self.hasFeature(.sse),
@@ -174188,7 +174188,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
- switch (ty.floatBits(self.target.*)) {
+ switch (ty.floatBits(self.target)) {
16 => {
assert(self.hasFeature(.f16c));
const tmp1_reg =
@@ -176335,7 +176335,7 @@ fn moveStrategy(cg: *CodeGen, ty: Type, class: Register.Class, aligned: bool) !M
else => {},
}
},
- .float => switch (ty.floatBits(cg.target.*)) {
+ .float => switch (ty.floatBits(cg.target)) {
16 => return if (cg.hasFeature(.avx)) .{ .vex_insert_extract = .{
.insert = .{ .vp_w, .insr },
.extract = .{ .vp_w, .extr },
@@ -176482,7 +176482,7 @@ fn moveStrategy(cg: *CodeGen, ty: Type, class: Register.Class, aligned: bool) !M
}
else
unreachable,
- .float => switch (ty.childType(zcu).floatBits(cg.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(cg.target)) {
16 => switch (ty.vectorLen(zcu)) {
1...8 => return .{ .load_store = if (cg.hasFeature(.avx))
.{ if (aligned) .v_dqa else .v_dqu, .mov }
@@ -177017,7 +177017,7 @@ fn genSetReg(
17...32 => if (self.hasFeature(.avx)) .{ .v_dqa, .mov } else null,
else => null,
},
- .float => switch (ty.scalarType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.scalarType(zcu).floatBits(self.target)) {
16, 128 => switch (abi_size) {
2...16 => if (self.hasFeature(.avx))
.{ .v_dqa, .mov }
@@ -177776,7 +177776,7 @@ fn airFloatFromInt(self: *CodeGen, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const dst_ty = self.typeOfIndex(inst);
- const dst_bits = dst_ty.floatBits(self.target.*);
+ const dst_bits = dst_ty.floatBits(self.target);
const src_ty = self.typeOf(ty_op.operand);
const src_bits: u32 = @intCast(src_ty.bitSize(zcu));
@@ -177828,7 +177828,7 @@ fn airFloatFromInt(self: *CodeGen, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(dst_lock);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(zcu)) {
- .float => switch (dst_ty.floatBits(self.target.*)) {
+ .float => switch (dst_ty.floatBits(self.target)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
16, 80, 128 => null,
@@ -177865,7 +177865,7 @@ fn airIntFromFloat(self: *CodeGen, inst: Air.Inst.Index) !void {
}, 32), 8) catch unreachable;
const src_ty = self.typeOf(ty_op.operand);
- const src_bits = src_ty.floatBits(self.target.*);
+ const src_bits = src_ty.floatBits(self.target);
const result = result: {
if (switch (src_bits) {
@@ -178136,22 +178136,22 @@ fn atomicOp(
}
if (rmw_op) |op| if (use_sse) {
const mir_tag = @as(?Mir.Inst.FixedTag, switch (op) {
- .Add => switch (val_ty.floatBits(self.target.*)) {
+ .Add => switch (val_ty.floatBits(self.target)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
else => null,
},
- .Sub => switch (val_ty.floatBits(self.target.*)) {
+ .Sub => switch (val_ty.floatBits(self.target)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
else => null,
},
- .Min => switch (val_ty.floatBits(self.target.*)) {
+ .Min => switch (val_ty.floatBits(self.target)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .min } else .{ ._ss, .min },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .min } else .{ ._sd, .min },
else => null,
},
- .Max => switch (val_ty.floatBits(self.target.*)) {
+ .Max => switch (val_ty.floatBits(self.target)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .max } else .{ ._ss, .max },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .max } else .{ ._sd, .max },
else => null,
@@ -178988,7 +178988,7 @@ fn airSplat(self: *CodeGen, inst: Air.Inst.Index) !void {
);
break :result .{ .register = dst_reg };
},
- .float => switch (scalar_ty.floatBits(self.target.*)) {
+ .float => switch (scalar_ty.floatBits(self.target)) {
32 => switch (vector_len) {
1 => {
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -179581,7 +179581,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
null,
else => null,
},
- .float => switch (elem_ty.floatBits(self.target.*)) {
+ .float => switch (elem_ty.floatBits(self.target)) {
else => unreachable,
16, 80, 128 => null,
32 => switch (vec_len) {
@@ -180308,7 +180308,7 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(mask_lock);
const mir_fixes: Mir.Inst.Fixes = if (elem_ty.isRuntimeFloat())
- switch (elem_ty.floatBits(self.target.*)) {
+ switch (elem_ty.floatBits(self.target)) {
16, 80, 128 => .p_,
32 => ._ps,
64 => ._pd,
@@ -180414,7 +180414,7 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
.{ switch (elem_ty.zigTypeTag(zcu)) {
else => break :result null,
.int => .vp_,
- .float => switch (elem_ty.floatBits(self.target.*)) {
+ .float => switch (elem_ty.floatBits(self.target)) {
32 => .v_ps,
64 => .v_pd,
16, 80, 128 => break :result null,
@@ -180428,7 +180428,7 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
.{ switch (elem_ty.zigTypeTag(zcu)) {
else => break :result null,
.int => .p_,
- .float => switch (elem_ty.floatBits(self.target.*)) {
+ .float => switch (elem_ty.floatBits(self.target)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => break :result null,
@@ -180800,7 +180800,7 @@ fn airMulAdd(self: *CodeGen, inst: Air.Inst.Index) !void {
const ops = [3]Air.Inst.Ref{ extra.lhs, extra.rhs, pl_op.operand };
const result = result: {
- if (switch (ty.scalarType(zcu).floatBits(self.target.*)) {
+ if (switch (ty.scalarType(zcu).floatBits(self.target)) {
16, 80, 128 => true,
32, 64 => !self.hasFeature(.fma),
else => unreachable,
@@ -180855,14 +180855,14 @@ fn airMulAdd(self: *CodeGen, inst: Air.Inst.Index) !void {
const mir_tag = @as(?Mir.Inst.FixedTag, if (std.mem.eql(u2, &order, &.{ 1, 3, 2 }) or
std.mem.eql(u2, &order, &.{ 3, 1, 2 }))
switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target)) {
32 => .{ .v_ss, .fmadd132 },
64 => .{ .v_sd, .fmadd132 },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target)) {
32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd132 },
2...8 => .{ .v_ps, .fmadd132 },
@@ -180882,14 +180882,14 @@ fn airMulAdd(self: *CodeGen, inst: Air.Inst.Index) !void {
}
else if (std.mem.eql(u2, &order, &.{ 2, 1, 3 }) or std.mem.eql(u2, &order, &.{ 1, 2, 3 }))
switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target)) {
32 => .{ .v_ss, .fmadd213 },
64 => .{ .v_sd, .fmadd213 },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target)) {
32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd213 },
2...8 => .{ .v_ps, .fmadd213 },
@@ -180909,14 +180909,14 @@ fn airMulAdd(self: *CodeGen, inst: Air.Inst.Index) !void {
}
else if (std.mem.eql(u2, &order, &.{ 2, 3, 1 }) or std.mem.eql(u2, &order, &.{ 3, 2, 1 }))
switch (ty.zigTypeTag(zcu)) {
- .float => switch (ty.floatBits(self.target.*)) {
+ .float => switch (ty.floatBits(self.target)) {
32 => .{ .v_ss, .fmadd231 },
64 => .{ .v_sd, .fmadd231 },
16, 80, 128 => null,
else => unreachable,
},
.vector => switch (ty.childType(zcu).zigTypeTag(zcu)) {
- .float => switch (ty.childType(zcu).floatBits(self.target.*)) {
+ .float => switch (ty.childType(zcu).floatBits(self.target)) {
32 => switch (ty.vectorLen(zcu)) {
1 => .{ .v_ss, .fmadd231 },
2...8 => .{ .v_ps, .fmadd231 },
@@ -181979,7 +181979,7 @@ fn promoteInt(self: *CodeGen, ty: Type) Type {
fn promoteVarArg(self: *CodeGen, ty: Type) Type {
if (!ty.isRuntimeFloat()) return self.promoteInt(ty);
- switch (ty.floatBits(self.target.*)) {
+ switch (ty.floatBits(self.target)) {
32, 64 => return .f64,
else => |float_bits| {
assert(float_bits == self.target.cTypeBitSize(.longdouble));
@@ -182080,7 +182080,7 @@ fn intInfo(cg: *CodeGen, ty: Type) ?std.builtin.Type.Int {
}
fn floatBits(cg: *CodeGen, ty: Type) ?u16 {
- return if (ty.isRuntimeFloat()) ty.floatBits(cg.target.*) else null;
+ return if (ty.isRuntimeFloat()) ty.floatBits(cg.target) else null;
}
const Temp = struct {
src/arch/x86_64/Emit.zig
@@ -105,7 +105,7 @@ pub fn emitMir(emit: *Emit) Error!void {
emit.pt,
emit.lower.src_loc,
nav,
- emit.lower.target.*,
+ emit.lower.target,
)) {
.mcv => |mcv| mcv.lea_symbol,
.fail => |em| {
@@ -542,7 +542,7 @@ pub fn emitMir(emit: *Emit) Error!void {
emit.pt,
emit.lower.src_loc,
nav,
- emit.lower.target.*,
+ emit.lower.target,
) catch |err| switch (err) {
error.CodegenFail,
=> return emit.fail("unable to codegen: {s}", .{@errorName(err)}),
src/codegen/c/Type.zig
@@ -1319,9 +1319,9 @@ pub const Pool = struct {
},
else => {
const target = &mod.resolved_target.result;
- const abi_align_bytes = std.zig.target.intAlignment(target.*, int_info.bits);
+ const abi_align_bytes = std.zig.target.intAlignment(target, int_info.bits);
const array_ctype = try pool.getArray(allocator, .{
- .len = @divExact(std.zig.target.intByteSize(target.*, int_info.bits), abi_align_bytes),
+ .len = @divExact(std.zig.target.intByteSize(target, int_info.bits), abi_align_bytes),
.elem_ctype = try pool.fromIntInfo(allocator, .{
.signedness = .unsigned,
.bits = @intCast(abi_align_bytes * 8),
@@ -1438,13 +1438,13 @@ pub const Pool = struct {
.elem_ctype = .u8,
.@"const" = true,
}),
- .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
+ .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)),
},
.{
.name = .{ .index = .len },
.ctype = .usize,
.alignas = AlignAs.fromAbiAlignment(
- .fromByteUnits(std.zig.target.intAlignment(target.*, target.ptrBitWidth())),
+ .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())),
),
},
};
@@ -2246,13 +2246,13 @@ pub const Pool = struct {
mod,
kind,
),
- .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
+ .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)),
},
.{
.name = .{ .index = .len },
.ctype = .usize,
.alignas = AlignAs.fromAbiAlignment(
- .fromByteUnits(std.zig.target.intAlignment(target.*, target.ptrBitWidth())),
+ .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())),
),
},
};
@@ -2372,7 +2372,7 @@ pub const Pool = struct {
.name = .{ .index = .@"error" },
.ctype = error_set_ctype,
.alignas = AlignAs.fromAbiAlignment(
- .fromByteUnits(std.zig.target.intAlignment(target.*, error_set_bits)),
+ .fromByteUnits(std.zig.target.intAlignment(target, error_set_bits)),
),
},
.{
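Context for the intAlignment/intByteSize calls in this hunk: an arbitrary-width Zig integer is lowered to C as an array of alignment-sized limbs, with the length computed as total ABI size divided by ABI alignment. A rough standalone re-creation of that arithmetic, assuming only the post-commit pointer signatures in std.zig.target:

const std = @import("std");
const builtin = @import("builtin");

// Limbs needed to represent an iN/uN in C: each limb is as wide as the
// integer's ABI alignment, so the count is size / alignment, exactly.
fn limbCount(target: *const std.Target, bits: u16) u64 {
    const abi_align_bytes = std.zig.target.intAlignment(target, bits);
    return @divExact(std.zig.target.intByteSize(target, bits), abi_align_bytes);
}

pub fn main() void {
    std.debug.print("u256 -> {} limbs\n", .{limbCount(&builtin.target, 256)});
}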
src/codegen/spirv/Module.zig
@@ -107,7 +107,7 @@ gpa: Allocator,
arena: std.heap.ArenaAllocator,
/// Target info
-target: std.Target,
+target: *const std.Target,
/// The target SPIR-V version
version: spec.Version,
@@ -187,7 +187,7 @@ decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,
/// The list of entry points that should be exported from this module.
entry_points: std.AutoArrayHashMapUnmanaged(IdRef, EntryPoint) = .empty,
-pub fn init(gpa: Allocator, target: std.Target) Module {
+pub fn init(gpa: Allocator, target: *const std.Target) Module {
const version_minor: u8 = blk: {
// Prefer higher versions
if (target.cpu.has(.spirv, .v1_6)) break :blk 6;
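Making Module.target a pointer field means the struct now borrows its Target instead of owning a copy, so init callers must keep the pointee alive for the Module's lifetime (in-tree it points into the long-lived Compilation, so this holds trivially). The shape in isolation, with the Module fields trimmed to the one in question:

const std = @import("std");
const builtin = @import("builtin");

const Module = struct {
    // Borrowed: the pointee must outlive the Module.
    target: *const std.Target,

    pub fn init(target: *const std.Target) Module {
        return .{ .target = target };
    }
};

pub fn main() void {
    const m = Module.init(&builtin.target);
    std.debug.print("spirv target? {}\n", .{m.target.cpu.arch.isSpirV()});
}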
src/codegen/c.zig
@@ -1080,7 +1080,7 @@ pub const DeclGen = struct {
},
.enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location),
.float => {
- const bits = ty.floatBits(target.*);
+ const bits = ty.floatBits(target);
const f128_val = val.toFloat(f128, zcu);
// All unsigned ints matching float types are pre-allocated.
@@ -1608,7 +1608,7 @@ pub const DeclGen = struct {
.f80_type,
.f128_type,
=> {
- const bits = ty.floatBits(target.*);
+ const bits = ty.floatBits(target);
// All unsigned ints matching float types are pre-allocated.
const repr_ty = dg.pt.intType(.unsigned, bits) catch unreachable;
@@ -6543,7 +6543,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
const scalar_ty = operand_ty.scalarType(zcu);
const target = &f.object.dg.mod.resolved_target.result;
const operation = if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isRuntimeFloat())
- if (inst_scalar_ty.floatBits(target.*) < scalar_ty.floatBits(target.*)) "trunc" else "extend"
+ if (inst_scalar_ty.floatBits(target) < scalar_ty.floatBits(target)) "trunc" else "extend"
else if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat())
if (inst_scalar_ty.isSignedInt(zcu)) "fix" else "fixuns"
else if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isInt(zcu))
@@ -6565,8 +6565,8 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
try writer.writeAll("zig_");
try writer.writeAll(operation);
- try writer.writeAll(compilerRtAbbrev(scalar_ty, zcu, target.*));
- try writer.writeAll(compilerRtAbbrev(inst_scalar_ty, zcu, target.*));
+ try writer.writeAll(compilerRtAbbrev(scalar_ty, zcu, target));
+ try writer.writeAll(compilerRtAbbrev(inst_scalar_ty, zcu, target));
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
@@ -8073,7 +8073,7 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 {
};
}
-fn compilerRtAbbrev(ty: Type, zcu: *Zcu, target: std.Target) []const u8 {
+fn compilerRtAbbrev(ty: Type, zcu: *Zcu, target: *const std.Target) []const u8 {
return if (ty.isInt(zcu)) switch (ty.intInfo(zcu).bits) {
1...32 => "si",
33...64 => "di",
src/codegen/llvm.zig
@@ -43,7 +43,7 @@ pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
});
}
-fn subArchName(target: std.Target, comptime family: std.Target.Cpu.Arch.Family, mappings: anytype) ?[]const u8 {
+fn subArchName(target: *const std.Target, comptime family: std.Target.Cpu.Arch.Family, mappings: anytype) ?[]const u8 {
inline for (mappings) |mapping| {
if (target.cpu.has(family, mapping[0])) return mapping[1];
}
@@ -51,7 +51,7 @@ fn subArchName(target: std.Target, comptime family: std.Target.Cpu.Arch.Family,
return null;
}
-pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
+pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8 {
var llvm_triple = std.ArrayList(u8).init(allocator);
defer llvm_triple.deinit();
@@ -309,7 +309,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
return llvm_triple.toOwnedSlice();
}
-pub fn supportsTailCall(target: std.Target) bool {
+pub fn supportsTailCall(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.wasm32, .wasm64 => target.cpu.has(.wasm, .tail_call),
// Although these ISAs support tail calls, LLVM does not support tail calls on them.
@@ -319,7 +319,7 @@ pub fn supportsTailCall(target: std.Target) bool {
};
}
-pub fn dataLayout(target: std.Target) []const u8 {
+pub fn dataLayout(target: *const std.Target) []const u8 {
// These data layouts should match Clang.
return switch (target.cpu.arch) {
.arc => "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-f32:32:32-i64:32-f64:32-a:0:32-n32",
@@ -475,7 +475,7 @@ const CodeModel = enum {
large,
};
-fn codeModel(model: std.builtin.CodeModel, target: std.Target) CodeModel {
+fn codeModel(model: std.builtin.CodeModel, target: *const std.Target) CodeModel {
// Roughly match Clang's mapping of GCC code models to LLVM code models.
return switch (model) {
.default => .default,
@@ -508,7 +508,7 @@ pub const Object = struct {
debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata),
- target: std.Target,
+ target: *const std.Target,
/// Ideally we would use `llvm_module.getNamedFunction` to go from *Decl to LLVM function,
/// but that has some downsides:
/// * we have to compute the fully qualified name every time we want to do the lookup
@@ -562,7 +562,7 @@ pub const Object = struct {
pub fn create(arena: Allocator, comp: *Compilation) !Ptr {
dev.check(.llvm_backend);
const gpa = comp.gpa;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const llvm_target_triple = try targetTriple(arena, target);
var builder = try Builder.init(.{
@@ -827,7 +827,7 @@ pub const Object = struct {
const behavior_max = try o.builder.metadataConstant(try o.builder.intConst(.i32, 7));
const behavior_min = try o.builder.metadataConstant(try o.builder.intConst(.i32, 8));
- if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |abi| {
+ if (target_util.llvmMachineAbi(&comp.root_mod.resolved_target.result)) |abi| {
module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
behavior_error,
try o.builder.metadataString("target-abi"),
@@ -837,7 +837,7 @@ pub const Object = struct {
));
}
- const pic_level = target_util.picLevel(comp.root_mod.resolved_target.result);
+ const pic_level = target_util.picLevel(&comp.root_mod.resolved_target.result);
if (comp.root_mod.pic) {
module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
behavior_min,
@@ -860,7 +860,7 @@ pub const Object = struct {
try o.builder.metadataString("Code Model"),
try o.builder.metadataConstant(try o.builder.intConst(.i32, @as(
i32,
- switch (codeModel(comp.root_mod.code_model, comp.root_mod.resolved_target.result)) {
+ switch (codeModel(comp.root_mod.code_model, &comp.root_mod.resolved_target.result)) {
.default => unreachable,
.tiny => 0,
.small => 1,
@@ -906,7 +906,7 @@ pub const Object = struct {
}
}
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
if (target.os.tag == .windows and (target.cpu.arch == .x86_64 or target.cpu.arch == .x86)) {
// Add the "RegCallv4" flag so that any functions using `x86_regcallcc` use regcall
// v4, which is essentially a requirement on Windows. See corresponding logic in
@@ -1020,7 +1020,7 @@ pub const Object = struct {
else
.Static;
- const code_model: llvm.CodeModel = switch (codeModel(comp.root_mod.code_model, comp.root_mod.resolved_target.result)) {
+ const code_model: llvm.CodeModel = switch (codeModel(comp.root_mod.code_model, &comp.root_mod.resolved_target.result)) {
.default => .Default,
.tiny => .Tiny,
.small => .Small,
@@ -1045,7 +1045,7 @@ pub const Object = struct {
comp.function_sections,
comp.data_sections,
float_abi,
- if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |s| s.ptr else null,
+ if (target_util.llvmMachineAbi(&comp.root_mod.resolved_target.result)) |s| s.ptr else null,
);
errdefer target_machine.dispose();
@@ -1137,7 +1137,7 @@ pub const Object = struct {
const owner_mod = zcu.fileByIndex(file_scope).mod.?;
const fn_ty = Type.fromInterned(func.ty);
const fn_info = zcu.typeToFunc(fn_ty).?;
- const target = owner_mod.resolved_target.result;
+ const target = &owner_mod.resolved_target.result;
var ng: NavGen = .{
.object = o,
@@ -2699,7 +2699,7 @@ pub const Object = struct {
if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function;
const fn_info = zcu.typeToFunc(ty).?;
- const target = owner_mod.resolved_target.result;
+ const target = &owner_mod.resolved_target.result;
const sret = firstParamSRet(fn_info, zcu, target);
const is_extern, const lib_name = if (nav.getExtern(ip)) |@"extern"|
@@ -2913,7 +2913,7 @@ pub const Object = struct {
try attributes.addFnAttr(.minsize, &o.builder);
try attributes.addFnAttr(.optsize, &o.builder);
}
- const target = owner_mod.resolved_target.result;
+ const target = &owner_mod.resolved_target.result;
if (target.cpu.model.llvm_name) |s| {
try attributes.addFnAttr(.{ .string = .{
.kind = try o.builder.string("target-cpu"),
@@ -4445,7 +4445,7 @@ pub const Object = struct {
if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function;
const zcu = o.pt.zcu;
- const target = zcu.root_mod.resolved_target.result;
+ const target = &zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.errorIntType()}, .normal),
name,
@@ -4474,7 +4474,7 @@ pub const Object = struct {
const usize_ty = try o.lowerType(Type.usize);
const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0);
- const target = zcu.root_mod.resolved_target.result;
+ const target = &zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
try o.builder.strtabStringFmt("__zig_tag_name_{}", .{enum_type.name.fmt(ip)}),
@@ -10372,7 +10372,7 @@ pub const FuncGen = struct {
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(o.named_enum_map.remove(enum_ty.toIntern()));
- const target = zcu.root_mod.resolved_target.result;
+ const target = &zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{enum_type.name.fmt(ip)}),
@@ -11834,7 +11834,7 @@ const CallingConventionInfo = struct {
inreg_param_count: u2 = 0,
};
-pub fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) ?CallingConventionInfo {
+pub fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: *const std.Target) ?CallingConventionInfo {
const llvm_cc = toLlvmCallConvTag(cc, target) orelse return null;
const incoming_stack_alignment: ?u64, const register_params: u2 = switch (cc) {
inline else => |pl| switch (@TypeOf(pl)) {
@@ -11858,7 +11858,7 @@ pub fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) ?Ca
.inreg_param_count = register_params,
};
}
-fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: std.Target) ?Builder.CallConv {
+fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: *const std.Target) ?Builder.CallConv {
if (target.cCallingConvention()) |default_c| {
if (cc_tag == default_c) {
return .ccc;
@@ -11972,7 +11972,7 @@ fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: std.Targ
}
/// Convert a zig-address space to an llvm address space.
-fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace {
+fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: *const std.Target) Builder.AddrSpace {
for (llvmAddrSpaceInfo(target)) |info| if (info.zig == address_space) return info.llvm;
unreachable;
}
@@ -11987,7 +11987,7 @@ const AddrSpaceInfo = struct {
idx: ?u16 = null,
force_in_data_layout: bool = false,
};
-fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo {
+fn llvmAddrSpaceInfo(target: *const std.Target) []const AddrSpaceInfo {
return switch (target.cpu.arch) {
.x86, .x86_64 => &.{
.{ .zig = .generic, .llvm = .default },
@@ -12063,7 +12063,7 @@ fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo {
/// different address space, and then cast back to the generic address space.
/// For example, on GPUs local variable declarations must be generated into the local address space.
/// This function returns the address space local values should be generated into.
-fn llvmAllocaAddressSpace(target: std.Target) Builder.AddrSpace {
+fn llvmAllocaAddressSpace(target: *const std.Target) Builder.AddrSpace {
return switch (target.cpu.arch) {
// On amdgcn, locals should be generated into the private address space.
// To make Zig not impossible to use, these are then converted to addresses in the
@@ -12075,7 +12075,7 @@ fn llvmAllocaAddressSpace(target: std.Target) Builder.AddrSpace {
/// On some targets, global values that are in the generic address space must be generated into a
/// different address space, and then cast back to the generic address space.
-fn llvmDefaultGlobalAddressSpace(target: std.Target) Builder.AddrSpace {
+fn llvmDefaultGlobalAddressSpace(target: *const std.Target) Builder.AddrSpace {
return switch (target.cpu.arch) {
// On amdgcn, globals must be explicitly allocated and uploaded so that the program can access
// them.
@@ -12086,14 +12086,14 @@ fn llvmDefaultGlobalAddressSpace(target: std.Target) Builder.AddrSpace {
/// Return the actual address space that a value should be stored in if it's a global address space.
/// When a value is placed in the resulting address space, it needs to be cast back into wanted_address_space.
-fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace {
+fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: *const std.Target) Builder.AddrSpace {
return switch (wanted_address_space) {
.generic => llvmDefaultGlobalAddressSpace(target),
else => |as| toLlvmAddressSpace(as, target),
};
}
-fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
+fn returnTypeByRef(zcu: *Zcu, target: *const std.Target, ty: Type) bool {
if (isByRef(ty, zcu)) {
return true;
} else if (target.cpu.arch.isX86() and
@@ -12108,7 +12108,7 @@ fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
}
}
-fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool {
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: *const std.Target) bool {
const return_type = Type.fromInterned(fn_info.return_type);
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
@@ -12137,8 +12137,8 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Targe
};
}
-fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
- const class = x86_64_abi.classifySystemV(ty, zcu, &target, .ret);
+fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: *const std.Target) bool {
+ const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
@@ -12238,8 +12238,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
- const target = zcu.getTarget();
- const classes = x86_64_abi.classifySystemV(return_type, zcu, &target, .ret);
+ const classes = x86_64_abi.classifySystemV(return_type, zcu, zcu.getTarget(), .ret);
if (classes[0] == .memory) return .void;
var types_index: u32 = 0;
var types_buffer: [8]Builder.Type = undefined;
@@ -12527,8 +12526,7 @@ const ParamTypeIterator = struct {
fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const zcu = it.object.pt.zcu;
const ip = &zcu.intern_pool;
- const target = zcu.getTarget();
- const classes = x86_64_abi.classifySystemV(ty, zcu, &target, .arg);
+ const classes = x86_64_abi.classifySystemV(ty, zcu, zcu.getTarget(), .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
@@ -12794,7 +12792,7 @@ fn isScalar(zcu: *Zcu, ty: Type) bool {
/// This function returns true if we expect LLVM to lower x86_fp80 correctly
/// and false if we expect LLVM to crash if it encounters an x86_fp80 type,
/// or if it produces miscompilations.
-fn backendSupportsF80(target: std.Target) bool {
+fn backendSupportsF80(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.x86, .x86_64 => !target.cpu.has(.x86, .soft_float),
else => false,
@@ -12804,7 +12802,7 @@ fn backendSupportsF80(target: std.Target) bool {
/// This function returns true if we expect LLVM to lower f16 correctly
/// and false if we expect LLVM to crash if it encounters an f16 type,
/// or if it produces miscompilations.
-fn backendSupportsF16(target: std.Target) bool {
+fn backendSupportsF16(target: *const std.Target) bool {
return switch (target.cpu.arch) {
// https://github.com/llvm/llvm-project/issues/97981
.csky,
@@ -12840,7 +12838,7 @@ fn backendSupportsF16(target: std.Target) bool {
/// This function returns true if we expect LLVM to lower f128 correctly,
/// and false if we expect LLVM to crash if it encounters an f128 type,
/// or if it produces miscompilations.
-fn backendSupportsF128(target: std.Target) bool {
+fn backendSupportsF128(target: *const std.Target) bool {
return switch (target.cpu.arch) {
// https://github.com/llvm/llvm-project/issues/121122
.amdgcn,
@@ -12870,7 +12868,7 @@ fn backendSupportsF128(target: std.Target) bool {
/// LLVM does not support all relevant intrinsics for all targets, so we
/// may need to manually generate a compiler-rt call.
-fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
+fn intrinsicsAllowed(scalar_ty: Type, target: *const std.Target) bool {
return switch (scalar_ty.toIntern()) {
.f16_type => backendSupportsF16(target),
.f80_type => (target.cTypeBitSize(.longdouble) == 80) and backendSupportsF80(target),
@@ -12907,7 +12905,7 @@ fn buildAllocaInner(
wip: *Builder.WipFunction,
llvm_ty: Builder.Type,
alignment: Builder.Alignment,
- target: std.Target,
+ target: *const std.Target,
) Allocator.Error!Builder.Value {
const address_space = llvmAllocaAddressSpace(target);
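These llvm.zig helpers are small, frequently called query functions, which is where by-value std.Target parameters hurt: the struct embeds the CPU feature bit set and OS version ranges, so each call copied a sizeable amount of data. A quick illustrative check (the exact number varies by Zig version and host):

const std = @import("std");

pub fn main() void {
    // std.Target carries Cpu.Feature.Set plus Os.VersionRange; a by-value
    // parameter copies all of it on every call.
    std.debug.print("@sizeOf(std.Target) = {} bytes\n", .{@sizeOf(std.Target)});
}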
src/codegen/spirv.zig
@@ -185,7 +185,7 @@ pub const Object = struct {
/// related to that.
error_buffer: ?SpvModule.Decl.Index = null,
- pub fn init(gpa: Allocator, target: std.Target) Object {
+ pub fn init(gpa: Allocator, target: *const std.Target) Object {
return .{
.gpa = gpa,
.spv = SpvModule.init(gpa, target),
src/Compilation/Config.zig
@@ -150,7 +150,7 @@ pub const ResolveError = error{
};
pub fn resolve(options: Options) ResolveError!Config {
- const target = options.resolved_target.result;
+ const target = &options.resolved_target.result;
// WASI-only. Resolve the optional exec-model option, defaults to command.
if (target.os.tag != .wasi and options.wasi_exec_model != null)
src/libs/freebsd.zig
@@ -66,7 +66,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
// In all cases in this function, we add the C compiler flags to
// cache_exempt_flags rather than extra_flags, because these arguments
@@ -407,7 +407,7 @@ pub const BuiltSharedObjects = struct {
const all_map_basename = "all.map";
-fn wordDirective(target: std.Target) []const u8 {
+fn wordDirective(target: *const std.Target) []const u8 {
// Based on its description in the GNU `as` manual, you might assume that `.word` is sized
// according to the target word size. But no; that would just make too much sense.
return if (target.ptrBitWidth() == 64) ".quad" else ".long";
src/libs/glibc.zig
@@ -172,7 +172,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const target_ver = target.os.versionRange().gnuLibCVersion().?;
const nonshared_stat = target_ver.order(.{ .major = 2, .minor = 32, .patch = 0 }) != .gt;
const start_old_init_fini = target_ver.order(.{ .major = 2, .minor = 33, .patch = 0 }) != .gt;
@@ -485,7 +485,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
fn add_include_dirs_arch(
arena: Allocator,
args: *std.ArrayList([]const u8),
- target: std.Target,
+ target: *const std.Target,
opt_nptl: ?[]const u8,
dir: []const u8,
) error{OutOfMemory}!void {
@@ -649,7 +649,7 @@ pub const BuiltSharedObjects = struct {
const all_map_basename = "all.map";
-fn wordDirective(target: std.Target) []const u8 {
+fn wordDirective(target: *const std.Target) []const u8 {
// Based on its description in the GNU `as` manual, you might assume that `.word` is sized
// according to the target word size. But no; that would just make too much sense.
return if (target.ptrBitWidth() == 64) ".quad" else ".long";
src/libs/libcxx.zig
@@ -121,7 +121,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
const root_name = "c++";
const output_mode = .Lib;
const link_mode = .static;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });
@@ -314,7 +314,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
const root_name = "c++abi";
const output_mode = .Lib;
const link_mode = .static;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });
src/libs/libtsan.zig
@@ -324,7 +324,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
comp.tsan_lib = crt_file;
}
-fn addCcArgs(target: std.Target, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
+fn addCcArgs(target: *const std.Target, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-nostdinc++",
"-fvisibility=hidden",
src/libs/libunwind.zig
@@ -27,7 +27,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
const arena = arena_allocator.allocator();
const output_mode = .Lib;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const unwind_tables: std.builtin.UnwindTables =
if (target.cpu.arch == .x86 and target.os.tag == .windows) .none else .@"async";
const config = Compilation.Config.resolve(.{
src/libs/mingw.zig
@@ -299,7 +299,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var aro_comp = aro.Compilation.init(gpa, std.fs.cwd());
defer aro_comp.deinit();
- aro_comp.target = target;
+ aro_comp.target = target.*;
const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" });
@@ -373,7 +373,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
pub fn libExists(
allocator: Allocator,
- target: std.Target,
+ target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
) !bool {
@@ -389,7 +389,7 @@ pub fn libExists(
/// see if a .def file exists.
fn findDef(
allocator: Allocator,
- target: std.Target,
+ target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
) ![]u8 {
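The aro_comp.target assignment in this file is the notable exception to the pattern: aro stores its target by value, so the pointer is explicitly dereferenced with .* to produce the one required copy. A reduced sketch with a stand-in struct (AroCompilationLike is hypothetical, mirroring aro's by-value field):

const std = @import("std");
const builtin = @import("builtin");

const AroCompilationLike = struct {
    // Third-party code keeping a Target by value; assigning from a
    // *const std.Target therefore needs an explicit dereference.
    target: std.Target,
};

pub fn main() void {
    const target: *const std.Target = &builtin.target;
    const aro_comp: AroCompilationLike = .{ .target = target.* }; // .* copies
    std.debug.print("{s}\n", .{@tagName(aro_comp.target.os.tag)});
}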
src/libs/musl.zig
@@ -193,7 +193,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
.link_libc = false,
});
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const arch_name = std.zig.target.muslArchName(target.cpu.arch, target.abi);
const time32 = for (time32_compat_arch_list) |time32_compat_arch| {
if (mem.eql(u8, arch_name, time32_compat_arch)) break true;
src/libs/netbsd.zig
@@ -58,7 +58,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const target_version = target.os.version_range.semver.min;
// In all cases in this function, we add the C compiler flags to
@@ -353,7 +353,7 @@ pub const BuiltSharedObjects = struct {
}
};
-fn wordDirective(target: std.Target) []const u8 {
+fn wordDirective(target: *const std.Target) []const u8 {
// Based on its description in the GNU `as` manual, you might assume that `.word` is sized
// according to the target word size. But no; that would just make too much sense.
return if (target.ptrBitWidth() == 64) ".quad" else ".long";
src/link/Elf/Object.zig
@@ -69,7 +69,7 @@ pub fn parse(
/// For error reporting purposes only.
path: Path,
handle: fs.File,
- target: std.Target,
+ target: *const std.Target,
debug_fmt_strip: bool,
default_sym_version: elf.Versym,
) !void {
@@ -98,7 +98,7 @@ pub fn parseCommon(
diags: *Diags,
path: Path,
handle: fs.File,
- target: std.Target,
+ target: *const std.Target,
) !void {
const offset = if (self.archive) |ar| ar.offset else 0;
const file_size = (try handle.stat()).size;
@@ -182,7 +182,7 @@ pub fn parseCommon(
pub fn validateEFlags(
diags: *Diags,
path: Path,
- target: std.Target,
+ target: *const std.Target,
e_flags: elf.Word,
) !void {
switch (target.cpu.arch) {
@@ -263,7 +263,7 @@ fn initAtoms(
path: Path,
handle: fs.File,
debug_fmt_strip: bool,
- target: std.Target,
+ target: *const std.Target,
) !void {
const shdrs = self.shdrs.items;
try self.atoms.ensureTotalCapacityPrecise(gpa, shdrs.len);
@@ -420,7 +420,7 @@ fn parseEhFrame(
gpa: Allocator,
handle: fs.File,
shndx: u32,
- target: std.Target,
+ target: *const std.Target,
) !void {
const relocs_shndx = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_RELA => if (shdr.sh_info == shndx) break @as(u32, @intCast(i)),
src/link/Elf/ZigObject.zig
@@ -1271,7 +1271,7 @@ fn updateNavCode(
log.debug("updateNavCode {}({d})", .{ nav.fqn.fmt(ip), nav_index });
- const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
+ const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
src/link/MachO/ZigObject.zig
@@ -948,7 +948,7 @@ fn updateNavCode(
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
+ const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
src/link/C.zig
@@ -116,7 +116,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*C {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .c);
const optimize_mode = comp.root_mod.optimize_mode;
const use_lld = build_options.have_llvm and comp.config.use_lld;
@@ -331,7 +331,7 @@ pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedIn
_ = ti_id;
}
-fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
+fn abiDefines(self: *C, target: *const std.Target) !std.ArrayList(u8) {
const gpa = self.base.comp.gpa;
var defines = std.ArrayList(u8).init(gpa);
errdefer defines.deinit();
src/link/Coff.zig
@@ -208,7 +208,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Coff {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .coff);
const optimize_mode = comp.root_mod.optimize_mode;
const output_mode = comp.config.output_mode;
@@ -1328,7 +1328,7 @@ fn updateNavCode(
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
+ const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
@@ -2153,7 +2153,7 @@ fn writeDataDirectoriesHeaders(coff: *Coff) !void {
}
fn writeHeader(coff: *Coff) !void {
- const target = coff.base.comp.root_mod.resolved_target.result;
+ const target = &coff.base.comp.root_mod.resolved_target.result;
const gpa = coff.base.comp.gpa;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
@@ -2800,7 +2800,7 @@ pub const Relocation = struct {
.ptr_width = coff.ptr_width,
};
- const target = coff.base.comp.root_mod.resolved_target.result;
+ const target = &coff.base.comp.root_mod.resolved_target.result;
switch (target.cpu.arch) {
.aarch64 => reloc.resolveAarch64(ctx),
.x86, .x86_64 => reloc.resolveX86(ctx),
src/link/Dwarf.zig
@@ -92,7 +92,7 @@ const DebugFrame = struct {
};
fn headerBytes(dwarf: *Dwarf) u32 {
- const target = dwarf.bin_file.comp.root_mod.resolved_target.result;
+ const target = &dwarf.bin_file.comp.root_mod.resolved_target.result;
return @intCast(switch (dwarf.debug_frame.header.format) {
.none => return 0,
.debug_frame => dwarf.unitLengthBytes() + dwarf.sectionOffsetBytes() + 1 + "\x00".len + 1 + 1,
@@ -2140,7 +2140,7 @@ fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
pub fn init(lf: *link.File, format: DW.Format) Dwarf {
const comp = lf.comp;
const gpa = comp.gpa;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
return .{
.gpa = gpa,
.bin_file = lf,
@@ -2573,7 +2573,7 @@ fn initWipNavInner(
try wip_nav.infoAddrSym(sym_index, 0);
wip_nav.func_high_pc = @intCast(wip_nav.debug_info.items.len);
try diw.writeInt(u32, 0, dwarf.endian);
- const target = mod.resolved_target.result;
+ const target = &mod.resolved_target.result;
try uleb128(diw, switch (nav.status.fully_resolved.alignment) {
.none => target_info.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_info.minFunctionAlignment(target)),
@@ -4529,7 +4529,7 @@ pub fn flush(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
dwarf.debug_aranges.section.dirty = false;
}
if (dwarf.debug_frame.section.dirty) {
- const target = dwarf.bin_file.comp.root_mod.resolved_target.result;
+ const target = &dwarf.bin_file.comp.root_mod.resolved_target.result;
switch (dwarf.debug_frame.header.format) {
.none => {},
.debug_frame => unreachable,
src/link/Elf.zig
@@ -196,7 +196,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Elf {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .elf);
const use_llvm = comp.config.use_llvm;
@@ -1073,7 +1073,7 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
const gpa = self.base.comp.gpa;
const diags = &self.base.comp.link_diags;
- const target = self.base.comp.root_mod.resolved_target.result;
+ const target = &self.base.comp.root_mod.resolved_target.result;
const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
const default_sym_version = self.default_sym_version;
const file_handles = &self.file_handles;
@@ -1104,7 +1104,7 @@ fn parseArchive(
diags: *Diags,
file_handles: *std.ArrayListUnmanaged(File.Handle),
files: *std.MultiArrayList(File.Entry),
- target: std.Target,
+ target: *const std.Target,
debug_fmt_strip: bool,
default_sym_version: elf.Versym,
objects: *std.ArrayListUnmanaged(File.Index),
@@ -1139,7 +1139,7 @@ fn parseDso(
dso: link.Input.Dso,
shared_objects: *std.StringArrayHashMapUnmanaged(File.Index),
files: *std.MultiArrayList(File.Entry),
- target: std.Target,
+ target: *const std.Target,
) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -4121,8 +4121,8 @@ pub fn lsearch(comptime T: type, haystack: []const T, predicate: anytype) usize
return i;
}
-pub fn getTarget(self: Elf) std.Target {
- return self.base.comp.root_mod.resolved_target.result;
+pub fn getTarget(self: *const Elf) *const std.Target {
+ return &self.base.comp.root_mod.resolved_target.result;
}
fn requiresThunks(self: Elf) bool {
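Changing getTarget to return *const std.Target stays source-compatible for most callers, since Zig auto-dereferences field and method access through pointers; only sites that took the result's address, or genuinely needed a value copy, have to change. A self-contained sketch of the new shape (Linker is a hypothetical stand-in):

const std = @import("std");
const builtin = @import("builtin");

const Linker = struct {
    target: std.Target,

    // Post-commit shape: hand out a pointer into long-lived state rather
    // than returning a fresh copy of the struct.
    pub fn getTarget(self: *const Linker) *const std.Target {
        return &self.target;
    }
};

pub fn main() void {
    const elf: Linker = .{ .target = builtin.target };
    const target = elf.getTarget();
    // Auto-dereference keeps downstream call sites unchanged:
    std.debug.print("{s}, {}-bit\n", .{ @tagName(target.cpu.arch), target.ptrBitWidth() });
}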
src/link/Goff.zig
@@ -26,7 +26,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Goff {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const use_llvm = comp.config.use_llvm;
@@ -59,7 +59,7 @@ pub fn open(
emit: Path,
options: link.File.OpenOptions,
) !*Goff {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .goff);
return createEmpty(arena, comp, emit, options);
}
src/link/Lld.zig
@@ -30,7 +30,7 @@ const Coff = struct {
dllmain_crt_startup: bool,
},
fn init(comp: *Compilation, options: link.File.OpenOptions) !Coff {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
return .{
.image_base = options.image_base orelse switch (output_mode) {
@@ -103,7 +103,7 @@ pub const Elf = struct {
fn init(comp: *Compilation, options: link.File.OpenOptions) !Elf {
const PtrWidth = enum { p32, p64 };
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
const is_dyn_lib = output_mode == .Lib and comp.config.link_mode == .dynamic;
const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
@@ -202,7 +202,7 @@ pub fn createEmpty(
emit: Cache.Path,
options: link.File.OpenOptions,
) !*Lld {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
const optimize_mode = comp.root_mod.optimize_mode;
const is_native_os = comp.root_mod.resolved_target.is_native_os;
@@ -342,7 +342,7 @@ fn linkAsArchive(lld: *Lld, arena: Allocator) !void {
const llvm_bindings = @import("../codegen/llvm/bindings.zig");
const llvm = @import("../codegen/llvm.zig");
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
llvm.initializeLLVMTarget(target.cpu.arch);
const bad = llvm_bindings.WriteArchive(
full_out_path_z,
@@ -374,7 +374,7 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
const is_dyn_lib = comp.config.link_mode == .dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or comp.config.output_mode == .Exe;
const link_in_crt = comp.config.link_libc and is_exe_or_dyn_lib;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const optimize_mode = comp.root_mod.optimize_mode;
const entry_name: ?[]const u8 = switch (coff.entry) {
// This logic isn't quite right for disabled or enabled. No point in fixing it
@@ -811,7 +811,7 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
const is_dyn_lib = link_mode == .dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
const have_dynamic_linker = link_mode == .dynamic and is_exe_or_dyn_lib;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const compiler_rt_path: ?Cache.Path = blk: {
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
@@ -1281,7 +1281,7 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
try spawnLld(comp, arena, argv.items);
}
}
-fn getLDMOption(target: std.Target) ?[]const u8 {
+fn getLDMOption(target: *const std.Target) ?[]const u8 {
// This should only return emulations understood by LLD's parseEmulation().
return switch (target.cpu.arch) {
.aarch64 => switch (target.os.tag) {
@@ -1364,7 +1364,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
const shared_memory = comp.config.shared_memory;
const export_memory = comp.config.export_memory;
const import_memory = comp.config.import_memory;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const base = &lld.base;
const wasm = &lld.ofmt.wasm;
src/link/MachO.zig
@@ -163,7 +163,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*MachO {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .macho);
const gpa = comp.gpa;
@@ -3545,8 +3545,8 @@ pub fn markDirty(self: *MachO, sect_index: u8) void {
}
}
-pub fn getTarget(self: MachO) std.Target {
- return self.base.comp.root_mod.resolved_target.result;
+pub fn getTarget(self: *const MachO) *const std.Target {
+ return &self.base.comp.root_mod.resolved_target.result;
}
/// XNU starting with Big Sur running on arm64 caches inodes of running binaries.
@@ -4233,7 +4233,7 @@ pub const Platform = struct {
}
}
- pub fn fromTarget(target: std.Target) Platform {
+ pub fn fromTarget(target: *const std.Target) Platform {
return .{
.os_tag = target.os.tag,
.abi = target.abi,
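Platform.fromTarget shows why the pointer parameter is harmless at use sites like this: the function copies out two small enum fields and never retains the pointer. The same shape in a standalone sketch (fields trimmed relative to the real Platform):

const std = @import("std");
const builtin = @import("builtin");

const Platform = struct {
    os_tag: std.Target.Os.Tag,
    abi: std.Target.Abi,

    // Reads two enums through the borrowed pointer; nothing outlives the call.
    pub fn fromTarget(target: *const std.Target) Platform {
        return .{ .os_tag = target.os.tag, .abi = target.abi };
    }
};

pub fn main() void {
    const p = Platform.fromTarget(&builtin.target);
    std.debug.print("{s}-{s}\n", .{ @tagName(p.os_tag), @tagName(p.abi) });
}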
src/link/Plan9.zig
@@ -184,7 +184,7 @@ pub const Atom = struct {
// asserts that self.got_index != null
pub fn getOffsetTableAddress(self: Atom, plan9: *Plan9) u64 {
- const target = plan9.base.comp.root_mod.resolved_target.result;
+ const target = &plan9.base.comp.root_mod.resolved_target.result;
const ptr_bytes = @divExact(target.ptrBitWidth(), 8);
const got_addr = plan9.bases.data;
const got_index = self.got_index.?;
@@ -278,7 +278,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Plan9 {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const gpa = comp.gpa;
const optimize_mode = comp.root_mod.optimize_mode;
const output_mode = comp.config.output_mode;
@@ -394,7 +394,7 @@ pub fn updateFunc(
const zcu = pt.zcu;
const gpa = zcu.gpa;
- const target = self.base.comp.root_mod.resolved_target.result;
+ const target = &self.base.comp.root_mod.resolved_target.result;
const func = zcu.funcInfo(func_index);
const atom_idx = try self.seeNav(pt, func.owner_nav);
@@ -583,7 +583,7 @@ pub fn flush(
const comp = self.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
switch (comp.config.output_mode) {
.Exe => {},
@@ -1153,7 +1153,7 @@ pub fn open(
emit: Path,
options: link.File.OpenOptions,
) !*Plan9 {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const use_llvm = comp.config.use_llvm;
src/link/SpirV.zig
@@ -58,7 +58,7 @@ pub fn createEmpty(
options: link.File.OpenOptions,
) !*SpirV {
const gpa = comp.gpa;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(!comp.config.use_lld); // Caught by Compilation.Config.resolve
assert(!comp.config.use_llvm); // Caught by Compilation.Config.resolve
src/link/Wasm.zig
@@ -2943,7 +2943,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Wasm {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .wasm);
const use_llvm = comp.config.use_llvm;
src/link/Xcoff.zig
@@ -26,7 +26,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Xcoff {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const use_llvm = comp.config.use_llvm;
@@ -59,7 +59,7 @@ pub fn open(
emit: Path,
options: link.File.OpenOptions,
) !*Xcoff {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .xcoff);
return createEmpty(arena, comp, emit, options);
}
src/Package/Module.zig
@@ -102,7 +102,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
if (options.inherited.error_tracing == true) assert(options.global.any_error_tracing);
const resolved_target = options.inherited.resolved_target orelse options.parent.?.resolved_target;
- const target = resolved_target.result;
+ const target = &resolved_target.result;
const optimize_mode = options.inherited.optimize_mode orelse
if (options.parent) |p| p.optimize_mode else options.global.root_optimize_mode;
@@ -363,7 +363,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.root_src_path = options.paths.root_src_path,
.fully_qualified_name = options.fully_qualified_name,
.resolved_target = .{
- .result = target,
+ .result = target.*,
.is_native_os = resolved_target.is_native_os,
.is_native_abi = resolved_target.is_native_abi,
.is_explicit_dynamic_linker = resolved_target.is_explicit_dynamic_linker,
@@ -474,7 +474,7 @@ pub fn getBuiltinOptions(m: Module, global: Compilation.Config) Builtin {
assert(global.have_zcu);
return .{
.target = m.resolved_target.result,
- .zig_backend = target_util.zigBackend(m.resolved_target.result, global.use_llvm),
+ .zig_backend = target_util.zigBackend(&m.resolved_target.result, global.use_llvm),
.output_mode = global.output_mode,
.link_mode = global.link_mode,
.unwind_tables = m.unwind_tables,
src/Zcu/PerThread.zig
@@ -4382,7 +4382,7 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, ou
error.CodegenFail => zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav),
error.NoLinkFile => assert(zcu.comp.bin_file == null),
error.BackendDoesNotProduceMir => switch (target_util.zigBackend(
- zcu.root_mod.resolved_target.result,
+ &zcu.root_mod.resolved_target.result,
zcu.comp.config.use_llvm,
)) {
else => unreachable, // assertion failure
src/codegen.zig
@@ -65,7 +65,7 @@ fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*const Air.Legalize.Features {
const zcu = pt.zcu;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
- switch (target_util.zigBackend(target.*, zcu.comp.config.use_llvm)) {
+ switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_llvm,
.stage2_c,
@@ -114,7 +114,7 @@ pub const AnyMir = union {
pub fn deinit(mir: *AnyMir, zcu: *const Zcu) void {
const gpa = zcu.gpa;
- const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
+ const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
switch (backend) {
else => unreachable,
inline .stage2_aarch64,
@@ -145,7 +145,7 @@ pub fn generateFunction(
) CodeGenError!AnyMir {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
- const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
+ const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_aarch64,
@@ -183,7 +183,7 @@ pub fn emitFunction(
) CodeGenError!void {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
- const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
+ const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_aarch64,
@@ -210,7 +210,7 @@ pub fn generateLazyFunction(
) CodeGenError!void {
const zcu = pt.zcu;
const target = if (Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu)) |inst_index|
- zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
+ &zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
else
zcu.getTarget();
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
@@ -225,7 +225,7 @@ pub fn generateLazyFunction(
}
}
-fn writeFloat(comptime F: type, f: F, target: std.Target, endian: std.builtin.Endian, code: []u8) void {
+fn writeFloat(comptime F: type, f: F, target: *const std.Target, endian: std.builtin.Endian, code: []u8) void {
_ = target;
const bits = @typeInfo(F).float.bits;
const Int = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
@@ -253,7 +253,7 @@ pub fn generateLazySymbol(
const gpa = comp.gpa;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const endian = target.cpu.arch.endian();
log.debug("generateLazySymbol: kind = {s}, ty = {}", .{
@@ -839,7 +839,7 @@ fn lowerNavRef(
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
- const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
+ const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const is_obj = lf.comp.config.output_mode == .Obj;
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
@@ -956,7 +956,7 @@ pub fn genNavRef(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
nav_index: InternPool.Nav.Index,
- target: std.Target,
+ target: *const std.Target,
) CodeGenError!GenResult {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@@ -1040,9 +1040,9 @@ pub fn genTypedValue(
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
- target: std.Target,
+ target: *const std.Target,
) CodeGenError!GenResult {
- return switch (try lowerValue(pt, val, &target)) {
+ return switch (try lowerValue(pt, val, target)) {
.none => .{ .mcv = .none },
.undef => .{ .mcv = .undef },
.immediate => |imm| .{ .mcv = .{ .immediate = imm } },
src/Compilation.zig
@@ -1361,7 +1361,7 @@ pub const cache_helpers = struct {
hh: *Cache.HashHelper,
resolved_target: Package.Module.ResolvedTarget,
) void {
- const target = resolved_target.result;
+ const target = &resolved_target.result;
hh.add(target.cpu.arch);
hh.addBytes(target.cpu.model.name);
hh.add(target.cpu.features.ints);
@@ -1705,7 +1705,7 @@ pub const CreateOptions = struct {
assert(opts.cache_mode != .none);
return try ea.cacheName(arena, .{
.root_name = opts.root_name,
- .target = opts.root_mod.resolved_target.result,
+ .target = &opts.root_mod.resolved_target.result,
.output_mode = opts.config.output_mode,
.link_mode = opts.config.link_mode,
.version = opts.version,
@@ -1772,14 +1772,14 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
}
const have_zcu = options.config.have_zcu;
+ const use_llvm = options.config.use_llvm;
+ const target = &options.root_mod.resolved_target.result;
const comp: *Compilation = comp: {
// We put the `Compilation` itself in the arena. Freeing the arena will free the module.
// It's initialized later after we prepare the initialization options.
const root_name = try arena.dupeZ(u8, options.root_name);
- const use_llvm = options.config.use_llvm;
-
// The "any" values provided by resolved config only account for
// explicitly-provided settings. We now make them additionally account
// for default setting resolution.
@@ -1804,7 +1804,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const libc_dirs = try std.zig.LibCDirs.detect(
arena,
options.dirs.zig_lib.path.?,
- options.root_mod.resolved_target.result,
+ target,
options.root_mod.resolved_target.is_native_abi,
link_libc,
options.libc_installation,
@@ -1846,7 +1846,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// approach, since the ubsan runtime uses quite a lot of the standard library
// and this reduces unnecessary bloat.
const ubsan_rt_strat: RtStrat = s: {
- const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(options.root_mod.resolved_target.result);
+ const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target);
const want_ubsan_rt = options.want_ubsan_rt orelse (can_build_ubsan_rt and any_sanitize_c == .full and is_exe_or_dyn_lib);
if (!want_ubsan_rt) break :s .none;
if (options.skip_linker_dependencies) break :s .none;
@@ -1872,7 +1872,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (options.verbose_llvm_cpu_features) {
if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: {
- const target = options.root_mod.resolved_target.result;
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = std.io.getStdErr().writer();
@@ -2244,8 +2243,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
errdefer comp.destroy();
- const target = comp.root_mod.resolved_target.result;
- const can_build_compiler_rt = target_util.canBuildLibCompilerRt(target, comp.config.use_llvm, build_options.have_llvm);
+ const can_build_compiler_rt = target_util.canBuildLibCompilerRt(target, use_llvm, build_options.have_llvm);
// Add a `CObject` for each `c_source_files`.
try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len);
@@ -2344,7 +2342,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.glibc_shared_objects = true;
- comp.link_task_queue.pending_prelink_tasks += glibc.sharedObjectsCount(&target);
+ comp.link_task_queue.pending_prelink_tasks += glibc.sharedObjectsCount(target);
comp.queued_jobs.glibc_crt_file[@intFromEnum(glibc.CrtFile.libc_nonshared_a)] = true;
comp.link_task_queue.pending_prelink_tasks += 1;
@@ -2571,8 +2569,8 @@ pub fn clearMiscFailures(comp: *Compilation) void {
comp.misc_failures = .{};
}
-pub fn getTarget(self: Compilation) Target {
- return self.root_mod.resolved_target.result;
+pub fn getTarget(self: *const Compilation) *const Target {
+ return &self.root_mod.resolved_target.result;
}
/// Only legal to call when cache mode is incremental and a link file is present.
@@ -3210,7 +3208,7 @@ fn addNonIncrementalStuffToCacheManifest(
man.hash.addOptional(opts.image_base);
man.hash.addOptional(opts.gc_sections);
man.hash.add(opts.emit_relocs);
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
if (target.ofmt == .macho or target.ofmt == .coff) {
// TODO remove this, libraries need to be resolved by the frontend. this is already
// done by ELF.
@@ -6270,7 +6268,7 @@ pub fn addCCArgs(
out_dep_path: ?[]const u8,
mod: *Package.Module,
) !void {
- const target = mod.resolved_target.result;
+ const target = &mod.resolved_target.result;
// As of Clang 16.x, it will by default read extra flags from /etc/clang.
// I'm sure the person who implemented this means well, but they have a lot
@@ -6944,7 +6942,7 @@ pub const FileExt = enum {
};
}
- pub fn canonicalName(ext: FileExt, target: Target) [:0]const u8 {
+ pub fn canonicalName(ext: FileExt, target: *const Target) [:0]const u8 {
return switch (ext) {
.c => ".c",
.cpp => ".cpp",
@@ -7187,7 +7185,7 @@ pub fn dump_argv(argv: []const []const u8) void {
}
pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
return target_util.zigBackend(target, comp.config.use_llvm);
}
@@ -7371,7 +7369,7 @@ pub fn build_crt_file(
const basename = try std.zig.binNameAlloc(gpa, .{
.root_name = root_name,
- .target = comp.root_mod.resolved_target.result,
+ .target = &comp.root_mod.resolved_target.result,
.output_mode = output_mode,
});
@@ -7523,13 +7521,13 @@ pub fn getCrtPaths(
comp: *Compilation,
arena: Allocator,
) error{ OutOfMemory, LibCInstallationMissingCrtDir }!LibCInstallation.CrtPaths {
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
return getCrtPathsInner(arena, target, comp.config, comp.libc_installation, &comp.crt_files);
}
fn getCrtPathsInner(
arena: Allocator,
- target: std.Target,
+ target: *const std.Target,
config: Config,
libc_installation: ?*const LibCInstallation,
crt_files: *std.StringHashMapUnmanaged(CrtFile),
@@ -7558,7 +7556,7 @@ pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void {
// then when we create a sub-Compilation for zig libc, it also tries to
// build kernel32.lib.
if (comp.skip_linker_dependencies) return;
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
if (target.os.tag != .windows or target.ofmt == .c) return;
// This happens when an `extern "foo"` function is referenced.
@@ -7574,7 +7572,7 @@ pub fn compilerRtOptMode(comp: Compilation) std.builtin.OptimizeMode {
if (comp.debug_compiler_runtime_libs) {
return comp.root_mod.optimize_mode;
}
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
switch (comp.root_mod.optimize_mode) {
.Debug, .ReleaseSafe => return target_util.defaultCompilerRtOptimizeMode(target),
.ReleaseFast => return .ReleaseFast,
src/link.zig
@@ -1321,7 +1321,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
const prog_node = comp.link_prog_node.start("Parse Host libc", 0);
defer prog_node.end();
- const target = comp.root_mod.resolved_target.result;
+ const target = &comp.root_mod.resolved_target.result;
const flags = target_util.libcFullLinkFlags(target);
const crt_dir = comp.libc_installation.?.crt_dir.?;
const sep = std.fs.path.sep_str;
@@ -1670,7 +1670,7 @@ pub fn hashInputs(man: *Cache.Manifest, link_inputs: []const Input) !void {
pub fn resolveInputs(
gpa: Allocator,
arena: Allocator,
- target: std.Target,
+ target: *const std.Target,
/// This function mutates this array but does not take ownership.
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayListUnmanaged(UnresolvedInput),
@@ -1914,7 +1914,7 @@ fn resolveLibInput(
ld_script_bytes: *std.ArrayListUnmanaged(u8),
lib_directory: Directory,
name_query: UnresolvedInput.NameQuery,
- target: std.Target,
+ target: *const std.Target,
link_mode: std.builtin.LinkMode,
color: std.zig.Color,
) Allocator.Error!ResolveLibInputResult {
@@ -2028,7 +2028,7 @@ fn resolvePathInput(
resolved_inputs: *std.ArrayListUnmanaged(Input),
/// Allocated via `gpa`.
ld_script_bytes: *std.ArrayListUnmanaged(u8),
- target: std.Target,
+ target: *const std.Target,
pq: UnresolvedInput.PathQuery,
color: std.zig.Color,
) Allocator.Error!?ResolveLibInputResult {
@@ -2070,7 +2070,7 @@ fn resolvePathInputLib(
resolved_inputs: *std.ArrayListUnmanaged(Input),
/// Allocated via `gpa`.
ld_script_bytes: *std.ArrayListUnmanaged(u8),
- target: std.Target,
+ target: *const std.Target,
pq: UnresolvedInput.PathQuery,
link_mode: std.builtin.LinkMode,
color: std.zig.Color,
src/main.zig
@@ -340,7 +340,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
dev.check(.targets_command);
const host = std.zig.resolveTargetQueryOrFatal(.{});
const stdout = io.getStdOut().writer();
- return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, host);
+ return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, &host);
} else if (mem.eql(u8, cmd, "version")) {
dev.check(.version_command);
try std.io.getStdOut().writeAll(build_options.version ++ "\n");
@@ -3086,7 +3086,7 @@ fn buildOutputType(
else => main_mod,
};
- const target = main_mod.resolved_target.result;
+ const target = &main_mod.resolved_target.result;
if (target.cpu.arch == .arc or target.cpu.arch.isNvptx()) {
if (emit_bin != .no and create_module.resolved_options.use_llvm) {
@@ -3655,7 +3655,7 @@ fn buildOutputType(
test_exec_args.items,
self_exe_path,
arg_mode,
- &target,
+ target,
&comp_destroyed,
all_args,
runtime_args_start,
@@ -3800,12 +3800,12 @@ fn createModule(
// This block is for initializing the fields of
// `Compilation.Config.Options` that require knowledge of the
// target (which was just now resolved for the root module above).
- const resolved_target = cli_mod.inherited.resolved_target.?;
- create_module.opts.resolved_target = resolved_target;
+ const resolved_target = &cli_mod.inherited.resolved_target.?;
+ create_module.opts.resolved_target = resolved_target.*;
create_module.opts.root_optimize_mode = cli_mod.inherited.optimize_mode;
create_module.opts.root_strip = cli_mod.inherited.strip;
create_module.opts.root_error_tracing = cli_mod.inherited.error_tracing;
- const target = resolved_target.result;
+ const target = &resolved_target.result;
// First, remove libc, libc++, and compiler_rt libraries from the system libraries list.
// We need to know whether the set of system libraries contains anything besides these
@@ -6482,7 +6482,7 @@ fn warnAboutForeignBinaries(
const host_query: std.Target.Query = .{};
const host_target = std.zig.resolveTargetQueryOrFatal(host_query);
- switch (std.zig.system.getExternalExecutor(host_target, target, .{ .link_libc = link_libc })) {
+ switch (std.zig.system.getExternalExecutor(&host_target, target, .{ .link_libc = link_libc })) {
.native => return,
.rosetta => {
const host_name = try host_target.zigTriple(arena);
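
One subtlety in the `createModule` hunk above: `&cli_mod.inherited.resolved_target.?` takes the address of an optional's payload instead of unwrapping by value. The unwrap is still safety-checked, but no copy of the payload is made until the explicit `resolved_target.*` dereference. A self-contained sketch of the idiom (toy types, not from the compiler):

    const std = @import("std");

    test "pointer into an optional's payload" {
        var maybe_len: ?usize = 16;
        const p = &maybe_len.?; // *usize into the payload; panics if null (with safety on)
        try std.testing.expect(p.* == 16);
        p.* = 32; // writes land in the optional itself
        try std.testing.expect(maybe_len.? == 32);
    }
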
src/print_targets.zig
@@ -16,7 +16,7 @@ pub fn cmdTargets(
args: []const []const u8,
/// Output stream
stdout: anytype,
- native_target: Target,
+ native_target: *const Target,
) !void {
_ = args;
var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| {
src/Sema.zig
@@ -29912,7 +29912,7 @@ pub fn coerceInMemoryAllowed(
/// load from the `*Src` to effectively perform an in-memory coercion from `Dest` to `Src`.
/// Therefore, when `dest_is_mut`, the in-memory coercion must be valid in *both directions*.
dest_is_mut: bool,
- target: std.Target,
+ target: *const std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
src_val: ?Value,
@@ -30271,7 +30271,7 @@ fn coerceInMemoryAllowedFns(
src_ty: Type,
/// If set, the coercion must be valid in both directions.
dest_is_mut: bool,
- target: std.Target,
+ target: *const std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
@@ -30380,7 +30380,7 @@ fn coerceInMemoryAllowedFns(
}
fn callconvCoerceAllowed(
- target: std.Target,
+ target: *const std.Target,
src_cc: std.builtin.CallingConvention,
dest_cc: std.builtin.CallingConvention,
) bool {
@@ -30426,7 +30426,7 @@ fn coerceInMemoryAllowedPtrs(
src_ptr_ty: Type,
/// If set, the coercion must be valid in both directions.
dest_is_mut: bool,
- target: std.Target,
+ target: *const std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
src/target.zig
@@ -9,7 +9,7 @@ const Feature = @import("Zcu.zig").Feature;
pub const default_stack_protector_buffer_size = 4;
-pub fn cannotDynamicLink(target: std.Target) bool {
+pub fn cannotDynamicLink(target: *const std.Target) bool {
return switch (target.os.tag) {
.freestanding => true,
else => target.cpu.arch.isSpirV(),
@@ -19,15 +19,15 @@ pub fn cannotDynamicLink(target: std.Target) bool {
/// On Darwin, we always link libSystem which contains libc.
/// Similarly on FreeBSD and NetBSD we always link system libc
/// since this is the stable syscall interface.
-pub fn osRequiresLibC(target: std.Target) bool {
+pub fn osRequiresLibC(target: *const std.Target) bool {
return target.os.requiresLibC();
}
-pub fn libCNeedsLibUnwind(target: std.Target, link_mode: std.builtin.LinkMode) bool {
+pub fn libCNeedsLibUnwind(target: *const std.Target, link_mode: std.builtin.LinkMode) bool {
return target.isGnuLibC() and link_mode == .static;
}
-pub fn libCxxNeedsLibUnwind(target: std.Target) bool {
+pub fn libCxxNeedsLibUnwind(target: *const std.Target) bool {
return switch (target.os.tag) {
.macos,
.ios,
@@ -44,14 +44,14 @@ pub fn libCxxNeedsLibUnwind(target: std.Target) bool {
}
/// This function returns whether non-pic code is completely invalid on the given target.
-pub fn requiresPIC(target: std.Target, linking_libc: bool) bool {
+pub fn requiresPIC(target: *const std.Target, linking_libc: bool) bool {
return target.abi.isAndroid() or
target.os.tag == .windows or target.os.tag == .uefi or
osRequiresLibC(target) or
(linking_libc and target.isGnuLibC());
}
-pub fn picLevel(target: std.Target) u32 {
+pub fn picLevel(target: *const std.Target) u32 {
// MIPS always uses PIC level 1; other platforms vary in their default PIC levels, but they
// support both levels 1 and 2, so we prefer 2.
return if (target.cpu.arch.isMIPS()) 1 else 2;
@@ -59,7 +59,7 @@ pub fn picLevel(target: std.Target) u32 {
/// This is not whether the target supports Position Independent Code, but whether the -fPIC
/// C compiler argument is valid to Clang.
-pub fn supports_fpic(target: std.Target) bool {
+pub fn supports_fpic(target: *const std.Target) bool {
return switch (target.os.tag) {
.windows,
.uefi,
@@ -68,12 +68,12 @@ pub fn supports_fpic(target: std.Target) bool {
};
}
-pub fn alwaysSingleThreaded(target: std.Target) bool {
+pub fn alwaysSingleThreaded(target: *const std.Target) bool {
_ = target;
return false;
}
-pub fn defaultSingleThreaded(target: std.Target) bool {
+pub fn defaultSingleThreaded(target: *const std.Target) bool {
switch (target.cpu.arch) {
.wasm32, .wasm64 => return true,
else => {},
@@ -85,7 +85,7 @@ pub fn defaultSingleThreaded(target: std.Target) bool {
return false;
}
-pub fn hasValgrindSupport(target: std.Target, backend: std.builtin.CompilerBackend) bool {
+pub fn hasValgrindSupport(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
// We can't currently output the necessary Valgrind client request assembly when using the C
// backend and compiling with an MSVC-like compiler.
const ofmt_c_msvc = (target.abi == .msvc or target.abi == .itanium) and target.ofmt == .c;
@@ -133,7 +133,7 @@ pub fn hasValgrindSupport(target: std.Target, backend: std.builtin.CompilerBacke
/// The set of targets that LLVM has non-experimental support for.
/// Used to select between LLVM backend and self-hosted backend when compiling in
/// release modes.
-pub fn hasLlvmSupport(target: std.Target, ofmt: std.Target.ObjectFormat) bool {
+pub fn hasLlvmSupport(target: *const std.Target, ofmt: std.Target.ObjectFormat) bool {
switch (ofmt) {
// LLVM does not support these object formats:
.c,
@@ -221,7 +221,7 @@ pub fn hasLldSupport(ofmt: std.Target.ObjectFormat) bool {
/// Used to select between LLVM backend and self-hosted backend when compiling in
/// debug mode. A given target should only return true here if it passes at least
/// as many behavior tests as the respective LLVM backend.
-pub fn selfHostedBackendIsAsRobustAsLlvm(target: std.Target) bool {
+pub fn selfHostedBackendIsAsRobustAsLlvm(target: *const std.Target) bool {
if (target.cpu.arch.isSpirV()) return true;
if (target.cpu.arch == .x86_64 and target.ptrBitWidth() == 64) return switch (target.ofmt) {
.elf, .macho => true,
@@ -230,12 +230,12 @@ pub fn selfHostedBackendIsAsRobustAsLlvm(target: std.Target) bool {
return false;
}
-pub fn supportsStackProbing(target: std.Target) bool {
+pub fn supportsStackProbing(target: *const std.Target) bool {
return target.os.tag != .windows and target.os.tag != .uefi and
(target.cpu.arch == .x86 or target.cpu.arch == .x86_64);
}
-pub fn supportsStackProtector(target: std.Target, backend: std.builtin.CompilerBackend) bool {
+pub fn supportsStackProtector(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
switch (target.os.tag) {
.plan9 => return false,
else => {},
@@ -250,20 +250,20 @@ pub fn supportsStackProtector(target: std.Target, backend: std.builtin.CompilerB
};
}
-pub fn clangSupportsStackProtector(target: std.Target) bool {
+pub fn clangSupportsStackProtector(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.spirv, .spirv32, .spirv64 => return false,
else => true,
};
}
-pub fn libcProvidesStackProtector(target: std.Target) bool {
+pub fn libcProvidesStackProtector(target: *const std.Target) bool {
return !target.isMinGW() and target.os.tag != .wasi and !target.cpu.arch.isSpirV();
}
/// Returns true if `@returnAddress()` is supported by the target and has a
/// reasonably performant implementation for the requested optimization mode.
-pub fn supportsReturnAddress(target: std.Target, optimize: std.builtin.OptimizeMode) bool {
+pub fn supportsReturnAddress(target: *const std.Target, optimize: std.builtin.OptimizeMode) bool {
return switch (target.cpu.arch) {
// Emscripten currently implements `emscripten_return_address()` by calling
// out into JavaScript and parsing a stack trace, which introduces significant
@@ -299,7 +299,7 @@ pub fn classifyCompilerRtLibName(name: []const u8) CompilerRtClassification {
return .none;
}
-pub fn hasDebugInfo(target: std.Target) bool {
+pub fn hasDebugInfo(target: *const std.Target) bool {
return switch (target.cpu.arch) {
// TODO: We should make newer PTX versions depend on older ones so we'd just check `ptx75`.
.nvptx, .nvptx64 => target.cpu.hasAny(.nvptx, &.{
@@ -321,7 +321,7 @@ pub fn hasDebugInfo(target: std.Target) bool {
};
}
-pub fn defaultCompilerRtOptimizeMode(target: std.Target) std.builtin.OptimizeMode {
+pub fn defaultCompilerRtOptimizeMode(target: *const std.Target) std.builtin.OptimizeMode {
if (target.cpu.arch.isWasm() and target.os.tag == .freestanding) {
return .ReleaseSmall;
} else {
@@ -329,7 +329,7 @@ pub fn defaultCompilerRtOptimizeMode(target: std.Target) std.builtin.OptimizeMod
}
}
-pub fn canBuildLibCompilerRt(target: std.Target, use_llvm: bool, have_llvm: bool) bool {
+pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llvm: bool) bool {
switch (target.os.tag) {
.plan9 => return false,
else => {},
@@ -342,12 +342,15 @@ pub fn canBuildLibCompilerRt(target: std.Target, use_llvm: bool, have_llvm: bool
}
return switch (zigBackend(target, use_llvm)) {
.stage2_llvm => true,
- .stage2_x86_64 => if (target.ofmt == .elf or target.ofmt == .macho) true else have_llvm,
+ .stage2_x86_64 => switch (target.ofmt) {
+ .elf, .macho => true,
+ else => have_llvm,
+ },
else => have_llvm,
};
}
-pub fn canBuildLibUbsanRt(target: std.Target) bool {
+pub fn canBuildLibUbsanRt(target: *const std.Target) bool {
switch (target.cpu.arch) {
.spirv, .spirv32, .spirv64 => return false,
// Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
@@ -356,7 +359,7 @@ pub fn canBuildLibUbsanRt(target: std.Target) bool {
}
}
-pub fn hasRedZone(target: std.Target) bool {
+pub fn hasRedZone(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.aarch64,
.aarch64_be,
@@ -372,7 +375,7 @@ pub fn hasRedZone(target: std.Target) bool {
};
}
-pub fn libcFullLinkFlags(target: std.Target) []const []const u8 {
+pub fn libcFullLinkFlags(target: *const std.Target) []const []const u8 {
// The linking order of these is significant and should match the order other
// C compilers such as gcc or clang use.
const result: []const []const u8 = switch (target.os.tag) {
@@ -389,14 +392,14 @@ pub fn libcFullLinkFlags(target: std.Target) []const []const u8 {
return result;
}
-pub fn clangMightShellOutForAssembly(target: std.Target) bool {
+pub fn clangMightShellOutForAssembly(target: *const std.Target) bool {
// Clang defaults to using the system assembler in some cases.
return target.cpu.arch.isNvptx() or target.cpu.arch == .xcore;
}
/// Each backend architecture in Clang has a different codepath which may or may not
/// support an -mcpu flag.
-pub fn clangAssemblerSupportsMcpuArg(target: std.Target) bool {
+pub fn clangAssemblerSupportsMcpuArg(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => true,
else => false,
@@ -405,7 +408,7 @@ pub fn clangAssemblerSupportsMcpuArg(target: std.Target) bool {
/// Some experimental or poorly-maintained LLVM targets do not properly process CPU models in their
/// Clang driver code. For these, we should omit the `-Xclang -target-cpu -Xclang <model>` flags.
-pub fn clangSupportsTargetCpuArg(target: std.Target) bool {
+pub fn clangSupportsTargetCpuArg(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.arc,
.msp430,
@@ -417,7 +420,7 @@ pub fn clangSupportsTargetCpuArg(target: std.Target) bool {
};
}
-pub fn clangSupportsFloatAbiArg(target: std.Target) bool {
+pub fn clangSupportsFloatAbiArg(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.arm,
.armeb,
@@ -442,7 +445,7 @@ pub fn clangSupportsFloatAbiArg(target: std.Target) bool {
};
}
-pub fn clangSupportsNoImplicitFloatArg(target: std.Target) bool {
+pub fn clangSupportsNoImplicitFloatArg(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.aarch64,
.aarch64_be,
@@ -459,7 +462,7 @@ pub fn clangSupportsNoImplicitFloatArg(target: std.Target) bool {
};
}
-pub fn defaultUnwindTables(target: std.Target, libunwind: bool, libtsan: bool) std.builtin.UnwindTables {
+pub fn defaultUnwindTables(target: *const std.Target, libunwind: bool, libtsan: bool) std.builtin.UnwindTables {
if (target.os.tag == .windows) {
// The old 32-bit x86 variant of SEH doesn't use tables.
return if (target.cpu.arch != .x86) .@"async" else .none;
@@ -472,7 +475,7 @@ pub fn defaultUnwindTables(target: std.Target, libunwind: bool, libtsan: bool) s
}
pub fn defaultAddressSpace(
- target: std.Target,
+ target: *const std.Target,
context: enum {
/// Query the default address space for global constant values.
global_constant,
@@ -492,7 +495,7 @@ pub fn defaultAddressSpace(
/// Returns true if pointers in `from` can be converted to a pointer in `to`.
pub fn addrSpaceCastIsValid(
- target: std.Target,
+ target: *const std.Target,
from: AddressSpace,
to: AddressSpace,
) bool {
@@ -512,7 +515,7 @@ pub fn addrSpaceCastIsValid(
/// a number of restrictions on usage of such pointers. For example, a logical pointer may not be
/// part of a merge (result of a branch) and may not be stored in memory at all. This function returns
/// whether such pointers are logical for a particular architecture and address space.
-pub fn arePointersLogical(target: std.Target, as: AddressSpace) bool {
+pub fn arePointersLogical(target: *const std.Target, as: AddressSpace) bool {
if (target.os.tag != .vulkan) return false;
return switch (as) {
@@ -537,7 +540,7 @@ pub fn arePointersLogical(target: std.Target, as: AddressSpace) bool {
};
}
-pub fn isDynamicAMDGCNFeature(target: std.Target, feature: std.Target.Cpu.Feature) bool {
+pub fn isDynamicAMDGCNFeature(target: *const std.Target, feature: std.Target.Cpu.Feature) bool {
if (target.cpu.arch != .amdgcn) return false;
const sramecc_only = &[_]*const std.Target.Cpu.Model{
@@ -585,7 +588,7 @@ pub fn isDynamicAMDGCNFeature(target: std.Target, feature: std.Target.Cpu.Featur
return false;
}
-pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
+pub fn llvmMachineAbi(target: *const std.Target) ?[:0]const u8 {
// LLD does not support ELFv1. Rather than having LLVM produce ELFv1 code and then linking it
// into a broken ELFv2 binary, just force LLVM to use ELFv2 as well. This will break when glibc
// is linked as glibc only supports ELFv2 for little endian, but there's nothing we can do about
@@ -642,7 +645,7 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
/// This function returns 1 if function alignment is not observable or settable. Note that this
/// value will not necessarily match the backend's default function alignment (e.g. for LLVM).
-pub fn defaultFunctionAlignment(target: std.Target) Alignment {
+pub fn defaultFunctionAlignment(target: *const std.Target) Alignment {
// Overrides of the minimum for performance.
return switch (target.cpu.arch) {
.csky,
@@ -669,7 +672,7 @@ pub fn defaultFunctionAlignment(target: std.Target) Alignment {
}
/// This function returns 1 if function alignment is not observable or settable.
-pub fn minFunctionAlignment(target: std.Target) Alignment {
+pub fn minFunctionAlignment(target: *const std.Target) Alignment {
return switch (target.cpu.arch) {
.riscv32,
.riscv64,
@@ -712,7 +715,7 @@ pub fn minFunctionAlignment(target: std.Target) Alignment {
};
}
-pub fn supportsFunctionAlignment(target: std.Target) bool {
+pub fn supportsFunctionAlignment(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.nvptx,
.nvptx64,
@@ -726,7 +729,7 @@ pub fn supportsFunctionAlignment(target: std.Target) bool {
};
}
-pub fn functionPointerMask(target: std.Target) ?u64 {
+pub fn functionPointerMask(target: *const std.Target) ?u64 {
// 32-bit Arm uses the LSB to mean that the target function contains Thumb code.
// MIPS uses the LSB to mean that the target function contains MIPS16/microMIPS code.
return if (target.cpu.arch.isArm() or target.cpu.arch.isMIPS32())
@@ -737,7 +740,7 @@ pub fn functionPointerMask(target: std.Target) ?u64 {
null;
}
-pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool {
+pub fn supportsTailCall(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
switch (backend) {
.stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
.stage2_c => return true,
@@ -745,7 +748,7 @@ pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend
}
}
-pub fn supportsThreads(target: std.Target, backend: std.builtin.CompilerBackend) bool {
+pub fn supportsThreads(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
return switch (backend) {
.stage2_powerpc => true,
.stage2_x86_64 => target.ofmt == .macho or target.ofmt == .elf,
@@ -804,7 +807,7 @@ pub fn fnCallConvAllowsZigTypes(cc: std.builtin.CallingConvention) bool {
};
}
-pub fn zigBackend(target: std.Target, use_llvm: bool) std.builtin.CompilerBackend {
+pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.CompilerBackend {
if (use_llvm) return .stage2_llvm;
if (target.ofmt == .c) return .stage2_c;
return switch (target.cpu.arch) {
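
The long run of `src/target.zig` helpers above is where the by-pointer convention presumably pays off most: `std.Target` carries the CPU feature bit set, OS version ranges, and the dynamic-linker path buffer, so each by-value call copied a few hundred bytes just to read a tag or two. `*const` keeps Zig's parameter-immutability guarantee while shrinking every call to one pointer. A quick way to see the size being avoided (the exact number varies by compiler version and host, so treat it as illustrative):

    const std = @import("std");

    pub fn main() void {
        // typically several hundred bytes, dominated by the CPU feature set
        std.debug.print("@sizeOf(std.Target) = {d}\n", .{@sizeOf(std.Target)});
        std.debug.print("@sizeOf(*const std.Target) = {d}\n", .{@sizeOf(*const std.Target)});
    }
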
src/Type.zig
@@ -1602,7 +1602,7 @@ fn abiSizeInnerOptional(
};
}
-pub fn ptrAbiAlignment(target: Target) Alignment {
+pub fn ptrAbiAlignment(target: *const Target) Alignment {
return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8));
}
@@ -2395,7 +2395,7 @@ pub fn isAnyFloat(ty: Type) bool {
/// Asserts the type is a fixed-size float or comptime_float.
/// Returns 128 for comptime_float types.
-pub fn floatBits(ty: Type, target: Target) u16 {
+pub fn floatBits(ty: Type, target: *const Target) u16 {
return switch (ty.toIntern()) {
.f16_type => 16,
.f32_type => 32,
@@ -4188,6 +4188,6 @@ pub fn smallestUnsignedBits(max: u64) u16 {
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
-fn cTypeAlign(target: Target, c_type: Target.CType) Alignment {
+fn cTypeAlign(target: *const Target, c_type: Target.CType) Alignment {
return Alignment.fromByteUnits(target.cTypeAlignment(c_type));
}
src/Zcu.zig
@@ -3773,8 +3773,8 @@ pub fn errNote(
/// Deprecated. There is no global target for a Zig Compilation Unit. Instead,
/// look up the target based on the Module that contains the source code being
/// analyzed.
-pub fn getTarget(zcu: *const Zcu) Target {
- return zcu.root_mod.resolved_target.result;
+pub fn getTarget(zcu: *const Zcu) *const Target {
+ return &zcu.root_mod.resolved_target.result;
}
/// Deprecated. There is no global optimization mode for a Zig Compilation
@@ -3863,7 +3863,7 @@ pub const Feature = enum {
};
pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {
- const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
+ const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
return target_util.backendSupportsFeature(backend, feature);
}
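
`Zcu.getTarget` now returns a pointer into `zcu.root_mod.resolved_target`, so the result stays valid exactly as long as the module it points into, and callers read through it unchanged thanks to auto-dereference. A reduced model of the new accessor shape (field layout simplified, names hypothetical):

    const std = @import("std");

    const Module = struct {
        resolved_target: struct { result: std.Target },
    };

    const Zcu = struct {
        root_mod: *const Module,

        // pointer into the module's storage, not a copy
        fn getTarget(zcu: *const Zcu) *const std.Target {
            return &zcu.root_mod.resolved_target.result;
        }
    };

    test "accessor returns a pointer into module storage" {
        const mod: Module = .{ .resolved_target = .{ .result = @import("builtin").target } };
        const zcu: Zcu = .{ .root_mod = &mod };
        try std.testing.expect(zcu.getTarget() == &mod.resolved_target.result);
    }
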
test/link/macho.zig
@@ -864,7 +864,7 @@ fn testLayout(b: *Build, opts: Options) *Step {
fn testLinkDirectlyCppTbd(b: *Build, opts: Options) *Step {
const test_step = addTestStep(b, "link-directly-cpp-tbd", opts);
- const sdk = std.zig.system.darwin.getSdk(b.allocator, opts.target.result) orelse
+ const sdk = std.zig.system.darwin.getSdk(b.allocator, &opts.target.result) orelse
@panic("macOS SDK is required to run the test");
const exe = addExecutable(b, opts, .{
test/src/Cases.zig
@@ -433,7 +433,7 @@ fn addFromDirInner(
// Cross-product to get all possible test combinations
for (targets) |target_query| {
const resolved_target = b.resolveTargetQuery(target_query);
- const target = resolved_target.result;
+ const target = &resolved_target.result;
for (backends) |backend| {
if (backend == .stage2 and
target.cpu.arch != .wasm32 and target.cpu.arch != .x86_64 and target.cpu.arch != .spirv64)
@@ -708,7 +708,7 @@ pub fn lowerToBuildSteps(
},
.Execution => |expected_stdout| no_exec: {
const run = if (case.target.result.ofmt == .c) run_step: {
- if (getExternalExecutor(host, &case.target.result, .{ .link_libc = true }) != .native) {
+ if (getExternalExecutor(&host, &case.target.result, .{ .link_libc = true }) != .native) {
// We wouldn't be able to run the compiled C code.
break :no_exec;
}
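
The `Cases.zig` hunk shows the executor check with both arguments now by pointer. Assuming the post-change `std.zig.system.getExternalExecutor(host, candidate, options)` signature seen here, a host-only probe looks like this (a sketch, not compiler code):

    const std = @import("std");
    const builtin = @import("builtin");

    pub fn main() void {
        const host = &builtin.target;
        // asking whether the host can run its own binaries should yield .native
        switch (std.zig.system.getExternalExecutor(host, host, .{ .link_libc = false })) {
            .native => std.debug.print("runs natively\n", .{}),
            else => std.debug.print("needs an external executor\n", .{}),
        }
    }
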
test/src/Debugger.zig
@@ -21,7 +21,7 @@ pub const Target = struct {
test_name_suffix: []const u8,
};
-pub fn addTestsForTarget(db: *Debugger, target: Target) void {
+pub fn addTestsForTarget(db: *Debugger, target: *const Target) void {
db.addLldbTest(
"basic",
target,
@@ -2376,7 +2376,7 @@ const File = struct { import: ?[]const u8 = null, path: []const u8, source: []co
fn addGdbTest(
db: *Debugger,
name: []const u8,
- target: Target,
+ target: *const Target,
files: []const File,
commands: []const u8,
expected_output: []const []const u8,
@@ -2402,7 +2402,7 @@ fn addGdbTest(
fn addLldbTest(
db: *Debugger,
name: []const u8,
- target: Target,
+ target: *const Target,
files: []const File,
commands: []const u8,
expected_output: []const []const u8,
@@ -2433,7 +2433,7 @@ const success = 99;
fn addTest(
db: *Debugger,
name: []const u8,
- target: Target,
+ target: *const Target,
files: []const File,
db_argv1: []const []const u8,
db_commands: []const u8,
test/src/StackTrace.zig
@@ -22,7 +22,7 @@ const Config = struct {
pub fn addCase(self: *StackTrace, config: Config) void {
self.addCaseInner(config, true);
- if (shouldTestNonLlvm(self.b.graph.host.result)) {
+ if (shouldTestNonLlvm(&self.b.graph.host.result)) {
self.addCaseInner(config, false);
}
}
@@ -41,7 +41,7 @@ fn addCaseInner(self: *StackTrace, config: Config, use_llvm: bool) void {
self.addExpect(config.name, config.source, .ReleaseSafe, use_llvm, per_mode);
}
-fn shouldTestNonLlvm(target: std.Target) bool {
+fn shouldTestNonLlvm(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.x86_64 => switch (target.ofmt) {
.elf => true,
test/standalone/ios/build.zig
@@ -12,7 +12,7 @@ pub fn build(b: *std.Build) void {
.cpu_arch = .aarch64,
.os_tag = .ios,
});
- const sdk = std.zig.system.darwin.getSdk(b.allocator, target.result) orelse
+ const sdk = std.zig.system.darwin.getSdk(b.allocator, &target.result) orelse
@panic("no iOS SDK found");
b.sysroot = sdk;
test/tests.zig
@@ -2311,7 +2311,7 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
const resolved_target = b.resolveTargetQuery(test_target.target);
const triple_txt = resolved_target.query.zigTriple(b.allocator) catch @panic("OOM");
- const target = resolved_target.result;
+ const target = &resolved_target.result;
if (options.test_target_filters.len > 0) {
for (options.test_target_filters) |filter| {
@@ -2557,7 +2557,7 @@ pub fn addCAbiTests(b: *std.Build, options: CAbiTestOptions) *Step {
const resolved_target = b.resolveTargetQuery(c_abi_target.target);
const triple_txt = resolved_target.query.zigTriple(b.allocator) catch @panic("OOM");
- const target = resolved_target.result;
+ const target = &resolved_target.result;
if (options.test_target_filters.len > 0) {
for (options.test_target_filters) |filter| {
@@ -2659,7 +2659,7 @@ pub fn addDebuggerTests(b: *std.Build, options: DebuggerContext.Options) ?*Step
.options = options,
.root_step = step,
};
- context.addTestsForTarget(.{
+ context.addTestsForTarget(&.{
.resolved = b.resolveTargetQuery(.{
.cpu_arch = .x86_64,
.os_tag = .linux,
@@ -2668,7 +2668,7 @@ pub fn addDebuggerTests(b: *std.Build, options: DebuggerContext.Options) ?*Step
.pic = false,
.test_name_suffix = "x86_64-linux",
});
- context.addTestsForTarget(.{
+ context.addTestsForTarget(&.{
.resolved = b.resolveTargetQuery(.{
.cpu_arch = .x86_64,
.os_tag = .linux,
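
`&.{ ... }` in the `addDebuggerTests` hunks is the address of an anonymous struct literal: the literal's type is inferred from the parameter (here the debugger context's own `Target` config struct, not `std.Target`), Zig materializes it as a constant temporary, and the callee receives a `*const` pointer to it. The same idiom in isolation (toy config type, hypothetical names):

    const std = @import("std");

    const Config = struct {
        pic: ?bool = null,
        test_name_suffix: []const u8,
    };

    fn addTestsForTarget(config: *const Config) void {
        std.debug.print("suffix: {s}\n", .{config.test_name_suffix});
    }

    pub fn main() void {
        // the anonymous literal coerces to Config; &.{...} lends its address
        addTestsForTarget(&.{ .pic = false, .test_name_suffix = "x86_64-linux" });
    }
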
tools/doctest.zig
@@ -317,7 +317,7 @@ fn printOutput(
const target = try std.zig.system.resolveTargetQuery(
target_query,
);
- switch (getExternalExecutor(host, &target, .{
+ switch (getExternalExecutor(&host, &target, .{
.link_libc = code.link_libc,
})) {
.native => {},
@@ -538,7 +538,7 @@ fn printOutput(
.lib => {
const bin_basename = try std.zig.binNameAlloc(arena, .{
.root_name = code_name,
- .target = builtin.target,
+ .target = &builtin.target,
.output_mode = .Lib,
});
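
The `doctest.zig` change to `.target = &builtin.target` is the cheapest case of all: `builtin.target` is a comptime-known constant, so its address is a `*const std.Target` into static data and taking it costs nothing at runtime. Sketch:

    const std = @import("std");
    const builtin = @import("builtin");

    pub fn main() void {
        // address of a comptime-known constant: no copy, static lifetime
        const t: *const std.Target = &builtin.target;
        std.debug.print("native arch: {s}\n", .{@tagName(t.cpu.arch)});
    }
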
tools/incr-check.zig
@@ -316,7 +316,7 @@ const Eval = struct {
const bin_name = try std.zig.EmitArtifact.bin.cacheName(arena, .{
.root_name = "root", // corresponds to the module name "root"
- .target = eval.target.resolved,
+ .target = &eval.target.resolved,
.output_mode = .Exe,
});
const bin_path = try std.fs.path.join(arena, &.{ result_dir, bin_name });
@@ -444,7 +444,7 @@ const Eval = struct {
var argv_buf: [2][]const u8 = undefined;
const argv: []const []const u8, const is_foreign: bool = switch (std.zig.system.getExternalExecutor(
- eval.host,
+ &eval.host,
&eval.target.resolved,
.{ .link_libc = eval.target.backend == .cbe },
)) {
build.zig
@@ -759,7 +759,7 @@ fn addCmakeCfgOptionsToExe(
use_zig_libcxx: bool,
) !void {
const mod = exe.root_module;
- const target = mod.resolved_target.?.result;
+ const target = &mod.resolved_target.?.result;
if (target.os.tag.isDarwin()) {
// useful for package maintainers