Commit aa92446365

Andrew Kelley <andrew@ziglang.org>
2020-06-29 01:39:54
stage2: implement function parameters
In codegen.zig, the std.Target.Cpu.Arch is now generally available as a comptime value where needed. This is a tradeoff: it makes the compiler binary larger, but gives higher performance, since the optimizer can optimize per architecture (which is usually how compilers are designed anyway, with different code per architecture). It also allows us to use per-architecture types, such as a Register enum that is specific to the comptime-known architecture. Adds an abiSize method to Type.
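To make the tradeoff concrete, here is a minimal standalone sketch of the dispatch pattern (illustrative, not the commit's actual code): a runtime switch over the architecture forwards to a function that takes the arch as a comptime parameter, so each prong instantiates its own specialized body.

```zig
const std = @import("std");

fn gen(arch: std.Target.Cpu.Arch) void {
    // Runtime value -> comptime value: each prong instantiates genArch with
    // a comptime-known architecture, duplicating (and specializing) the body.
    switch (arch) {
        .x86_64 => genArch(.x86_64),
        .aarch64 => genArch(.aarch64),
        // ...one prong per architecture in the real code...
        else => @panic("unhandled architecture in this sketch"),
    }
}

fn genArch(comptime arch: std.Target.Cpu.Arch) void {
    // Here `arch` is comptime-known, so per-architecture types (such as a
    // Register enum) can be chosen at compile time and branches on `arch`
    // fold away.
    std.debug.print("specialized for {}\n", .{arch});
}
```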
1 parent e120b07
Changed files (3)
src-self-hosted/codegen/x86_64.zig
@@ -1,20 +1,21 @@
+const Type = @import("../Type.zig");
+
 // zig fmt: off
 
-/// Definitions of all of the x64 registers. The order is very, very important.
+/// Definitions of all of the x64 registers. The order is semantically meaningful.
 /// The registers are defined such that IDs go in descending order of 64-bit,
 /// 32-bit, 16-bit, and then 8-bit, and each set contains exactly sixteen
-/// registers. This results in some very, very useful properties:
+/// registers. This results in some useful properties:
 ///
 /// Any 64-bit register can be turned into its 32-bit form by adding 16, and
 /// vice versa. This also works between 32-bit and 16-bit forms. With 8-bit, it
-/// works for all except for sp, bp, si, and di, which don't *have* an 8-bit
+/// works for all except for sp, bp, si, and di, which do *not* have an 8-bit
 /// form.
 ///
 /// If (register & 8) is set, the register is extended.
 ///
 /// The ID can be easily determined by figuring out what range the register is
 /// in, and then subtracting the base.
-/// 
 pub const Register = enum(u8) {
     // 0 through 15, 64-bit registers. 8-15 are extended.
     // id is just the int value.
@@ -67,3 +68,24 @@ pub const Register = enum(u8) {
 };
 
 // zig fmt: on
+
+/// After argument values have been computed, they are placed either in registers
+/// or pushed on the stack. The way values are passed depends on the class.
+pub const ParameterClass = enum {
+    /// Integral types that fit into one of the general purpose registers.
+    integer,
+    /// Types that fit into a vector register.
+    sse,
+    /// Types that fit into a vector register and can be passed
+    /// and returned in the upper bytes of it.
+    sse_up,
+    /// Types that will be returned via the x87 FPU.
+    x87,
+    /// Types that will be returned via the x87 FPU and can be passed and
+    /// returned in the upper bytes of it.
+    x87_up,
+    /// Types that will be returned via the x87 FPU.
+    complex_x87,
+    /// Types that will be passed and returned in memory via the stack.
+    memory,
+};
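The ordering invariants documented in the Register doc comment reduce to a few one-liners. A sketch (the import path and the helper names to32, isExtended, and id are illustrative, not part of this commit):

```zig
const Register = @import("src-self-hosted/codegen/x86_64.zig").Register;

/// 64-bit -> 32-bit form: add 16 (e.g. rax = 0 becomes eax = 16).
fn to32(reg: Register) Register {
    return @intToEnum(Register, @enumToInt(reg) + 16);
}

/// Bit 3 of the low nibble marks the extended registers (r8..r15 and
/// their narrower forms).
fn isExtended(reg: Register) bool {
    return @enumToInt(reg) & 8 != 0;
}

/// "Subtract the base of the range" is the same as keeping the low four bits.
fn id(reg: Register) u4 {
    return @truncate(u4, @enumToInt(reg));
}
```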
src-self-hosted/codegen.zig
@@ -37,6 +37,69 @@ pub fn generateSymbol(
         .Fn => {
             const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
 
+            const fn_type = module_fn.owner_decl.typed_value.most_recent.typed_value.ty;
+            const param_types = try bin_file.allocator.alloc(Type, fn_type.fnParamLen());
+            defer bin_file.allocator.free(param_types);
+            fn_type.fnParamTypes(param_types);
+            // A parameter may be broken into multiple machine code parameters, so we don't
+            // know the size up front.
+            var mc_args = try std.ArrayList(Function.MCValue).initCapacity(bin_file.allocator, param_types.len);
+            defer mc_args.deinit();
+
+            var next_stack_offset: u64 = 0;
+
+            switch (fn_type.fnCallingConvention()) {
+                .Naked => assert(mc_args.items.len == 0),
+                .Unspecified, .C => {
+                    // Prepare the function parameters
+                    switch (bin_file.options.target.cpu.arch) {
+                        .x86_64 => {
+                            const integer_registers = [_]Reg(.x86_64){ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
+                            var next_int_reg: usize = 0;
+
+                            for (param_types) |param_type, src_i| {
+                                switch (param_type.zigTypeTag()) {
+                                    .Bool, .Int => {
+                                        if (next_int_reg >= integer_registers.len) {
+                                            try mc_args.append(.{ .stack_offset = next_stack_offset });
+                                            next_stack_offset += param_type.abiSize(bin_file.options.target);
+                                        } else {
+                                            try mc_args.append(.{ .register = @enumToInt(integer_registers[next_int_reg]) });
+                                            next_int_reg += 1;
+                                        }
+                                    },
+                                    else => return Result{
+                                        .fail = try ErrorMsg.create(
+                                            bin_file.allocator,
+                                            src,
+                                            "TODO implement function parameters of type {}",
+                                            .{@tagName(param_type.zigTypeTag())},
+                                        ),
+                                    }
+                                }
+                            }
+
+                        },
+                        else => return Result{
+                            .fail = try ErrorMsg.create(
+                                bin_file.allocator,
+                                src,
+                                "TODO implement function parameters for {}",
+                                .{bin_file.options.target.cpu.arch},
+                            ),
+                        },
+                    }
+                },
+                else => return Result{
+                    .fail = try ErrorMsg.create(
+                        bin_file.allocator,
+                        src,
+                        "TODO implement {} calling convention",
+                        .{fn_type.fnCallingConvention()},
+                    ),
+                },
+            }
+
             var function = Function{
                 .target = &bin_file.options.target,
                 .bin_file = bin_file,
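As a worked example of the parameter loop above (a standalone re-trace, not compiler code): with six integer registers, an all-integer signature with eight parameters puts the first six in rdi through r9 and spills the last two to the stack, at offsets advanced by abiSize.

```zig
const std = @import("std");
const assert = std.debug.assert;

pub fn main() void {
    const num_int_regs: usize = 6; // rdi, rsi, rdx, rcx, r8, r9
    // Stand-ins for abiSize() of eight integer/bool parameters.
    const param_sizes = [_]u64{ 4, 1, 8, 1, 2, 4, 8, 4 };
    var next_int_reg: usize = 0;
    var next_stack_offset: u64 = 0;
    for (param_sizes) |size| {
        if (next_int_reg < num_int_regs) {
            next_int_reg += 1; // this parameter lands in the next integer register
        } else {
            // this parameter lands at the current offset; the next one
            // starts abiSize() bytes later
            next_stack_offset += size;
        }
    }
    assert(next_int_reg == 6);
    assert(next_stack_offset == 12); // param 7 at offset 0 (8 bytes), param 8 at offset 8 (4 bytes)
}
```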
@@ -44,16 +107,14 @@ pub fn generateSymbol(
                 .code = code,
                 .inst_table = std.AutoHashMap(*ir.Inst, Function.MCValue).init(bin_file.allocator),
                 .err_msg = null,
+                .args = mc_args.items,
             };
             defer function.inst_table.deinit();
 
-            for (module_fn.analysis.success.instructions) |inst| {
-                const new_inst = function.genFuncInst(inst) catch |err| switch (err) {
-                    error.CodegenFail => return Result{ .fail = function.err_msg.? },
-                    else => |e| return e,
-                };
-                try function.inst_table.putNoClobber(inst, new_inst);
-            }
+            function.gen() catch |err| switch (err) {
+                error.CodegenFail => return Result{ .fail = function.err_msg.? },
+                else => |e| return e,
+            };
 
             if (function.err_msg) |em| {
                 return Result{ .fail = em };
@@ -157,6 +218,7 @@ const Function = struct {
     code: *std.ArrayList(u8),
     inst_table: std.AutoHashMap(*ir.Inst, MCValue),
     err_msg: ?*ErrorMsg,
+    args: []MCValue,
 
     const MCValue = union(enum) {
         none,
@@ -170,44 +232,119 @@ const Function = struct {
         register: usize,
         /// The value is in memory at a hard-coded address.
         memory: u64,
+        /// The value is one of the stack variables.
+        stack_offset: u64,
     };
 
-    fn genFuncInst(self: *Function, inst: *ir.Inst) !MCValue {
+    fn gen(self: *Function) !void {
+        switch (self.target.cpu.arch) {
+            .arm => return self.genArch(.arm),
+            .armeb => return self.genArch(.armeb),
+            .aarch64 => return self.genArch(.aarch64),
+            .aarch64_be => return self.genArch(.aarch64_be),
+            .aarch64_32 => return self.genArch(.aarch64_32),
+            .arc => return self.genArch(.arc),
+            .avr => return self.genArch(.avr),
+            .bpfel => return self.genArch(.bpfel),
+            .bpfeb => return self.genArch(.bpfeb),
+            .hexagon => return self.genArch(.hexagon),
+            .mips => return self.genArch(.mips),
+            .mipsel => return self.genArch(.mipsel),
+            .mips64 => return self.genArch(.mips64),
+            .mips64el => return self.genArch(.mips64el),
+            .msp430 => return self.genArch(.msp430),
+            .powerpc => return self.genArch(.powerpc),
+            .powerpc64 => return self.genArch(.powerpc64),
+            .powerpc64le => return self.genArch(.powerpc64le),
+            .r600 => return self.genArch(.r600),
+            .amdgcn => return self.genArch(.amdgcn),
+            .riscv32 => return self.genArch(.riscv32),
+            .riscv64 => return self.genArch(.riscv64),
+            .sparc => return self.genArch(.sparc),
+            .sparcv9 => return self.genArch(.sparcv9),
+            .sparcel => return self.genArch(.sparcel),
+            .s390x => return self.genArch(.s390x),
+            .tce => return self.genArch(.tce),
+            .tcele => return self.genArch(.tcele),
+            .thumb => return self.genArch(.thumb),
+            .thumbeb => return self.genArch(.thumbeb),
+            .i386 => return self.genArch(.i386),
+            .x86_64 => return self.genArch(.x86_64),
+            .xcore => return self.genArch(.xcore),
+            .nvptx => return self.genArch(.nvptx),
+            .nvptx64 => return self.genArch(.nvptx64),
+            .le32 => return self.genArch(.le32),
+            .le64 => return self.genArch(.le64),
+            .amdil => return self.genArch(.amdil),
+            .amdil64 => return self.genArch(.amdil64),
+            .hsail => return self.genArch(.hsail),
+            .hsail64 => return self.genArch(.hsail64),
+            .spir => return self.genArch(.spir),
+            .spir64 => return self.genArch(.spir64),
+            .kalimba => return self.genArch(.kalimba),
+            .shave => return self.genArch(.shave),
+            .lanai => return self.genArch(.lanai),
+            .wasm32 => return self.genArch(.wasm32),
+            .wasm64 => return self.genArch(.wasm64),
+            .renderscript32 => return self.genArch(.renderscript32),
+            .renderscript64 => return self.genArch(.renderscript64),
+            .ve => return self.genArch(.ve),
+        }
+    }
+
+    fn genArch(self: *Function, comptime arch: std.Target.Cpu.Arch) !void {
+        for (self.mod_fn.analysis.success.instructions) |inst| {
+            const new_inst = try self.genFuncInst(inst, arch);
+            try self.inst_table.putNoClobber(inst, new_inst);
+        }
+    }
+
+    fn genFuncInst(self: *Function, inst: *ir.Inst, comptime arch: std.Target.Cpu.Arch) !MCValue {
         switch (inst.tag) {
-            .add => return self.genAdd(inst.cast(ir.Inst.Add).?),
-            .arg => return self.genArg(inst.src),
-            .block => return self.genBlock(inst.cast(ir.Inst.Block).?),
-            .breakpoint => return self.genBreakpoint(inst.src),
-            .call => return self.genCall(inst.cast(ir.Inst.Call).?),
+            .add => return self.genAdd(inst.cast(ir.Inst.Add).?, arch),
+            .arg => return self.genArg(inst.cast(ir.Inst.Arg).?),
+            .block => return self.genBlock(inst.cast(ir.Inst.Block).?, arch),
+            .breakpoint => return self.genBreakpoint(inst.src, arch),
+            .call => return self.genCall(inst.cast(ir.Inst.Call).?, arch),
             .unreach => return MCValue{ .unreach = {} },
             .constant => unreachable, // excluded from function bodies
-            .assembly => return self.genAsm(inst.cast(ir.Inst.Assembly).?),
+            .assembly => return self.genAsm(inst.cast(ir.Inst.Assembly).?, arch),
             .ptrtoint => return self.genPtrToInt(inst.cast(ir.Inst.PtrToInt).?),
             .bitcast => return self.genBitCast(inst.cast(ir.Inst.BitCast).?),
-            .ret => return self.genRet(inst.cast(ir.Inst.Ret).?),
-            .retvoid => return self.genRetVoid(inst.cast(ir.Inst.RetVoid).?),
-            .cmp => return self.genCmp(inst.cast(ir.Inst.Cmp).?),
-            .condbr => return self.genCondBr(inst.cast(ir.Inst.CondBr).?),
-            .isnull => return self.genIsNull(inst.cast(ir.Inst.IsNull).?),
-            .isnonnull => return self.genIsNonNull(inst.cast(ir.Inst.IsNonNull).?),
+            .ret => return self.genRet(inst.cast(ir.Inst.Ret).?, arch),
+            .retvoid => return self.genRetVoid(inst.cast(ir.Inst.RetVoid).?, arch),
+            .cmp => return self.genCmp(inst.cast(ir.Inst.Cmp).?, arch),
+            .condbr => return self.genCondBr(inst.cast(ir.Inst.CondBr).?, arch),
+            .isnull => return self.genIsNull(inst.cast(ir.Inst.IsNull).?, arch),
+            .isnonnull => return self.genIsNonNull(inst.cast(ir.Inst.IsNonNull).?, arch),
         }
     }
 
-    fn genAdd(self: *Function, inst: *ir.Inst.Add) !MCValue {
-        switch (self.target.cpu.arch) {
+    fn genAdd(self: *Function, inst: *ir.Inst.Add, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        const lhs = try self.resolveInst(inst.args.lhs);
+        const rhs = try self.resolveInst(inst.args.rhs);
+        switch (arch) {
+            .i386, .x86_64 => {
+                // const lhs_reg = try self.instAsReg(lhs);
+                // const rhs_reg = try self.instAsReg(rhs);
+                // const result = try self.allocateReg();
+
+                // try self.code.append(??);
+
+                // lhs_reg.release();
+                // rhs_reg.release();
+                return self.fail(inst.base.src, "TODO implement register allocation", .{});
+            },
             else => return self.fail(inst.base.src, "TODO implement add for {}", .{self.target.cpu.arch}),
         }
     }
 
-    fn genArg(self: *Function, src: usize) !MCValue {
-        switch (self.target.cpu.arch) {
-            else => return self.fail(src, "TODO implement function parameters for {}", .{self.target.cpu.arch}),
-        }
-        return .none;
+    fn genArg(self: *Function, inst: *ir.Inst.Arg) !MCValue {
+        return self.args[inst.args.index];
     }
 
-    fn genBreakpoint(self: *Function, src: usize) !MCValue {
-        switch (self.target.cpu.arch) {
+    fn genBreakpoint(self: *Function, src: usize, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        switch (arch) {
             .i386, .x86_64 => {
                 try self.code.append(0xcc); // int3
             },
@@ -216,8 +353,8 @@ const Function = struct {
         return .none;
     }
 
-    fn genCall(self: *Function, inst: *ir.Inst.Call) !MCValue {
-        switch (self.target.cpu.arch) {
+    fn genCall(self: *Function, inst: *ir.Inst.Call, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        switch (arch) {
             .x86_64, .i386 => {
                 if (inst.args.func.cast(ir.Inst.Constant)) |func_inst| {
                     if (inst.args.args.len != 0) {
@@ -251,11 +388,11 @@ const Function = struct {
         }
     }
 
-    fn ret(self: *Function, src: usize, mcv: MCValue) !MCValue {
+    fn ret(self: *Function, src: usize, comptime arch: std.Target.Cpu.Arch, mcv: MCValue) !MCValue {
         if (mcv != .none) {
             return self.fail(src, "TODO implement return with non-void operand", .{});
         }
-        switch (self.target.cpu.arch) {
+        switch (arch) {
             .i386, .x86_64 => {
                 try self.code.append(0xc3); // ret
             },
@@ -264,43 +401,43 @@ const Function = struct {
         return .unreach;
     }
 
-    fn genRet(self: *Function, inst: *ir.Inst.Ret) !MCValue {
+    fn genRet(self: *Function, inst: *ir.Inst.Ret, comptime arch: std.Target.Cpu.Arch) !MCValue {
         const operand = try self.resolveInst(inst.args.operand);
-        return self.ret(inst.base.src, operand);
+        return self.ret(inst.base.src, arch, operand);
     }
 
-    fn genRetVoid(self: *Function, inst: *ir.Inst.RetVoid) !MCValue {
-        return self.ret(inst.base.src, .none);
+    fn genRetVoid(self: *Function, inst: *ir.Inst.RetVoid, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        return self.ret(inst.base.src, arch, .none);
     }
 
-    fn genCmp(self: *Function, inst: *ir.Inst.Cmp) !MCValue {
-        switch (self.target.cpu.arch) {
+    fn genCmp(self: *Function, inst: *ir.Inst.Cmp, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        switch (arch) {
             else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}),
         }
     }
 
-    fn genCondBr(self: *Function, inst: *ir.Inst.CondBr) !MCValue {
-        switch (self.target.cpu.arch) {
+    fn genCondBr(self: *Function, inst: *ir.Inst.CondBr, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        switch (arch) {
             else => return self.fail(inst.base.src, "TODO implement condbr for {}", .{self.target.cpu.arch}),
         }
     }
 
-    fn genIsNull(self: *Function, inst: *ir.Inst.IsNull) !MCValue {
-        switch (self.target.cpu.arch) {
+    fn genIsNull(self: *Function, inst: *ir.Inst.IsNull, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        switch (arch) {
             else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.target.cpu.arch}),
         }
     }
 
-    fn genIsNonNull(self: *Function, inst: *ir.Inst.IsNonNull) !MCValue {
+    fn genIsNonNull(self: *Function, inst: *ir.Inst.IsNonNull, comptime arch: std.Target.Cpu.Arch) !MCValue {
         // Here you can specialize this instruction if it makes sense to, otherwise the default
         // will call genIsNull and invert the result.
-        switch (self.target.cpu.arch) {
+        switch (arch) {
             else => return self.fail(inst.base.src, "TODO call genIsNull and invert the result ", .{}),
         }
     }
 
-    fn genRelativeFwdJump(self: *Function, src: usize, amount: u32) !void {
-        switch (self.target.cpu.arch) {
+    fn genRelativeFwdJump(self: *Function, src: usize, comptime arch: std.Target.Cpu.Arch, amount: u32) !void {
+        switch (arch) {
             .i386, .x86_64 => {
                 // TODO x86 treats the operands as signed
                 if (amount <= std.math.maxInt(u8)) {
@@ -318,70 +455,13 @@ const Function = struct {
         }
     }
 
-    fn genBlock(self: *Function, inst: *ir.Inst.Block) !MCValue {
-        switch (self.target.cpu.arch) {
+    fn genBlock(self: *Function, inst: *ir.Inst.Block, comptime arch: std.Target.Cpu.Arch) !MCValue {
+        switch (arch) {
             else => return self.fail(inst.base.src, "TODO implement codegen Block for {}", .{self.target.cpu.arch}),
         }
     }
 
-    fn genAsm(self: *Function, inst: *ir.Inst.Assembly) !MCValue {
-        // TODO convert to inline function
-        switch (self.target.cpu.arch) {
-            .arm => return self.genAsmArch(.arm, inst),
-            .armeb => return self.genAsmArch(.armeb, inst),
-            .aarch64 => return self.genAsmArch(.aarch64, inst),
-            .aarch64_be => return self.genAsmArch(.aarch64_be, inst),
-            .aarch64_32 => return self.genAsmArch(.aarch64_32, inst),
-            .arc => return self.genAsmArch(.arc, inst),
-            .avr => return self.genAsmArch(.avr, inst),
-            .bpfel => return self.genAsmArch(.bpfel, inst),
-            .bpfeb => return self.genAsmArch(.bpfeb, inst),
-            .hexagon => return self.genAsmArch(.hexagon, inst),
-            .mips => return self.genAsmArch(.mips, inst),
-            .mipsel => return self.genAsmArch(.mipsel, inst),
-            .mips64 => return self.genAsmArch(.mips64, inst),
-            .mips64el => return self.genAsmArch(.mips64el, inst),
-            .msp430 => return self.genAsmArch(.msp430, inst),
-            .powerpc => return self.genAsmArch(.powerpc, inst),
-            .powerpc64 => return self.genAsmArch(.powerpc64, inst),
-            .powerpc64le => return self.genAsmArch(.powerpc64le, inst),
-            .r600 => return self.genAsmArch(.r600, inst),
-            .amdgcn => return self.genAsmArch(.amdgcn, inst),
-            .riscv32 => return self.genAsmArch(.riscv32, inst),
-            .riscv64 => return self.genAsmArch(.riscv64, inst),
-            .sparc => return self.genAsmArch(.sparc, inst),
-            .sparcv9 => return self.genAsmArch(.sparcv9, inst),
-            .sparcel => return self.genAsmArch(.sparcel, inst),
-            .s390x => return self.genAsmArch(.s390x, inst),
-            .tce => return self.genAsmArch(.tce, inst),
-            .tcele => return self.genAsmArch(.tcele, inst),
-            .thumb => return self.genAsmArch(.thumb, inst),
-            .thumbeb => return self.genAsmArch(.thumbeb, inst),
-            .i386 => return self.genAsmArch(.i386, inst),
-            .x86_64 => return self.genAsmArch(.x86_64, inst),
-            .xcore => return self.genAsmArch(.xcore, inst),
-            .nvptx => return self.genAsmArch(.nvptx, inst),
-            .nvptx64 => return self.genAsmArch(.nvptx64, inst),
-            .le32 => return self.genAsmArch(.le32, inst),
-            .le64 => return self.genAsmArch(.le64, inst),
-            .amdil => return self.genAsmArch(.amdil, inst),
-            .amdil64 => return self.genAsmArch(.amdil64, inst),
-            .hsail => return self.genAsmArch(.hsail, inst),
-            .hsail64 => return self.genAsmArch(.hsail64, inst),
-            .spir => return self.genAsmArch(.spir, inst),
-            .spir64 => return self.genAsmArch(.spir64, inst),
-            .kalimba => return self.genAsmArch(.kalimba, inst),
-            .shave => return self.genAsmArch(.shave, inst),
-            .lanai => return self.genAsmArch(.lanai, inst),
-            .wasm32 => return self.genAsmArch(.wasm32, inst),
-            .wasm64 => return self.genAsmArch(.wasm64, inst),
-            .renderscript32 => return self.genAsmArch(.renderscript32, inst),
-            .renderscript64 => return self.genAsmArch(.renderscript64, inst),
-            .ve => return self.genAsmArch(.ve, inst),
-        }
-    }
-
-    fn genAsmArch(self: *Function, comptime arch: Target.Cpu.Arch, inst: *ir.Inst.Assembly) !MCValue {
+    fn genAsm(self: *Function, inst: *ir.Inst.Assembly, comptime arch: Target.Cpu.Arch) !MCValue {
         if (arch != .x86_64 and arch != .i386) {
             return self.fail(inst.base.src, "TODO implement inline asm support for more architectures", .{});
         }
@@ -607,6 +687,9 @@ const Function = struct {
                         }
                     }
                 },
+                .stack_offset => return self.fail(src, "TODO implement genSetReg for stack variables", .{}),
             },
             else => return self.fail(src, "TODO implement genSetReg for more architectures", .{}),
         }
src-self-hosted/type.zig
@@ -535,6 +535,76 @@ pub const Type = extern union {
         };
     }
 
+    /// Asserts the type has the ABI size already resolved.
+    pub fn abiSize(self: Type, target: Target) u64 {
+        return switch (self.tag()) {
+            .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
+            .fn_void_no_args => unreachable, // represents machine code; not a pointer
+            .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
+            .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer
+            .function => unreachable, // represents machine code; not a pointer
+            .c_void => unreachable,
+            .void => unreachable,
+            .type => unreachable,
+            .comptime_int => unreachable,
+            .comptime_float => unreachable,
+            .noreturn => unreachable,
+            .@"null" => unreachable,
+            .@"undefined" => unreachable,
+
+            .u8,
+            .i8,
+            .bool,
+            => return 1,
+
+            .array_u8_sentinel_0 => return @fieldParentPtr(Payload.Array_u8_Sentinel0, "base", self.ptr_otherwise).len,
+            .array => {
+                const payload = @fieldParentPtr(Payload.Array, "base", self.ptr_otherwise);
+                const elem_size = std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target));
+                return payload.len * elem_size;
+            },
+            .i16, .u16 => return 2,
+            .i32, .u32 => return 4,
+            .i64, .u64 => return 8,
+
+            .isize,
+            .usize,
+            .single_const_pointer_to_comptime_int,
+            .const_slice_u8,
+            .single_const_pointer,
+            => return @divExact(target.cpu.arch.ptrBitWidth(), 8),
+
+            .c_short => return @divExact(CType.short.sizeInBits(target), 8),
+            .c_ushort => return @divExact(CType.ushort.sizeInBits(target), 8),
+            .c_int => return @divExact(CType.int.sizeInBits(target), 8),
+            .c_uint => return @divExact(CType.uint.sizeInBits(target), 8),
+            .c_long => return @divExact(CType.long.sizeInBits(target), 8),
+            .c_ulong => return @divExact(CType.ulong.sizeInBits(target), 8),
+            .c_longlong => return @divExact(CType.longlong.sizeInBits(target), 8),
+            .c_ulonglong => return @divExact(CType.ulonglong.sizeInBits(target), 8),
+
+            .f16 => return 2,
+            .f32 => return 4,
+            .f64 => return 8,
+            .f128 => return 16,
+            .c_longdouble => return 16,
+
+            .anyerror => return 2, // TODO revisit this when we have the concept of the error tag type
+
+            .int_signed, .int_unsigned => {
+                const bits: u16 = if (self.cast(Payload.IntSigned)) |pl|
+                    pl.bits
+                else if (self.cast(Payload.IntUnsigned)) |pl|
+                    pl.bits
+                else
+                    unreachable;
+
+                return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8);
+            },
+        };
+    }
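The integer rule at the end is worth spelling out: an iN/uN occupies its bit count rounded up to whole bytes, then rounded up to the next power of two. A quick standalone check (illustrative only):

```zig
const std = @import("std");

pub fn main() void {
    // {bits, expected ABI size in bytes}
    const cases = [_][2]u16{
        .{ 1, 1 },  // u1  -> 1 byte
        .{ 8, 1 },  // u8  -> 1 byte
        .{ 9, 2 },  // u9  -> 2 bytes
        .{ 17, 4 }, // u17 -> (17+7)/8 = 3 -> next power of two = 4
        .{ 33, 8 }, // u33 -> 5 -> 8
    };
    for (cases) |case| {
        const size = std.math.ceilPowerOfTwoPromote(u16, (case[0] + 7) / 8);
        std.debug.assert(size == case[1]);
    }
}
```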
+
     pub fn isSinglePointer(self: Type) bool {
         return switch (self.tag()) {
             .u8,