Commit 96e715d5a3

joachimschmidt557 <joachim.schmidt557@outlook.com>
2021-11-12 22:14:49
stage2 AArch64: add new load/store from/to stack MIR instructions
1 parent 8ab90a0
Changed files (3)
src/arch/aarch64/CodeGen.zig
@@ -302,6 +302,7 @@ pub fn generate(
         .prev_di_pc = 0,
         .prev_di_line = module_fn.lbrace_line,
         .prev_di_column = module_fn.lbrace_column,
+        .stack_size = mem.alignForwardGeneric(u32, function.max_end_stack, function.stack_align),
     };
     defer emit.deinit();
 
@@ -1346,9 +1347,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
             const stack_offset = try self.allocMem(inst, abi_size, abi_align);
             try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
 
-            // TODO correct loading and storing from memory
-            // break :blk MCValue{ .stack_offset = stack_offset };
-            break :blk result;
+            break :blk MCValue{ .stack_offset = stack_offset };
         },
         else => result,
     };
@@ -2261,28 +2260,23 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 
             switch (abi_size) {
                 1, 2, 4, 8 => {
-                    const offset = if (math.cast(i9, adj_off)) |imm|
-                        Instruction.LoadStoreOffset.imm_post_index(-imm)
-                    else |_|
-                        Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off }));
-                    const rn: Register = switch (self.target.cpu.arch) {
-                        .aarch64, .aarch64_be => .x29,
-                        .aarch64_32 => .w29,
-                        else => unreachable,
-                    };
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => .strb,
-                        2 => .strh,
-                        4, 8 => .str,
+                        1 => .strb_stack,
+                        2 => .strh_stack,
+                        4, 8 => .str_stack,
+                        else => unreachable, // unexpected abi size
+                    };
+                    const rt: Register = switch (abi_size) {
+                        1, 2, 4 => reg.to32(),
+                        8 => reg.to64(),
                         else => unreachable, // unexpected abi size
                     };
 
                     _ = try self.addInst(.{
                         .tag = tag,
-                        .data = .{ .load_store_register = .{
-                            .rt = reg,
-                            .rn = rn,
-                            .offset = offset,
+                        .data = .{ .load_store_stack = .{
+                            .rt = rt,
+                            .offset = @intCast(u32, adj_off),
                         } },
                     });
                 },
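Two things change in this store path: the base register moves from the frame pointer (x29) to sp, and the signed 9-bit post-index form, which also writes back to the base register, gives way to a scaled unsigned 12-bit immediate that is resolved later in Emit.zig. A rough standalone comparison of the reach of the two immediate forms (illustrative arithmetic only, not compiler code):

    const std = @import("std");

    // Illustrative arithmetic only: reach of the old and new immediate forms.
    test "old vs new immediate reach for 8-byte accesses" {
        // Old: signed 9-bit unscaled immediate from the frame pointer.
        const old_reach = std.math.maxInt(i9); // 255 bytes
        // New: unsigned 12-bit immediate, scaled by the access size, from sp.
        const new_reach = @as(u32, std.math.maxInt(u12)) * 8; // 32760 bytes
        try std.testing.expect(new_reach > old_reach);
    }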
@@ -2384,36 +2378,28 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             });
         },
         .stack_offset => |unadjusted_off| {
-            // TODO: maybe addressing from sp instead of fp
             const abi_size = ty.abiSize(self.target.*);
             const adj_off = unadjusted_off + abi_size;
 
-            const rn: Register = switch (self.target.cpu.arch) {
-                .aarch64, .aarch64_be => .x29,
-                .aarch64_32 => .w29,
-                else => unreachable,
-            };
-
-            const offset = if (math.cast(i9, adj_off)) |imm|
-                Instruction.LoadStoreOffset.imm_post_index(-imm)
-            else |_|
-                Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off }));
-
             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => .ldrb,
-                        2 => .ldrh,
-                        4, 8 => .ldr,
+                        1 => .ldrb_stack,
+                        2 => .ldrh_stack,
+                        4, 8 => .ldr_stack,
+                        else => unreachable, // unexpected abi size
+                    };
+                    const rt: Register = switch (abi_size) {
+                        1, 2, 4 => reg.to32(),
+                        8 => reg.to64(),
                         else => unreachable, // unexpected abi size
                     };
 
                     _ = try self.addInst(.{
                         .tag = tag,
-                        .data = .{ .load_store_register = .{
-                            .rt = reg,
-                            .rn = rn,
-                            .offset = offset,
+                        .data = .{ .load_store_stack = .{
+                            .rt = rt,
+                            .offset = @intCast(u32, adj_off),
                         } },
                     });
                 },
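The load path in genSetReg mirrors the store path: the same abi-size switch picks the pseudo-instruction tag and the register width for rt. A minimal runnable mirror of that mapping (the local enum and helper are ours for illustration; the real tags live in Mir.zig, and loads use the ldr*_stack tags symmetrically):

    const std = @import("std");

    const Tag = enum { strb_stack, strh_stack, str_stack };
    const RtWidth = enum { w32, w64 };

    fn select(abi_size: u64) struct { tag: Tag, rt: RtWidth } {
        return .{
            .tag = switch (abi_size) {
                1 => .strb_stack,
                2 => .strh_stack,
                4, 8 => .str_stack,
                else => unreachable, // other sizes take a different code path
            },
            // 1-, 2- and 4-byte accesses use the 32-bit register view,
            // 8-byte accesses the 64-bit view.
            .rt = switch (abi_size) {
                1, 2, 4 => .w32,
                8 => .w64,
                else => unreachable,
            },
        };
    }

    test "abi size selects tag and register width" {
        try std.testing.expectEqual(Tag.strh_stack, select(2).tag);
        try std.testing.expectEqual(RtWidth.w64, select(8).rt);
    }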
src/arch/aarch64/Emit.zig
@@ -42,6 +42,9 @@ branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUn
 /// instruction
 code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
 
+/// The final, aligned stack frame size of the function
+stack_size: u32,
+
 const InnerError = error{
     OutOfMemory,
     EmitFail,
@@ -103,6 +105,13 @@ pub fn emitMir(
             .ldp => try emit.mirLoadStoreRegisterPair(inst),
             .stp => try emit.mirLoadStoreRegisterPair(inst),
 
+            .ldr_stack => try emit.mirLoadStoreStack(inst),
+            .ldrb_stack => try emit.mirLoadStoreStack(inst),
+            .ldrh_stack => try emit.mirLoadStoreStack(inst),
+            .str_stack => try emit.mirLoadStoreStack(inst),
+            .strb_stack => try emit.mirLoadStoreStack(inst),
+            .strh_stack => try emit.mirLoadStoreStack(inst),
+
             .ldr => try emit.mirLoadStoreRegister(inst),
             .ldrb => try emit.mirLoadStoreRegister(inst),
             .ldrh => try emit.mirLoadStoreRegister(inst),
@@ -652,6 +661,79 @@ fn mirLoadStoreRegisterPair(emit: *Emit, inst: Mir.Inst.Index) !void {
     }
 }
 
+fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const load_store_stack = emit.mir.instructions.items(.data)[inst].load_store_stack;
+
+    const raw_offset = emit.stack_size - load_store_stack.offset;
+    const offset = switch (tag) {
+        .ldrb_stack, .strb_stack => blk: {
+            if (math.cast(u12, raw_offset)) |imm| {
+                break :blk Instruction.LoadStoreOffset.imm(imm);
+            } else |_| {
+                return emit.fail("TODO load/store stack byte with larger offset", .{});
+            }
+        },
+        .ldrh_stack, .strh_stack => blk: {
+            assert(std.mem.isAlignedGeneric(u32, raw_offset, 2)); // misaligned stack entry
+            if (math.cast(u12, @divExact(raw_offset, 2))) |imm| {
+                break :blk Instruction.LoadStoreOffset.imm(imm);
+            } else |_| {
+                return emit.fail("TODO load/store stack halfword with larger offset", .{});
+            }
+        },
+        .ldr_stack, .str_stack => blk: {
+            const alignment: u32 = switch (load_store_stack.rt.size()) {
+                32 => 4,
+                64 => 8,
+                else => unreachable,
+            };
+
+            assert(std.mem.isAlignedGeneric(u32, raw_offset, alignment)); // misaligned stack entry
+            if (math.cast(u12, @divExact(raw_offset, alignment))) |imm| {
+                break :blk Instruction.LoadStoreOffset.imm(imm);
+            } else |_| {
+                return emit.fail("TODO load/store stack with larger offset", .{});
+            }
+        },
+        else => unreachable,
+    };
+
+    switch (tag) {
+        .ldr_stack => try emit.writeInstruction(Instruction.ldr(
+            load_store_stack.rt,
+            Register.sp,
+            offset,
+        )),
+        .ldrb_stack => try emit.writeInstruction(Instruction.ldrb(
+            load_store_stack.rt,
+            Register.sp,
+            offset,
+        )),
+        .ldrh_stack => try emit.writeInstruction(Instruction.ldrh(
+            load_store_stack.rt,
+            Register.sp,
+            offset,
+        )),
+        .str_stack => try emit.writeInstruction(Instruction.str(
+            load_store_stack.rt,
+            Register.sp,
+            offset,
+        )),
+        .strb_stack => try emit.writeInstruction(Instruction.strb(
+            load_store_stack.rt,
+            Register.sp,
+            offset,
+        )),
+        .strh_stack => try emit.writeInstruction(Instruction.strh(
+            load_store_stack.rt,
+            Register.sp,
+            offset,
+        )),
+        else => unreachable,
+    }
+}
+
 fn mirLoadStoreRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const load_store_register = emit.mir.instructions.items(.data)[inst].load_store_register;
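mirLoadStoreStack translates a frame-relative MIR offset into an sp-relative scaled immediate. The arithmetic is compact but easy to misread: CodeGen stores unadjusted_off + abi_size, so the offset counts down from the end of the frame, and the unsigned-offset LDR/STR encoding scales its 12-bit immediate by the access size. A minimal standalone re-derivation, written against current Zig std (the helper name is ours, not the compiler's):

    const std = @import("std");

    /// Illustrative re-derivation of the immediate mirLoadStoreStack encodes.
    /// `mir_offset` is what CodeGen stored (unadjusted_off + abi_size).
    fn scaledStackImm(stack_size: u32, mir_offset: u32, access_size: u32) !u32 {
        // Byte offset from sp to the low address of the slot.
        const raw_offset = stack_size - mir_offset;
        // The unsigned-offset LDR/STR encoding scales its immediate by the
        // access size, so the byte offset must be a multiple of it.
        if (raw_offset % access_size != 0) return error.Misaligned;
        const imm = raw_offset / access_size;
        // Only 12 bits are available; Emit.zig currently fails with a TODO
        // for anything larger.
        if (imm > std.math.maxInt(u12)) return error.OffsetTooLarge;
        return imm;
    }

    test "u64 slot at unadjusted offset 0 in a 32-byte frame" {
        // CodeGen records offset = 0 + 8 = 8; the emitted instruction is
        // `ldr xN, [sp, #24]`, i.e. immediate 24 / 8 = 3.
        try std.testing.expectEqual(@as(u32, 3), try scaledStackImm(32, 8, 8));
    }

For offsets the immediate cannot reach, the function currently bails out with the TODO errors above rather than falling back to a register offset.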
src/arch/aarch64/Mir.zig
@@ -56,12 +56,18 @@ pub const Inst = struct {
         load_memory,
         /// Load Pair of Registers
         ldp,
+        /// Pseudo-instruction: Load from stack
+        ldr_stack,
         /// Load Register
         // TODO: split into ldr_immediate and ldr_register
         ldr,
+        /// Pseudo-instruction: Load byte from stack
+        ldrb_stack,
         /// Load Register Byte
         // TODO: split into ldrb_immediate and ldrb_register
         ldrb,
+        /// Pseudo-instruction: Load halfword from stack
+        ldrh_stack,
         /// Load Register Halfword
         // TODO: split into ldrh_immediate and ldrh_register
         ldrh,
@@ -79,12 +85,18 @@ pub const Inst = struct {
         ret,
         /// Store Pair of Registers
         stp,
+        /// Pseudo-instruction: Store to stack
+        str_stack,
         /// Store Register
         // TODO: split into str_immediate and str_register
         str,
+        /// Pseudo-instruction: Store byte to stack
+        strb_stack,
         /// Store Register Byte
         // TODO: split into strb_immediate and strb_register
         strb,
+        /// Pseudo-instruction: Store halfword to stack
+        strh_stack,
         /// Store Register Halfword
         // TODO: split into strh_immediate and strh_register
         strh,
@@ -175,7 +187,7 @@ pub const Inst = struct {
             rm: Register,
             cond: bits.Instruction.Condition,
         },
-        /// Three registers and a LoadStoreOffset
+        /// Two registers and a LoadStoreOffset
         ///
         /// Used by e.g. str_register
         load_store_register: struct {
@@ -183,6 +195,13 @@ pub const Inst = struct {
             rn: Register,
             offset: bits.Instruction.LoadStoreOffset,
         },
+        /// A register and a stack offset
+        ///
+        /// Used by e.g. str_stack
+        load_store_stack: struct {
+            rt: Register,
+            offset: u32,
+        },
         /// Three registers and a LoadStorePairOffset
         ///
         /// Used by e.g. stp
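Why pseudo-instructions at all: the frame size depends on max_end_stack, which is only final once the whole function has been lowered, so CodeGen records frame-relative offsets and Emit resolves them against sp using the stack_size it is handed (the new field in Emit.zig). A hypothetical walk-through with made-up numbers, using current std naming (the commit-era call is mem.alignForwardGeneric):

    const std = @import("std");

    // Hypothetical numbers showing why resolution is deferred to Emit: the
    // frame size depends on max_end_stack, which is only final after codegen.
    test "frame size fixed after codegen, offsets resolved at emit time" {
        const max_end_stack: u32 = 20; // grows while lowering the function
        const stack_align: u32 = 16;
        const stack_size = std.mem.alignForward(u32, max_end_stack, stack_align);
        try std.testing.expectEqual(@as(u32, 32), stack_size);
        // A str_stack recorded at MIR offset 8 now resolves to [sp, #24].
        try std.testing.expectEqual(@as(u32, 24), stack_size - 8);
    }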