Commit 5d019abe4e

Alex Rønne Petersen <alex@alexrp.com>
2025-08-24 21:23:45
start adding big endian RISC-V support
The big endian RISC-V effort is mostly driven by MIPS (the company), which is pivoting to RISC-V and presumably needs a big endian variant to fill the niche that big endian MIPS (the ISA) did. GCC already supports these targets, but LLVM support will only appear in LLVM 22; this commit just adds the necessary target knowledge and checks on our end.
1 parent 12686d9
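
A minimal sketch (not part of the diff) of what the new arch tags make observable, assuming the big endian list extended in std.Target below backs Cpu.Arch.endian() and that Query.parse keeps matching arch names to enum tag names; the test name and exact assertions are illustrative only:

    const std = @import("std");
    const testing = std.testing;

    test "riscv64be target knowledge" {
        // Assumption: Query.parse accepts the new arch spelling because it
        // matches Cpu.Arch enum tag names.
        const query = try std.Target.Query.parse(.{
            .arch_os_abi = "riscv64be-freestanding-none",
        });
        try testing.expectEqual(std.Target.Cpu.Arch.riscv64be, query.cpu_arch.?);

        // The new tag stays in the RISC-V family...
        const arch: std.Target.Cpu.Arch = .riscv64be;
        try testing.expect(arch.isRISCV());

        // ...but reports big endianness, unlike plain .riscv64.
        try testing.expectEqual(std.builtin.Endian.big, arch.endian());
        try testing.expectEqual(std.builtin.Endian.little, std.Target.Cpu.Arch.riscv64.endian());
    }
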
lib/compiler/aro/aro/Compilation.zig
@@ -927,7 +927,7 @@ fn generateVaListType(comp: *Compilation) !Type {
             .ios, .macos, .tvos, .watchos => .char_ptr,
             else => .aarch64_va_list,
         },
-        .sparc, .wasm32, .wasm64, .bpfel, .bpfeb, .riscv32, .riscv64, .avr, .spirv32, .spirv64 => .void_ptr,
+        .sparc, .wasm32, .wasm64, .bpfel, .bpfeb, .riscv32, .riscv32be, .riscv64, .riscv64be, .avr, .spirv32, .spirv64 => .void_ptr,
         .powerpc => switch (comp.target.os.tag) {
             .ios, .macos, .tvos, .watchos, .aix => @as(Kind, .char_ptr),
             else => return Type{ .specifier = .void }, // unknown
lib/compiler/aro/aro/target.zig
@@ -15,6 +15,7 @@ pub fn intMaxType(target: std.Target) Type {
         .bpfeb,
         .loongarch64,
         .riscv64,
+        .riscv64be,
         .powerpc64,
         .powerpc64le,
         .ve,
@@ -47,6 +48,7 @@ pub fn intPtrType(target: std.Target) Type {
         .csky,
         .loongarch32,
         .riscv32,
+        .riscv32be,
         .xcore,
         .hexagon,
         .m68k,
@@ -109,6 +111,7 @@ pub fn int64Type(target: std.Target) Type {
         .loongarch64,
         .ve,
         .riscv64,
+        .riscv64be,
         .powerpc64,
         .powerpc64le,
         .bpfel,
@@ -138,7 +141,7 @@ pub fn defaultFunctionAlignment(target: std.Target) u8 {
         .arm, .armeb => 4,
         .aarch64, .aarch64_be => 4,
         .sparc, .sparc64 => 4,
-        .riscv64 => 2,
+        .riscv64, .riscv64be => 2,
         else => 1,
     };
 }
@@ -330,7 +333,9 @@ pub const FPSemantics = enum {
             .armeb,
             .hexagon,
             .riscv32,
+            .riscv32be,
             .riscv64,
+            .riscv64be,
             .spirv32,
             .spirv64,
             => return .IEEEHalf,
@@ -429,7 +434,9 @@ pub fn ldEmulationOption(target: std.Target, arm_endianness: ?std.builtin.Endian
         .powerpc64 => "elf64ppc",
         .powerpc64le => "elf64lppc",
         .riscv32 => "elf32lriscv",
+        .riscv32be => "elf32briscv",
         .riscv64 => "elf64lriscv",
+        .riscv64be => "elf64briscv",
         .sparc => "elf32_sparc",
         .sparc64 => "elf64_sparc",
         .loongarch32 => "elf32loongarch",
@@ -477,6 +484,7 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
         .powerpc,
         .powerpcle,
         .riscv32,
+        .riscv32be,
         .sparc,
         .thumb,
         .thumbeb,
@@ -502,6 +510,7 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
         .powerpc64 => copy.cpu.arch = .powerpc,
         .powerpc64le => copy.cpu.arch = .powerpcle,
         .riscv64 => copy.cpu.arch = .riscv32,
+        .riscv64be => copy.cpu.arch = .riscv32be,
         .sparc64 => copy.cpu.arch = .sparc,
         .x86_64 => copy.cpu.arch = .x86,
     }
@@ -537,6 +546,7 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
         .powerpc64,
         .powerpc64le,
         .riscv64,
+        .riscv64be,
         .s390x,
         .sparc64,
         .ve,
@@ -552,6 +562,7 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
         .powerpc => copy.cpu.arch = .powerpc64,
         .powerpcle => copy.cpu.arch = .powerpc64le,
         .riscv32 => copy.cpu.arch = .riscv64,
+        .riscv32be => copy.cpu.arch = .riscv64be,
         .sparc => copy.cpu.arch = .sparc64,
         .spirv32 => copy.cpu.arch = .spirv64,
         .thumb => copy.cpu.arch = .aarch64,
@@ -595,7 +606,9 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
         .powerpc64le => "powerpc64le",
         .amdgcn => "amdgcn",
         .riscv32 => "riscv32",
+        .riscv32be => "riscv32be",
         .riscv64 => "riscv64",
+        .riscv64be => "riscv64be",
         .sparc => "sparc",
         .sparc64 => "sparc64",
         .s390x => "s390x",
lib/compiler_rt/clear_cache.zig
@@ -39,10 +39,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
         .mips, .mipsel, .mips64, .mips64el => true,
         else => false,
     };
-    const riscv = switch (arch) {
-        .riscv32, .riscv64 => true,
-        else => false,
-    };
+    const riscv = arch.isRISCV();
     const powerpc64 = switch (arch) {
         .powerpc64, .powerpc64le => true,
         else => false,
lib/compiler_rt/common.zig
@@ -87,7 +87,9 @@ pub const gnu_f16_abi = switch (builtin.cpu.arch) {
     .wasm32,
     .wasm64,
     .riscv64,
+    .riscv64be,
     .riscv32,
+    .riscv32be,
     => false,
 
     .x86, .x86_64 => true,
@@ -124,7 +126,9 @@ pub fn F16T(comptime OtherType: type) type {
         .nvptx,
         .nvptx64,
         .riscv32,
+        .riscv32be,
         .riscv64,
+        .riscv64be,
         .spirv32,
         .spirv64,
         => f16,
lib/std/builtin/assembly.zig
@@ -641,7 +641,7 @@ pub const Clobbers = switch (@import("builtin").cpu.arch) {
         q14: bool = false,
         q15: bool = false,
     },
-    .riscv32, .riscv64 => packed struct {
+    .riscv32, .riscv32be, .riscv64, .riscv64be => packed struct {
         /// Whether the inline assembly code may perform stores to memory
         /// addresses other than those derived from input pointer provenance.
         memory: bool = false,
lib/std/debug/Dwarf/abi.zig
@@ -20,7 +20,7 @@ pub fn supportsUnwinding(target: *const std.Target) bool {
 
         // Enabling this causes relocation errors such as:
         // error: invalid relocation type R_RISCV_SUB32 at offset 0x20
-        .riscv64, .riscv32 => false,
+        .riscv64, .riscv64be, .riscv32, .riscv32be => false,
 
         // Conservative guess. Feel free to update this logic with any targets
         // that are known to not support Dwarf unwinding.
lib/std/atomic.zig
@@ -386,7 +386,9 @@ pub inline fn spinLoopHint() void {
         => asm volatile ("pause(#1)"),
 
         .riscv32,
+        .riscv32be,
         .riscv64,
+        .riscv64be,
         => if (comptime builtin.cpu.has(.riscv, .zihintpause)) {
             asm volatile ("pause");
         },
lib/std/builtin.zig
@@ -909,7 +909,7 @@ pub const VaList = switch (builtin.cpu.arch) {
     .hexagon => if (builtin.target.abi.isMusl()) VaListHexagon else *u8,
     .loongarch32, .loongarch64 => *anyopaque,
     .mips, .mipsel, .mips64, .mips64el => *anyopaque,
-    .riscv32, .riscv64 => *anyopaque,
+    .riscv32, .riscv32be, .riscv64, .riscv64be => *anyopaque,
     .powerpc, .powerpcle => switch (builtin.os.tag) {
         .ios, .macos, .tvos, .watchos, .visionos, .aix => *u8,
         else => VaListPowerPc,
lib/std/pie.zig
@@ -30,7 +30,7 @@ const R_RELATIVE = switch (builtin.cpu.arch) {
     .m68k => R_68K_RELATIVE,
     .mips, .mipsel, .mips64, .mips64el => R_MIPS_RELATIVE,
     .powerpc, .powerpcle, .powerpc64, .powerpc64le => R_PPC_RELATIVE,
-    .riscv32, .riscv64 => R_RISCV_RELATIVE,
+    .riscv32, .riscv32be, .riscv64, .riscv64be => R_RISCV_RELATIVE,
     .s390x => R_390_RELATIVE,
     .sparc, .sparc64 => R_SPARC_RELATIVE,
     else => @compileError("Missing R_RELATIVE definition for this target"),
@@ -163,7 +163,7 @@ inline fn getDynamicSymbol() [*]const elf.Dyn {
                 : [ret] "=r" (-> [*]const elf.Dyn),
                 :
                 : .{ .lr = true, .r4 = true }),
-            .riscv32, .riscv64 => asm volatile (
+            .riscv32, .riscv32be, .riscv64, .riscv64be => asm volatile (
                 \\ .weak _DYNAMIC
                 \\ .hidden _DYNAMIC
                 \\ lla %[ret], _DYNAMIC
lib/std/start.zig
@@ -203,7 +203,7 @@ fn _start() callconv(.naked) noreturn {
             .m68k => ".cfi_undefined %%pc",
             .mips, .mipsel, .mips64, .mips64el => ".cfi_undefined $ra",
             .powerpc, .powerpcle, .powerpc64, .powerpc64le => ".cfi_undefined lr",
-            .riscv32, .riscv64 => if (builtin.zig_backend == .stage2_riscv64)
+            .riscv32, .riscv32be, .riscv64, .riscv64be => if (builtin.zig_backend == .stage2_riscv64)
                 ""
             else
                 ".cfi_undefined ra",
@@ -305,7 +305,7 @@ fn _start() callconv(.naked) noreturn {
             \\ bstrins.d $sp, $zero, 3, 0
             \\ b %[posixCallMainAndExit]
             ,
-            .riscv32, .riscv64 =>
+            .riscv32, .riscv32be, .riscv64, .riscv64be =>
             \\ li fp, 0
             \\ li ra, 0
             \\ mv a0, sp
lib/std/Target.zig
@@ -1042,7 +1042,7 @@ pub fn toElfMachine(target: *const Target) std.elf.EM {
         .powerpc, .powerpcle => .PPC,
         .powerpc64, .powerpc64le => .PPC64,
         .propeller => .PROPELLER,
-        .riscv32, .riscv64 => .RISCV,
+        .riscv32, .riscv32be, .riscv64, .riscv64be => .RISCV,
         .s390x => .S390,
         .sparc => if (target.cpu.has(.sparc, .v9)) .SPARC32PLUS else .SPARC,
         .sparc64 => .SPARCV9,
@@ -1099,6 +1099,8 @@ pub fn toCoffMachine(target: *const Target) std.coff.MachineType {
         .powerpcle,
         .powerpc64,
         .powerpc64le,
+        .riscv32be,
+        .riscv64be,
         .s390x,
         .sparc,
         .sparc64,
@@ -1310,7 +1312,9 @@ pub const Cpu = struct {
         powerpc64le,
         propeller,
         riscv32,
+        riscv32be,
         riscv64,
+        riscv64be,
         s390x,
         sparc,
         sparc64,
@@ -1340,6 +1344,7 @@ pub const Cpu = struct {
         // - sparcel
         // - spir
         // - spir64
+        // - spirv
         // - tce
         // - tcele
 
@@ -1396,7 +1401,7 @@ pub const Cpu = struct {
                 .nvptx, .nvptx64 => .nvptx,
                 .powerpc, .powerpcle, .powerpc64, .powerpc64le => .powerpc,
                 .propeller => .propeller,
-                .riscv32, .riscv64 => .riscv,
+                .riscv32, .riscv32be, .riscv64, .riscv64be => .riscv,
                 .s390x => .s390x,
                 .sparc, .sparc64 => .sparc,
                 .spirv32, .spirv64 => .spirv,
@@ -1452,8 +1457,19 @@ pub const Cpu = struct {
         }
 
         pub inline fn isRISCV(arch: Arch) bool {
+            return arch.isRiscv32() or arch.isRiscv64();
+        }
+
+        pub inline fn isRiscv32(arch: Arch) bool {
+            return switch (arch) {
+                .riscv32, .riscv32be => true,
+                else => false,
+            };
+        }
+
+        pub inline fn isRiscv64(arch: Arch) bool {
             return switch (arch) {
-                .riscv32, .riscv64 => true,
+                .riscv64, .riscv64be => true,
                 else => false,
             };
         }
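
The width-specific helpers let call sites stay endianness-agnostic, as the clear_cache.zig change above already does with isRISCV(); a rough usage sketch (the riscvXlen helper is hypothetical, not part of this commit):

    const std = @import("std");

    /// Hypothetical helper: XLEN in bits for any RISC-V arch tag,
    /// regardless of endianness.
    fn riscvXlen(arch: std.Target.Cpu.Arch) ?u16 {
        if (arch.isRiscv64()) return 64;
        if (arch.isRiscv32()) return 32;
        return null;
    }

    test riscvXlen {
        try std.testing.expectEqual(@as(?u16, 64), riscvXlen(.riscv64be));
        try std.testing.expectEqual(@as(?u16, 32), riscvXlen(.riscv32be));
        try std.testing.expectEqual(@as(?u16, null), riscvXlen(.x86_64));
    }
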
@@ -1576,6 +1592,8 @@ pub const Cpu = struct {
                 .or1k,
                 .powerpc,
                 .powerpc64,
+                .riscv32be,
+                .riscv64be,
                 .thumbeb,
                 .sparc,
                 .sparc64,
@@ -1688,12 +1706,12 @@ pub const Cpu = struct {
                 .riscv64_lp64,
                 .riscv64_lp64_v,
                 .riscv64_interrupt,
-                => &.{.riscv64},
+                => &.{ .riscv64, .riscv64be },
 
                 .riscv32_ilp32,
                 .riscv32_ilp32_v,
                 .riscv32_interrupt,
-                => &.{.riscv32},
+                => &.{ .riscv32, .riscv32be },
 
                 .sparc64_sysv,
                 => &.{.sparc64},
@@ -1822,8 +1840,8 @@ pub const Cpu = struct {
                 .powerpc, .powerpcle => &powerpc.cpu.ppc,
                 .powerpc64, .powerpc64le => &powerpc.cpu.ppc64,
                 .propeller => &propeller.cpu.p1,
-                .riscv32 => &riscv.cpu.generic_rv32,
-                .riscv64 => &riscv.cpu.generic_rv64,
+                .riscv32, .riscv32be => &riscv.cpu.generic_rv32,
+                .riscv64, .riscv64be => &riscv.cpu.generic_rv64,
                 .sparc64 => &sparc.cpu.v9, // SPARC can only be 64-bit from v9 and up.
                 .wasm32, .wasm64 => &wasm.cpu.mvp,
                 .x86 => &x86.cpu.i386,
@@ -1867,8 +1885,8 @@ pub const Cpu = struct {
                 .msp430 => &msp430.cpu.msp430,
                 .nvptx, .nvptx64 => &nvptx.cpu.sm_52,
                 .powerpc64le => &powerpc.cpu.ppc64le,
-                .riscv32 => &riscv.cpu.baseline_rv32,
-                .riscv64 => &riscv.cpu.baseline_rv64,
+                .riscv32, .riscv32be => &riscv.cpu.baseline_rv32,
+                .riscv64, .riscv64be => &riscv.cpu.baseline_rv64,
                 .s390x => &s390x.cpu.arch8, // gcc/clang do not have a generic s390x model.
                 .sparc => &sparc.cpu.v9, // glibc does not work with 'plain' v8.
                 .x86 => &x86.cpu.pentium4,
@@ -2615,6 +2633,7 @@ pub fn ptrBitWidth_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) u16 {
         .powerpc,
         .powerpcle,
         .riscv32,
+        .riscv32be,
         .thumb,
         .thumbeb,
         .x86,
@@ -2637,6 +2656,7 @@ pub fn ptrBitWidth_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) u16 {
         .powerpc64,
         .powerpc64le,
         .riscv64,
+        .riscv64be,
         .x86_64,
         .nvptx64,
         .wasm64,
@@ -2691,7 +2711,9 @@ pub fn stackAlignment(target: *const Target) u16 {
         .powerpc64le,
         => if (target.os.tag == .linux or target.os.tag == .aix) return 16,
         .riscv32,
+        .riscv32be,
         .riscv64,
+        .riscv64be,
         => if (!target.cpu.has(.riscv, .e)) return 16,
         .x86 => if (target.os.tag != .windows and target.os.tag != .uefi) return 16,
         .x86_64 => return 16,
@@ -2724,7 +2746,9 @@ pub fn cCharSignedness(target: *const Target) std.builtin.Signedness {
         .powerpc64le,
         .s390x,
         .riscv32,
+        .riscv32be,
         .riscv64,
+        .riscv64be,
         .xcore,
         .xtensa,
         => .unsigned,
@@ -2838,7 +2862,9 @@ pub fn cTypeBitSize(target: *const Target, c_type: CType) u16 {
                     },
 
                     .riscv32,
+                    .riscv32be,
                     .riscv64,
+                    .riscv64be,
                     .aarch64,
                     .aarch64_be,
                     .s390x,
@@ -2957,7 +2983,9 @@ pub fn cTypeBitSize(target: *const Target, c_type: CType) u16 {
                     },
 
                     .riscv32,
+                    .riscv32be,
                     .riscv64,
+                    .riscv64be,
                     .aarch64,
                     .aarch64_be,
                     .s390x,
@@ -3169,7 +3197,9 @@ pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 {
             .powerpc64,
             .powerpc64le,
             .riscv32,
+            .riscv32be,
             .riscv64,
+            .riscv64be,
             .sparc64,
             .spirv32,
             .spirv64,
@@ -3261,7 +3291,9 @@ pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 {
             .powerpc64,
             .powerpc64le,
             .riscv32,
+            .riscv32be,
             .riscv64,
+            .riscv64be,
             .sparc64,
             .spirv32,
             .spirv64,
@@ -3300,6 +3332,7 @@ pub fn cMaxIntAlignment(target: *const Target) u16 {
         .powerpc,
         .powerpcle,
         .riscv32,
+        .riscv32be,
         .s390x,
         => 8,
 
@@ -3315,6 +3348,7 @@ pub fn cMaxIntAlignment(target: *const Target) u16 {
         .powerpc64,
         .powerpc64le,
         .riscv64,
+        .riscv64be,
         .sparc,
         .sparc64,
         .wasm32,
@@ -3364,8 +3398,8 @@ pub fn cCallingConvention(target: *const Target) ?std.builtin.CallingConvention
             else => .{ .mips64_n64 = .{} },
         },
         .mips, .mipsel => .{ .mips_o32 = .{} },
-        .riscv64 => .{ .riscv64_lp64 = .{} },
-        .riscv32 => .{ .riscv32_ilp32 = .{} },
+        .riscv64, .riscv64be => .{ .riscv64_lp64 = .{} },
+        .riscv32, .riscv32be => .{ .riscv32_ilp32 = .{} },
         .sparc64 => .{ .sparc64_sysv = .{} },
         .sparc => .{ .sparc_sysv = .{} },
         .powerpc64 => if (target.abi.isMusl())
src/codegen/llvm.zig
@@ -82,7 +82,9 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8
         .powerpc64le => "powerpc64le",
         .amdgcn => "amdgcn",
         .riscv32 => "riscv32",
+        .riscv32be => "riscv32be",
         .riscv64 => "riscv64",
+        .riscv64be => "riscv64be",
         .sparc => "sparc",
         .sparc64 => "sparc64",
         .s390x => "s390x",
@@ -397,10 +399,18 @@ pub fn dataLayout(target: *const std.Target) []const u8 {
             "e-m:e-p:32:32-i64:64-n32-S32"
         else
             "e-m:e-p:32:32-i64:64-n32-S128",
+        .riscv32be => if (target.cpu.has(.riscv, .e))
+            "E-m:e-p:32:32-i64:64-n32-S32"
+        else
+            "E-m:e-p:32:32-i64:64-n32-S128",
         .riscv64 => if (target.cpu.has(.riscv, .e))
             "e-m:e-p:64:64-i64:64-i128:128-n32:64-S64"
         else
             "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128",
+        .riscv64be => if (target.cpu.has(.riscv, .e))
+            "E-m:e-p:64:64-i64:64-i128:128-n32:64-S64"
+        else
+            "E-m:e-p:64:64-i64:64-i128:128-n32:64-S128",
         .sparc => "E-m:e-p:32:32-i64:64-i128:128-f128:64-n32-S64",
         .sparc64 => "E-m:e-i64:64-i128:128-n32:64-S128",
         .s390x => if (target.os.tag == .zos)
@@ -12224,8 +12234,8 @@ fn lowerFnRetTy(o: *Object, pt: Zcu.PerThread, fn_info: InternPool.Key.FuncType)
             .integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
             .double_integer => {
                 const integer: Builder.Type = switch (zcu.getTarget().cpu.arch) {
-                    .riscv64 => .i64,
-                    .riscv32 => .i32,
+                    .riscv64, .riscv64be => .i64,
+                    .riscv32, .riscv32be => .i32,
                     else => unreachable,
                 };
                 return o.builder.structType(.normal, &.{ integer, integer });
@@ -12685,7 +12695,7 @@ fn ccAbiPromoteInt(
             else => null,
         },
         else => switch (target.cpu.arch) {
-            .loongarch64, .riscv64 => switch (int_info.bits) {
+            .loongarch64, .riscv64, .riscv64be => switch (int_info.bits) {
                 0...16 => int_info.signedness,
                 32 => .signed, // LLVM always signextends 32 bit ints, unsure if bug.
                 17...31, 33...63 => int_info.signedness,
@@ -13079,7 +13089,7 @@ pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
             llvm.LLVMInitializePowerPCAsmPrinter();
             llvm.LLVMInitializePowerPCAsmParser();
         },
-        .riscv32, .riscv64 => {
+        .riscv32, .riscv32be, .riscv64, .riscv64be => {
             llvm.LLVMInitializeRISCVTarget();
             llvm.LLVMInitializeRISCVTargetInfo();
             llvm.LLVMInitializeRISCVTargetMC();
src/link/Elf/Atom.zig
@@ -346,11 +346,11 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
                 error.RelocFailure => has_reloc_errors = true,
                 else => |e| return e,
             },
-            .aarch64 => aarch64.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
+            .aarch64, .aarch64_be => aarch64.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
                 error.RelocFailure => has_reloc_errors = true,
                 else => |e| return e,
             },
-            .riscv64 => riscv.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
+            .riscv64, .riscv64be => riscv.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
                 error.RelocFailure => has_reloc_errors = true,
                 else => |e| return e,
             },
@@ -674,7 +674,7 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
                 => has_reloc_errors = true,
                 else => |e| return e,
             },
-            .aarch64 => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+            .aarch64, .aarch64_be => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
                 error.RelocFailure,
                 error.RelaxFailure,
                 error.UnexpectedRemainder,
@@ -682,7 +682,7 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
                 => has_reloc_errors = true,
                 else => |e| return e,
             },
-            .riscv64 => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+            .riscv64, .riscv64be => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
                 error.RelocFailure,
                 error.RelaxFailure,
                 => has_reloc_errors = true,
@@ -870,11 +870,11 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
                 error.RelocFailure => has_reloc_errors = true,
                 else => |e| return e,
             },
-            .aarch64 => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+            .aarch64, .aarch64_be => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
                 error.RelocFailure => has_reloc_errors = true,
                 else => |e| return e,
             },
-            .riscv64 => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
+            .riscv64, .riscv64be => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, &it, code, &stream) catch |err| switch (err) {
                 error.RelocFailure => has_reloc_errors = true,
                 else => |e| return e,
             },
src/link/Elf/eh_frame.zig
@@ -286,8 +286,8 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
 
     switch (cpu_arch) {
         .x86_64 => try x86_64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
-        .aarch64 => try aarch64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
-        .riscv64 => try riscv.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
+        .aarch64, .aarch64_be => try aarch64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
+        .riscv64, .riscv64be => try riscv.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
         else => return error.UnsupportedCpuArch,
     }
 }
src/link/Elf/Object.zig
@@ -189,7 +189,7 @@ pub fn validateEFlags(
     e_flags: elf.Word,
 ) !void {
     switch (target.cpu.arch) {
-        .riscv64 => {
+        .riscv64, .riscv64be => {
             const flags: riscv.Eflags = @bitCast(e_flags);
             var any_errors: bool = false;
 
@@ -366,7 +366,7 @@ fn initAtoms(
                 const rel_count: u32 = @intCast(relocs.len);
                 self.setAtomFields(atom_ptr, .{ .rel_index = rel_index, .rel_count = rel_count });
                 try self.relocs.appendUnalignedSlice(gpa, relocs);
-                if (target.cpu.arch == .riscv64) {
+                if (target.cpu.arch.isRiscv64()) {
                     sortRelocs(self.relocs.items[rel_index..][0..rel_count]);
                 }
             }
@@ -445,7 +445,7 @@ fn parseEhFrame(
     // We expect relocations to be sorted by r_offset as per this comment in mold linker:
     // https://github.com/rui314/mold/blob/8e4f7b53832d8af4f48a633a8385cbc932d1944e/src/input-files.cc#L653
    // Except for RISCV and Loongarch which do not seem to uphold this convention.
-    if (target.cpu.arch == .riscv64) {
+    if (target.cpu.arch.isRiscv64()) {
         sortRelocs(self.relocs.items[rel_start..][0..relocs.len]);
     }
     const fdes_start = self.fdes.items.len;
src/link/Elf/relocation.zig
@@ -76,8 +76,8 @@ const riscv64_relocs = Table(11, elf.R_RISCV, .{
 pub fn decode(r_type: u32, cpu_arch: std.Target.Cpu.Arch) ?Kind {
     return switch (cpu_arch) {
         .x86_64 => x86_64_relocs.decode(r_type),
-        .aarch64 => aarch64_relocs.decode(r_type),
-        .riscv64 => riscv64_relocs.decode(r_type),
+        .aarch64, .aarch64_be => aarch64_relocs.decode(r_type),
+        .riscv64, .riscv64be => riscv64_relocs.decode(r_type),
         else => @panic("TODO unhandled cpu arch"),
     };
 }
@@ -85,8 +85,8 @@ pub fn decode(r_type: u32, cpu_arch: std.Target.Cpu.Arch) ?Kind {
 pub fn encode(comptime kind: Kind, cpu_arch: std.Target.Cpu.Arch) u32 {
     return switch (cpu_arch) {
         .x86_64 => x86_64_relocs.encode(kind),
-        .aarch64 => aarch64_relocs.encode(kind),
-        .riscv64 => riscv64_relocs.encode(kind),
+        .aarch64, .aarch64_be => aarch64_relocs.encode(kind),
+        .riscv64, .riscv64be => riscv64_relocs.encode(kind),
         else => @panic("TODO unhandled cpu arch"),
     };
 }
@@ -98,11 +98,11 @@ pub const dwarf = struct {
                 .@"32" => .@"32",
                 .@"64" => .@"64",
             })),
-            .aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (format) {
+            .aarch64, .aarch64_be => @intFromEnum(@as(elf.R_AARCH64, switch (format) {
                 .@"32" => .ABS32,
                 .@"64" => .ABS64,
             })),
-            .riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (format) {
+            .riscv64, .riscv64be => @intFromEnum(@as(elf.R_RISCV, switch (format) {
                 .@"32" => .@"32",
                 .@"64" => .@"64",
             })),
@@ -125,7 +125,7 @@ pub const dwarf = struct {
                 },
                 .debug_frame => .PC32,
             })),
-            .aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (source_section) {
+            .aarch64, .aarch64_be => @intFromEnum(@as(elf.R_AARCH64, switch (source_section) {
                 else => switch (address_size) {
                     .@"32" => .ABS32,
                     .@"64" => .ABS64,
@@ -133,7 +133,7 @@ pub const dwarf = struct {
                 },
                 .debug_frame => .PREL32,
             })),
-            .riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (source_section) {
+            .riscv64, .riscv64be => @intFromEnum(@as(elf.R_RISCV, switch (source_section) {
                 else => switch (address_size) {
                     .@"32" => .@"32",
                     .@"64" => .@"64",
@@ -164,8 +164,8 @@ fn formatRelocType(ctx: FormatRelocTypeCtx, writer: *std.io.Writer) std.io.Write
     const r_type = ctx.r_type;
     switch (ctx.cpu_arch) {
         .x86_64 => try writer.print("R_X86_64_{s}", .{@tagName(@as(elf.R_X86_64, @enumFromInt(r_type)))}),
-        .aarch64 => try writer.print("R_AARCH64_{s}", .{@tagName(@as(elf.R_AARCH64, @enumFromInt(r_type)))}),
-        .riscv64 => try writer.print("R_RISCV_{s}", .{@tagName(@as(elf.R_RISCV, @enumFromInt(r_type)))}),
+        .aarch64, .aarch64_be => try writer.print("R_AARCH64_{s}", .{@tagName(@as(elf.R_AARCH64, @enumFromInt(r_type)))}),
+        .riscv64, .riscv64be => try writer.print("R_RISCV_{s}", .{@tagName(@as(elf.R_RISCV, @enumFromInt(r_type)))}),
         else => unreachable,
     }
 }
src/link/Elf/Thunk.zig
@@ -24,8 +24,8 @@ pub fn targetAddress(thunk: Thunk, ref: Elf.Ref, elf_file: *Elf) i64 {
 
 pub fn write(thunk: Thunk, elf_file: *Elf, writer: anytype) !void {
     switch (elf_file.getTarget().cpu.arch) {
-        .aarch64 => try aarch64.write(thunk, elf_file, writer),
-        .x86_64, .riscv64 => unreachable,
+        .aarch64, .aarch64_be => try aarch64.write(thunk, elf_file, writer),
+        .x86_64, .riscv64, .riscv64be => unreachable,
         else => @panic("unhandled arch"),
     }
 }
@@ -59,8 +59,8 @@ pub fn writeSymtab(thunk: Thunk, elf_file: *Elf) void {
 
 fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) usize {
     return switch (cpu_arch) {
-        .aarch64 => aarch64.trampoline_size,
-        .x86_64, .riscv64 => unreachable,
+        .aarch64, .aarch64_be => aarch64.trampoline_size,
+        .x86_64, .riscv64, .riscv64be => unreachable,
         else => @panic("unhandled arch"),
     };
 }
src/link/Elf.zig
@@ -3722,8 +3722,8 @@ pub fn tpAddress(self: *Elf) i64 {
     const phdr = self.phdrs.items[index];
     const addr = switch (self.getTarget().cpu.arch) {
         .x86_64 => mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, phdr.p_align),
-        .aarch64 => mem.alignBackward(u64, phdr.p_vaddr - 16, phdr.p_align),
-        .riscv64 => phdr.p_vaddr,
+        .aarch64, .aarch64_be => mem.alignBackward(u64, phdr.p_vaddr - 16, phdr.p_align),
+        .riscv64, .riscv64be => phdr.p_vaddr,
         else => |arch| std.debug.panic("TODO implement getTpAddress for {s}", .{@tagName(arch)}),
     };
     return @intCast(addr);
@@ -4099,8 +4099,8 @@ pub fn getTarget(self: *const Elf) *const std.Target {
 
 fn requiresThunks(self: Elf) bool {
     return switch (self.getTarget().cpu.arch) {
-        .aarch64 => true,
-        .x86_64, .riscv64 => false,
+        .aarch64, .aarch64_be => true,
+        .x86_64, .riscv64, .riscv64be => false,
         else => @panic("TODO unimplemented architecture"),
     };
 }
@@ -4345,8 +4345,8 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
     // A branch will need an extender if its target is larger than
     // `2^(jump_bits - 1) - margin` where margin is some arbitrary number.
     const max_distance = switch (cpu_arch) {
-        .aarch64 => 0x500_000,
-        .x86_64, .riscv64 => unreachable,
+        .aarch64, .aarch64_be => 0x500_000,
+        .x86_64, .riscv64, .riscv64be => unreachable,
         else => @panic("unhandled arch"),
     };
 
@@ -4392,7 +4392,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
             log.debug("atom({f}) {s}", .{ ref, atom_ptr.name(elf_file) });
             for (atom_ptr.relocs(elf_file)) |rel| {
                 const is_reachable = switch (cpu_arch) {
-                    .aarch64 => r: {
+                    .aarch64, .aarch64_be => r: {
                         const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
                         if (r_type != .CALL26 and r_type != .JUMP26) break :r true;
                         const target_ref = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
@@ -4406,7 +4406,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
                         _ = math.cast(i28, taddr + rel.r_addend - saddr) orelse break :r false;
                         break :r true;
                     },
-                    .x86_64, .riscv64 => unreachable,
+                    .x86_64, .riscv64, .riscv64be => unreachable,
                     else => @panic("unsupported arch"),
                 };
                 if (is_reachable) continue;
src/link/Lld.zig
@@ -1342,7 +1342,9 @@ fn getLDMOption(target: *const std.Target) ?[]const u8 {
         .powerpc64 => "elf64ppc",
         .powerpc64le => "elf64lppc",
         .riscv32 => "elf32lriscv",
+        .riscv32be => "elf32briscv",
         .riscv64 => "elf64lriscv",
+        .riscv64be => "elf64briscv",
         .s390x => "elf64_s390",
         .sparc64 => "elf64_sparc",
         .x86 => switch (target.os.tag) {
src/Compilation.zig
@@ -7051,7 +7051,7 @@ pub fn addCCArgs(
             // compiler frontend does. Therefore we must hard-code the -m flags for
             // all CPU features here.
             switch (target.cpu.arch) {
-                .riscv32, .riscv64 => {
+                .riscv32, .riscv32be, .riscv64, .riscv64be => {
                     const RvArchFeat = struct { char: u8, feat: std.Target.riscv.Feature };
                     const letters = [_]RvArchFeat{
                         .{ .char = 'm', .feat = .m },
src/target.zig
@@ -189,7 +189,9 @@ pub fn hasLlvmSupport(target: *const std.Target, ofmt: std.Target.ObjectFormat)
         .powerpc64le,
         .amdgcn,
         .riscv32,
+        .riscv32be,
         .riscv64,
+        .riscv64be,
         .sparc,
         .sparc64,
         .spirv32,
@@ -486,7 +488,9 @@ pub fn clangSupportsNoImplicitFloatArg(target: *const std.Target) bool {
         .thumb,
         .thumbeb,
         .riscv32,
+        .riscv32be,
         .riscv64,
+        .riscv64be,
         .x86,
         .x86_64,
         => true,
@@ -655,7 +659,7 @@ pub fn llvmMachineAbi(target: *const std.Target) ?[:0]const u8 {
             else => if (target.abi.isMusl()) "elfv2" else "elfv1",
         },
         .powerpc64le => "elfv2",
-        .riscv64 => if (target.cpu.has(.riscv, .e))
+        .riscv64, .riscv64be => if (target.cpu.has(.riscv, .e))
             "lp64e"
         else if (target.cpu.has(.riscv, .d))
             "lp64d"
@@ -663,7 +667,7 @@ pub fn llvmMachineAbi(target: *const std.Target) ?[:0]const u8 {
             "lp64f"
         else
             "lp64",
-        .riscv32 => if (target.cpu.has(.riscv, .e))
+        .riscv32, .riscv32be => if (target.cpu.has(.riscv, .e))
             "ilp32e"
         else if (target.cpu.has(.riscv, .d))
             "ilp32d"
@@ -707,7 +711,9 @@ pub fn defaultFunctionAlignment(target: *const std.Target) Alignment {
 pub fn minFunctionAlignment(target: *const std.Target) Alignment {
     return switch (target.cpu.arch) {
         .riscv32,
+        .riscv32be,
         .riscv64,
+        .riscv64be,
         => if (target.cpu.hasAny(.riscv, &.{ .c, .zca })) .@"2" else .@"4",
         .thumb,
         .thumbeb,
src/Zcu.zig
@@ -3850,6 +3850,7 @@ pub fn atomicPtrAlignment(
         .powerpc,
         .powerpcle,
         .riscv32,
+        .riscv32be,
         .sparc,
         .thumb,
         .thumbeb,
@@ -3874,6 +3875,7 @@ pub fn atomicPtrAlignment(
         .powerpc64,
         .powerpc64le,
         .riscv64,
+        .riscv64be,
         .sparc64,
         .s390x,
         .wasm64,
test/llvm_targets.zig
@@ -217,6 +217,8 @@ const targets = [_]std.Target.Query{
     .{ .cpu_arch = .riscv32, .os_tag = .rtems, .abi = .none },
     // .{ .cpu_arch = .riscv32, .os_tag = .uefi, .abi = .none },
 
+    // .{ .cpu_arch = .riscv32be, .os_tag = .freestanding, .abi = .none },
+
     .{ .cpu_arch = .riscv64, .os_tag = .freebsd, .abi = .none },
     .{ .cpu_arch = .riscv64, .os_tag = .freestanding, .abi = .none },
     .{ .cpu_arch = .riscv64, .os_tag = .fuchsia, .abi = .none },
@@ -231,6 +233,8 @@ const targets = [_]std.Target.Query{
     .{ .cpu_arch = .riscv64, .os_tag = .serenity, .abi = .none },
     // .{ .cpu_arch = .riscv64, .os_tag = .uefi, .abi = .none },
 
+    // .{ .cpu_arch = .riscv64be, .os_tag = .freestanding, .abi = .none },
+
     .{ .cpu_arch = .s390x, .os_tag = .freestanding, .abi = .none },
     .{ .cpu_arch = .s390x, .os_tag = .linux, .abi = .gnu },
     .{ .cpu_arch = .s390x, .os_tag = .linux, .abi = .none },