//===----------------------------------------------------------------------===//
   2//
   3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
   4// See https://llvm.org/LICENSE.txt for license information.
   5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
   6//
   7//===----------------------------------------------------------------------===//
   8
   9#include "assembly.h"
  10
  11#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
  12#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
  13
  14#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
  15#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
  16
  17#if defined(_AIX)
  18    .toc
  19#else
  20    .text
  21#endif
  22
  23#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
  24
  25#if defined(__i386__)
  26
  27#
  28# extern int __unw_getcontext(unw_context_t* thread_state)
  29#
  30# On entry:
  31#   +                       +
  32#   +-----------------------+
  33#   + thread_state pointer  +
  34#   +-----------------------+
  35#   + return address        +
  36#   +-----------------------+   <-- SP
  37#   +                       +
  38#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  _LIBUNWIND_CET_ENDBR
  push  %eax            # free eax as scratch; its original value is stored below
  movl  8(%esp), %eax   # eax = thread_state arg (above ret-addr and pushed eax)
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx        # undo the ret-addr push and the eax push above
  movl  %edx, 28(%eax)  # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)  # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx
  movl  %edx, (%eax)  # store original eax
  popl  %eax
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret
  67
  68#elif defined(__arm64ec__)
  69
  70//
  71// extern int __unw_getcontext(unw_context_t* thread_state)
  72//
  73// On entry:
  74//  thread_state pointer is in x0
  75//
  .section .text,"xr",discard,"#__unw_getcontext"
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION("#__unw_getcontext")
  // Arm64EC: native AArch64 registers are saved into the slots of the
  // emulated x64 context, following the Arm64EC register mapping
  // (x8->rax, x27->rbx, q0-q15 -> xmm0-xmm15, ...).
  stp    x8, x27, [x0, #0x000]  // rax, rbx
  stp    x0, x1,  [x0, #0x010]  // rcx, rdx
  stp    x26,x25, [x0, #0x020]  // rdi, rsi
  mov    x1, sp
  stp    fp, x1,  [x0, #0x030]  // rbp, rsp
  stp    x2, x3,  [x0, #0x040]  // r8,  r9
  stp    x4, x5,  [x0, #0x050]  // r10, r11
  stp    x19,x20, [x0, #0x060]  // r12, r13
  stp    x21,x22, [x0, #0x070]  // r14, r15
  str    x30,     [x0, #0x080]  // store return address as pc
  stp    q0, q1,  [x0, #0x0b0]  // xmm0, xmm1
  stp    q2, q3,  [x0, #0x0d0]  // xmm2, xmm3
  stp    q4, q5,  [x0, #0x0f0]  // xmm4, xmm5
  stp    q6, q7,  [x0, #0x110]  // xmm6, xmm7
  stp    q8, q9,  [x0, #0x130]  // xmm8, xmm9
  stp    q10,q11, [x0, #0x150]  // xmm10,xmm11
  stp    q12,q13, [x0, #0x170]  // xmm12,xmm13
  stp    q14,q15, [x0, #0x190]  // xmm14,xmm15
  mov    x0, #0                 // return UNW_ESUCCESS
  ret

  .weak_anti_dep __unw_getcontext
  .set __unw_getcontext, "#__unw_getcontext"

  // NOTE(review): presumably this .hybmp$x entry binds the function to its
  // x64 entry thunk for the Arm64EC hybrid loader — confirm against the
  // Arm64EC ABI documentation.
  .section .hybmp$x,"yi"
  .symidx "#__unw_getcontext"
  .symidx $ientry_thunk$cdecl$i8$i8
  .word 1
  .text
 108
 109#elif defined(__x86_64__)
 110
 111#
 112# extern int __unw_getcontext(unw_context_t* thread_state)
 113#
 114# On entry:
 115#  thread_state pointer is in rdi
 116#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif
  # PTR = first integer-argument register (the context pointer) and
  # TMP = a caller-saved scratch register; both differ between the
  # Microsoft x64 and SysV AMD64 calling conventions.

  _LIBUNWIND_CET_ENDBR
  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR)   # saved rsp = sp at call site (pop the return address)
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  # xmm6-xmm15 are callee-saved on Windows, so the full xmm set is part of
  # the context there; unaligned stores since the context has no guaranteed
  # 16-byte alignment.
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret
 171
 172#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
 173
 174#
 175# extern int __unw_getcontext(unw_context_t* thread_state)
 176#
 177# On entry:
 178#  thread_state pointer is in a0 ($4)
 179#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat             # $1 (at) is stored like any other GPR below
  .set noreorder        # delay slots are filled by hand (see jr at the end)
  .set nomacro
  # Save all GPRs; $4 (a0) is the context pointer and is saved too.
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address to pc ($31/ra is stored twice: as gpr31 and as pc)
  sw    $31, (4 * 32)($4)
#if __mips_isa_rev < 6
  # hi and lo (removed from the ISA in MIPSr6)
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#endif
#ifdef __mips_hard_float
#if __mips_fpr != 64
  # FR=0 mode: odd FP registers are the upper halves of even-numbered
  # 64-bit pairs, so only even registers are stored.
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  # FR=1 mode: all 32 FP registers are independent 64-bit registers.
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f1, (4 * 36 + 8 * 1)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f3, (4 * 36 + 8 * 3)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f5, (4 * 36 + 8 * 5)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f7, (4 * 36 + 8 * 7)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f9, (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr	$31
  # return UNW_ESUCCESS (executes in the jr delay slot; .set noreorder above)
  or    $2, $0, $0
  .set pop
 282
 283#elif defined(__mips64)
 284
 285#
 286# extern int __unw_getcontext(unw_context_t* thread_state)
 287#
 288# On entry:
 289#  thread_state pointer is in a0 ($4)
 290#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat             # $1 (at) is stored like any other GPR below
  .set noreorder        # delay slots are filled by hand (see jr at the end)
  .set nomacro
  # Save GPRs $1..$31; $4 (a0) is the context pointer and is saved too.
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    sd $\i, (8 * \i)($4)
  .endr
  # Store return address to pc ($31/ra is stored twice: as gpr31 and as pc)
  sd    $31, (8 * 32)($4)
#if __mips_isa_rev < 6
  # hi and lo (removed from the ISA in MIPSr6)
  mfhi  $8
  sd    $8,  (8 * 33)($4)
  mflo  $8
  sd    $8,  (8 * 34)($4)
#endif
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
    sdc1 $f\i, (280+8*\i)($4)
  .endr
#endif
  jr	$31
  # return UNW_ESUCCESS (executes in the jr delay slot; .set noreorder above)
  or    $2, $0, $0
  .set pop
 317
 318# elif defined(__mips__)
 319
 320#
 321# extern int __unw_getcontext(unw_context_t* thread_state)
 322#
 323# Just trap for the time being.
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  teq $0, $0        # unconditional trap: unsupported MIPS ABI variant
 326
 327#elif defined(__powerpc64__)
 328
 329//
 330// extern int __unw_getcontext(unw_context_t* thread_state)
 331//
 332// On entry:
 333//  thread_state pointer is in r3
 334//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
// store register (GPR) n into its context slot (slots start at offset 16)
#define PPC64_STR(n) \
  std   n, (8 * (n + 2))(3)

  // save GPRs
  PPC64_STR(0)
  mflr  0
  std   0, PPC64_OFFS_SRR0(3) // store lr as ssr0
  PPC64_STR(1)
  PPC64_STR(4)        // Save r4 first since it will be used for fixing r2.
#if defined(_AIX)
  // The TOC register (r2) was changed by the glue code if unw_getcontext
  // is called from a different module. Save the original TOC register
  // in the context if this is the case.
  mflr   4
  lwz    4, 0(4)      // Get the first instruction at the return address.
  xoris  0, 4, 0xe841 // Is it reloading the TOC register "ld 2,40(1)"?
  cmplwi 0, 0x28
  bne    0, LnoR2Fix  // No need to fix up r2 if it is not.
  ld     2, 40(1)     // Use the saved TOC register in the stack.
LnoR2Fix:
#endif
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  // save special registers
  mfcr  0
  std   0,  PPC64_OFFS_CR(3)
  mfxer 0
  std   0,  PPC64_OFFS_XER(3)
#if defined(_AIX)
  // LR value saved from the register is not used, initialize it to 0.
  li    0,  0
#else
  mflr  0
#endif
  std   0,  PPC64_OFFS_LR(3)
  mfctr 0
  std   0,  PPC64_OFFS_CTR(3)
  mfvrsave    0
  std   0,  PPC64_OFFS_VRSAVE(3)

#if defined(__VSX__)
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// store VS register n at r4 and advance r4 by 16 bytes
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since stxvd2x will store the
// register in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer required
//        this can be changed to simply `stxv n, 16 * n(4)`.
#define PPC64_STVS(n)      \
  xxswapd n, n            ;\
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
#else
#define PPC64_STVS(n)      \
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
#endif

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register n (slots are 16 bytes apart, matching the VSX layout)
#define PPC64_STF(n) \
  stfd  n, (PPC64_OFFS_FP + n * 16)(3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

#if defined(__ALTIVEC__)
  // save vector registers

  // Use 16-bytes below the stack pointer as an
  // aligned buffer to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  4, 1, 16

// stvx requires a 16-byte-aligned address, so bounce each V register
// through the aligned stack buffer, then copy it doubleword-by-doubleword
// into the (unaligned) context.
#define PPC64_STV_UNALIGNED(n)             \
  stvx  n, 0, 4                           ;\
  ld    5, 0(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
  ld    5, 8(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif
#endif

  li    3,  0   // return UNW_ESUCCESS
  blr
 589
 590
 591#elif defined(__powerpc__)
 592
 593//
// extern int __unw_getcontext(unw_context_t* thread_state)
 595//
 596// On entry:
 597//  thread_state pointer is in r3
 598//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
  // save GPRs (4-byte slots starting at offset 8; r3 = context pointer)
  stw     0,   8(3)
  mflr    0
  stw     0,   0(3) // store lr as ssr0
  stw     1,  12(3)
  stw     4,  24(3) // Save r4 first since it will be used for fixing r2.
#if defined(_AIX)
  // The TOC register (r2) was changed by the glue code if unw_getcontext
  // is called from a different module. Save the original TOC register
  // in the context if this is the case.
  mflr    4
  lwz     4,  0(4)      // Get the instruction at the return address.
  xoris   0,  4, 0x8041 // Is it reloading the TOC register "lwz 2,20(1)"?
  cmplwi  0,  0x14
  bne     0,  LnoR2Fix  // No need to fix up r2 if it is not.
  lwz     2,  20(1)     // Use the saved TOC register in the stack.
LnoR2Fix:
#endif
  stw     2,  16(3)
  stw     3,  20(3)
  stw     5,  28(3)
  stw     6,  32(3)
  stw     7,  36(3)
  stw     8,  40(3)
  stw     9,  44(3)
  stw     10, 48(3)
  stw     11, 52(3)
  stw     12, 56(3)
  stw     13, 60(3)
  stw     14, 64(3)
  stw     15, 68(3)
  stw     16, 72(3)
  stw     17, 76(3)
  stw     18, 80(3)
  stw     19, 84(3)
  stw     20, 88(3)
  stw     21, 92(3)
  stw     22, 96(3)
  stw     23,100(3)
  stw     24,104(3)
  stw     25,108(3)
  stw     26,112(3)
  stw     27,116(3)
  stw     28,120(3)
  stw     29,124(3)
  stw     30,128(3)
  stw     31,132(3)

#if defined(__ALTIVEC__)
  // save VRSave register (SPR 256)
  mfspr   0, 256
  stw     0, 156(3)
#endif
  // save CR registers
  mfcr    0
  stw     0, 136(3)
#if defined(_AIX)
  // LR value from the register is not used, initialize it to 0.
  li      0, 0
  stw     0, 144(3)
#endif
  // save CTR register
  mfctr   0
  stw     0, 148(3)

#if !defined(__NO_FPRS__)
  // save float registers
  stfd    0, 160(3)
  stfd    1, 168(3)
  stfd    2, 176(3)
  stfd    3, 184(3)
  stfd    4, 192(3)
  stfd    5, 200(3)
  stfd    6, 208(3)
  stfd    7, 216(3)
  stfd    8, 224(3)
  stfd    9, 232(3)
  stfd    10,240(3)
  stfd    11,248(3)
  stfd    12,256(3)
  stfd    13,264(3)
  stfd    14,272(3)
  stfd    15,280(3)
  stfd    16,288(3)
  stfd    17,296(3)
  stfd    18,304(3)
  stfd    19,312(3)
  stfd    20,320(3)
  stfd    21,328(3)
  stfd    22,336(3)
  stfd    23,344(3)
  stfd    24,352(3)
  stfd    25,360(3)
  stfd    26,368(3)
  stfd    27,376(3)
  stfd    28,384(3)
  stfd    29,392(3)
  stfd    30,400(3)
  stfd    31,408(3)
#endif

#if defined(__ALTIVEC__)
  // save vector registers

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone

// stvx requires 16-byte alignment, so bounce each V register through the
// aligned red-zone buffer, then copy it word-by-word into the context.
#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx    _vec, 0, 4               SEPARATOR \
  lwz     5, 0(4)                  SEPARATOR \
  stw     5, _offset(3)            SEPARATOR \
  lwz     5, 4(4)                  SEPARATOR \
  stw     5, _offset+4(3)          SEPARATOR \
  lwz     5, 8(4)                  SEPARATOR \
  stw     5, _offset+8(3)          SEPARATOR \
  lwz     5, 12(4)                 SEPARATOR \
  stw     5, _offset+12(3)

  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
#endif

  li      3, 0  // return UNW_ESUCCESS
  blr
 758
 759
 760#elif defined(__aarch64__)
 761
 762//
 763// extern int __unw_getcontext(unw_context_t* thread_state)
 764//
 765// On entry:
 766//  thread_state pointer is in x0
 767//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  // Save all GPRs; x0 (the context pointer itself) is stored first, before
  // it could be clobbered.
  stp    x0, x1,  [x0, #0x000]
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,x29, [x0, #0x0E0]
  str    x30,     [x0, #0x0F0]
  mov    x1,sp                    // sp cannot be stored directly; go via x1
  str    x1,      [x0, #0x0F8]
  str    x30,     [x0, #0x100]    // store return address as pc
  // skip cpsr
#if defined(__ARM_FP) && __ARM_FP != 0
  // Only the low 64 bits (dN) of the SIMD/FP registers are saved.
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
#endif
  mov    x0, #0                   // return UNW_ESUCCESS
  ret
 811
 812#elif defined(__arm__) && !defined(__APPLE__)
 813
 814#if !defined(__ARM_ARCH_ISA_ARM)
 815#if (__ARM_ARCH_ISA_THUMB == 2)
 816  .syntax unified
 817#endif
 818  .thumb
 819#endif
 820
 821@
 822@ extern int __unw_getcontext(unw_context_t* thread_state)
 823@
 824@ On entry:
 825@  thread_state pointer is in r0
 826@
 827@ Per EHABI #4.7 this only saves the core integer registers.
 828@ EHABI #7.4.5 notes that in general all VRS registers should be restored
 829@ however this is very hard to do for VFP registers because it is unknown
 830@ to the library how many registers are implemented by the architecture.
 831@ Instead, VFP registers are demand saved by logic external to __unw_getcontext.
 832@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ Thumb-1 path: stm can only use low registers, so high registers are
  @ copied through r1-r3 in batches.
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it it the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32bit thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)
 864
 865@
 866@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
 867@
 868@ On entry:
 869@  values pointer is in r0
 870@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16      @ allow d0-d15 stores regardless of the compile-time FPU
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
  @ Store d0-d15 to the buffer in r0 (FSTMD / "standard" layout).
  vstmia r0, {d0-d15}
  JMP(lr)
 878
 879@
 880@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
 881@
 882@ On entry:
 883@  values pointer is in r0
 884@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16      @ allow d0-d15 stores regardless of the compile-time FPU
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)
 892
 893@
 894@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
 895@
 896@ On entry:
 897@  values pointer is in r0
 898@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3          @ allow d16-d31 stores regardless of the compile-time FPU
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instructions using the corresponding coprocessor mnemonic.
  vstmia r0, {d16-d31}
  JMP(lr)
 913
 914#if defined(_LIBUNWIND_ARM_WMMX)
 915
 916@
 917@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
 918@
 919@ On entry:
 920@  values pointer is in r0
 921@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te       @ iwMMXt coprocessor stores need at least armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
  @ Emit the iwMMXt wstrd instructions via their generic coprocessor (p1)
  @ encodings so the file assembles without iwMMXt support enabled.
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)
 944
 945@
 946@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
 947@
 948@ On entry:
 949@  values pointer is in r0
 950@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te       @ iwMMXt coprocessor stores need at least armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  @ Emit the iwMMXt wstrw instructions via their generic coprocessor (p1)
  @ encodings so the file assembles without iwMMXt support enabled.
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)
 961
 962#endif
 963
 964#elif defined(__or1k__)
 965
 966#
 967# extern int __unw_getcontext(unw_context_t* thread_state)
 968#
 969# On entry:
 970#  thread_state pointer is in r3
 971#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Save all GPRs; r3 (the context pointer) and r9 (the return address) are
  # saved like any other register.
  l.sw       0(r3), r0
  l.sw       4(r3), r1
  l.sw       8(r3), r2
  l.sw      12(r3), r3
  l.sw      16(r3), r4
  l.sw      20(r3), r5
  l.sw      24(r3), r6
  l.sw      28(r3), r7
  l.sw      32(r3), r8
  l.sw      36(r3), r9
  l.sw      40(r3), r10
  l.sw      44(r3), r11
  l.sw      48(r3), r12
  l.sw      52(r3), r13
  l.sw      56(r3), r14
  l.sw      60(r3), r15
  l.sw      64(r3), r16
  l.sw      68(r3), r17
  l.sw      72(r3), r18
  l.sw      76(r3), r19
  l.sw      80(r3), r20
  l.sw      84(r3), r21
  l.sw      88(r3), r22
  l.sw      92(r3), r23
  l.sw      96(r3), r24
  l.sw     100(r3), r25
  l.sw     104(r3), r26
  l.sw     108(r3), r27
  l.sw     112(r3), r28
  l.sw     116(r3), r29
  l.sw     120(r3), r30
  l.sw     124(r3), r31
  # store ra to pc
  l.sw     128(r3), r9
  # zero epcr
  l.sw     132(r3), r0
  # NOTE(review): there is no return instruction here and r11 (the return
  # value register) is never set to UNW_ESUCCESS — execution falls off the
  # end of the function. TODO: confirm whether `l.jr r9` (plus delay slot)
  # is intentionally omitted, or whether this is a latent upstream bug.
1009
1010#elif defined(__hexagon__)
1011#
# extern int __unw_getcontext(unw_context_t* thread_state)
1013#
1014# On entry:
1015#  thread_state pointer is in r0
1016#
#define OFFSET(offset) (offset/4)
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Save callee-visible registers into the context at r0.
  # NOTE(review): r0-r7 are not stored here — presumably the caller-saved
  # low registers are not needed for unwinding; confirm against the
  # Registers_hexagon layout.
  memw(r0+#32) = r8
  memw(r0+#36) = r9
  memw(r0+#40) = r10
  memw(r0+#44) = r11

  memw(r0+#48) = r12
  memw(r0+#52) = r13
  memw(r0+#56) = r14
  memw(r0+#60) = r15

  memw(r0+#64) = r16
  memw(r0+#68) = r17
  memw(r0+#72) = r18
  memw(r0+#76) = r19

  memw(r0+#80) = r20
  memw(r0+#84) = r21
  memw(r0+#88) = r22
  memw(r0+#92) = r23

  memw(r0+#96) = r24
  memw(r0+#100) = r25
  memw(r0+#104) = r26
  memw(r0+#108) = r27

  memw(r0+#112) = r28
  memw(r0+#116) = r29
  memw(r0+#120) = r30
  memw(r0+#124) = r31
  r1 = c4   // Predicate register
  memw(r0+#128) = r1
  r1 = memw(r30)           // *FP == Saved FP
  // NOTE(review): the load above is dead — r1 is immediately overwritten
  // with r31 (the return address, stored as pc). Also, r0 is not cleared
  // before jumpr, so the function does not return UNW_ESUCCESS. TODO:
  // confirm both against upstream intent.
  r1 = r31
  memw(r0+#132) = r1

  jumpr r31
1055
1056#elif defined(__sparc__) && defined(__arch64__)
1057
1058#
1059# extern int __unw_getcontext(unw_context_t* thread_state)
1060#
1061# On entry:
1062#  thread_state pointer is in %o0
1063#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Tell the assembler these globals are scratch so it does not warn about
  # their use (they are app-reserved registers on SPARC V9).
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  stx  %g1, [%o0 + 0x08]
  stx  %g2, [%o0 + 0x10]
  stx  %g3, [%o0 + 0x18]
  stx  %g4, [%o0 + 0x20]
  stx  %g5, [%o0 + 0x28]
  stx  %g6, [%o0 + 0x30]
  stx  %g7, [%o0 + 0x38]
  stx  %o0, [%o0 + 0x40]
  stx  %o1, [%o0 + 0x48]
  stx  %o2, [%o0 + 0x50]
  stx  %o3, [%o0 + 0x58]
  stx  %o4, [%o0 + 0x60]
  stx  %o5, [%o0 + 0x68]
  stx  %o6, [%o0 + 0x70]
  stx  %o7, [%o0 + 0x78]
  stx  %l0, [%o0 + 0x80]
  stx  %l1, [%o0 + 0x88]
  stx  %l2, [%o0 + 0x90]
  stx  %l3, [%o0 + 0x98]
  stx  %l4, [%o0 + 0xa0]
  stx  %l5, [%o0 + 0xa8]
  stx  %l6, [%o0 + 0xb0]
  stx  %l7, [%o0 + 0xb8]
  stx  %i0, [%o0 + 0xc0]
  stx  %i1, [%o0 + 0xc8]
  stx  %i2, [%o0 + 0xd0]
  stx  %i3, [%o0 + 0xd8]
  stx  %i4, [%o0 + 0xe0]
  stx  %i5, [%o0 + 0xe8]
  stx  %i6, [%o0 + 0xf0]
  stx  %i7, [%o0 + 0xf8]

  # save StackGhost cookie
  mov  %i7, %g4
  save %sp, -176, %sp
  # register window flush necessary even without StackGhost
  flushw
  restore
  ldx  [%sp + 2047 + 0x78], %g5   # saved %i7 slot (2047 = V9 stack bias)
  xor  %g4, %g5, %g4              # cookie = live %i7 XOR in-memory %i7
  stx  %g4, [%o0 + 0x100]
  retl
  # return UNW_ESUCCESS (executes in the retl delay slot)
   clr %o0
1113
1114#elif defined(__sparc__)
1115
1116#
1117# extern int __unw_getcontext(unw_context_t* thread_state)
1118#
1119# On entry:
1120#  thread_state pointer is in o0
1121#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  ta 3                # ST_FLUSH_WINDOWS: spill all register windows to the stack
  add %o7, 8, %o7     # pre-bias o7 so the plain `jmp %o7` below acts as retl
  # Save GPRs in even/odd pairs (std stores a 64-bit register pair).
  std %g0, [%o0 +   0]
  std %g2, [%o0 +   8]
  std %g4, [%o0 +  16]
  std %g6, [%o0 +  24]
  std %o0, [%o0 +  32]
  std %o2, [%o0 +  40]
  std %o4, [%o0 +  48]
  std %o6, [%o0 +  56]
  std %l0, [%o0 +  64]
  std %l2, [%o0 +  72]
  std %l4, [%o0 +  80]
  std %l6, [%o0 +  88]
  std %i0, [%o0 +  96]
  std %i2, [%o0 + 104]
  std %i4, [%o0 + 112]
  std %i6, [%o0 + 120]
  jmp %o7
   clr %o0                   // return UNW_ESUCCESS
1143
1144#elif defined(__riscv)
1145
1146#
1147# extern int __unw_getcontext(unw_context_t* thread_state)
1148#
1149# On entry:
1150#  thread_state pointer is in a0
1151#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  ISTORE    x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
#if defined(__riscv_32e)
  // RV32E has only 16 integer registers.
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#else
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#endif
    ISTORE x\i, (RISCV_ISIZE * \i)(a0)
  .endr

# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
    FSTORE f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
# endif

  li     a0, 0  // return UNW_ESUCCESS
  ret           // jump to ra
1170
1171#elif defined(__s390x__)
1172
1173//
1174// extern int __unw_getcontext(unw_context_t* thread_state)
1175//
1176// On entry:
1177//  thread_state pointer is in r2
1178//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  // Save GPRs (r0-r15 at offset 16; r2 = context pointer)
  stmg %r0, %r15, 16(%r2)

  // Save PSWM (epsw extracts the program status word mask into r0:r1)
  epsw %r0, %r1
  stm %r0, %r1, 0(%r2)

  // Store return address as PSWA
  stg %r14, 8(%r2)

  // Save FPRs
  .irp i,FROM_0_TO_15
    std %f\i, (144+8*\i)(%r2)
  .endr

  // Return UNW_ESUCCESS
  lghi %r2, 0
  br %r14
1199
1200#elif defined(__loongarch__) && __loongarch_grlen == 64
1201
1202#
1203# extern int __unw_getcontext(unw_context_t* thread_state)
1204#
1205# On entry:
1206#  thread_state pointer is in $a0($r4)
1207#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Save GPRs $r1..$r31; $r4 (a0, the context pointer) is saved too.
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    st.d $r\i, $a0, (8*\i)
  .endr
  st.d    $r1,  $a0, (8 * 32) // store $ra to pc

# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
    fst.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
# endif

  move     $a0, $zero  // UNW_ESUCCESS
  jr       $ra
1222
1223#endif
1224
1225#ifdef __arm64ec__
1226  .globl "#unw_getcontext"
1227  .set "#unw_getcontext", "#__unw_getcontext"
1228  .weak_anti_dep unw_getcontext
1229  .set unw_getcontext, "#unw_getcontext"
1230  EXPORT_SYMBOL(unw_getcontext)
1231#else
1232  WEAK_ALIAS(__unw_getcontext, unw_getcontext)
1233#endif
1234
1235#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */
1236
1237NO_EXEC_STACK_DIRECTIVE