/* Assembler macros for x86-64.
   Copyright (C) 2001-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _X86_64_SYSDEP_H
#define _X86_64_SYSDEP_H 1

/* zig patch: inline x86-lp_size.h */

#include <sysdeps/x86/sysdep.h>
#ifdef __ASSEMBLER__
# define LP_SIZE 8
#else
# define LP_SIZE "8"
#endif
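
/* Illustrative usage (not part of the upstream header): under __ASSEMBLER__
   LP_SIZE is a bare integer, so it can appear directly in an operand, e.g.

	sub	$LP_SIZE, %rsp

   while in C it is a string literal, so it can be pasted into an inline-asm
   template, e.g.

	char *p = ...;
	asm ("add $" LP_SIZE ", %0" : "+r" (p));

   Both snippets are sketches for exposition only, not code from glibc.  */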

/* __CET__ is defined by GCC with Control-Flow Protection values:

enum cf_protection_level
{
  CF_NONE = 0,
  CF_BRANCH = 1 << 0,
  CF_RETURN = 1 << 1,
  CF_FULL = CF_BRANCH | CF_RETURN,
  CF_SET = 1 << 2
};
*/

/* Set if CF_BRANCH (IBT) is enabled.  */
#define X86_FEATURE_1_IBT (1U << 0)
/* Set if CF_RETURN (SHSTK) is enabled.  */
#define X86_FEATURE_1_SHSTK (1U << 1)

#ifdef __CET__
# define CET_ENABLED 1
# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK)
#else
# define CET_ENABLED 0
# define SHSTK_ENABLED 0
#endif
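
/* Illustrative usage (not part of the upstream header): CET_ENABLED and
   SHSTK_ENABLED always expand to integer constants, so they can be tested
   in preprocessor conditionals, e.g.

	#if SHSTK_ENABLED
	... emit shadow-stack specific code ...
	#endif

   Note that SHSTK_ENABLED is merely non-zero (not necessarily 1) when
   __CET__ has the X86_FEATURE_1_SHSTK bit set.  */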

#ifdef __ASSEMBLER__

/* Syntactic details of assembler.  */

#ifdef _CET_ENDBR
# define _CET_NOTRACK notrack
#else
# define _CET_ENDBR
# define _CET_NOTRACK
#endif

/* Define an entry point visible from C.  */
#define ENTRY_P2ALIGN(name, alignment) \
  .globl C_SYMBOL_NAME(name); \
  .type C_SYMBOL_NAME(name),@function; \
  .align ALIGNARG(alignment); \
  C_LABEL(name) \
  cfi_startproc; \
  _CET_ENDBR; \
  CALL_MCOUNT
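
/* Usage sketch (illustrative only, not from the upstream sources): an
   assembly function typically pairs this with the END macro from
   <sysdeps/x86/sysdep.h>, e.g.

	ENTRY_P2ALIGN (frob_example, 6)
		...
		ret
	END (frob_example)

   The alignment argument is interpreted as a power-of-two exponent (hence
   the P2ALIGN name), so 6 requests 64-byte alignment.  ENTRY (name) is
   normally a wrapper supplying a default alignment.  frob_example is a
   made-up name.  */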

/* This macro sets proper CFI using DW_CFA_expression, describing the
   register as saved relative to %rsp instead of relative to the CFA.
   The expression is DW_OP_drop, DW_OP_breg7 (%rsp is DWARF register 7),
   and a two-byte sleb128 offset from %rsp.  */
#define cfi_offset_rel_rsp(regn, off) .cfi_escape 0x10, regn, 0x4, 0x13, \
			0x77, off & 0x7F | 0x80, off >> 7
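
/* Illustrative expansion (exposition only): recording that %r12 (DWARF
   register 12) was saved at %rsp + 16 could be written as

	cfi_offset_rel_rsp (12, 16)

   which emits DW_CFA_expression for register 12 with the location expression
   "DW_OP_drop; DW_OP_breg7 16": the unwinder first pushes the CFA, DW_OP_drop
   discards it, and DW_OP_breg7 yields %rsp + 16.  The values here are
   examples only.  */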

/* If compiled for profiling, call `mcount' at the start of each function.  */
#ifdef PROF
/* The mcount code relies on a normal frame pointer being on the stack
   to locate our caller, so push one just for its benefit.  */
#define CALL_MCOUNT \
  pushq %rbp; \
  cfi_adjust_cfa_offset(8); \
  movq %rsp, %rbp; \
  cfi_def_cfa_register(%rbp); \
  call JUMPTARGET(mcount); \
  popq %rbp; \
  cfi_def_cfa(rsp,8);
#else
#define CALL_MCOUNT /* Do nothing.  */
#endif

#define PSEUDO(name, syscall_name, args) \
lose: \
  jmp JUMPTARGET(syscall_error) \
  .globl syscall_error; \
  ENTRY (name) \
  DO_CALL (syscall_name, args); \
  jb lose

#undef JUMPTARGET
#ifdef SHARED
# ifdef BIND_NOW
#  define JUMPTARGET(name) *name##@GOTPCREL(%rip)
# else
#  define JUMPTARGET(name) name##@PLT
# endif
#else
/* For static archives, branch to target directly.  */
# define JUMPTARGET(name) name
#endif
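
/* Illustrative expansion (not part of the upstream header): a call written as

	call JUMPTARGET(example_func)

   becomes "call example_func@PLT" in a shared build, the indirect
   "call *example_func@GOTPCREL(%rip)" when BIND_NOW is also defined, and a
   plain "call example_func" in static builds.  example_func is a placeholder
   symbol name.  */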

/* Instruction to operate on long and pointer.  */
#define LP_OP(insn) insn##q

/* Assembler address directive.  */
#define ASM_ADDR .quad

/* Registers to hold long and pointer.  */
#define RAX_LP rax
#define RBP_LP rbp
#define RBX_LP rbx
#define RCX_LP rcx
#define RDI_LP rdi
#define RDX_LP rdx
#define RSI_LP rsi
#define RSP_LP rsp
#define R8_LP r8
#define R9_LP r9
#define R10_LP r10
#define R11_LP r11
#define R12_LP r12
#define R13_LP r13
#define R14_LP r14
#define R15_LP r15
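
/* Illustrative usage (exposition only): the *_LP names let one source file
   serve both the LP64 ABI (where they are the full 64-bit registers, as
   defined above) and an ILP32 variant such as x32, which can override them
   with 32-bit register names.  For example,

	LP_OP(mov)	%RDX_LP, %RCX_LP

   assembles to "movq %rdx, %rcx" with these definitions.  The x32 behaviour
   is mentioned only as background; this header defines the 64-bit case.  */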

/* Zero upper vector registers and return with xtest.  NB: Use VZEROALL
   to avoid the RTM abort that VZEROUPPER triggers when executed inside a
   transaction.  */
#define ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST \
  xtest; \
  jnz 1f; \
  vzeroupper; \
  ret; \
1: \
  vzeroall; \
  ret
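
/* Illustrative usage (exposition only): an AVX function variant built for
   RTM can end each exit path with

	ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST

   so that, inside a transaction (where XTEST clears ZF), VZEROALL is used
   instead of VZEROUPPER, which would otherwise abort the transaction.  */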

/* Can be used to replace a vzeroupper that is not directly before a
   return.  This is useful when hoisting a vzeroupper from multiple
   return paths to decrease the total number of vzerouppers and code
   size.  */
#define COND_VZEROUPPER_XTEST \
  xtest; \
  jz 1f; \
  vzeroall; \
  jmp 2f; \
1: \
  vzeroupper; \
2:

/* When built for RTM, COND_VZEROUPPER should be defined as
   COND_VZEROUPPER_XTEST.  */
#ifndef COND_VZEROUPPER
# define COND_VZEROUPPER vzeroupper
#endif
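
/* Illustrative usage (exposition only): rather than ending several return
   paths with vzeroupper + ret, a function can branch them all to a single
   exit label that does

	COND_VZEROUPPER
	ret

   so an RTM-enabled build, which defines COND_VZEROUPPER as
   COND_VZEROUPPER_XTEST, keeps that shared exit transaction-safe.  */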

/* Zero upper vector registers and return.  */
#ifndef ZERO_UPPER_VEC_REGISTERS_RETURN
# define ZERO_UPPER_VEC_REGISTERS_RETURN \
  VZEROUPPER; \
  ret
#endif

#ifndef VZEROUPPER_RETURN
# define VZEROUPPER_RETURN VZEROUPPER; ret
#endif

#else /* __ASSEMBLER__ */

/* Instruction to operate on long and pointer.  */
#define LP_OP(insn) #insn "q"

/* Assembler address directive.  */
#define ASM_ADDR ".quad"

/* Registers to hold long and pointer.  */
#define RAX_LP "rax"
#define RBP_LP "rbp"
#define RBX_LP "rbx"
#define RCX_LP "rcx"
#define RDI_LP "rdi"
#define RDX_LP "rdx"
#define RSI_LP "rsi"
#define RSP_LP "rsp"
#define R8_LP "r8"
#define R9_LP "r9"
#define R10_LP "r10"
#define R11_LP "r11"
#define R12_LP "r12"
#define R13_LP "r13"
#define R14_LP "r14"
#define R15_LP "r15"
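
/* Illustrative usage (exposition only): the string forms are meant to be
   pasted into inline-asm templates from C, e.g.

	void *sp;
	asm ("mov %%" RSP_LP ", %0" : "=r" (sp));

   which reads the stack pointer using the register name selected for the
   current ABI.  The snippet is a sketch, not code taken from glibc.  */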

#endif /* __ASSEMBLER__ */

#endif /* _X86_64_SYSDEP_H */