/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Register definitions for the Hexagon architecture
 */

#ifndef _ASM_REGISTERS_H
#define _ASM_REGISTERS_H

#ifndef __ASSEMBLY__

/*  See kernel/entry.S for further documentation.  */

/*
 * Entry code copies the event record out of guest registers into
 * this structure (which is on the stack).
 */

struct hvm_event_record {
	unsigned long vmel;     /* Event Linkage (return address) */
	unsigned long vmest;    /* Event context - pre-event SSR values */
	unsigned long vmpsp;    /* Previous stack pointer */
	unsigned long vmbadva;  /* Bad virtual address for addressing events */
};
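
/*
 * Illustrative sketch, not part of the original header: the entry code
 * leaves one of these records on the stack, and handlers read its fields
 * directly.  The function below is hypothetical and shown for orientation
 * only.
 */
#if 0	/* usage sketch, never compiled */
static void example_read_event(const struct hvm_event_record *er)
{
	unsigned long return_pc = er->vmel;	/* resume address after the event */
	unsigned long saved_ssr = er->vmest;	/* pre-event SSR snapshot */
	unsigned long prev_sp   = er->vmpsp;	/* stack pointer at event time */
	unsigned long fault_va  = er->vmbadva;	/* faulting address, if relevant */

	(void)return_pc; (void)saved_ssr; (void)prev_sp; (void)fault_va;
}
#endif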

struct pt_regs {
	long restart_r0;        /* R0 checkpoint for syscall restart */
	long syscall_nr;        /* Only used in system calls */
	union {
		struct {
			unsigned long usr;
			unsigned long preds;
		};
		long long int predsusr;
	};
	union {
		struct {
			unsigned long m0;
			unsigned long m1;
		};
		long long int m1m0;
	};
	union {
		struct {
			unsigned long sa1;
			unsigned long lc1;
		};
		long long int lc1sa1;
	};
	union {
		struct {
			unsigned long sa0;
			unsigned long lc0;
		};
		long long int lc0sa0;
	};
	union {
		struct {
			unsigned long ugp;
			unsigned long gp;
		};
		long long int gpugp;
	};
	union {
		struct {
			unsigned long cs0;
			unsigned long cs1;
		};
		long long int cs1cs0;
	};
	/*
	 * Be extremely careful with rearranging these, if at all.  Some code
	 * assumes the 32 registers exist exactly like this in memory;
	 * e.g. kernel/ptrace.c
	 * e.g. kernel/signal.c (restore_sigcontext)
	 * (a usage sketch of this layout assumption follows the structure)
	 */
	union {
		struct {
			unsigned long r00;
			unsigned long r01;
		};
		long long int r0100;
	};
	union {
		struct {
			unsigned long r02;
			unsigned long r03;
		};
		long long int r0302;
	};
	union {
		struct {
			unsigned long r04;
			unsigned long r05;
		};
		long long int r0504;
	};
	union {
		struct {
			unsigned long r06;
			unsigned long r07;
		};
		long long int r0706;
	};
	union {
		struct {
			unsigned long r08;
			unsigned long r09;
		};
		long long int r0908;
	};
	union {
		struct {
			unsigned long r10;
			unsigned long r11;
		};
		long long int r1110;
	};
	union {
		struct {
			unsigned long r12;
			unsigned long r13;
		};
		long long int r1312;
	};
	union {
		struct {
			unsigned long r14;
			unsigned long r15;
		};
		long long int r1514;
	};
	union {
		struct {
			unsigned long r16;
			unsigned long r17;
		};
		long long int r1716;
	};
	union {
		struct {
			unsigned long r18;
			unsigned long r19;
		};
		long long int r1918;
	};
	union {
		struct {
			unsigned long r20;
			unsigned long r21;
		};
		long long int r2120;
	};
	union {
		struct {
			unsigned long r22;
			unsigned long r23;
		};
		long long int r2322;
	};
	union {
		struct {
			unsigned long r24;
			unsigned long r25;
		};
		long long int r2524;
	};
	union {
		struct {
			unsigned long r26;
			unsigned long r27;
		};
		long long int r2726;
	};
	union {
		struct {
			unsigned long r28;
			unsigned long r29;
		};
		long long int r2928;
	};
	union {
		struct {
			unsigned long r30;
			unsigned long r31;
		};
		long long int r3130;
	};
	/* VM dispatch pushes event record onto stack - we can build on it */
	struct hvm_event_record hvmer;
};
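
/*
 * Illustrative sketch, not taken from kernel/ptrace.c or kernel/signal.c:
 * the warning above exists because code elsewhere treats r00..r31 as one
 * contiguous 32-word array starting at &regs->r00, for example when copying
 * the general-purpose registers out in a single pass.
 */
#if 0	/* usage sketch, never compiled */
static void example_copy_gprs(unsigned long *dst, const struct pt_regs *regs)
{
	const unsigned long *gpr = &regs->r00;	/* relies on the fixed layout */
	int i;

	for (i = 0; i < 32; i++)
		dst[i] = gpr[i];
}
#endif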

/* Defines to conveniently access the values */

/*
 * As of the VM spec 0.5, these registers are now set/retrieved via a
 * VM call.  On the in-bound side, we just fetch the values
 * at the entry points and stuff them into the old record in pt_regs.
 * However, on the outbound side, probably at VM rte, we set the
 * registers back.
 */

#define pt_elr(regs) ((regs)->hvmer.vmel)
#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
#define user_mode(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
#define ints_enabled(regs) \
	(((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
#define pt_psp(regs) ((regs)->hvmer.vmpsp)
#define pt_badva(regs) ((regs)->hvmer.vmbadva)
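
/*
 * Illustrative sketch with a hypothetical handler name: the accessors above
 * are typically used together when decoding an event record, e.g. to tell
 * user-mode events from kernel-mode ones and to fetch the faulting address.
 */
#if 0	/* usage sketch, never compiled */
static void example_decode_event(struct pt_regs *regs)
{
	unsigned long pc    = pt_elr(regs);	/* PC the event will return to */
	unsigned long cause = pt_cause(regs);	/* cause field of the saved vmest */
	unsigned long badva = pt_badva(regs);	/* bad virtual address, if any */
	int from_user       = user_mode(regs) && ints_enabled(regs);

	(void)pc; (void)cause; (void)badva; (void)from_user;
}
#endif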

#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<<HVM_VMEST_SS_SFT))
#define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))
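
/*
 * Illustrative sketch, hypothetical helper names: ptrace-style single-step
 * support would flip the SS bit in the saved vmest with these macros before
 * returning to the traced task.
 */
#if 0	/* usage sketch, never compiled */
static void example_enable_single_step(struct pt_regs *regs)
{
	pt_set_singlestep(regs);	/* request a single-step event on return */
}

static void example_disable_single_step(struct pt_regs *regs)
{
	pt_clr_singlestep(regs);	/* clear the single-step request */
}
#endif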

#define pt_set_rte_sp(regs, sp) do {\
	pt_psp(regs) = (regs)->r29 = (sp);\
	} while (0)

#define pt_set_kmode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)

#define pt_set_usermode(regs) \
	(regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
			    | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
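
/*
 * Illustrative sketch, loosely modelled on copy_thread()/start_thread()-style
 * code and not copied from it: a fresh kernel context gets supervisor mode
 * with interrupts enabled, while a fresh user context additionally gets the
 * UM bit, an entry PC, and its initial user stack pointer.
 */
#if 0	/* usage sketch, never compiled */
static void example_init_kernel_regs(struct pt_regs *regs)
{
	pt_set_kmode(regs);		/* supervisor mode, interrupts enabled */
}

static void example_init_user_regs(struct pt_regs *regs, unsigned long pc,
				   unsigned long usp)
{
	pt_set_usermode(regs);		/* user mode, interrupts enabled */
	pt_set_elr(regs, pc);		/* entry point on return to user space */
	pt_set_rte_sp(regs, usp);	/* initial user stack pointer (also r29) */
}
#endif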

#endif  /*  ifndef __ASSEMBLY__  */

#endif  /*  _ASM_REGISTERS_H  */