1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 1998 Doug Rabson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#ifndef _MACHINE_CPUFUNC_H_
30#define _MACHINE_CPUFUNC_H_
31
32#ifdef _KERNEL
33
34#include <sys/types.h>
35
36#include <machine/psl.h>
37#include <machine/spr.h>
38
39struct thread;
40
#ifdef KDB
/* Enter the kernel debugger; real implementation lives in the KDB code. */
void breakpoint(void);
#else
/*
 * No debugger configured: provide a no-op stub so callers need not be
 * conditionally compiled on KDB.
 */
static __inline void
breakpoint(void)
{

	return;
}
#endif
51
52/* CPU register mangling inlines */
53
/*
 * Write the Machine State Register.  The trailing isync ensures the
 * context-altering MSR update takes effect before subsequent instructions.
 */
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}
60
#ifdef __powerpc64__
/*
 * 64-bit MSR write (updates the full 64-bit MSR, including the SF bit),
 * followed by isync for the same context-synchronization reason as mtmsr().
 */
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif
69
/* Read the current Machine State Register value. */
static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}
79
#ifndef __powerpc64__
/*
 * 32-bit only: write the segment register selected by the effective
 * address 'va'.  isync makes the new translation context take effect.
 */
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1; isync" :: "r"(value), "r"(va));
}

/* 32-bit only: read the segment register selected by effective address 'va'. */
static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
#endif
98
/*
 * Read SPR 136 -- the CTRL register read port on Book-S processors
 * (NOTE(review): SPR number is hard-coded; confirm against the target
 * CPU's SPR map rather than machine/spr.h).
 */
static __inline register_t
mfctrl(void)
{
	register_t value;

	__asm __volatile ("mfspr %0,136" : "=r"(value));

	return (value);
}
108
/* Write the decrementer (the interval-timer interrupt source). */
static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}
115
/* Read the current decrementer value. */
static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}
125
/* Read the Processor Version Register (CPU model/revision identification). */
static __inline uint32_t
mfpvr(void)
{
	uint32_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}
135
/*
 * Read the 64-bit timebase.
 *
 * On 64-bit CPUs a single mftb returns the whole value.  On 32-bit CPUs
 * the upper (TBU) and lower (TBL) halves must be read separately, so we
 * loop until TBU is unchanged across the pair of reads, guaranteeing the
 * two halves are from a consistent snapshot (no TBL wrap in between).
 *
 * NOTE(review): the uint32_t view of 'tb' assumes TBU sits in the
 * low-addressed word, i.e. big-endian layout -- confirm for LE targets.
 * The pointer aliasing also relies on the kernel's -fno-strict-aliasing.
 */
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
#ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
#else
	uint32_t *tbup = (uint32_t *)&tb;
	uint32_t *tblp = tbup + 1;

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
#endif

	return (tb);
}
154
/*
 * Write the 64-bit timebase.  TBL is zeroed first so the timebase cannot
 * carry into TBU between the TBU and final TBL writes.
 */
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}
163
/*
 * Read the FPSCR.  mffs only targets an FP register, so fr0 is used as
 * scratch and its contents stored to memory to move the value into a GPR.
 * On 32-bit the cast truncates to the low 32 bits, which hold the FPSCR.
 * Note: clobbers fr0.
 */
static __inline register_t
mffs(void)
{
	uint64_t value;

	__asm __volatile ("mffs 0; stfd 0,0(%0)"
			:: "b"(&value));

	return ((register_t)value);
}
174
/*
 * Write the FPSCR: load the 64-bit image into fr0 from memory, then
 * mtfsf with field mask 0xff updates all FPSCR fields.  Clobbers fr0.
 */
static __inline void
mtfsf(uint64_t value)
{

	__asm __volatile ("lfd 0,0(%0); mtfsf 0xff,0"
			:: "b"(&value));
}
182
/*
 * Enforce in-order execution of I/O: orders loads/stores to device and
 * certain cacheable memory.  The "memory" clobber is a compiler barrier.
 */
static __inline void
eieio(void)
{

	__asm __volatile ("eieio" : : : "memory");
}
189
/*
 * Instruction-synchronize: discard prefetched instructions and complete
 * prior instructions before continuing.  Also a compiler barrier.
 */
static __inline void
isync(void)
{

	__asm __volatile ("isync" : : : "memory");
}
196
/* Heavyweight sync: full memory barrier.  Also a compiler barrier. */
static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync" : : : "memory");
}
203
/*
 * Count leading zeros of a 64-bit word (returns 64 for an input of 0,
 * per the Power ISA definition of cntlzd).  The instruction is hand-
 * encoded as a .long with the register numbers OR'd into the operand
 * fields -- presumably so 32-bit assemblers that reject 64-bit mnemonics
 * can still build this; TODO confirm.  Do not replace with the mnemonic
 * without checking the supported toolchains.
 */
static __inline int
cntlzd(uint64_t word)
{
	uint64_t result;
	/* cntlzd %0, %1 */
	__asm __volatile(".long 0x7c000074 | (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}
214
/*
 * Count trailing zeros of a 64-bit word, hand-encoded like cntlzd()
 * above.  NOTE(review): cnttzd is an ISA 3.0 (POWER9) instruction --
 * confirm that callers gate its use on CPU capability.
 */
static __inline int
cnttzd(uint64_t word)
{
	uint64_t result;
	/* cnttzd %0, %1 */
	__asm __volatile(".long 0x7c000474 | (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}
225
/*
 * ptesync: heavyweight barrier ordering page-table updates against
 * subsequent storage accesses and TLB operations.
 *
 * The "memory" clobber makes this a compiler barrier as well, matching
 * eieio()/isync()/powerpc_sync() above; without it the compiler is free
 * to reorder memory accesses across the instruction, defeating it.
 */
static __inline void
ptesync(void)
{
	__asm __volatile("ptesync" : : : "memory");
}
231
/*
 * Disable external interrupts by clearing PSL_EE in the MSR.
 * Returns the previous MSR value for use with intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t msr;

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	return (msr);
}
241
/* Restore an MSR value previously saved by intr_disable(). */
static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}
248
/*
 * Return the per-CPU data pointer, which this port keeps in SPRG0
 * so it is accessible regardless of the current stack/translation state.
 */
static __inline struct pcpu *
get_pcpu(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}
258
259/* "NOP" operations to signify priorities to the kernel. */
/* "or 31,31,31": very-low SMT thread priority hint (Power ISA nop form). */
static __inline void
nop_prio_vlow(void)
{
	__asm __volatile("or 31,31,31");
}
265
/* "or 1,1,1": low SMT thread priority hint (Power ISA nop form). */
static __inline void
nop_prio_low(void)
{
	__asm __volatile("or 1,1,1");
}
271
/* "or 6,6,6": medium-low SMT thread priority hint (Power ISA nop form). */
static __inline void
nop_prio_mlow(void)
{
	__asm __volatile("or 6,6,6");
}
277
/* "or 2,2,2": medium (normal) SMT thread priority hint (Power ISA nop form). */
static __inline void
nop_prio_medium(void)
{
	__asm __volatile("or 2,2,2");
}
283
/* "or 5,5,5": medium-high SMT thread priority hint (Power ISA nop form). */
static __inline void
nop_prio_mhigh(void)
{
	__asm __volatile("or 5,5,5");
}
289
/* "or 3,3,3": high SMT thread priority hint (Power ISA nop form). */
static __inline void
nop_prio_high(void)
{
	__asm __volatile("or 3,3,3");
}
295
296#endif /* _KERNEL */
297
298#endif /* !_MACHINE_CPUFUNC_H_ */