/*	$NetBSD: cpu.h,v 1.123.4.1 2023/08/09 17:42:01 martin Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

#ifdef _KERNEL
#ifndef _LOCORE

typedef unsigned long mpidr_t;	/* value of the Multiprocessor Affinity Register (MPIDR) */

#ifdef MULTIPROCESSOR
extern u_int arm_cpu_max;
extern mpidr_t cpu_mpidr[];

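/*
 * Secondary-CPU ("hatching") helpers, implemented by the machine-dependent
 * startup code.  Roughly: a secondary CPU initializes itself via
 * cpu_init_secondary_processor() and marks itself alive with
 * cpu_set_hatched(); the boot CPU polls cpu_hatched_p() and later releases
 * the secondaries (cpu_boot_secondary_processors(), cpu_clr_mbox()), after
 * which they continue in cpu_mpstart().  This is only a summary; see the
 * MD startup code for the exact protocol.
 */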
void cpu_init_secondary_processor(int);
void cpu_boot_secondary_processors(void);
void cpu_mpstart(void);
bool cpu_hatched_p(u_int);

void cpu_clr_mbox(int);
void cpu_set_hatched(int);

#endif

struct proc;

void cpu_proc_fork(struct proc *, struct proc *);

#endif /* !_LOCORE */
#endif /* _KERNEL */

#ifdef __arm__

/*
 * User-visible definitions
 */

/* CTL_MACHDEP definitions. */
#define CPU_DEBUG		1	/* int: misc kernel debug control */
#define CPU_BOOTED_DEVICE	2	/* string: device we booted from */
#define CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
#define CPU_CONSDEV		4	/* struct: dev_t of our console */
#define CPU_POWERSAVE		5	/* int: use CPU powersave mode */
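/*
 * Illustrative use only (not part of this header): these are node ids
 * under CTL_MACHDEP, so e.g. the booted kernel name can be read from
 * userland with sysctl(3):
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[PATH_MAX];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */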

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

#if !defined(_MODULE) && defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_MODULE && _KERNEL_OPT */

#ifndef _LOCORE
#if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
#include <arm/armreg.h>
#endif /* TPIDRPRW_IS_CURLWP || TPIDRPRW_IS_CURCPU */

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_fpu_present;	/* nonzero if an FPU/VFP unit is present */

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#define CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#if !defined(__ARM_EABI__)
/* Hack to treat FPE time as interrupt time so we can measure it */
#define CLKF_INTR(cf)						\
	((curcpu()->ci_intr_depth > 1) ||			\
	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
#define CLKF_INTR(cf)	((void)(cf), curcpu()->ci_intr_depth > 1)
#endif

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#define CLKF_PC(frame)	((frame)->cf_tf.tf_pc)
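/*
 * Illustrative use only (not part of this header): a clock interrupt
 * handler handed a struct clockframe *cf could classify the tick as
 *
 *	if (CLKF_USERMODE(cf))
 *		... charge the tick to user time ...
 *	else if (CLKF_INTR(cf))
 *		... charge the tick to interrupt time ...
 *	else
 *		... charge the tick to system time, attributed to CLKF_PC(cf) ...
 */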

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)	(lwp_trapframe(l)->tf_pc)

/*
 * Per-CPU information.
 */
#ifdef _KERNEL
static inline int curcpl(void);
static inline void set_curcpl(int);
static inline void cpu_dosoftints(void);
#endif

#include <sys/param.h>

#ifdef _KMEMUSER
#include <sys/intr.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>

/*
 * Cache info variables.
 */
#define CACHE_TYPE_VIVT		0
#define CACHE_TYPE_xxPT		1
#define CACHE_TYPE_VIPT		1
#define CACHE_TYPE_PIxx		2
#define CACHE_TYPE_PIPT		3
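/*
 * Note that the values above form a two-bit encoding: bit 0 (..PT) is set
 * when the cache is physically tagged and bit 1 (PI..) when it is
 * physically indexed, so CACHE_TYPE_VIVT == 0 and CACHE_TYPE_PIPT == 3.
 */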

/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	u_int icache_size;
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_way_size;
	u_int icache_sets;

	u_int dcache_size;
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_way_size;
	u_int dcache_sets;

	uint8_t cache_type;
	bool cache_unified;
	uint8_t icache_type;
	uint8_t dcache_type;
};

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	device_t ci_dev;		/* Device corresponding to this CPU */
	cpuid_t ci_cpuid;
	uint32_t ci_arm_cpuid;		/* aggregate CPU id */
	uint32_t ci_arm_cputype;	/* CPU type */
	uint32_t ci_arm_cpurev;		/* CPU revision */
	uint32_t ci_ctrl;		/* The CPU control register */

	/*
	 * the following are in their own cache line, as they are stored to
	 * regularly by remote CPUs; when they were mixed with other fields
	 * we observed frequent cache misses.
	 */
	int ci_want_resched __aligned(COHERENCY_UNIT);
					/* resched() was called */
	lwp_t *ci_curlwp __aligned(COHERENCY_UNIT);
					/* current lwp */
	lwp_t *ci_onproc;		/* current user LWP / kthread */

	/*
	 * largely CPU-private.
	 */
	lwp_t *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);

	struct cpu_softc *
			ci_softc;	/* platform softc */

	int ci_cpl;			/* current processor level (spl) */
	volatile int ci_hwpl;		/* current hardware priority */
	int ci_kfpu_spl;

	volatile u_int ci_intr_depth;	/* interrupt nesting depth */
	volatile u_int ci_softints;
	volatile uint32_t ci_blocked_pics;
	volatile uint32_t ci_pending_pics;
	volatile uint32_t ci_pending_ipls;

	lwp_t *ci_lastlwp;		/* last lwp */

	struct evcnt ci_arm700bugcount;
	int32_t ci_mtx_count;
	int ci_mtx_oldspl;
	register_t ci_undefsave[3];
	uint32_t ci_vfp_id;
	uint64_t ci_lastintr;

	struct pmap_tlb_info *
			ci_tlb_info;
	struct pmap *ci_pmap_lastuser;
	struct pmap *ci_pmap_cur;
	tlb_asid_t ci_pmap_asid_cur;

	struct trapframe *
			ci_ddb_regs;

	struct evcnt ci_abt_evs[16];
	struct evcnt ci_und_ev;
	struct evcnt ci_und_cp15_ev;
	struct evcnt ci_vfp_evs[3];

	uint32_t ci_midr;
	uint32_t ci_actlr;
	uint32_t ci_revidr;
	uint32_t ci_mpidr;
	uint32_t ci_mvfr[2];

	uint32_t ci_capacity_dmips_mhz;

	struct arm_cache_info
			ci_cacheinfo;

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

extern struct cpu_info cpu_info_store[];

struct lwp *arm_curlwp(void);
struct cpu_info *arm_curcpu(void);

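/*
 * curlwp/curcpu() come in several flavours, selected below: kernel modules
 * always use the out-of-line arm_curlwp()/arm_curcpu() functions; otherwise
 * the TPIDRPRW register (the privileged-only software thread ID register)
 * caches either the current lwp (TPIDRPRW_IS_CURLWP) or the current
 * cpu_info (TPIDRPRW_IS_CURCPU); uniprocessor kernels without either option
 * fall back to cpu_info_store[0].
 */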
#ifdef _KERNEL
#if defined(_MODULE)

#define curlwp		arm_curlwp()
#define curcpu()	arm_curcpu()

#elif defined(TPIDRPRW_IS_CURLWP)
static inline struct lwp *
_curlwp(void)
{
	return (struct lwp *) armreg_tpidrprw_read();
}

static inline void
_curlwp_set(struct lwp *l)
{
	armreg_tpidrprw_write((uintptr_t)l);
}

// Also declared in <sys/lwp.h>, but repeated here in case this header was
// included before <sys/lwp.h>.
static inline struct cpu_info *lwp_getcpu(struct lwp *);

#define curlwp		_curlwp()
// curcpu() expands into two instructions: an mrc and an ldr
#define curcpu()	lwp_getcpu(_curlwp())
#elif defined(TPIDRPRW_IS_CURCPU)
#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif
static inline struct cpu_info *
curcpu(void)
{
	return (struct cpu_info *) armreg_tpidrprw_read();
}
#elif !defined(MULTIPROCESSOR)
#define curcpu()	(&cpu_info_store[0])
#elif !defined(__HAVE_PREEMPTION)
#error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
#else
#error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */

#ifndef curlwp
#define curlwp		(curcpu()->ci_curlwp)
#endif
#define curpcb		((struct pcb *)lwp_getpcb(curlwp))

#define CPU_INFO_ITERATOR	int
#if defined(_MODULE) || defined(MULTIPROCESSOR)
extern struct cpu_info *cpu_info[];
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
#else
#define cpu_number()		0

#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif
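/*
 * Illustrative use only (not part of this header): iterating over all CPUs
 * follows the usual NetBSD idiom:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		... per-CPU work ...
 *	}
 */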

#define LWP0_CPU_INFO	(&cpu_info_store[0])

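/*
 * curcpl()/set_curcpl() read and update the current CPU's interrupt
 * priority level (ci_cpl).
 */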
static inline int
curcpl(void)
{
	return curcpu()->ci_cpl;
}

static inline void
set_curcpl(int pri)
{
	curcpu()->ci_cpl = pri;
}

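/*
 * Run pending fast soft interrupts: if we are not nested inside an
 * interrupt handler and soft interrupts not masked by the current IPL are
 * pending, call dosoftints().  When __HAVE_PIC_FAST_SOFTINTS is defined the
 * PIC code dispatches soft interrupts itself, so nothing is done here.
 */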
static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}

/*
 * Scheduling glue
 */
void cpu_signotify(struct lwp *);
#define setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid: request an AST to send us through trap(),
 * marking the lwp as needing a profiling tick.
 */
#define cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
				 setsoftast(lwp_getcpu(l)))

/*
 * The stacks for the idle lwps of additional CPUs are preallocated;
 * this hook returns the one belonging to the given CPU.
 */
vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);

#ifdef _ARM_ARCH_6
int cpu_maxproc_hook(int);
#endif

#endif /* _KERNEL */

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpu.h>

#endif /* __arm__/__aarch64__ */

#endif /* !_ARM_CPU_H_ */