/*	$NetBSD: cpu.h,v 1.133.4.1 2023/08/09 17:42:03 martin Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_VIS			5	/* 0 - no VIS, 1 - VIS 1.0, etc. */
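
#if 0
/*
 * Illustrative sketch only (not part of this header): one way a
 * userland tool such as cpuctl(8) could read one of the CTL_MACHDEP
 * nodes defined above through sysctl(3).  The helper name is
 * hypothetical and error handling is minimal.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

static void
example_print_booted_kernel(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
	char buf[MAXPATHLEN];
	size_t len = sizeof(buf);

	/* CPU_BOOTED_KERNEL is documented above as a string node. */
	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
		printf("booted kernel: %s\n", buf);
}
#endif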

/*
 * This is exported via sysctl for cpuctl(8).
 */
struct cacheinfo {
	int 	c_itotalsize;
	int 	c_ilinesize;
	int 	c_dtotalsize;
	int 	c_dlinesize;
	int 	c_etotalsize;
	int 	c_elinesize;
};

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#if defined(_KERNEL)
#include <machine/bus_defs.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>
#endif
#ifdef SUN4V
#include <machine/hypervisor.h>
#endif

#include <sys/cpu_data.h>
#include <sys/mutex.h>
#include <sys/evcnt.h>

/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and by a single locked TTE at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only on that processor; other processors can reach it through
 * the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 * cpu_info
 * interrupt stack (all remaining space)
 * idle PCB
 * idle stack (STACKSPACE - sizeof(PCB))
 * 32KB TSB
 */

struct cpu_info {
	struct cpu_data		ci_data;	/* MI per-cpu data */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct lwp		*ci_onproc;	/* current user LWP / kthread */
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	uint64_t		ci_ver;

	/* CPU PROM information. */
	u_int			ci_node;
	const char		*ci_name;

	/* This is for sysctl. */
	struct cacheinfo	ci_cacheinfo;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];	/* %tick */
	uint64_t		ci_system_clockrate[2];	/* %stick */

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;

	/* This could be under MULTIPROCESSOR, but there's no good reason */
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;

/*
 * A context is simply a small number that differentiates multiple mappings
 * of the same address.  Contexts on the spitfire are 13 bits, but could
 * be as large as 17 bits.
 *
 * Each context is either free or attached to a pmap.
 *
 * The context table is an array of pointers to psegs.  Just dereference
 * the right pointer and you get to the pmap segment tables.  These are
 * physical addresses, of course.
 *
 * ci_ctx_lock protects this CPU's context allocation and freeing.
 * These fields are almost all allocated within the same cacheline.
 */
	kmutex_t		ci_ctx_lock;
	int			ci_pmap_next_ctx;
	int			ci_numctx;
	paddr_t 		*ci_ctxbusy;
	LIST_HEAD(, pmap) 	ci_pmap_ctxlist;

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus). These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

	/* TSB description (sun4v). */
	struct tsb_desc		*ci_tsb_desc;

	/*
	 * MMU Fault Status Area (sun4v).
	 * Will be initialized to the physical address of the bottom of
	 * the interrupt stack.
	 */
	paddr_t			ci_mmufsa;

	/*
	 * sun4v mondo control fields
	 */
	paddr_t			ci_cpumq;  /* cpu mondo queue address */
	paddr_t			ci_devmq;  /* device mondo queue address */
	paddr_t			ci_cpuset; /* mondo recipient address */
	paddr_t			ci_mondo;  /* mondo message address */

	/* probe fault in PCI config space reads */
	bool			ci_pci_probe;
	bool			ci_pci_fault;

	volatile void		*ci_ddb_regs;	/* DDB regs */

	void (*ci_idlespin)(void);

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

#define CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t cb_ktext;
	paddr_t cb_ktextp;
	vaddr_t cb_ektext;

	vaddr_t cb_kdata;
	paddr_t cb_kdatap;
	vaddr_t cb_ekdata;

	paddr_t	cb_cpuinfo;
	int cb_cputyp;
};

extern struct cpu_bootargs *cpu_args;
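
#if 0
/*
 * Illustrative sketch only: roughly how the primary CPU's MP bootstrap
 * code might fill in cpu_args before waking a secondary CPU through the
 * spin-up trampoline.  The helper name is hypothetical, and the real
 * code also sets the kernel text/data ranges and CPU type fields that
 * are omitted here.
 */
static void
example_prepare_secondary(struct cpu_info *ci)
{
	cpu_args->cb_node = ci->ci_node;	/* PROM node of the target CPU */
	cpu_args->cb_cpuinfo = ci->ci_paddr;	/* PA of its 64KB cpu_info page */
	cpu_args->cb_flags = 0;

	/*
	 * ... start the CPU (via the PROM or hypervisor), then spin
	 * until the secondary signals readiness through cb_flags ...
	 */
}
#endif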

#if defined(MULTIPROCESSOR)
extern int sparc_ncpus;
#else
#define sparc_ncpus 1
#endif

extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

/* CURCPU_INT() is a local (per-CPU) view of our cpu_info. */
#define	CURCPU_INT()	((struct cpu_info *)CPUINFO_VA)
/* In general we prefer the globally visible pointer. */
#define	curcpu()	(CURCPU_INT()->ci_self)
#define	cpu_number()	(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
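
#if 0
/*
 * Illustrative sketch only: what the macros above boil down to.  Each
 * CPU sees its own cpu_info through the fixed alias CPUINFO_VA and
 * recovers the globally visible pointer via ci_self, which is what
 * curcpu() returns.  The function name is hypothetical.
 */
static inline struct cpu_info *
example_curcpu(void)
{
	/* The alias is only meaningful on the local CPU. */
	struct cpu_info *alias = (struct cpu_info *)CPUINFO_VA;

	/* The self-reference is the global VA, safe to hand to other CPUs. */
	return alias->ci_self;
}
#endif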

#define CPU_INFO_ITERATOR		int __unused
#define CPU_INFO_FOREACH(cii, ci)	ci = cpus; ci != NULL; ci = ci->ci_next

/* these are only valid on the local cpu */
#define curlwp		CURCPU_INT()->ci_curlwp
#define fplwp		CURCPU_INT()->ci_fplwp
#define curpcb		CURCPU_INT()->ci_cpcb
#define want_ast	CURCPU_INT()->ci_want_ast
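
#if 0
/*
 * Illustrative sketch only: iterating over all attached CPUs with the
 * macros above and testing for the boot CPU.  The function name is
 * hypothetical.
 */
static int
example_count_secondary_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int n = 0;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (!CPU_IS_PRIMARY(ci))
			n++;
	}
	return n;
}
#endif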

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void	cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void	cpu_pmap_prepare(struct cpu_info *, bool);

/* Helper functions to retrieve cache info */
int	cpu_ecache_associativity(int node);
int	cpu_ecache_size(int node);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern  char   *mp_tramp_code;
extern  u_long  mp_tramp_code_len;
extern  u_long  mp_tramp_dtlb_slots, mp_tramp_itlb_slots;
extern  u_long  mp_tramp_func;
extern  u_long  mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send - send to just this cpu
 * The called functions do not follow the C ABI, so they need to be coded
 * in assembler.
 */
typedef void (* ipifunc_t)(void *, void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
extern void (*sparc64_send_ipi)(int, ipifunc_t, uint64_t, uint64_t);

/*
 * Call an arbitrary C function on another cpu (or on all others but
 * ourselves).
 */
typedef void (*ipi_c_call_func_t)(void*);
void	sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);
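
#if 0
/*
 * Illustrative sketch only: cross-calling a C function on another CPU
 * with sparc64_generic_xcall() as declared above.  The callback, its
 * argument, and the helper names are hypothetical.
 */
static void
example_remote_work(void *arg)
{
	/* Runs on the target CPU. */
}

static void
example_kick_cpu(struct cpu_info *target)
{
	sparc64_generic_xcall(target, example_remote_work, NULL);
}
#endif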

#endif

/* Provide %pc of a lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
		(((framep)->t.tf_out[6] & 1) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK-0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK-0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
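
#if 0
/*
 * Illustrative sketch only: how a clock interrupt path might classify
 * a tick using the macros above.  The function name is hypothetical
 * and the real accounting is done by MI code.
 */
static void
example_classify_tick(struct clockframe *cf)
{
	if (CLKF_USERMODE(cf)) {
		/* Tick interrupted user mode; charge user time. */
	} else if (CLKF_INTR(cf)) {
		/* Tick arrived while running on the interrupt stack. */
	} else {
		/* Tick interrupted normal kernel execution. */
	}
}
#endif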

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, process as soon as possible.
 */
void cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0
 * (``not me'') or 1 (``I took care of it'').  intr_establish()
 * inserts a handler into the list.  The handler is called with its
 * (single) argument, or with a pointer to a clockframe if ih_arg
 * is NULL.
 */
struct intrhand {
	int			(*ih_fun)(void *);
	void			*ih_arg;
	/* if we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg */
	int			(*ih_realfun)(void *);
	void			*ih_realarg;
	short			ih_number;	/* interrupt number */
						/* the H/W provides */
	char			ih_pil;		/* interrupt priority */
	struct intrhand		*ih_next;	/* global list */
	struct intrhand		*ih_pending;	/* interrupt queued */
	volatile uint64_t	*ih_map;	/* Interrupt map reg */
	volatile uint64_t	*ih_clr;	/* clear interrupt reg */
	void			(*ih_ack)(struct intrhand *); /* ack interrupt function */
	bus_space_tag_t		ih_bus;		/* parent bus */
	struct evcnt		ih_cnt;		/* counter for vmstat */
	uint32_t		ih_ivec;
	char			ih_name[32];	/* name for the above */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, bool mpsafe, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);
struct intrhand *intrhand_alloc(void);
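
#if 0
/*
 * Illustrative sketch only: the rough shape of how a driver hooks a
 * hardware interrupt handler into the chain with intrhand_alloc() and
 * intr_establish().  The handler, softc and pil value are hypothetical,
 * and a real bus driver also fills in fields such as ih_number,
 * ih_ivec, ih_map and ih_clr as appropriate.
 */
static int
example_hardintr(void *arg)
{
	/* ... service the device ... */
	return 1;	/* 1: handled, 0: not ours */
}

static void
example_establish(void *sc, int pil)
{
	struct intrhand *ih = intrhand_alloc();

	ih->ih_fun = example_hardintr;
	ih->ih_arg = sc;
	ih->ih_pil = pil;
	intr_establish(pil, true, ih);
}
#endif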

/* cpu.c */
int	cpu_myid(void);

/* disksubr.c */
struct dkbad;
int isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void *	reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	stickintr(void *);	/* system tick interrupt code */
int	stick2eintr(void *);	/* system tick interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
void	stickintr_establish(int, int (*)(void *));
void	stick2eintr_establish(int, int (*)(void *));

/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t	probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);
void	setcputyp(int);

#define	 write_all_windows() __asm volatile("flushw" : : )
#define	 write_user_windows() __asm volatile("flushw" : : )

struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx_us(int);
void	switchtoctx_usiii(int);
void	next_tick(long);
void	next_stick(long);
void	next_stick_init(void);
/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void kgdb_connect(int);
void kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#endif /* _KERNEL */
#endif /* _CPU_H_ */