/*	$NetBSD: cpu.h,v 1.110.4.1 2023/08/09 17:42:02 martin Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */

/*
 * Exported definitions unique to SPARC cpu support.
 */

/*
 * Sun-4 and Sun-4c virtual address cache.
 *
 * Sun-4 virtual caches come in two flavors, write-through (Sun-4c)
 * and write-back (Sun-4).  The write-back caches are much faster
 * but require a bit more care.
 *
 * This is exported via sysctl so be careful changing it.
 */
enum vactype { VAC_UNKNOWN, VAC_NONE, VAC_WRITETHROUGH, VAC_WRITEBACK };
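
/*
 * Illustrative sketch only (not a kernel interface): code that needs
 * main memory to reflect recent stores typically branches on the
 * vactype.  With a write-through cache memory is already current;
 * a write-back cache must have its dirty lines flushed first, e.g.:
 *
 *	if (cpuinfo.cacheinfo.c_vactype == VAC_WRITEBACK)
 *		cpuinfo.cache_flush(va, len);
 */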

/*
 * Cache control information.
 *
 * This is exported via sysctl so be careful changing it.
 */

struct cacheinfo {
	int	c_totalsize;		/* total size, in bytes */
					/* if split, MAX(icache,dcache) */
	int	c_enabled;		/* true => cache is enabled */
	int	c_hwflush;		/* true => have hardware flush */
	int	c_linesize;		/* line size, in bytes */
					/* if split, MIN(icache,dcache) */
	int	c_l2linesize;		/* log2(linesize) */
	int	c_nlines;		/* precomputed # of lines to flush */
	int	c_physical;		/* true => cache has physical
						   address tags */
	int 	c_associativity;	/* # of "buckets" in cache line */
	int 	c_split;		/* true => cache is split */

	int 	ic_totalsize;		/* instruction cache */
	int 	ic_enabled;
	int 	ic_linesize;
	int 	ic_l2linesize;
	int 	ic_nlines;
	int 	ic_associativity;

	int 	dc_totalsize;		/* data cache */
	int 	dc_enabled;
	int 	dc_linesize;
	int 	dc_l2linesize;
	int 	dc_nlines;
	int 	dc_associativity;

	int	ec_totalsize;		/* external cache info */
	int 	ec_enabled;
	int	ec_linesize;
	int	ec_l2linesize;
	int 	ec_nlines;
	int 	ec_associativity;

	enum vactype	c_vactype;

	int	c_flags;
#define CACHE_PAGETABLES	0x1	/* caching pagetables OK (sun4m) */
#define CACHE_TRAPPAGEBUG	0x2	/* trap page can't be cached (sun4) */
#define CACHE_MANDATORY		0x4	/* if cache is on, don't use
					   uncached access */
};
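
/*
 * Illustrative sketch only: the precomputed c_nlines/c_linesize pair
 * is what a software flush loop iterates over.  Here `ci' is a
 * struct cacheinfo * and `flush_line' is a hypothetical stand-in for
 * the real per-line flush primitive (an ASI store on this port):
 *
 *	int i, va = 0;
 *
 *	for (i = 0; i < ci->c_nlines; i++) {
 *		flush_line(va);
 *		va += ci->c_linesize;	(equal to 1 << ci->c_l2linesize)
 *	}
 */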

/* Things needed by crash or the kernel */
#if defined(_KERNEL) || defined(_KMEMUSER)

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_sparc_arch.h"
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

#include <machine/intr.h>
#include <machine/psl.h>

#if defined(_KERNEL)
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/intreg.h>
#endif

struct trapframe;

/*
 * Message structure for inter-processor communication in MP systems
 */
struct xpmsg {
	volatile int tag;
#define	XPMSG15_PAUSECPU	1
#define	XPMSG_FUNC		4
#define	XPMSG_FTRP		5

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2). `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	volatile int	received;
	volatile int	complete;
};
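
/*
 * Illustrative sketch (simplified; the real dispatcher sits behind
 * the XCALL* machinery): a sender holding xpmsg_mutex (see below)
 * fills the target CPU's message area, posts an IPI with raise_ipi()
 * (also below), and then spins until `complete' is set:
 *
 *	cpi->msg.u.xpmsg_func.func = func;
 *	cpi->msg.u.xpmsg_func.trap = NULL;	(or a fast trap handler)
 *	cpi->msg.u.xpmsg_func.arg0 = arg0;
 *	cpi->msg.u.xpmsg_func.arg1 = arg1;
 *	cpi->msg.u.xpmsg_func.arg2 = arg2;
 *	cpi->msg.complete = 0;
 *	cpi->msg.tag = XPMSG_FUNC;
 */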

/*
 * The cpuinfo structure. This structure maintains information about one
 * currently installed CPU (there may be several of these if the machine
 * supports multiple CPUs, as on some Sun4m architectures). The information
 * in this structure supersedes the old "cpumod", "mmumod", and similar
 * fields.
 */

struct cpu_info {
	/*
	 * Primary inter-processor message area.  Keep this aligned
	 * to a cache line boundary if possible, as the structure
	 * itself fits in a single (32- or 64-byte) cache line.
	 */
	struct xpmsg	msg __aligned(64);

	/* Scheduler flags */
	int	ci_want_ast;
	int	ci_want_resched;

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	int		ci_cpuid;	/* CPU index (see cpus[] array) */

	/* Context administration */
	int		*ctx_tbl;	/* [4m] SRMMU-edible context table */
	paddr_t		ctx_tbl_pa;	/* [4m] ctx table physical address */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see above */

	/* various flags to workaround anomalies in chips */
	volatile int	flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	volatile struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	volatile struct icr_pi	*intreg_4m;
	/*
	 * Send an IPI to (cpi).  For Ross CPUs we need to read
	 * the pending register to avoid a hardware bug.
	 */
#define raise_ipi(cpi,lvl)	do {			\
	volatile int x;						\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);	\
	x = (cpi)->intreg_4m->pi_pend; __USE(x);	\
} while (0)
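
	/*
	 * For example (sketch only), raise_ipi(cpi, 15) posts a
	 * level-15 soft interrupt on (cpi)'s module; the level
	 * actually used depends on the IPI sender.
	 */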

	int		sun4_mmu3l;	/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif
	int		ci_idepth;		/* Interrupt depth */

	/*
	 * The following pointers point to processes that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */
	struct	lwp	*ci_curlwp;		/* CPU owner */
	struct	lwp	*ci_onproc;		/* current user LWP / kthread */
	struct	lwp 	*fplwp;			/* FPU owner */

	int		ci_mtx_count;
	int		ci_mtx_oldspl;

	/*
	 * Idle PCB and interrupt stack.
	 */
	void		*eintstack;		/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)		/* 128 128-byte stack frames */
	void		*redzone;		/* DEBUG: stack red zone */
#define REDSIZE		(8*96)			/* some room for bouncing */

	struct	pcb	*curpcb;		/* CPU's PCB & kernel stack */

	/* locore defined: */
	void	(*get_syncflt)(void);		/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;
		int	sfva;
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush functions come in two flavours: one that
	 * acts only on the CPU it executes on, and another that
	 * uses inter-processor signals to flush the cache on
	 * all processor modules.
	 * The `ft_' versions are fast trap cache flush handlers.
	 */
	void	(*cache_flush)(void *, u_int);
	void	(*vcache_flush_page)(int, int);
	void	(*sp_vcache_flush_page)(int, int);
	void	(*ft_vcache_flush_page)(int, int);
	void	(*vcache_flush_segment)(int, int, int);
	void	(*sp_vcache_flush_segment)(int, int, int);
	void	(*ft_vcache_flush_segment)(int, int, int);
	void	(*vcache_flush_region)(int, int);
	void	(*sp_vcache_flush_region)(int, int);
	void	(*ft_vcache_flush_region)(int, int);
	void	(*vcache_flush_context)(int);
	void	(*sp_vcache_flush_context)(int);
	void	(*ft_vcache_flush_context)(int);

	/* These are helpers for (*cache_flush)() */
	void	(*sp_vcache_flush_range)(int, int, int);
	void	(*ft_vcache_flush_range)(int, int, int);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

	/* Virtual addresses for use in pmap copy_page/zero_page */
	void *	vpage[2];
	int	*vpage_pte[2];		/* pte location of vpage[] */

	void	(*cache_enable)(void);

	int	cpu_type;	/* Type: see CPUTYP_xxx below */

	/* Inter-processor message area (high priority but used infrequently) */
	struct xpmsg	msg_lev15;

	/* CPU information */
	int		node;		/* PROM node for this CPU */
	int		mid;		/* Module ID for MP systems */
	int		mbus;		/* 1 if CPU is on MBus */
	int		mxcc;		/* 1 if a MBus-level MXCC is present */
	const char	*cpu_longname;	/* CPU model */
	int		cpu_impl;	/* CPU implementation code */
	int		cpu_vers;	/* CPU version code */
	int		mmu_impl;	/* MMU implementation code */
	int		mmu_vers;	/* MMU version code */
	int		master;		/* 1 if this is bootup CPU */

	vaddr_t		mailbox;	/* VA of CPU's mailbox */

	int		mmu_ncontext;	/* Number of contexts supported */
	int		mmu_nregion; 	/* Number of regions supported */
	int		mmu_nsegment;	/* [4/4c] Segments */
	int		mmu_npmeg;	/* [4/4c] Pmegs */

/* XXX - we currently don't actually use the following */
	int		arch;		/* Architecture: CPU_SUN4x */
	int		class;		/* Class: SuperSPARC, microSPARC... */
	int		classlvl;	/* Iteration in class: 1, 2, etc. */
	int		classsublvl;	/* stepping in class (version) */

	int		hz;		/* Clock speed */

	/* FPU information */
	int		fpupresent;	/* true if FPU is present */
	int		fpuvers;	/* FPU revision */
	const char	*fpu_name;	/* FPU model */
	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */

	/* XXX */
	volatile void	*ci_ddb_regs;		/* DDB regs */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);


#if 0
	/* hardware-assisted block operation routines */
	void		(*hwbcopy)(const void *from, void *to, size_t len);
	void		(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void		(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void		(*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void		(*idlespin)(void);
	/* Module Control Registers */
	/*bus_space_handle_t*/ long ci_mbusport;
	/*bus_space_handle_t*/ long ci_mxccregs;

	u_int	ci_tt;			/* Last trap (if tracing) */

	/*
	 * Start/End VA's of this cpu_info region; we upload the other pages
	 * in this region that aren't part of the cpu_info to uvm.
	 */
	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;

	struct evcnt ci_savefpstate;
	struct evcnt ci_savefpstate_null;
	struct evcnt ci_xpmsg_mutex_fail;
	struct evcnt ci_xpmsg_mutex_fail_call;
	struct evcnt ci_xpmsg_mutex_not_held;
	struct evcnt ci_xpmsg_bogus;
	struct evcnt ci_intrcnt[16];
	struct evcnt ci_sintrcnt[16];

	struct cpu_data ci_data;	/* MI per-cpu data */

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

#endif /* _KERNEL || _KMEMUSER */

/* Kernel only things. */
#if defined(_KERNEL)

#include <sys/mutex.h>

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpuinfo			(*(struct cpu_info *)CPUINFO_VA)
#define	curcpu()		(cpuinfo.ci_self)
#define	curlwp			(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()		(cpuinfo.ci_cpuid)
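
/*
 * Illustrative sketch: MI code uses these accessors as on any other
 * port; the CPUINFO_VA self-reference described above is hidden
 * behind curcpu().
 *
 *	struct cpu_info *ci = curcpu();		(this CPU, global VA)
 *
 *	if (CPU_IS_PRIMARY(ci))
 *		printf("cpu%d is the boot CPU\n", cpu_number());
 */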

void	cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
void	cpu_boot_secondary_processors(void);
#endif

/*
 * Arguments to hardclock, softclock and statclock encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	u_int	psr;		/* psr before interrupt, excluding PSR_ET */
	u_int	pc;		/* pc at interrupt */
	u_int	npc;		/* npc at interrupt */
	u_int	ipl;		/* actual interrupt priority level */
	u_int	fp;		/* %fp at interrupt */
};
typedef struct clockframe clockframe;

extern int eintstack[];

#define	CLKF_USERMODE(framep)	(((framep)->psr & PSR_PS) == 0)
#define	CLKF_LOPRI(framep,n)	(((framep)->psr & PSR_PIL) < (n) << 8)
#define	CLKF_PC(framep)		((framep)->pc)
#if defined(MULTIPROCESSOR)
#define	CLKF_INTR(framep)						\
	((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE &&	\
	 (framep)->fp < (u_int)cpuinfo.eintstack)
#else
#define	CLKF_INTR(framep)	((framep)->fp < (u_int)eintstack)
#endif
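
/*
 * Illustrative sketch: a clock interrupt handler attributes each tick
 * using these macros, roughly:
 *
 *	if (CLKF_USERMODE(framep))
 *		... charge the tick to the current user LWP ...
 *	else if (CLKF_INTR(framep))
 *		... the tick interrupted another interrupt handler ...
 */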

void	sparc_softintr_init(void);

/*
 * Preempt the current process on the target CPU if in interrupt from
 * user mode, or after the current trap/syscall if in system mode.
 */
#define cpu_need_resched(ci, l, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((flags & RESCHED_REMOTE) != 0)				\
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)

480 * Notify the current process (p) that it has a signal pending,
481 * process as soon as possible.
482 */
#define cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/* CPU architecture version */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of a lwp */
#define LWP_PC(l)       ((l)->l_md.md_tf->tf_pc)

/* Hardware cross-call mutex */
extern kmutex_t xpmsg_mutex;

/*
 * Interrupt handler chains.  Interrupt handlers should return 0
 * (``not me'') or 1 (``I took care of it'').  intr_establish()
 * inserts a handler into the list.  The handler is called with its
 * (single) argument, or with a pointer to a clockframe if ih_arg is
 * NULL.
 *
 * realfun/realarg are used to chain callers, usually with the
 * biglock wrapper.
 */
extern struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	struct	intrhand *ih_next;
	int	ih_classipl;
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
} *intrhand[15];

void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);
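
/*
 * Illustrative sketch (the intr_establish() argument meanings are a
 * sketch here; see intr.c for the authoritative ones, and `mydev_*'
 * and `sc' are hypothetical driver names): a driver fills in a
 * statically allocated intrhand and chains it onto the list for its
 * interrupt level.
 *
 *	static struct intrhand mydev_ih;
 *
 *	mydev_ih.ih_fun = mydev_intr;		(returns 1 if handled)
 *	mydev_ih.ih_arg = sc;
 *	intr_establish(pri, 0, &mydev_ih, NULL, false);
 */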

void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);

/* disksubr.c */
struct dkbad;
int isbad(struct dkbad *, int, int, int);

/* machdep.c */
int	ldcontrolb(void *);
void *	reserve_dumppages(void *);
void	wcopy(const void *, void *, u_int);
void	wzero(void *, u_int);

/* clock.c */
struct timeval;
void	lo_microtime(struct timeval *);
void	schedintr(void *);

/* locore.s */
struct fpstate;
void	ipi_savefpstate(struct fpstate *);
void	savefpstate(struct fpstate *);
void	loadfpstate(struct fpstate *);
int	probeget(void *, int);
void	write_all_windows(void);
void	write_user_windows(void);
void 	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
int	xldcontrolb(void *, struct pcb *);
void	copywords(const void *, void *, size_t);
void	qcopy(const void *, void *, size_t);
void	qzero(void *, size_t);

/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);

/* cons.c */
int	cnrom(void);

/* zs.c */
void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void zs_kgdb_init(void);
#endif

/* fb.c */
void	fb_unblank(void);

/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void kgdb_connect(int);
void kgdb_panic(void);
#endif

/* emul.c */
struct trapframe;
int fixalign(struct lwp *, struct trapframe *, void **);
int emulinstr(int, struct trapframe *);

/* cpu.c */
void mp_pause_cpus(void);
void mp_resume_cpus(void);
void mp_halt_cpus(void);
#ifdef DDB
void mp_pause_cpus_ddb(void);
void mp_resume_cpus_ddb(void);
#endif

/* intr.c */
u_int setitr(u_int);
u_int getitr(void);

/*
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[4];		/* the four instructions */
};

extern struct trapvec *trapbase;	/* the 256 vectors */
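
/*
 * Illustrative sketch: since the hardware supplies bits <11:4> of the
 * vector address from the 8-bit trap type `tt', installing a handler
 * amounts to writing four instructions at trapbase[tt]:
 *
 *	struct trapvec *tv = &trapbase[tt];
 *
 *	tv->tv_instr[0] = instr0;	(typically a branch to the handler)
 *	tv->tv_instr[1] = instr1;
 *	tv->tv_instr[2] = instr2;
 *	tv->tv_instr[3] = instr3;
 */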

#endif /* _KERNEL */
#endif /* _CPU_H_ */