1/* $NetBSD: cpu.h,v 1.48.2.1 2024/10/13 10:43:11 martin Exp $ */
  2
  3/*-
  4 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
  5 * All rights reserved.
  6 *
  7 * This code is derived from software contributed to The NetBSD Foundation
  8 * by Matt Thomas of 3am Software Foundry.
  9 *
 10 * Redistribution and use in source and binary forms, with or without
 11 * modification, are permitted provided that the following conditions
 12 * are met:
 13 * 1. Redistributions of source code must retain the above copyright
 14 *    notice, this list of conditions and the following disclaimer.
 15 * 2. Redistributions in binary form must reproduce the above copyright
 16 *    notice, this list of conditions and the following disclaimer in the
 17 *    documentation and/or other materials provided with the distribution.
 18 *
 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 22 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 29 * POSSIBILITY OF SUCH DAMAGE.
 30 */
 31
 32#ifndef _AARCH64_CPU_H_
 33#define _AARCH64_CPU_H_
 34
 35#include <arm/cpu.h>
 36
 37#ifdef __aarch64__
 38
 39#ifdef _KERNEL_OPT
 40#include "opt_gprof.h"
 41#include "opt_multiprocessor.h"
 42#include "opt_pmap.h"
 43#endif
 44
 45#include <sys/param.h>
 46
 47#if defined(_KERNEL) || defined(_KMEMUSER)
 48#include <sys/evcnt.h>
 49
 50#include <aarch64/armreg.h>
 51#include <aarch64/frame.h>
 52
/*
 * Machine-dependent clock interrupt frame, examined by the CLKF_*
 * macros below; on aarch64 it is simply the trapframe pushed at
 * interrupt entry.
 */
struct clockframe {
	struct trapframe cf_tf;		/* register state at the interrupt */
};
 56
/*
 * CLKF_USERMODE: true if the interrupt came from user mode.
 * (spsr & 15) == SPSR_M_EL0T(64bit,0) or USER(32bit,0)
 */
#define CLKF_USERMODE(cf)	((((cf)->cf_tf.tf_spsr) & 0x0f) == 0)
/* CLKF_PC: pc at the time of the clock interrupt. */
#define CLKF_PC(cf)		((cf)->cf_tf.tf_pc)
/*
 * CLKF_INTR: true if the clock interrupted another interrupt handler
 * (i.e. we were already at interrupt depth > 1); the frame itself is
 * not consulted.
 */
#define CLKF_INTR(cf)		((void)(cf), curcpu()->ci_intr_depth > 1)

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		((l)->l_md.md_utf->tf_pc)
 66
 67#include <sys/cpu_data.h>
 68#include <sys/device_if.h>
 69#include <sys/intr.h>
 70
/*
 * Indirect CPU functions, installed per-CPU (see ci_cpufuncs in
 * struct cpu_info) so implementation-specific variants can be used.
 */
struct aarch64_cpufuncs {
	void (*cf_set_ttbr0)(uint64_t);			/* set TTBR0 ttb register */
	void (*cf_icache_sync_range)(vaddr_t, vsize_t);	/* sync I-cache for VA range */
};
 75
#define MAX_CACHE_LEVEL	8		/* ARMv8 has maximum 8 level cache */

/*
 * Geometry of a single cache (one side of one level); values are
 * presumably decoded from the CLIDR/CCSIDR system registers — see the
 * cache discovery code for how each field is filled in.
 */
struct aarch64_cache_unit {
	u_int cache_type;		/* one of CACHE_TYPE_* below */
#define CACHE_TYPE_VPIPT	0	/* VMID-aware PIPT */
#define CACHE_TYPE_VIVT		1	/* ASID-tagged VIVT */
#define CACHE_TYPE_VIPT		2	/* virtually indexed, physically tagged */
#define CACHE_TYPE_PIPT		3	/* physically indexed, physically tagged */
	u_int cache_line_size;
	u_int cache_ways;
	u_int cache_sets;
	u_int cache_way_size;		/* NOTE(review): presumably line_size * sets */
	u_int cache_size;		/* NOTE(review): presumably way_size * ways */
};
 90
/*
 * Per-level cache description: which sides (I/D/unified) exist at this
 * level, plus the geometry of each side.
 */
struct aarch64_cache_info {
	u_int cacheable;
#define CACHE_CACHEABLE_NONE	0
#define CACHE_CACHEABLE_ICACHE	1	/* instruction cache only */
#define CACHE_CACHEABLE_DCACHE	2	/* data cache only */
#define CACHE_CACHEABLE_IDCACHE	3	/* instruction and data caches */
#define CACHE_CACHEABLE_UNIFIED	4	/* unified cache */
	struct aarch64_cache_unit icache;
	struct aarch64_cache_unit dcache;	/* also used for a unified cache */
};
101
102struct cpu_info {
103	struct cpu_data ci_data;
104	device_t ci_dev;
105	cpuid_t ci_cpuid;
106
107	/*
108	 * the following are in their own cache line, as they are stored to
109	 * regularly by remote CPUs; when they were mixed with other fields
110	 * we observed frequent cache misses.
111	 */
112	int ci_want_resched __aligned(COHERENCY_UNIT);
113	/* XXX pending IPIs? */
114
115	/*
116	 * this is stored frequently, and is fetched by remote CPUs.
117	 */
118	struct lwp *ci_curlwp __aligned(COHERENCY_UNIT);
119	struct lwp *ci_onproc;
120
121	/*
122	 * largely CPU-private.
123	 */
124	struct lwp *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);
125
126	uint64_t ci_lastintr;
127
128	int ci_mtx_oldspl;
129	int ci_mtx_count;
130
131	int ci_cpl;		/* current processor level (spl) */
132	volatile int ci_hwpl;	/* current hardware priority */
133	volatile u_int ci_softints;
134	volatile u_int ci_intr_depth;
135	volatile uint32_t ci_blocked_pics;
136	volatile uint32_t ci_pending_pics;
137	volatile uint32_t ci_pending_ipls;
138
139	int ci_kfpu_spl;
140
141#if defined(PMAP_MI)
142        struct pmap_tlb_info *ci_tlb_info;
143        struct pmap *ci_pmap_lastuser;
144        struct pmap *ci_pmap_cur;
145#endif
146
147	/* ASID of current pmap */
148	tlb_asid_t ci_pmap_asid_cur;
149
150	/* event counters */
151	struct evcnt ci_vfp_use;
152	struct evcnt ci_vfp_reuse;
153	struct evcnt ci_vfp_save;
154	struct evcnt ci_vfp_release;
155	struct evcnt ci_uct_trap;
156	struct evcnt ci_intr_preempt;
157	struct evcnt ci_rndrrs_fail;
158
159	/* FDT or similar supplied "cpu capacity" */
160	uint32_t ci_capacity_dmips_mhz;
161
162	/* interrupt controller */
163	u_int ci_gic_redist;	/* GICv3 redistributor index */
164	uint64_t ci_gic_sgir;	/* GICv3 SGIR target */
165
166	/* ACPI */
167	uint32_t ci_acpiid;	/* ACPI Processor Unique ID */
168
169	/* cached system registers */
170	uint64_t ci_sctlr_el1;
171	uint64_t ci_sctlr_el2;
172
173	/* sysctl(9) exposed system registers */
174	struct aarch64_sysctl_cpu_id ci_id;
175
176	/* cache information and function pointers */
177	struct aarch64_cache_info ci_cacheinfo[MAX_CACHE_LEVEL];
178	struct aarch64_cpufuncs ci_cpufuncs;
179
180#if defined(GPROF) && defined(MULTIPROCESSOR)
181	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
182#endif
183} __aligned(COHERENCY_UNIT);
184
185#ifdef _KERNEL
186static inline __always_inline struct lwp * __attribute__ ((const))
187aarch64_curlwp(void)
188{
189	struct lwp *l;
190	__asm("mrs %0, tpidr_el1" : "=r"(l));
191	return l;
192}
193
/* forward declaration; defined in sys/lwp.h. */
static __inline struct cpu_info *lwp_getcpu(struct lwp *);

/* curcpu(): cpu_info of the CPU the current lwp is running on. */
#define	curcpu()		(lwp_getcpu(aarch64_curlwp()))
/* setsoftast(ci): request an AST for the lwp on ci, via cpu_signotify(). */
#define	setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))
#undef curlwp
#define	curlwp			(aarch64_curlwp())
/* curpcb: pcb of the current lwp. */
#define	curpcb			((struct pcb *)lwp_getpcb(curlwp))
202
void	cpu_signotify(struct lwp *l);
void	cpu_need_proftick(struct lwp *l);

/* cpu_hatch: MD secondary-CPU bring-up hook. */
void	cpu_hatch(struct cpu_info *);

extern struct cpu_info *cpu_info[];
extern struct cpu_info cpu_info_store[];

/* Iterator state for CPU_INFO_FOREACH(): just an index. */
#define CPU_INFO_ITERATOR	int
#if defined(MULTIPROCESSOR) || defined(_MODULE)
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
/*
 * Visit every attached CPU; iterates at least once (ncpu may still be
 * 0 early in boot, hence the "ncpu ? ncpu : 1").
 */
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = cpu_info[0];					\
	cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL;	\
	cii++
#else /* MULTIPROCESSOR */
/* Uniprocessor: exactly one CPU, always primary. */
#define cpu_number()		0
#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif /* MULTIPROCESSOR */

/* cpu_info used for lwp0 before per-CPU attach. */
#define	LWP0_CPU_INFO	(&cpu_info_store[0])

/* This port supplies cpu_dosoftints_ci() (below). */
#define	__HAVE_CPU_DOSOFTINTS_CI
229
/*
 * cpu_dosoftints_ci: process pending fast soft interrupts for the
 * given CPU, but only when not inside an interrupt handler and some
 * pending softint is unmasked at the current spl.
 */
static inline void
cpu_dosoftints_ci(struct cpu_info *ci)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	void dosoftints(void);

	/* Never run softints from interrupt context. */
	if (ci->ci_intr_depth != 0)
		return;

	/* Anything pending above the current priority level? */
	if ((ci->ci_softints >> ci->ci_cpl) != 0)
		dosoftints();
#endif
}
241
/*
 * cpu_dosoftints: process pending fast soft interrupts on the
 * current CPU (convenience wrapper around cpu_dosoftints_ci()).
 */
static inline void
cpu_dosoftints(void)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	struct cpu_info * const ci = curcpu();

	cpu_dosoftints_ci(ci);
#endif
}
249
250
251#endif /* _KERNEL */
252
253#endif /* _KERNEL || _KMEMUSER */
254
255#endif
256
257#endif /* _AARCH64_CPU_H_ */