/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __arm__
#include <arm/cpufunc.h>
#else /* !__arm__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

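/*
 * Trigger a software breakpoint: "brk #0" raises a Breakpoint Instruction
 * exception, which the kernel debugger can catch if one is attached.
 */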
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL
#include <machine/armreg.h>

void pan_enable(void);

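/*
 * Mask debug exceptions by setting the DAIF.D bit, returning the previous
 * DAIF state so the caller can restore it later.
 */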
static __inline register_t
dbg_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif \n"
	    "msr daifset, #(" __XSTRING(DAIF_D) ") \n"
	    : "=&r" (ret));

	return (ret);
}

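/* Unmask debug exceptions by clearing the DAIF.D bit. */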
static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")");
}

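/*
 * Mask the interrupt exceptions covered by DAIF_INTR, returning the
 * previous DAIF state for a later intr_restore().
 */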
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif \n"
	    "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n"
	    : "=&r" (ret));

	return (ret);
}

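/*
 * Restore the DAIF state previously returned by intr_disable(), typically
 * to close a critical section, e.g.:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	(code that must run with interrupts masked)
 *	intr_restore(s);
 */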
static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

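/* Unconditionally unmask the interrupt exceptions covered by DAIF_INTR. */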
static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #(" ____XSTRING(DAIF_INTR)_PLACEHOLDER__ ")");
}

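/* Unmask SError (asynchronous abort) exceptions by clearing DAIF.A. */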
static __inline void
serror_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}

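/* Read the Main ID Register, which identifies the CPU implementer and part. */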
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

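/* Read the Multiprocessor Affinity Register, which identifies this core. */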
static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

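/*
 * Clear the local exclusive monitor, e.g. on context switch, so a stale
 * load-exclusive cannot pair with a store-exclusive in the new context.
 */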
static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber is a compiler barrier; without it the
	 * monitor clear could be reordered and take effect too late.
	 */
	__asm __volatile("clrex" : : : "memory");
}

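/*
 * Install a new user address space root (TTBR0_EL1); the ISB ensures the
 * new translation table base is seen before any subsequent instruction.
 */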
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb \n"
	    :
	    : "r" (ttbr0));
}

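/*
 * Invalidate all instruction caches to the Point of Unification across the
 * Inner Shareable domain, i.e. on all CPUs, then synchronize.
 */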
static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis \n"
	    "dsb ish \n"
	    "isb \n");
}

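/* Invalidate only the local CPU's instruction cache, then synchronize. */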
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu \n"
	    "dsb nsh \n"
	    "isb \n");
}

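/* Cache properties and line sizes, discovered from the CPU at boot. */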
extern bool icache_aliasing;
extern bool icache_vmid;

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

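/*
 * I-cache synchronization goes through a function pointer so that the
 * implementation matching the CPU's cache properties can be chosen at boot.
 */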
extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
bool arm64_get_writable_addr(vm_offset_t, vm_offset_t *);

#endif /* _KERNEL */
#endif /* _MACHINE_CPUFUNC_H_ */

#endif /* !__arm__ */