/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
 */
37
38#ifndef _MACHINE_VMPARAM_H_
39#define _MACHINE_VMPARAM_H_
40
41/*
42 * Virtual memory related constants, all in bytes
43 */
44#ifndef MAXTSIZ
45#define MAXTSIZ (1*1024*1024*1024) /* max text size */
46#endif
47#ifndef DFLDSIZ
48#define DFLDSIZ (128*1024*1024) /* initial data size limit */
49#endif
50#ifndef MAXDSIZ
51#define MAXDSIZ (1*1024*1024*1024) /* max data size */
52#endif
53#ifndef DFLSSIZ
54#define DFLSSIZ (128*1024*1024) /* initial stack size limit */
55#endif
56#ifndef MAXSSIZ
57#define MAXSSIZ (1*1024*1024*1024) /* max stack size */
58#endif
59#ifndef SGROWSIZ
60#define SGROWSIZ (128*1024) /* amount to grow stack */
61#endif
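
/*
 * Example (illustrative, not part of the original header): because each of
 * the limits above is wrapped in #ifndef, it can be overridden at kernel
 * build time.  A kernel configuration file may carry a line such as
 *
 *	options 	MAXDSIZ=(2UL*1024*1024*1024)
 *
 * which replaces the 1GB default with a 2GB maximum data size; the value
 * shown is purely an example.
 */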

/*
 * The physical address space is sparsely populated.
 */
#define VM_PHYSSEG_SPARSE

/*
 * The number of PHYSSEG entries.
 */
#define VM_PHYSSEG_MAX		64

/*
 * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
 * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
 * the pool from which physical pages for small UMA objects are
 * allocated.
 */
#define VM_NFREEPOOL		2
#define VM_FREEPOOL_DEFAULT	0
#define VM_FREEPOOL_DIRECT	1

/*
 * Create one free page list: VM_FREELIST_DEFAULT is for all physical
 * pages.
 */
#define VM_NFREELIST		1
#define VM_FREELIST_DEFAULT	0

/*
 * An allocation size of 16MB is supported in order to optimize the
 * use of the direct map by UMA.  Specifically, a cache line contains
 * at most four TTEs, collectively mapping 16MB of physical memory.
 * By reducing the number of distinct 16MB "pages" that are used by UMA,
 * the physical memory allocator reduces the likelihood of both 4MB
 * page TLB misses and cache misses caused by 4MB page TLB misses.
 */
#define VM_NFREEORDER		12

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef VM_NRESERVLEVEL
#define VM_NRESERVLEVEL		1
#endif

/*
 * Level 0 reservations consist of 512 pages.
 */
#ifndef VM_LEVEL_0_ORDER
#define VM_LEVEL_0_ORDER	9
#endif
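
/*
 * Worked example (illustrative, assuming 4KB base pages): a level 0
 * reservation spans 2^VM_LEVEL_0_ORDER = 2^9 = 512 pages, i.e.
 * 512 * 4KB = 2MB of naturally aligned, physically contiguous memory,
 * which matches the region mapped by a single L2 (megapage) entry in
 * SV39/SV48.  A fully populated reservation is therefore a candidate
 * for promotion to one superpage mapping.
 */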

/**
 * Address space layout.
 *
 * RISC-V implements multiple paging modes with different virtual address space
 * sizes: SV32, SV39, SV48 and SV57.  Only SV39 and SV48 are supported by
 * FreeBSD.  SV39 provides a 512GB virtual address space and uses three-level
 * page tables, while SV48 provides a 256TB virtual address space and uses
 * four-level page tables.  64-bit RISC-V implementations are required to
 * provide at least SV39 mode; locore initially enables SV39 mode while
 * bootstrapping page tables, and pmap_bootstrap() optionally switches to SV48
 * mode.
 *
 * The address space is split into two regions at each end of the 64-bit
 * address space; the lower region is for use by user mode software, while the
 * upper region is used for various kernel maps.  The kernel map layout in
 * SV48 mode is currently identical to that used in SV39 mode.
 *
 * SV39 memory map:
 * 0x0000000000000000 - 0x0000003fffffffff    256GB user map
 * 0x0000004000000000 - 0xffffffbfffffffff    unmappable
 * 0xffffffc000000000 - 0xffffffc7ffffffff    32GB kernel map
 * 0xffffffc800000000 - 0xffffffcfffffffff    32GB unused
 * 0xffffffd000000000 - 0xffffffefffffffff    128GB direct map
 * 0xfffffff000000000 - 0xffffffffffffffff    64GB unused
 *
 * SV48 memory map:
 * 0x0000000000000000 - 0x00007fffffffffff    128TB user map
 * 0x0000800000000000 - 0xffff7fffffffffff    unmappable
 * 0xffff800000000000 - 0xffffffbfffffffff    127.75TB hole
 * 0xffffffc000000000 - 0xffffffc7ffffffff    32GB kernel map
 * 0xffffffc800000000 - 0xffffffcfffffffff    32GB unused
 * 0xffffffd000000000 - 0xffffffefffffffff    128GB direct map
 * 0xfffffff000000000 - 0xffffffffffffffff    64GB unused
 *
 * The kernel is loaded at the beginning of the kernel map.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64-bit address space, mostly just for convenience.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
 * mappable kernel virtual address space.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
 * user address space.
 */
#define VM_MIN_ADDRESS		(0x0000000000000000UL)
#define VM_MAX_ADDRESS		(0xffffffffffffffffUL)

#define VM_MIN_KERNEL_ADDRESS	(0xffffffc000000000UL)
#define VM_MAX_KERNEL_ADDRESS	(0xffffffc800000000UL)

#define DMAP_MIN_ADDRESS	(0xffffffd000000000UL)
#define DMAP_MAX_ADDRESS	(0xfffffff000000000UL)

#define DMAP_MIN_PHYSADDR	(dmap_phys_base)
#define DMAP_MAX_PHYSADDR	(dmap_phys_max)

/* True if pa is in the dmap range */
#define PHYS_IN_DMAP(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
    (pa) < DMAP_MAX_PHYSADDR)
/* True if va is in the dmap range */
#define VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
    (va) < (dmap_max_addr))

#define PMAP_HAS_DMAP	1
#define PHYS_TO_DMAP(pa)					\
({								\
	KASSERT(PHYS_IN_DMAP(pa),				\
	    ("%s: PA out of range, PA: 0x%lx", __func__,	\
	    (vm_paddr_t)(pa)));					\
	((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS;		\
})

#define DMAP_TO_PHYS(va)					\
({								\
	KASSERT(VIRT_IN_DMAP(va),				\
	    ("%s: VA out of range, VA: 0x%lx", __func__,	\
	    (vm_offset_t)(va)));				\
	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;		\
})
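
/*
 * Usage sketch (illustrative only; the physical address below is
 * hypothetical):
 *
 *	vm_paddr_t pa = 0x80200000ul;
 *	if (PHYS_IN_DMAP(pa)) {
 *		vm_offset_t va = PHYS_TO_DMAP(pa);
 *		KASSERT(DMAP_TO_PHYS(va) == pa, ("dmap round trip"));
 *	}
 *
 * PHYS_TO_DMAP() rebases a physical address from dmap_phys_base to
 * DMAP_MIN_ADDRESS to obtain its direct-map virtual address;
 * DMAP_TO_PHYS() performs the inverse translation.
 */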

#define VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define VM_MAX_USER_ADDRESS_SV39 (0x0000004000000000UL)
#define VM_MAX_USER_ADDRESS_SV48 (0x0000800000000000UL)
#define VM_MAX_USER_ADDRESS	VM_MAX_USER_ADDRESS_SV48

#define VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define KERNBASE		(VM_MIN_KERNEL_ADDRESS)
#define SHAREDPAGE_SV39		(VM_MAX_USER_ADDRESS_SV39 - PAGE_SIZE)
#define SHAREDPAGE_SV48		(VM_MAX_USER_ADDRESS_SV48 - PAGE_SIZE)
#define SHAREDPAGE		SHAREDPAGE_SV48
#define USRSTACK_SV39		SHAREDPAGE_SV39
#define USRSTACK_SV48		SHAREDPAGE_SV48
#define USRSTACK		USRSTACK_SV48
#define PS_STRINGS_SV39		(USRSTACK_SV39 - sizeof(struct ps_strings))
#define PS_STRINGS_SV48		(USRSTACK_SV48 - sizeof(struct ps_strings))
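
/*
 * Illustrative layout of the top of the SV48 user address space, assuming
 * 4KB pages: the shared page occupies the last user page,
 * 0x00007ffffffff000 - 0x00007fffffffffff, the user stack grows down from
 * USRSTACK (== SHAREDPAGE), and the ps_strings structure sits immediately
 * below the stack top at PS_STRINGS_SV48.
 */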

#define VM_EARLY_DTB_ADDRESS	(VM_MAX_KERNEL_ADDRESS - (2 * L2_SIZE))

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define VM_KMEM_SIZE_SCALE	(1)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 */
#ifndef VM_KMEM_SIZE_MAX
#define VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif
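
/*
 * Worked example (illustrative): the kernel map spans
 * 0xffffffc800000000 - 0xffffffc000000000 = 32GB, so the default ceiling
 * works out to roughly 32GB * 3 / 5, i.e. about 19.2GB.
 */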

/*
 * Initial pagein size of beginning of executable file.
 */
#ifndef VM_INITIAL_PAGEIN
#define VM_INITIAL_PAGEIN	16
#endif

#define UMA_MD_SMALL_ALLOC

#ifndef LOCORE
extern vm_paddr_t dmap_phys_base;
extern vm_paddr_t dmap_phys_max;
extern vm_offset_t dmap_max_addr;
extern vm_offset_t init_pt_va;
#endif

#define ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */

#define DEVMAP_MAX_VADDR	VM_MAX_KERNEL_ADDRESS
#define PMAP_MAPDEV_EARLY_SIZE	L2_SIZE

/*
 * No non-transparent large page support in the pmap.
 */
#define PMAP_HAS_LARGEPAGES	0

/*
 * Need a page dump array for minidump.
 */
#define MINIDUMP_PAGE_TRACKING	1

#endif /* !_MACHINE_VMPARAM_H_ */