/*	$NetBSD: uvm_map.h,v 1.80 2020/05/26 00:50:53 kamil Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_start(MAP,ENTRY,VA); \
	} \
}

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_end(MAP,ENTRY,VA); \
	} \
}
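
/*
 * Example (illustrative sketch only, not part of the kernel sources):
 * before operating on just the range [start, end) of an entry, a
 * caller typically clips the entry on both sides so that it covers
 * exactly that range.  "map", "entry", "start" and "end" are assumed
 * to be supplied by the caller, and the map must already be locked:
 *
 *	UVM_MAP_CLIP_START(map, entry, start);
 *	UVM_MAP_CLIP_END(map, entry, end);
 *
 * After both calls, "entry" spans exactly [start, end).
 */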

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
#define UVM_EXTRACT_PROT_ALL	0x20	/* set prot to UVM_PROT_ALL */
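
/*
 * Example (illustrative sketch only): these flags are OR'ed together
 * and passed to uvm_map_extract(), declared below.  Here a range is
 * pulled out of "srcmap" into "dstmap" and removed from the source;
 * "srcmap", "dstmap", "start" and "len" are assumed to be supplied
 * by the caller:
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 */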

#endif /* _KERNEL */

#include <sys/rbtree.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 *
 * At runtime this is aligned on a cacheline boundary, with fields
 * used during fault processing to do RB tree lookup clustered at
 * the beginning.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	uint8_t			etype;		/* entry type */
	uint8_t			flags;		/* flags */
	uint8_t			advice;		/* madvise advice */
	uint8_t			unused;		/* unused */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
};

/* flags */
#define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define	UVM_MAP_STATIC		0x04		/* special static entries */
#define	UVM_MAP_NOMERGE		0x08		/* this entry is not mergable */

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	LOCKING PROTOCOL NOTES:
 *	-----------------------
 *
 *	VM map locking is a little complicated.  There are both shared
 *	and exclusive locks on maps.  However, it is sometimes required
 *	to downgrade an exclusive lock to a shared lock, and upgrade to
 *	an exclusive lock again (to perform error recovery).  However,
 *	another thread *must not* queue itself to receive an exclusive
 *	lock before we upgrade back to exclusive, otherwise the
 *	error recovery becomes extremely difficult, if not impossible.
 *
 *	In order to prevent this scenario, we introduce the notion of
 *	a `busy' map.  A `busy' map is read-locked, but other threads
 *	attempting to write-lock wait for this flag to clear before
 *	entering the lock manager.  A map may only be marked busy
 *	when the map is write-locked (and then the map must be downgraded
 *	to read-locked), and may only be marked unbusy by the thread
 *	which marked it busy (holding *either* a read-lock or a
 *	write-lock, the latter being gained by an upgrade).
 *
 *	Access to the map `flags' member is controlled by the `flags_lock'
 *	simple lock.  Note that some flags are static (set once at map
 *	creation time, and never changed), and thus require no locking
 *	to check those flags.  All flags which are r/w must be set or
 *	cleared while the `flags_lock' is asserted.  Additional locking
 *	requirements are:
 *
 *		VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *					map is write-locked.  may be tested
 *					without asserting `flags_lock'.
 *
 *		VM_MAP_DYING		r/o; set when a vmspace is being
 *					destroyed to indicate that updates
 *					to the pmap can be skipped.
 *
 *		VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *					created if the unspecified map
 *					allocations are to be arranged in
 *					a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		misc_lock;	/* Lock for cv, busy */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	volatile int		ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};
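
/*
 * Example (illustrative sketch only): the entries of a map are kept
 * on a doubly-linked list threaded through "header", which acts as a
 * sentinel, so all entries can be visited in address order like this
 * (the map must be locked, at least for reading, by the caller):
 *
 *	struct vm_map_entry *entry;
 *
 *	for (entry = map->header.next; entry != &map->header;
 *	     entry = entry->next) {
 *		(process [entry->start, entry->end) here)
 *	}
 */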

#if defined(_KERNEL)

#include <sys/callback.h>

#endif /* defined(_KERNEL) */

#define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
#define	VM_MAP_WANTVA		0x100		/* rw: want va */

#define VM_MAP_BITS	"\177\020\
b\0PAGEABLE\0\
b\2WIREFUTURE\0\
b\5DYING\0\
b\6TOPDOWN\0\
b\10WANTVA\0"
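
/*
 * VM_MAP_BITS describes the flag bits above in the bitmask format
 * consumed by snprintb(9), e.g. (illustrative sketch only):
 *
 *	char buf[64];
 *
 *	snprintb(buf, sizeof(buf), VM_MAP_BITS, map->flags);
 */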

#ifdef _KERNEL
struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
void		uvm_map_init_caches(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *,int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, int);

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);
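
/*
 * Example (illustrative sketch only): uvm_map_prepare() and
 * uvm_map_enter() split the work of entering a mapping into two
 * phases; prepare validates the request and fills in a struct
 * uvm_map_args, and enter links a new entry into the map.  A rough
 * outline, assuming "new_entry" has already been allocated by the
 * caller (the map entry allocator is not exported by this header):
 *
 *	struct uvm_map_args args;
 *	int error;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset, 0,
 *	    flags, &args);
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, new_entry);
 */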

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

/*
 * VM map locking operations.
 */

bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);
void		vm_map_lock_read(struct vm_map *);
void		vm_map_unlock_read(struct vm_map *);
void		vm_map_busy(struct vm_map *);
bool		vm_map_locked_p(struct vm_map *);

void		uvm_map_lock_entry(struct vm_map_entry *, krw_t);
void		uvm_map_unlock_entry(struct vm_map_entry *);
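
/*
 * Example (illustrative sketch only) of the `busy' protocol described
 * above, using only the functions declared here: the map is marked
 * busy while write-locked, the lock is then dropped for work that may
 * sleep, and finally the busy state is cleared under the lock so that
 * waiting writers can proceed.  Real callers may use a different
 * sequence.
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_unlock(map);
 *
 *	(lengthy or sleepable work; readers may still use the map,
 *	 while new writers wait for the busy state to clear)
 *
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 */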

#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */