/*	$NetBSD: uvm_fault_i.h,v 1.33 2020/02/23 15:46:43 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_

/*
 * uvm_fault_i.h: fault inline functions
 */
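/*
 * uvmfault_update_stats: update fault-related pmap statistics for the
 * faulting map (defined in uvm_fault.c; the call below is compiled out
 * when the platform defines __HAVE_NO_PMAP_STATS).
 */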
void uvmfault_update_stats(struct uvm_faultinfo *);

/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(struct uvm_faultinfo *ufi, bool write_locked)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return;
	}

#ifndef __HAVE_NO_PMAP_STATS
	uvmfault_update_stats(ufi);
#endif
	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}
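
/*
 * Note that write_locked must match how the maps were locked: pass
 * true when uvmfault_lookup() was called with write_lock set, false
 * otherwise (uvmfault_unlockall() below always passes false).
 */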

/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct uvm_object *uobj)
{

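	/* unlock in the reverse of the usual lock acquisition order */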
	if (uobj)
		rw_exit(uobj->vmobjlock);
	if (amap)
		amap_unlock(amap);
	uvmfault_unlockmaps(ufi, false);
}

/*
 * uvmfault_lookup: look up a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will look up the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is true, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

static __inline bool
uvmfault_lookup(struct uvm_faultinfo *ufi, bool write_lock)
{
	struct vm_map *tmpmap;

	/*
	 * init ufi values for lookup.
	 */

	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.  note that there can
	 * only be two levels so we won't loop very long.
	 */

	for (;;) {
		/*
		 * lock map
		 */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/*
		 * lookup
		 */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
		    &ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return false;
		}

		/*
		 * reduce size if necessary
		 */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?  replace map with the submap and look up again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			if (write_lock) {
				vm_map_unlock(ufi->map);
			} else {
				vm_map_unlock_read(ufi->map);
			}
			ufi->map = tmpmap;
			continue;
		}

		/*
		 * got it!
		 */

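		/*
		 * save the map's version (timestamp) so that
		 * uvmfault_relock() can detect whether the map
		 * changed while it was unlocked.
		 */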
		ufi->mapv = ufi->map->timestamp;
		return true;

	}	/* for loop */

	/*NOTREACHED*/
}
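
/*
 * Example (a minimal sketch of the calling pattern, with error
 * handling elided; "map" and "vaddr" stand in for the caller's
 * actual map and faulting address):
 *
 *	struct uvm_faultinfo ufi;
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *	if (uvmfault_lookup(&ufi, false) == false)
 *		return EFAULT;
 *	... ufi.map, ufi.entry and ufi.size are now valid ...
 *	uvmfault_unlockmaps(&ufi, false);
 */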

/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => on success (true), the maps will be locked after the call.
 */

static __inline bool
uvmfault_relock(struct uvm_faultinfo *ufi)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return true;
	}

	cpu_count(CPU_COUNT_FLTRELCK, 1);

	/*
	 * relock map.  fail if version mismatch (in which case nothing
	 * gets locked).
	 */

	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return false;
	}

	cpu_count(CPU_COUNT_FLTRELCKOK, 1);
	return true;
}
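
/*
 * Typical pattern (a sketch; callers differ in the details): unlock
 * everything before sleeping for I/O, then try to reacquire the same
 * version of the map:
 *
 *	uvmfault_unlockall(ufi, amap, uobj);
 *	... sleep / perform I/O ...
 *	if (uvmfault_relock(ufi) == false) {
 *		... the map changed while unlocked; restart the fault ...
 *	}
 */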

#endif /* _UVM_UVM_FAULT_I_H_ */