/*	$NetBSD: pmap.h,v 1.173.4.1 2023/10/14 06:52:17 martin Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#endif
#include <arm/cpufunc.h>
#include <arm/locore.h>
#include <uvm/uvm_object.h>
#include <uvm/pmap/pmap_pvt.h>
#endif

#ifdef ARM_MMU_EXTENDED
#define	PMAP_HWPAGEWALKER		1
#define	PMAP_TLB_MAX			1
#if PMAP_TLB_MAX > 1
#define	PMAP_TLB_NEED_SHOOTDOWN		1
#endif
#define	PMAP_TLB_FLUSH_ASID_ON_RESET	arm_has_tlbiasid_p
#define	PMAP_TLB_NUM_PIDS		256
#define	cpu_set_tlb_info(ci, ti)        ((void)((ci)->ci_tlb_info = (ti)))
#if PMAP_TLB_MAX > 1
#define	cpu_tlb_info(ci)		((ci)->ci_tlb_info)
#else
#define	cpu_tlb_info(ci)		(&pmap_tlb0_info)
#endif
#define	pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_tlb.h>

/*
 * If we have an EXTENDED MMU and the address space is split evenly between
 * user and kernel, we can use the TTBR0/TTBR1 to have separate L1 tables for
 * user and kernel address spaces.
 */
#if (KERNEL_BASE & 0x80000000) == 0
#error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
#endif
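
/*
 * For example (a sketch, assuming the usual even 2GB/2GB split): with
 * KERNEL_BASE at 0x80000000, the translation table base control can be
 * programmed so that VAs with bit 31 clear walk the per-process L1 table
 * named by TTBR0 while VAs with bit 31 set walk the shared kernel L1
 * table named by TTBR1; a context switch then only has to retarget
 * TTBR0 (and the ASID) rather than copy kernel L1 entries around.
 */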
#endif  /* ARM_MMU_EXTENDED */

/*
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
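
/*
 * As a concrete illustration (a sketch; numbers assume the usual 1MB L1
 * sections and a KERNEL_BASE of 0x80000000): the L1 table has 4096 slots,
 * one per 1MB of VA, and the slot for a given va is simply (va >> 20), as
 * computed by l1pte_index() below.  Slots 0x000-0x7ff then belong to the
 * process while slots 0x800-0xfff reference the shared kernel L2 tables.
 */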

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
#define	L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
#define	L2_SIZE		(1 << L2_LOG2)
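
/*
 * Worked out for the usual values (a sketch, assuming L1_S_SHIFT == 20 so
 * each bucket entry covers 1MB): L2_BUCKET_SIZE == 16 entries == 16MB per
 * l2_dtable, and L2_LOG2 == 32 - (20 + 4) == 8, so L2_SIZE == 256
 * l2_dtable slots are enough to cover the entire 4GB address space
 * (256 * 16MB == 4GB).
 */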

/*
 * tell MI code that the cache is virtually-indexed.
 * ARMv6 is physically-tagged but all others are virtually-tagged.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_CACHE_VIPT
#else
#define	PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

#ifndef ARM_MMU_EXTENDED
struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			uint8_t csu_cache_b[2];
			uint16_t csu_cache;
		} cs_cache_u;

		union {
			uint8_t csu_tlb_b[2];
			uint16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	uint32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
#endif /* !ARM_MMU_EXTENDED */

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

#define	DEVMAP_ALIGN(a)	((a) & ~L1_S_OFFSET)
#define	DEVMAP_SIZE(s)	roundup2((s), L1_S_SIZE)
#define	DEVMAP_ENTRY(va, pa, sz)			\
	{						\
		.pd_va = DEVMAP_ALIGN(va),		\
		.pd_pa = DEVMAP_ALIGN(pa),		\
		.pd_size = DEVMAP_SIZE(sz),		\
		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,	\
		.pd_cache = PTE_DEV			\
	}
#define	DEVMAP_ENTRY_END	{ 0 }
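
/*
 * Typical use (a sketch only; the MYBOARD_* names are hypothetical):
 * board startup code declares a statically initialised table terminated
 * by DEVMAP_ENTRY_END and hands it to the pmap before the VM system is
 * up, so the device registers are available at fixed VAs very early:
 *
 *	static const struct pmap_devmap myboard_devmap[] = {
 *		DEVMAP_ENTRY(MYBOARD_UART_VBASE, MYBOARD_UART_PBASE,
 *		    MYBOARD_UART_SIZE),
 *		DEVMAP_ENTRY_END
 *	};
 *
 *	pmap_devmap_register(myboard_devmap);
 *
 * The entries are aligned and rounded to 1MB sections by DEVMAP_ALIGN()
 * and DEVMAP_SIZE(), and pmap_devmap_bootstrap() (declared below) enters
 * them into the kernel L1 table.
 */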

/*
 * The pmap structure itself
 */
struct pmap {
	kmutex_t		pm_lock;
	u_int			pm_refs;
#ifndef ARM_HAS_VBAR
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
#endif
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
	bool			pm_remove_all;
#ifdef ARM_MMU_EXTENDED
	pd_entry_t		*pm_l1;
	paddr_t			pm_l1_pa;
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_onproc;
	kcpuset_t		*pm_active;
#if PMAP_TLB_MAX > 1
	u_int			pm_shootdown_pending;
#endif
#endif
	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
#else
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	uint8_t			pm_domain;
	bool			pm_activated;
#endif
};

struct pmap_kernel {
	struct pmap		kernel_pmap;
};

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
	uint8_t pv_cache;
	uint8_t pv_prot;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;
#if defined(EFI_RUNTIME)
extern pv_addr_t efirt_l1pt;
#endif

#ifdef ARM_MMU_EXTENDED
extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
#endif

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2
#define	PTE_DEV		3

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified  */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)

/*
 * Commonly referenced structures
 */
extern int		arm_poolpage_vmfreelist;

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))
u_int arm32_mmap_flags(paddr_t);
#define	ARM32_MMAP_WRITECOMBINE		0x40000000
#define	ARM32_MMAP_CACHEABLE		0x20000000
#define	ARM_MMAP_WRITECOMBINE		ARM32_MMAP_WRITECOMBINE
#define	ARM_MMAP_CACHEABLE		ARM32_MMAP_CACHEABLE
#define	pmap_mmap_flags(ppn)		arm32_mmap_flags(ppn)

#define	PMAP_PTE			0x10000000 /* kenter_pa */
#define	PMAP_DEV			0x20000000 /* kenter_pa */
#define	PMAP_DEV_SO			0x40000000 /* kenter_pa */
#define	PMAP_DEV_MASK			(PMAP_DEV | PMAP_DEV_SO)

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
bool	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

#ifdef ARM_MMU_EXTENDED
int	pmap_maxproc_set(int);
struct pmap *
	pmap_efirt(void);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

struct pmap *
	pmap_efirt(void);
void	pmap_activate_efirt(void);
void	pmap_deactivate_efirt(void);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_prefetchabt_fixup(void *);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
bool	pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);

void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_unmap_chunk(vaddr_t, vaddr_t, vsize_t);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * For the pmap, this is a more useful way to map a direct mapped page.
 * It returns either the direct-mapped VA or the VA supplied if it can't
 * be direct mapped.
 */
vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
#endif

/*
 * used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
/*
 * Ending VA of direct mapped memory (usually KERNEL_VM_BASE).
 */
extern vaddr_t pmap_directlimit;
#endif

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	KASSERT(trunc_page(va) == va);

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}
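
/*
 * Example (a sketch): code that already holds a kernel virtual address
 * and needs the backing physical address, e.g. to fill in a descriptor,
 * can use
 *
 *	paddr_t pa = vtophys((vaddr_t)kva);
 *
 * bearing in mind that a return value of 0 currently means "no mapping".
 */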

/*
 * The new pmap ensures that page tables are always mapped Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is any chance that PTE syncs may be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * Perform compile time evaluation of PMAP_NEEDS_PTE_SYNC when only a
 * single MMU type is selected.
 *
 * StrongARM SA-1 caches do not have a write-through mode, so on these
 * we need to do PTE syncs.  V6 MMUs need PTE syncs as well.  MEMC,
 * GENERIC and XSCALE MMUs do not need PTE syncs.
 *
 * Use run time evaluation for all other cases.
 */
#if (ARM_NMMUS == 1)
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
#define	PMAP_INCLUDE_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	1
#elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

static inline void
pmap_ptesync(pt_entry_t *ptep, size_t cnt)
{
	if (PMAP_NEEDS_PTE_SYNC) {
		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
#ifdef SHEEVA_L2_CACHE
		cpu_sdcache_wb_range((vaddr_t)ptep, -1,
		    cnt * sizeof(pt_entry_t));
#endif
	}
	dsb(sy);
}

#define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
#define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
#define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
#define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
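
/*
 * The usual pattern (a sketch) is to store the new entry and then sync it
 * before the hardware can be expected to see it, e.g.
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 *
 * On MMUs whose table walker sees the cache (PMAP_NEEDS_PTE_SYNC == 0)
 * the write-back is skipped and only the dsb() barrier remains.
 */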

#define	l1pte_valid_p(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
				&& ((pde) & L1_S_V6_SUPER) != 0)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
#define	l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
#define	l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)

static inline void
l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
}

static inline void
l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
	if (l1pte_page_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
		for (int k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
			pde += L2_T_SIZE;
			pdep[k] = pde;
		}
	} else if (l1pte_supersection_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
		for (int k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
			pdep[k] = pde;
		}
	}
}

#define	l2pte_index(v)		((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
#define	l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
#define	l2pte_minidata_p(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

static inline void
l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
{
	if (l1pte_lpage_p(pte)) {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (L2_L_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		for (int k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
			*ptep++ = pte;
		}
	} else {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		for (int k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
			KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
			*ptep++ = pte;
			pte += L2_S_SIZE;
			if (opte)
				opte += L2_S_SIZE;
		}
	}
}

static inline void
l2pte_reset(pt_entry_t *ptep)
{
	KASSERTMSG((((uintptr_t)ptep / sizeof(*ptep)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
	*ptep = 0;
	for (int k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
		ptep[k] = 0;
	}
}
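
/*
 * A new PTE value handed to l2pte_set() is normally assembled from the
 * physical address plus the *_PROTO, *_PROT and cache-mode bits defined
 * later in this file, along the lines of (a sketch only):
 *
 *	const pt_entry_t npte = L2_S_PROTO | pa
 *	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE)
 *	    | pte_l2_s_cache_mode;
 *	l2pte_set(ptep, npte, 0);
 *	PTE_SYNC(ptep);
 *
 * where the old-PTE argument of 0 asserts that the slot was previously
 * invalid.
 */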

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid_p(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

static inline uint32_t
pte_value(pt_entry_t pte)
{
	return pte;
}

static inline bool
pte_valid_p(pt_entry_t pte)
{

	return l2pte_valid_p(pte);
}


/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
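
/*
 * For instance (a sketch, assuming a 16KB L1 table and a KERNEL_BASE of
 * 0x80000000): the kernel owns the upper 2048 of the 4096 entries, so
 * KERNEL_PD_SIZE works out to 16384 - 2048 * 4 == 8192 bytes.
 */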

void	bzero_page(vaddr_t);
void	bcopy_page(vaddr_t, vaddr_t);

#ifdef FPU_VFP
void	bzero_page_vfp(vaddr_t);
void	bcopy_page_vfp(vaddr_t, vaddr_t);
#endif

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11mpcore(void);
#endif
#if ARM_MMU_V6 == 1
void	pmap_pte_init_armv6(void);
#endif /* ARM_MMU_V6 */
#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_nocache_mode;
extern pt_entry_t		pte_l2_l_nocache_mode;
extern pt_entry_t		pte_l2_s_nocache_mode;

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mode;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_wc_mode;
extern pt_entry_t		pte_l2_l_wc_mode;
extern pt_entry_t		pte_l2_s_wc_mode;

extern pt_entry_t		pte_l1_s_cache_mask;
extern pt_entry_t		pte_l2_l_cache_mask;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_ss_proto;
extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

/*
 * Global variables in cpufunc_asm_xscale.S supporting the Xscale
 * cache clean/purge functions.
 */
extern vaddr_t xscale_minidata_clean_addr;
extern vsize_t xscale_minidata_clean_size;
extern vaddr_t xscale_cache_clean_addr;
extern vsize_t xscale_cache_clean_size;

#endif /* !_LOCORE */

/*****************************************************************************/

#define	KERNEL_PID		0	/* The kernel uses ASID 0 */

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */

#ifdef ARM_MMU_EXTENDED
#define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
#define	DOMAIN_DEFAULT		((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | (DOMAIN_CLIENT << (PMAP_DOMAIN_USER*2)))
#else
#define	DOMAIN_DEFAULT		((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)))
#endif

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
#define	L1_S_CACHE_MASK_armv6n	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
#define	L2_L_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
#ifdef	ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
#else
#define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
#endif
#define	L2_S_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)


#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv6	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_SS_PROTO_generic	0
#define	L1_SS_PROTO_xscale	0
#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv6	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#ifdef	ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_PROTO_armv6c	(L2_TYPE_XS)    /* XP=0, extended small page */
#else
#define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv6n	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv7	(L2_TYPE_S)
#endif

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_SS_PROTO		pte_l1_ss_proto
#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6N != 0
#define	L1_S_PROT_U		L1_S_PROT_U_armv6
#define	L1_S_PROT_W		L1_S_PROT_W_armv6
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6

#define	L2_S_PROT_U		L2_S_PROT_U_armv6n
#define	L2_S_PROT_W		L2_S_PROT_W_armv6n
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n

#define	L2_L_PROT_U		L2_L_PROT_U_armv6n
#define	L2_L_PROT_W		L2_L_PROT_W_armv6n
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6n
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6n
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_armv6
#define	L1_C_PROTO		L1_C_PROTO_armv6
#define	L2_S_PROTO		L2_S_PROTO_armv6n

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6C != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_SS_PROTO		L1_SS_PROTO_xscale
#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv7
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * Macros to set and query the write permission on page descriptors.
 */
#define	l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define	l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)

#define	l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define	l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define	l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */

#define	L1_S_PROT(ku, pr)	(					   \
	(((ku) == PTE_USER) ? 						   \
	    L1_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)	   \
	: 								   \
	    (((L1_S_PROT_RO && 						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		    L1_S_PROT_RO : L1_S_PROT_W)))			   \
    )

#define	L2_L_PROT(ku, pr)	(					   \
	(((ku) == PTE_USER) ?						   \
	    L2_L_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)	   \
	:								   \
	    (((L2_L_PROT_RO && 						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		    L2_L_PROT_RO : L2_L_PROT_W)))			   \
    )

#define	L2_S_PROT(ku, pr)	(					   \
	(((ku) == PTE_USER) ?						   \
	    L2_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)	   \
	:								   \
	    (((L2_S_PROT_RO &&						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		    L2_S_PROT_RO : L2_S_PROT_W)))			   \
    )
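
/*
 * For example, L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE) folds to
 * L2_S_PROT_U | L2_S_PROT_W, while L2_S_PROT(PTE_KERNEL, VM_PROT_READ)
 * folds to L2_S_PROT_RO on MMU classes with a real read-only bit and to
 * L2_S_PROT_W on those where L2_S_PROT_RO is 0.
 */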

/*
 * Macros to test if a mapping is mappable with an L1 SuperSection,
 * L1 Section, or an L2 Large Page mapping.
 */
#define	L1_SS_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)

#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

#define	PMAP_MAPSIZE1	L2_L_SIZE
#define	PMAP_MAPSIZE2	L1_S_SIZE
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_MAPSIZE3	L1_SS_SIZE
#endif
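
/*
 * For instance, a 4MB physically contiguous region whose VA and PA are
 * both 1MB aligned can be entered as four L1 sections (L1_S_MAPPABLE_P()
 * is true for each 1MB chunk), but not as a supersection, which requires
 * 16MB of size and 16MB alignment before L1_SS_MAPPABLE_P() is true.
 */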

#ifndef _LOCORE
/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
extern paddr_t physical_start, physical_end;
#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *arm_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
#endif
#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
vaddr_t	pmap_map_poolpage(paddr_t);
paddr_t	pmap_unmap_poolpage(vaddr_t);
#define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
#endif

#define	__HAVE_PMAP_PV_TRACK	1

void pmap_pv_protect(paddr_t, vm_prot_t);

struct pmap_page {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
};

/*
 * pmap-specific data store in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	struct pmap_page pp;
#define	pvh_list	pp.pvh_list
#define	pvh_attrs	pp.pvh_attrs
#define	uro_mappings	pp.uro_mappings
#define	urw_mappings	pp.urw_mappings
#define	kro_mappings	pp.k_u.s_mappings[0]
#define	krw_mappings	pp.k_u.s_mappings[1]
#define	k_mappings	pp.k_u.i_mappings
};

#define	PMAP_PAGE_TO_MD(ppage) container_of((ppage), struct vm_page_md, pp)

/*
 * Set the default color of each page.
 */
#if ARM_MMU_V6 > 0
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = VM_PAGE_TO_PHYS(pg) & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

#ifndef	__BSD_PTENTRY_T__
#define	__BSD_PTENTRY_T__
typedef uint32_t pt_entry_t;
#define	PRIxPTE		PRIx32
#endif

#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */