/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $
 */
#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (2GB each) to use for the kernel.  256 pages == 512 Gigabytes.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
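
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * assuming the usual amd64 shift values from <machine/param.h>
 * (PML4SHIFT 39, PDPSHIFT 30, PDRSHIFT 21, PAGE_SHIFT 12), KVADDR()
 * forces bit 47 sign extension so kernel addresses land in the canonical
 * upper half, while UVADDR() stays in the lower half:
 *
 *	KVADDR(511, 0, 0, 0) == 0xFFFFFF8000000000   (last PML4 slot)
 *	UVADDR(1, 0, 0, 0)   == 0x0000008000000000   (second user PML4 slot)
 */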
/*
 * NOTE: We no longer hardwire NKPT, it is calculated in create_pagetables()
 */
#define NKPML4E		1		/* number of kernel PML4 slots */
/* NKPDPE defined in vmparam.h */
/*
 * NUPDPs	512 (256 user)		number of PDPs in user page table
 * NUPDs	512 * 512		number of PDs in user page table
 * NUPTs	512 * 512 * 512		number of PTs in user page table
 * NUPTEs	512 * 512 * 512 * 512	number of PTEs in user page table
 *
 * NUPDP_USER	number of PDPs reserved for userland
 * NUPTE_USER	number of PTEs reserved for userland (big number)
 */
#define NUPDP_USER	(NPML4EPG/2)
#define NUPDP_TOTAL	(NPML4EPG)
#define NUPD_TOTAL	(NPDPEPG * NUPDP_TOTAL)
#define NUPT_TOTAL	(NPDEPG * NUPD_TOTAL)
#define NUPTE_TOTAL	((vm_pindex_t)NPTEPG * NUPT_TOTAL)
#define NUPTE_USER	((vm_pindex_t)NPTEPG * NPDEPG * NPDPEPG * NUPDP_USER)
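
/*
 * Rough sizing (editor's illustration, assuming 512 entries per page
 * table page): NUPDP_USER covers 256 PML4 slots, so NUPTE_USER works out
 * to 256 * 512 * 512 * 512 PTEs, i.e. enough 4KB PTEs to describe the
 * entire 128TB lower (user) half of the canonical address space.
 */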
/*
 * Number of 512G dmap PML4 slots (max ~254 or so but don't go over 64,
 * which gives us 32TB of ram).  Because we cache free, empty pmaps the
 * initialization overhead is minimal.
 *
 * It should be possible to bump this up to 255 (but not 256), which would
 * be able to address a maximum of ~127TB of physical ram.
 */
/*
 * The *PML4I values control the layout of virtual memory.  Each PML4
 * entry represents 512G.
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define	DMPML4I		(KPML4I-NDMPML4E) /* Next 512GBxN down for dmap */
/*
 * The location of KERNBASE in the last PD of the kernel's KVM (KPML4I)
 * space.  Each PD represents 1GB.  The kernel must be placed here
 * for the compile/link options to work properly so absolute 32-bit
 * addressing can be used to access stuff.
 */
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
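
/*
 * Worked example (editor's illustration): with KPML4I = 511 and
 * KPDPI = 510, KVADDR(KPML4I, KPDPI, 0, 0) sign-extends to
 * 0xFFFFFFFF80000000, i.e. -2GB, so the kernel text remains reachable
 * with sign-extended 32-bit displacements.
 */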
/*
 * Per-CPU data assumes ~64K x SMP_MAXCPU, say up to 256 cpus in the
 * future, or 16MB of space.  Each PDE represents 2MB so use NPDEPG-8
 * to place the per-CPU data.
 */
#define	MPPML4I		KPML4I
#define	MPPTDI		(NPDEPG-8)
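
/*
 * Sizing check (editor's illustration): the reserved window is
 * 8 PDEs * 2MB = 16MB, which matches 256 cpus * 64KB of per-CPU data.
 */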
/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START		0xa0000
#define ISA_HOLE_LENGTH		(0x100000-ISA_HOLE_START)
#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _MACHINE_TYPES_H_
#include <machine/types.h>
#endif
#ifndef _MACHINE_PARAM_H_
#include <machine/param.h>
#endif
/*
 * Address of current and alternate address space page table maps
 */
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))
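
/*
 * Illustrative use of the recursive mapping (editor's sketch, not an API
 * defined by this header): because PML4 slot PML4PML4I points back at the
 * PML4 itself, the PTE covering a virtual address va can be read through
 * the 512GB recursive window, which holds 2^36 PTEs:
 *
 *	vm_pindex_t idx = ((vm_offset_t)va >> PAGE_SHIFT) &
 *			  ((1UL << 36) - 1);
 *	pt_entry_t pte = PTmap[idx];
 *
 * PDmap/PDPmap/PML4map give the same view one, two, and three levels up.
 */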
extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */

#define	vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define	vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

#define	pde_store(pdep, pde)	pte_store((pdep), (pde))
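
/*
 * Usage sketch (editor's illustration only): a kernel page mapping could
 * be entered by combining vtophys_pte() with pte_store(), e.g.
 *
 *	pt_entry_t *ptep = ...;		(PTE slot covering the target va)
 *	pte_store(ptep, vtophys_pte(buf) | desired PG_* bits);
 *	(followed by a TLB invalidation for the target va)
 *
 * The supported kernel interfaces for this are pmap_kenter()/pmap_qenter();
 * this only shows how the primitives above fit together.
 */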
TAILQ_HEAD(md_page_pv_list, pv_entry);

/*
 * vm_page structures embed a list of related pv_entry's
 */
struct md_page {
	struct md_page_pv_list pv_list;
};

/*
 * vm_object's representing large mappings can contain embedded pmaps
 * to organize sharing at higher page table levels for PROT_READ and
 * PROT_READ|PROT_WRITE maps.
 */
struct md_object {
	struct pmap *pmap_rw;
	struct pmap *pmap_ro;
};
/*
 * Each machine-dependent implementation is expected to keep certain
 * statistics.  It may do this any way it chooses, but is expected to
 * return the statistics in the following structure.
 *
 * NOTE: We try to match the size of the pc32 pmap with the vkernel pmap
 *	 so the same utilities (like 'ps') can be used on both.
 */
struct pmap_statistics {
	long resident_count;	/* # of pages mapped (total) */
	long wired_count;	/* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;
struct pv_entry_rb_tree;
RB_PROTOTYPE2(pv_entry_rb_tree, pv_entry, pv_entry,
	      pv_entry_compare, vm_pindex_t);
/* Types of pmap (regular, EPT Intel, NPT AMD) */
#define	REGULAR_PMAP		0

/* Bit indexes in pmap_bits */
#define	PG_MANAGED_IDX		9
#define	PG_DEVICE_IDX		10

#define	PG_BITS_SIZE		12

#define PROTECTION_CODES_SIZE	8
#define PAT_INDEX_SIZE		8
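
/*
 * Illustrative note (editor's addition): the PG_*_IDX values index the
 * per-pmap pmap_bits[] array declared in struct pmap below, so code asks
 * the pmap which hardware bit encodes a given attribute instead of using
 * a fixed constant.  For example:
 *
 *	if (pte & pmap->pmap_bits[PG_MANAGED_IDX])
 *		... the page backing this pte is managed ...
 *
 * This indirection is what lets EPT/NPT pmap types supply different
 * bit layouts than a regular pmap.
 */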
struct pmap {
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	struct pv_entry		*pm_pmlpv;	/* PV entry for pml4 */
	TAILQ_ENTRY(pmap)	pm_pmnode;	/* list of pmaps */
	RB_HEAD(pv_entry_rb_tree, pv_entry) pm_pvroot;
	int			pm_count;	/* reference count */
	cpumask_t		pm_active;	/* active on cpus */
	int			pm_flags;
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct pv_entry		*pm_pvhint;	/* pv_entry lookup hint */
	int			pm_generation;	/* detect pvlist deletions */
	struct spinlock		pm_spin;
	struct lwkt_token	pm_token;
	uint64_t		pmap_bits[PG_BITS_SIZE];
	int			protection_codes[PROTECTION_CODES_SIZE];
	pt_entry_t		pmap_cache_bits[PAT_INDEX_SIZE];
	pt_entry_t		pmap_cache_mask;
	int (*copyinstr)(const void *, void *, size_t, size_t *);
	int (*copyin)(const void *, void *, size_t);
	int (*copyout)(const void *, void *, size_t);
	int (*fubyte)(const void *);
	int (*subyte)(void *, int);
	long (*fuword)(const void *);
	int (*suword)(void *, long);
	int (*suword32)(void *, int);
};
#define CPUMASK_LOCK		CPUMASK(SMP_MAXCPU)
#define CPUMASK_BIT		SMP_MAXCPU	/* for 1LLU << SMP_MAXCPU */

#define PMAP_FLAG_SIMPLE	0x00000001
#define PMAP_EMULATE_AD_BITS	0x00000002

#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count
typedef struct pmap	*pmap_t;

extern struct pmap	kernel_pmap;
/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_pindex_t	pv_pindex;	/* PTE, PT, PD, PDP, or PML4 */
	TAILQ_ENTRY(pv_entry)	pv_list;
	RB_ENTRY(pv_entry)	pv_entry;
	struct vm_page	*pv_m;		/* page being mapped */
	u_int		pv_hold;	/* interlock action */
} *pv_entry_t;
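
/*
 * Illustrative sketch (editor's addition, not an interface defined here):
 * the md_page embedded in each vm_page carries the head of that page's
 * pv list, so walking every mapping of a page looks roughly like
 *
 *	pv_entry_t pv;
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 *		... pv->pv_pmap / pv->pv_pindex identify one mapping ...
 *
 * assuming vm_page embeds its machine-dependent data as the usual "md"
 * member.
 */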
#define PV_HOLD_LOCKED		0x80000000U
#define PV_HOLD_WAITING		0x40000000U
#define PV_HOLD_UNUSED2000	0x20000000U
#define PV_HOLD_MASK		0x1FFFFFFFU

#define PV_FLAG_VMOBJECT	0x00000001U	/* shared pt in VM obj */
extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t dump_avail[];
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern char *ptvmmap;		/* poor name! */

typedef struct vm_page *vm_page_t;
typedef char vm_memattr_t;
void	pmap_release(struct pmap *pmap);
void	pmap_interlock_wait (struct vmspace *);
void	pmap_bootstrap (vm_paddr_t *);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev (vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	*pmap_mapdev_uncacheable(vm_paddr_t, vm_size_t);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev (vm_offset_t, vm_size_t);
struct vm_page *pmap_use_pt (pmap_t, vm_offset_t);
void	pmap_set_opt (void);
void	pmap_init_pat(void);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
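
/*
 * Usage sketch (editor's illustration): a driver mapping a device's
 * register window and tearing it down again might do
 *
 *	void *regs = pmap_mapdev(bar_paddr, bar_size);
 *	... access the device registers through regs ...
 *	pmap_unmapdev((vm_offset_t)regs, bar_size);
 *
 * where bar_paddr/bar_size are hypothetical values taken from the
 * device's PCI BAR.
 */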
static __inline int
pmap_emulate_ad_bits(pmap_t pmap)
{
	return pmap->pm_flags & PMAP_EMULATE_AD_BITS;
}

#endif	/* !_MACHINE_PMAP_H_ */