sys/platform/pc64/x86_64/pmap.c
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1994 David Greenman
5  * Copyright (c) 2003 Peter Wemm
6  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7  * Copyright (c) 2008, 2009 The DragonFly Project.
8  * Copyright (c) 2008, 2009 Jordan Gordeev.
9  * Copyright (c) 2011-2017 Matthew Dillon
10  * All rights reserved.
11  *
12  * This code is derived from software contributed to Berkeley by
13  * the Systems Programming Group of the University of Utah Computer
14  * Science Department and William Jolitz of UUNET Technologies Inc.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  */
44 /*
45  * Manage physical address maps for x86-64 systems.
46  *
47  * Some notes:
48  *      - The 'M'odified bit is only applicable to terminal PTEs.
49  *
50  *      - The 'U'ser access bit can be set for higher-level PTEs as
51  *        long as it isn't set for terminal PTEs for pages we don't
52  *        want user access to.
53  */
54
55 #if 0 /* JG */
56 #include "opt_pmap.h"
57 #endif
58 #include "opt_msgbuf.h"
59
60 #include <sys/param.h>
61 #include <sys/kernel.h>
62 #include <sys/proc.h>
63 #include <sys/msgbuf.h>
64 #include <sys/vmmeter.h>
65 #include <sys/mman.h>
66 #include <sys/systm.h>
67
68 #include <vm/vm.h>
69 #include <vm/vm_param.h>
70 #include <sys/sysctl.h>
71 #include <sys/lock.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_object.h>
76 #include <vm/vm_extern.h>
77 #include <vm/vm_pageout.h>
78 #include <vm/vm_pager.h>
79 #include <vm/vm_zone.h>
80
81 #include <sys/user.h>
82 #include <sys/thread2.h>
83 #include <sys/spinlock2.h>
84 #include <vm/vm_page2.h>
85
86 #include <machine/cputypes.h>
87 #include <machine/cpu.h>
88 #include <machine/md_var.h>
89 #include <machine/specialreg.h>
90 #include <machine/smp.h>
91 #include <machine_base/apic/apicreg.h>
92 #include <machine/globaldata.h>
93 #include <machine/pmap.h>
94 #include <machine/pmap_inval.h>
95 #include <machine/inttypes.h>
96
97 #include <ddb/ddb.h>
98
99 #define PMAP_KEEP_PDIRS
100 #ifndef PMAP_SHPGPERPROC
101 #define PMAP_SHPGPERPROC 2000
102 #endif
103
104 #if defined(DIAGNOSTIC)
105 #define PMAP_DIAGNOSTIC
106 #endif
107
108 #define MINPV 2048
109
110 /*
111  * pmap debugging will report who owns a pv lock when blocking.
112  */
113 #ifdef PMAP_DEBUG
114
115 #define PMAP_DEBUG_DECL         ,const char *func, int lineno
116 #define PMAP_DEBUG_ARGS         , __func__, __LINE__
117 #define PMAP_DEBUG_COPY         , func, lineno
118
119 #define pv_get(pmap, pindex, pmarkp)    _pv_get(pmap, pindex, pmarkp    \
120                                                         PMAP_DEBUG_ARGS)
121 #define pv_lock(pv)                     _pv_lock(pv                     \
122                                                         PMAP_DEBUG_ARGS)
123 #define pv_hold_try(pv)                 _pv_hold_try(pv                 \
124                                                         PMAP_DEBUG_ARGS)
125 #define pv_alloc(pmap, pindex, isnewp)  _pv_alloc(pmap, pindex, isnewp  \
126                                                         PMAP_DEBUG_ARGS)
127
128 #define pv_free(pv, pvp)                _pv_free(pv, pvp PMAP_DEBUG_ARGS)
129
130 #else
131
132 #define PMAP_DEBUG_DECL
133 #define PMAP_DEBUG_ARGS
134 #define PMAP_DEBUG_COPY
135
136 #define pv_get(pmap, pindex, pmarkp)            _pv_get(pmap, pindex, pmarkp)
137 #define pv_lock(pv)                     _pv_lock(pv)
138 #define pv_hold_try(pv)                 _pv_hold_try(pv)
139 #define pv_alloc(pmap, pindex, isnewp)  _pv_alloc(pmap, pindex, isnewp)
140 #define pv_free(pv, pvp)                _pv_free(pv, pvp)
141
142 #endif
143
144 /*
145  * Get PDEs and PTEs for user/kernel address space
146  */
147 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
148
149 #define pmap_pde_v(pmap, pte)           ((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
150 #define pmap_pte_w(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0)
151 #define pmap_pte_m(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0)
152 #define pmap_pte_u(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0)
153 #define pmap_pte_v(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
154
155 /*
156  * Given a map and a machine independent protection code,
157  * convert to an x86-64 protection code.
158  */
159 #define pte_prot(m, p)          \
160         (m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
161 static uint64_t protection_codes[PROTECTION_CODES_SIZE];
162
163 struct pmap kernel_pmap;
164 struct pmap iso_pmap;
165
166 MALLOC_DEFINE(M_OBJPMAP, "objpmap", "pmaps associated with VM objects");
167
168 vm_paddr_t avail_start;         /* PA of first available physical page */
169 vm_paddr_t avail_end;           /* PA of last available physical page */
170 vm_offset_t virtual2_start;     /* cutout free area prior to kernel start */
171 vm_offset_t virtual2_end;
172 vm_offset_t virtual_start;      /* VA of first avail page (after kernel bss) */
173 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
174 vm_offset_t KvaStart;           /* VA start of KVA space */
175 vm_offset_t KvaEnd;             /* VA end of KVA space (non-inclusive) */
176 vm_offset_t KvaSize;            /* max size of kernel virtual address space */
177 static boolean_t pmap_initialized = FALSE;      /* Has pmap_init completed? */
178 //static int pgeflag;           /* PG_G or-in */
179 uint64_t PatMsr;
180
181 static int ndmpdp;
182 static vm_paddr_t dmaplimit;
183 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
184
185 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE];        /* PAT -> PG_ bits */
186 /*static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];*/    /* PAT -> PG_ bits */
187
188 static uint64_t KPTbase;
189 static uint64_t KPTphys;
190 static uint64_t KPDphys;        /* phys addr of kernel level 2 */
191 static uint64_t KPDbase;        /* phys addr of kernel level 2 @ KERNBASE */
192 uint64_t KPDPphys;              /* phys addr of kernel level 3 */
193 uint64_t KPML4phys;             /* phys addr of kernel level 4 */
194
195 static uint64_t DMPDphys;       /* phys addr of direct mapped level 2 */
196 static uint64_t DMPDPphys;      /* phys addr of direct mapped level 3 */
197
198 /*
199  * Data for the pv entry allocation mechanism
200  */
201 static vm_zone_t pvzone;
202 static struct vm_zone pvzone_store;
203 static vm_pindex_t pv_entry_max=0, pv_entry_high_water=0;
204 static int pmap_pagedaemon_waken = 0;
205 static struct pv_entry *pvinit;
206
207 /*
208  * All those kernel PT submaps that BSD is so fond of
209  */
210 pt_entry_t *CMAP1 = NULL, *ptmmap;
211 caddr_t CADDR1 = NULL, ptvmmap = NULL;
212 static pt_entry_t *msgbufmap;
213 struct msgbuf *msgbufp=NULL;
214
215 /*
216  * PMAP default PG_* bits. Needed to be able to add
217  * EPT/NPT pagetable pmap_bits for the VMM module
218  */
219 uint64_t pmap_bits_default[] = {
220                 REGULAR_PMAP,                   /* TYPE_IDX             0 */
221                 X86_PG_V,                       /* PG_V_IDX             1 */
222                 X86_PG_RW,                      /* PG_RW_IDX            2 */
223                 X86_PG_U,                       /* PG_U_IDX             3 */
224                 X86_PG_A,                       /* PG_A_IDX             4 */
225                 X86_PG_M,                       /* PG_M_IDX             5 */
226                 X86_PG_PS,                      /* PG_PS_IDX            6 */
227                 X86_PG_G,                       /* PG_G_IDX             7 */
228                 X86_PG_AVAIL1,                  /* PG_AVAIL1_IDX        8 */
229                 X86_PG_AVAIL2,                  /* PG_AVAIL2_IDX        9 */
230                 X86_PG_AVAIL3,                  /* PG_AVAIL3_IDX        10 */
231                 X86_PG_NC_PWT | X86_PG_NC_PCD,  /* PG_N_IDX             11 */
232                 X86_PG_NX,                      /* PG_NX_IDX            12 */
233 };
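
/*
 * Illustrative sketch (not compiled): code in this file tests PTE bits
 * through pmap->pmap_bits[] rather than the raw X86_PG_* constants so a
 * VMM module can substitute EPT/NPT encodings.  The hypothetical helper
 * below shows the indirect form; a direct (pte & X86_PG_V) test would
 * only be correct for a REGULAR_PMAP.
 */
#if 0
static __inline int
example_pte_valid(pmap_t pmap, pt_entry_t pte)
{
	/* correct for any pmap type (regular, EPT, NPT) */
	return ((pte & pmap->pmap_bits[PG_V_IDX]) != 0);
}
#endif
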
234 /*
235  * Crashdump maps.
236  */
237 static pt_entry_t *pt_crashdumpmap;
238 static caddr_t crashdumpmap;
239
240 static int pmap_debug = 0;
241 SYSCTL_INT(_machdep, OID_AUTO, pmap_debug, CTLFLAG_RW,
242     &pmap_debug, 0, "Debug pmap's");
243 #ifdef PMAP_DEBUG2
244 static int pmap_enter_debug = 0;
245 SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW,
246     &pmap_enter_debug, 0, "Debug pmap_enter's");
247 #endif
248 static int pmap_yield_count = 64;
249 SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
250     &pmap_yield_count, 0, "Yield during init_pt/release");
251 static int pmap_mmu_optimize = 0;
252 SYSCTL_INT(_machdep, OID_AUTO, pmap_mmu_optimize, CTLFLAG_RW,
253     &pmap_mmu_optimize, 0, "Share page table pages when possible");
254 int pmap_fast_kernel_cpusync = 0;
255 SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW,
256     &pmap_fast_kernel_cpusync, 0, "Fast TLB synchronization for kernel pmap updates");
257 int pmap_dynamic_delete = 0;
258 SYSCTL_INT(_machdep, OID_AUTO, pmap_dynamic_delete, CTLFLAG_RW,
259     &pmap_dynamic_delete, 0, "Dynamically delete PT/PD/PDPs");
260 int pmap_lock_delay = 100;
261 SYSCTL_INT(_machdep, OID_AUTO, pmap_lock_delay, CTLFLAG_RW,
262     &pmap_lock_delay, 0, "Spin loops");
263 static int meltdown_mitigation = -1;
264 TUNABLE_INT("machdep.meltdown_mitigation", &meltdown_mitigation);
265 SYSCTL_INT(_machdep, OID_AUTO, meltdown_mitigation, CTLFLAG_RW,
266     &meltdown_mitigation, 0, "Userland pmap isolation");
267
268 static int pmap_nx_enable = -1;         /* -1 = auto */
269 /* needs manual TUNABLE in early probe, see below */
270 SYSCTL_INT(_machdep, OID_AUTO, pmap_nx_enable, CTLFLAG_RD,
271     &pmap_nx_enable, 0,
272     "no-execute support (0=disabled, 1=w/READ, 2=w/READ & WRITE)");
273
274 static int pmap_pv_debug = 50;
275 SYSCTL_INT(_machdep, OID_AUTO, pmap_pv_debug, CTLFLAG_RW,
276     &pmap_pv_debug, 0, "");
277
278 /* Standard user access functions */
279 extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
280     size_t *lencopied);
281 extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
282 extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
283 extern int std_fubyte (const uint8_t *base);
284 extern int std_subyte (uint8_t *base, uint8_t byte);
285 extern int32_t std_fuword32 (const uint32_t *base);
286 extern int64_t std_fuword64 (const uint64_t *base);
287 extern int std_suword64 (uint64_t *base, uint64_t word);
288 extern int std_suword32 (uint32_t *base, int word);
289 extern uint32_t std_swapu32 (volatile uint32_t *base, uint32_t v);
290 extern uint64_t std_swapu64 (volatile uint64_t *base, uint64_t v);
291 extern uint32_t std_fuwordadd32 (volatile uint32_t *base, uint32_t v);
292 extern uint64_t std_fuwordadd64 (volatile uint64_t *base, uint64_t v);
293
294 static void pv_hold(pv_entry_t pv);
295 static int _pv_hold_try(pv_entry_t pv
296                                 PMAP_DEBUG_DECL);
297 static void pv_drop(pv_entry_t pv);
298 static void _pv_lock(pv_entry_t pv
299                                 PMAP_DEBUG_DECL);
300 static void pv_unlock(pv_entry_t pv);
301 static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
302                                 PMAP_DEBUG_DECL);
303 static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp
304                                 PMAP_DEBUG_DECL);
305 static void _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL);
306 static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex,
307                                 vm_pindex_t **pmarkp, int *errorp);
308 static void pv_put(pv_entry_t pv);
309 static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
310 static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
311                       pv_entry_t *pvpp);
312 static pv_entry_t pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex,
313                       pv_entry_t *pvpp, vm_map_entry_t entry, vm_offset_t va);
314 static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
315                         pmap_inval_bulk_t *bulk, int destroy);
316 static vm_page_t pmap_remove_pv_page(pv_entry_t pv);
317 static int pmap_release_pv(pv_entry_t pv, pv_entry_t pvp,
318                         pmap_inval_bulk_t *bulk);
319
320 struct pmap_scan_info;
321 static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
322                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
323                       pv_entry_t pt_pv, int sharept,
324                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
325 static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
326                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
327                       pv_entry_t pt_pv, int sharept,
328                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
329
330 static void x86_64_protection_init (void);
331 static void create_pagetables(vm_paddr_t *firstaddr);
332 static void pmap_remove_all (vm_page_t m);
333 static boolean_t pmap_testbit (vm_page_t m, int bit);
334
335 static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
336 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
337
338 static void pmap_pinit_defaults(struct pmap *pmap);
339 static void pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark);
340 static void pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark);
341
342 static int
343 pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
344 {
345         if (pv1->pv_pindex < pv2->pv_pindex)
346                 return(-1);
347         if (pv1->pv_pindex > pv2->pv_pindex)
348                 return(1);
349         return(0);
350 }
351
352 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry,
353              pv_entry_compare, vm_pindex_t, pv_pindex);
354
355 static __inline
356 void
357 pmap_page_stats_adding(vm_page_t m)
358 {
359         globaldata_t gd = mycpu;
360
361         if (TAILQ_EMPTY(&m->md.pv_list)) {
362                 ++gd->gd_vmtotal.t_arm;
363         } else if (TAILQ_FIRST(&m->md.pv_list) ==
364                    TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
365                 ++gd->gd_vmtotal.t_armshr;
366                 ++gd->gd_vmtotal.t_avmshr;
367         } else {
368                 ++gd->gd_vmtotal.t_avmshr;
369         }
370 }
371
372 static __inline
373 void
374 pmap_page_stats_deleting(vm_page_t m)
375 {
376         globaldata_t gd = mycpu;
377
378         if (TAILQ_EMPTY(&m->md.pv_list)) {
379                 --gd->gd_vmtotal.t_arm;
380         } else if (TAILQ_FIRST(&m->md.pv_list) ==
381                    TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
382                 --gd->gd_vmtotal.t_armshr;
383                 --gd->gd_vmtotal.t_avmshr;
384         } else {
385                 --gd->gd_vmtotal.t_avmshr;
386         }
387 }
388
389 /*
390  * This is an inelegant crowbar to prevent heavily threaded programs
391  * from creating long-lived livelocks in the pmap code when pmap_mmu_optimize
392  * is enabled.  Without it a pmap-local page table page can wind up being
393  * constantly created and destroyed (without injury, but also without
394  * progress) as the optimization tries to switch to the object's shared page
395  * table page.
396  */
397 static __inline void
398 pmap_softwait(pmap_t pmap)
399 {
400         while (pmap->pm_softhold) {
401                 tsleep_interlock(&pmap->pm_softhold, 0);
402                 if (pmap->pm_softhold)
403                         tsleep(&pmap->pm_softhold, PINTERLOCKED, "mmopt", 0);
404         }
405 }
406
407 static __inline void
408 pmap_softhold(pmap_t pmap)
409 {
410         while (atomic_swap_int(&pmap->pm_softhold, 1) == 1) {
411                 tsleep_interlock(&pmap->pm_softhold, 0);
412                 if (atomic_swap_int(&pmap->pm_softhold, 1) == 1)
413                         tsleep(&pmap->pm_softhold, PINTERLOCKED, "mmopt", 0);
414         }
415 }
416
417 static __inline void
418 pmap_softdone(pmap_t pmap)
419 {
420         atomic_swap_int(&pmap->pm_softhold, 0);
421         wakeup(&pmap->pm_softhold);
422 }
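
/*
 * Illustrative usage sketch (not compiled): the optimizing thread brackets
 * its work with pmap_softhold()/pmap_softdone() while competing threads
 * call pmap_softwait() before retrying, which breaks the create/destroy
 * live-lock described above.  The soft-hold functions are the real ones
 * defined above; the wrapper functions are hypothetical.
 */
#if 0
static void
example_shared_pt_switch(pmap_t pmap)
{
	pmap_softhold(pmap);	/* acquire the soft hold (sleeps if busy) */
	/* ... switch to the vm_object's shared page table page ... */
	pmap_softdone(pmap);	/* release and wake up any waiters */
}

static void
example_competing_thread(pmap_t pmap)
{
	pmap_softwait(pmap);	/* wait for any soft hold to clear */
	/* ... then retry the local pmap operation ... */
}
#endif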
423
424 /*
425  * Move the kernel virtual free pointer to the next
426  * 2MB.  This is used to help improve performance
427  * by using a large (2MB) page for much of the kernel
428  * (.text, .data, .bss)
429  */
430 static
431 vm_offset_t
432 pmap_kmem_choose(vm_offset_t addr)
433 {
434         vm_offset_t newaddr = addr;
435
436         newaddr = roundup2(addr, NBPDR);
437         return newaddr;
438 }
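
/*
 * Informational example: with NBPDR == 2MB, an address such as
 * 0x...0321000 passed to pmap_kmem_choose() rounds up to 0x...0400000,
 * placing the KVA free pointer on the next 2MB superpage boundary.
 */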
439
440 /*
441  * Returns the pindex of a page table entry (representing a terminal page).
442  * There are NUPTE_TOTAL page table entries possible (a huge number)
443  *
444  * x86-64 has a 48-bit address space, where bit 47 is sign-extended out.
445  * We want to properly translate negative KVAs.
446  */
447 static __inline
448 vm_pindex_t
449 pmap_pte_pindex(vm_offset_t va)
450 {
451         return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1));
452 }
453
454 /*
455  * Returns the pindex of a page table.
456  */
457 static __inline
458 vm_pindex_t
459 pmap_pt_pindex(vm_offset_t va)
460 {
461         return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1)));
462 }
463
464 /*
465  * Returns the pindex of a page directory.
466  */
467 static __inline
468 vm_pindex_t
469 pmap_pd_pindex(vm_offset_t va)
470 {
471         return (NUPTE_TOTAL + NUPT_TOTAL +
472                 ((va >> PDPSHIFT) & (NUPD_TOTAL - 1)));
473 }
474
475 static __inline
476 vm_pindex_t
477 pmap_pdp_pindex(vm_offset_t va)
478 {
479         return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
480                 ((va >> PML4SHIFT) & (NUPDP_TOTAL - 1)));
481 }
482
483 static __inline
484 vm_pindex_t
485 pmap_pml4_pindex(void)
486 {
487         return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL);
488 }
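
/*
 * Informational sketch of the pv_entry pindex space carved out by the
 * functions above (symbolic constants only; exact values come from the
 * platform headers):
 *
 *	[0, NUPTE_TOTAL)			terminal PTEs
 *	[NUPTE_TOTAL, +NUPT_TOTAL)		page table (PT) pages
 *	[..., +NUPD_TOTAL)			page directory (PD) pages
 *	[..., +NUPDP_TOTAL)			PDP pages
 *	NUPTE_TOTAL+NUPT_TOTAL+NUPD_TOTAL+NUPDP_TOTAL	the single PML4 page
 */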
489
490 /*
491  * Return various clipped indexes for a given VA
492  *
493  * Returns the index of a pt in a page directory, representing a page
494  * table.
495  */
496 static __inline
497 vm_pindex_t
498 pmap_pt_index(vm_offset_t va)
499 {
500         return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
501 }
502
503 /*
504  * Returns the index of a pd in a page directory page, representing a page
505  * directory.
506  */
507 static __inline
508 vm_pindex_t
509 pmap_pd_index(vm_offset_t va)
510 {
511         return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
512 }
513
514 /*
515  * Returns the index of a pdp in the pml4 table, representing a page
516  * directory page.
517  */
518 static __inline
519 vm_pindex_t
520 pmap_pdp_index(vm_offset_t va)
521 {
522         return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
523 }
524
525 /*
526  * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
527  * the PT layer.  This will speed up core pmap operations considerably.
528  * We also cache the PTE layer to (hopefully) improve relative lookup
529  * speeds.
530  *
531  * NOTE: The pmap spinlock does not need to be held but the passed-in pv
532  *       must be in a known associated state (typically by being locked when
533  *       the pmap spinlock isn't held).  We allow the race for that case.
534  *
535  * NOTE: pm_pvhint* is only accessed (read) with the spin-lock held, using
536  *       cpu_ccfence() to prevent compiler optimizations from reloading the
537  *       field.
538  */
539 static __inline
540 void
541 pv_cache(pmap_t pmap, pv_entry_t pv, vm_pindex_t pindex)
542 {
543         if (pindex < pmap_pt_pindex(0)) {
544                 pmap->pm_pvhint_pte = pv;
545         } else if (pindex < pmap_pd_pindex(0)) {
546                 pmap->pm_pvhint_pt = pv;
547         }
548 }
549
550 /*
551  * Locate the requested pt_entry
552  */
553 static __inline
554 pv_entry_t
555 pv_entry_lookup(pmap_t pmap, vm_pindex_t pindex)
556 {
557         pv_entry_t pv;
558
559 #if 1
560         if (pindex < pmap_pt_pindex(0))
561                 pv = pmap->pm_pvhint_pte;
562         else if (pindex < pmap_pd_pindex(0))
563                 pv = pmap->pm_pvhint_pt;
564         else
565                 pv = NULL;
566         cpu_ccfence();
567         if (pv == NULL || pv->pv_pmap != pmap) {
568                 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
569                 if (pv)
570                         pv_cache(pmap, pv, pindex);
571         } else if (pv->pv_pindex != pindex) {
572                 pv = pv_entry_rb_tree_RB_LOOKUP_REL(&pmap->pm_pvroot,
573                                                     pindex, pv);
574                 if (pv)
575                         pv_cache(pmap, pv, pindex);
576         }
577 #else
578         pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
579 #endif
580         return pv;
581 }
582
583 /*
584  * pmap_pte_quick:
585  *
586  *      Super fast pmap_pte routine best used when scanning the pv lists.
587  *      This eliminates many coarse-grained invltlb calls.  Note that many of
588  *      the pv list scans are across different pmaps and it is very wasteful
589  *      to do an entire invltlb when checking a single mapping.
590  */
591 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
592
593 static
594 pt_entry_t *
595 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
596 {
597         return pmap_pte(pmap, va);
598 }
599
600 /*
601  * The placemarker hash must be broken up into four zones so lock
602  * ordering semantics continue to work (e.g. pte, pt, pd, then pdp).
603  *
604  * Placemarkers are used to 'lock' page table indices that do not have
605  * a pv_entry.  This allows the pmap to support managed and unmanaged
606  * pages and shared page tables.
607  */
608 #define PM_PLACE_BASE   (PM_PLACEMARKS >> 2)
609
610 static __inline
611 vm_pindex_t *
612 pmap_placemarker_hash(pmap_t pmap, vm_pindex_t pindex)
613 {
614         int hi;
615
616         if (pindex < pmap_pt_pindex(0))         /* zone 0 - PTE */
617                 hi = 0;
618         else if (pindex < pmap_pd_pindex(0))    /* zone 1 - PT */
619                 hi = PM_PLACE_BASE;
620         else if (pindex < pmap_pdp_pindex(0))   /* zone 2 - PD */
621                 hi = PM_PLACE_BASE << 1;
622         else                                    /* zone 3 - PDP (and PML4E) */
623                 hi = PM_PLACE_BASE | (PM_PLACE_BASE << 1);
624         hi += pindex & (PM_PLACE_BASE - 1);
625
626         return (&pmap->pm_placemarks[hi]);
627 }
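
/*
 * Illustration (assuming, purely for example, PM_PLACEMARKS == 64):
 * PM_PLACE_BASE would be 16 and the four lock-ordering zones would occupy
 * hash slots 0-15 (PTE), 16-31 (PT), 32-47 (PD) and 48-63 (PDP/PML4E),
 * with the low bits of the pindex selecting the slot within the zone.
 */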
628
629
630 /*
631  * Generic procedure to index a pte from a pt, pd, or pdp.
632  *
633  * NOTE: Normally passed pindex as pmap_xx_index().  pmap_xx_pindex() is NOT
634  *       a page table page index but is instead a PV lookup index.
635  */
636 static
637 void *
638 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex)
639 {
640         pt_entry_t *pte;
641
642         pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m));
643         return(&pte[pindex]);
644 }
645
646 /*
647  * Return pointer to PDP slot in the PML4
648  */
649 static __inline
650 pml4_entry_t *
651 pmap_pdp(pmap_t pmap, vm_offset_t va)
652 {
653         return (&pmap->pm_pml4[pmap_pdp_index(va)]);
654 }
655
656 /*
657  * Return pointer to PD slot in the PDP given a pointer to the PDP
658  */
659 static __inline
660 pdp_entry_t *
661 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va)
662 {
663         pdp_entry_t *pd;
664
665         pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME);
666         return (&pd[pmap_pd_index(va)]);
667 }
668
669 /*
670  * Return pointer to PD slot in the PDP.
671  */
672 static __inline
673 pdp_entry_t *
674 pmap_pd(pmap_t pmap, vm_offset_t va)
675 {
676         pml4_entry_t *pdp;
677
678         pdp = pmap_pdp(pmap, va);
679         if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0)
680                 return NULL;
681         return (pmap_pdp_to_pd(*pdp, va));
682 }
683
684 /*
685  * Return pointer to PT slot in the PD given a pointer to the PD
686  */
687 static __inline
688 pd_entry_t *
689 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va)
690 {
691         pd_entry_t *pt;
692
693         pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME);
694         return (&pt[pmap_pt_index(va)]);
695 }
696
697 /*
698  * Return pointer to PT slot in the PD
699  *
700  * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs,
701  *                   so we cannot lookup the PD via the PDP.  Instead we
702  *                   must look it up via the pmap.
703  */
704 static __inline
705 pd_entry_t *
706 pmap_pt(pmap_t pmap, vm_offset_t va)
707 {
708         pdp_entry_t *pd;
709         pv_entry_t pv;
710         vm_pindex_t pd_pindex;
711         vm_paddr_t phys;
712
713         if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
714                 pd_pindex = pmap_pd_pindex(va);
715                 spin_lock_shared(&pmap->pm_spin);
716                 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
717                 if (pv == NULL || pv->pv_m == NULL) {
718                         spin_unlock_shared(&pmap->pm_spin);
719                         return NULL;
720                 }
721                 phys = VM_PAGE_TO_PHYS(pv->pv_m);
722                 spin_unlock_shared(&pmap->pm_spin);
723                 return (pmap_pd_to_pt(phys, va));
724         } else {
725                 pd = pmap_pd(pmap, va);
726                 if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
727                          return NULL;
728                 return (pmap_pd_to_pt(*pd, va));
729         }
730 }
731
732 /*
733  * Return pointer to PTE slot in the PT given a pointer to the PT
734  */
735 static __inline
736 pt_entry_t *
737 pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
738 {
739         pt_entry_t *pte;
740
741         pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
742         return (&pte[pmap_pte_index(va)]);
743 }
744
745 /*
746  * Return pointer to PTE slot in the PT
747  */
748 static __inline
749 pt_entry_t *
750 pmap_pte(pmap_t pmap, vm_offset_t va)
751 {
752         pd_entry_t *pt;
753
754         pt = pmap_pt(pmap, va);
755         if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
756                  return NULL;
757         if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
758                 return ((pt_entry_t *)pt);
759         return (pmap_pt_to_pte(*pt, va));
760 }
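
/*
 * Illustrative sketch (not compiled): the helpers above compose into a
 * conventional 4-level walk.  This hypothetical function mirrors what
 * pmap_pd(), pmap_pt() and pmap_pte() do for a normal (non-SIMPLE) pmap,
 * without the 2MB PG_PS short-circuit.
 */
#if 0
static pt_entry_t *
example_walk(pmap_t pmap, vm_offset_t va)
{
	pml4_entry_t *pml4e = pmap_pdp(pmap, va);	/* PDP slot in PML4 */
	pdp_entry_t *pdpe;
	pd_entry_t *pde;

	if ((*pml4e & pmap->pmap_bits[PG_V_IDX]) == 0)
		return NULL;
	pdpe = pmap_pdp_to_pd(*pml4e, va);		/* PD slot in PDP */
	if ((*pdpe & pmap->pmap_bits[PG_V_IDX]) == 0)
		return NULL;
	pde = pmap_pd_to_pt(*pdpe, va);			/* PT slot in PD */
	if ((*pde & pmap->pmap_bits[PG_V_IDX]) == 0)
		return NULL;
	return (pmap_pt_to_pte(*pde, va));		/* PTE slot in PT */
}
#endif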
761
762 /*
763  * Return address of PT slot in PD (KVM only)
764  *
765  * Cannot be used for user page tables because it might interfere with
766  * the shared page-table-page optimization (pmap_mmu_optimize).
767  */
768 static __inline
769 pd_entry_t *
770 vtopt(vm_offset_t va)
771 {
772         uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
773                                   NPML4EPGSHIFT)) - 1);
774
775         return (PDmap + ((va >> PDRSHIFT) & mask));
776 }
777
778 /*
779  * KVM - return address of PTE slot in PT
780  */
781 static __inline
782 pt_entry_t *
783 vtopte(vm_offset_t va)
784 {
785         uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
786                                   NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
787
788         return (PTmap + ((va >> PAGE_SHIFT) & mask));
789 }
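
/*
 * Informational: PTmap (and PDmap used by vtopt() above) are virtual
 * windows provided by the self-referential PML4 entry installed in
 * create_pagetables(), which makes the kernel's own page table pages
 * visible at fixed kernel virtual addresses.
 */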
790
791 /*
792  * Returns the physical address translation from va for a user address.
793  * (vm_paddr_t)-1 is returned on failure.
794  */
795 vm_paddr_t
796 uservtophys(vm_offset_t va)
797 {
798         uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
799                                   NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
800         vm_paddr_t pa;
801         pt_entry_t pte;
802         pmap_t pmap;
803
804         pmap = vmspace_pmap(mycpu->gd_curthread->td_lwp->lwp_vmspace);
805         pa = (vm_paddr_t)-1;
806         if (va < VM_MAX_USER_ADDRESS) {
807                 pte = kreadmem64(PTmap + ((va >> PAGE_SHIFT) & mask));
808                 if (pte & pmap->pmap_bits[PG_V_IDX])
809                         pa = (pte & PG_FRAME) | (va & PAGE_MASK);
810         }
811         return pa;
812 }
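
/*
 * Usage sketch (not compiled, 'uva' is hypothetical): translate a user
 * virtual address of the current process; (vm_paddr_t)-1 means no valid
 * translation exists.
 */
#if 0
	vm_paddr_t pa = uservtophys(uva);
	if (pa != (vm_paddr_t)-1)
		kprintf("uva %016jx -> pa %016jx\n",
			(uintmax_t)uva, (uintmax_t)pa);
#endif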
813
814 static uint64_t
815 allocpages(vm_paddr_t *firstaddr, long n)
816 {
817         uint64_t ret;
818
819         ret = *firstaddr;
820         bzero((void *)ret, n * PAGE_SIZE);
821         *firstaddr += n * PAGE_SIZE;
822         return (ret);
823 }
824
825 static
826 void
827 create_pagetables(vm_paddr_t *firstaddr)
828 {
829         long i;         /* must be 64 bits */
830         long nkpt_base;
831         long nkpt_phys;
832         long nkpd_phys;
833         int j;
834
835         /*
836          * We are running (mostly) V=P at this point
837          *
838          * Calculate how many 1GB PD entries in our PDP pages are needed
839          * for the DMAP.  This is only allocated if the system does not
840          * support 1GB pages.  Otherwise ndmpdp is simply a count of
841          * the number of 1G terminal entries needed in our PDP pages.
842          *
843          * NOTE: Maxmem is in pages
844          */
845         ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
846         if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
847                 ndmpdp = 4;
848         KKASSERT(ndmpdp <= NDMPML4E * NPML4EPG);
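
	/*
	 * Worked example (informational): on a 16GB machine ptoa(Maxmem)
	 * is 16GB and each PDP entry spans 1GB, so ndmpdp computes to 16;
	 * a 2GB machine computes 2 and is clamped up to the 4GB minimum.
	 */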
849
850         /*
851          * Starting at KERNBASE - map all 2G worth of page table pages.
852          * KERNBASE is offset -2G from the end of kvm.  This will accommodate
853          * all KVM allocations above KERNBASE, including the SYSMAPs below.
854          *
855          * We do this by allocating 2*512 PT pages.  Each PT page can map
856          * 2MB, for 2GB total.
857          */
858         nkpt_base = (NPDPEPG - KPDPI) * NPTEPG; /* typically 2 x 512 */
859
860         /*
861          * Starting at the beginning of kvm (VM_MIN_KERNEL_ADDRESS),
862          * Calculate how many page table pages we need to preallocate
863          * for early vm_map allocations.
864          *
865          * A few extra won't hurt, they will get used up in the running
866          * system.
867          *
868          * vm_page array
869          * initial pventry's
870          */
871         nkpt_phys = (Maxmem * sizeof(struct vm_page) + NBPDR - 1) / NBPDR;
872         nkpt_phys += (Maxmem * sizeof(struct pv_entry) + NBPDR - 1) / NBPDR;
873         nkpt_phys += 128;       /* a few extra */
874
875         /*
876          * The highest value nkpd_phys can be set to is
877          * NKPDPE - (NPDPEPG - KPDPI) (i.e. NKPDPE - 2).
878          *
879          * Doing so would cause all PD pages to be pre-populated for
880          * a maximal KVM space (approximately 16*512 pages, or 32MB).
881          * We can save memory by not doing this.
882          */
883         nkpd_phys = (nkpt_phys + NPDPEPG - 1) / NPDPEPG;
884
885         /*
886          * Allocate pages
887          *
888          * Normally NKPML4E=1-16 (1-16 kernel PDP pages)
889          * Normally NKPDPE = NKPML4E*512-1 (511 min kernel PD pages)
890          *
891          * Only allocate enough PD pages
892          * NOTE: We allocate all kernel PD pages up-front, typically
893          *       ~511G of KVM, requiring 511 PD pages.
894          */
895         KPTbase = allocpages(firstaddr, nkpt_base);     /* KERNBASE to end */
896         KPTphys = allocpages(firstaddr, nkpt_phys);     /* KVA start */
897         KPML4phys = allocpages(firstaddr, 1);           /* recursive PML4 map */
898         KPDPphys = allocpages(firstaddr, NKPML4E);      /* kernel PDP pages */
899         KPDphys = allocpages(firstaddr, nkpd_phys);     /* kernel PD pages */
900
901         /*
902          * Alloc PD pages for the area starting at KERNBASE.
903          */
904         KPDbase = allocpages(firstaddr, NPDPEPG - KPDPI);
905
906         /*
907          * Stuff for our DMAP
908          */
909         DMPDPphys = allocpages(firstaddr, NDMPML4E);
910         if ((amd_feature & AMDID_PAGE1GB) == 0)
911                 DMPDphys = allocpages(firstaddr, ndmpdp);
912         dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
913
914         /*
915          * Fill in the underlying page table pages for the area around
916          * KERNBASE.  This remaps low physical memory to KERNBASE.
917          *
918          * Read-only from zero to physfree
919          * XXX not fully used, underneath 2M pages
920          */
921         for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
922                 ((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT;
923                 ((pt_entry_t *)KPTbase)[i] |=
924                     pmap_bits_default[PG_RW_IDX] |
925                     pmap_bits_default[PG_V_IDX] |
926                     pmap_bits_default[PG_G_IDX];
927         }
928
929         /*
930          * Now map the initial kernel page tables.  One block of page
931          * tables is placed at the beginning of kernel virtual memory,
932          * and another block is placed at KERNBASE to map the kernel binary,
933          * data, bss, and initial pre-allocations.
934          */
935         for (i = 0; i < nkpt_base; i++) {
936                 ((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT);
937                 ((pd_entry_t *)KPDbase)[i] |=
938                     pmap_bits_default[PG_RW_IDX] |
939                     pmap_bits_default[PG_V_IDX];
940         }
941         for (i = 0; i < nkpt_phys; i++) {
942                 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
943                 ((pd_entry_t *)KPDphys)[i] |=
944                     pmap_bits_default[PG_RW_IDX] |
945                     pmap_bits_default[PG_V_IDX];
946         }
947
948         /*
949          * Map from zero to end of allocations using 2M pages as an
950          * optimization.  This will bypass some of the KPTbase pages
951          * above in the KERNBASE area.
952          */
953         for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
954                 ((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT;
955                 ((pd_entry_t *)KPDbase)[i] |=
956                     pmap_bits_default[PG_RW_IDX] |
957                     pmap_bits_default[PG_V_IDX] |
958                     pmap_bits_default[PG_PS_IDX] |
959                     pmap_bits_default[PG_G_IDX];
960         }
961
962         /*
963          * Load PD addresses into the PDP pages for primary KVA space to
964          * cover existing page tables.  PD's for KERNBASE are handled in
965          * the next loop.
966          *
967          * The primary KVA space is expected to pre-populate its PDs; see NKPDPE in vmparam.h.
968          */
969         for (i = 0; i < nkpd_phys; i++) {
970                 ((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] =
971                                 KPDphys + (i << PAGE_SHIFT);
972                 ((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] |=
973                     pmap_bits_default[PG_RW_IDX] |
974                     pmap_bits_default[PG_V_IDX] |
975                     pmap_bits_default[PG_A_IDX];
976         }
977
978         /*
979          * Load PDs for KERNBASE to the end
980          */
981         i = (NKPML4E - 1) * NPDPEPG + KPDPI;
982         for (j = 0; j < NPDPEPG - KPDPI; ++j) {
983                 ((pdp_entry_t *)KPDPphys)[i + j] =
984                                 KPDbase + (j << PAGE_SHIFT);
985                 ((pdp_entry_t *)KPDPphys)[i + j] |=
986                     pmap_bits_default[PG_RW_IDX] |
987                     pmap_bits_default[PG_V_IDX] |
988                     pmap_bits_default[PG_A_IDX];
989         }
990
991         /*
992          * Now set up the direct map space using either 2MB or 1GB pages
993          * Preset PG_M and PG_A because demotion expects it.
994          *
995          * When filling in entries in the PD pages make sure any excess
996          * entries are set to zero as we allocated enough PD pages
997          */
998         if ((amd_feature & AMDID_PAGE1GB) == 0) {
999                 /*
1000                  * Use 2MB pages
1001                  */
1002                 for (i = 0; i < NPDEPG * ndmpdp; i++) {
1003                         ((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT;
1004                         ((pd_entry_t *)DMPDphys)[i] |=
1005                             pmap_bits_default[PG_RW_IDX] |
1006                             pmap_bits_default[PG_V_IDX] |
1007                             pmap_bits_default[PG_PS_IDX] |
1008                             pmap_bits_default[PG_G_IDX] |
1009                             pmap_bits_default[PG_M_IDX] |
1010                             pmap_bits_default[PG_A_IDX];
1011                 }
1012
1013                 /*
1014                  * And the direct map space's PDP
1015                  */
1016                 for (i = 0; i < ndmpdp; i++) {
1017                         ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
1018                                                         (i << PAGE_SHIFT);
1019                         ((pdp_entry_t *)DMPDPphys)[i] |=
1020                             pmap_bits_default[PG_RW_IDX] |
1021                             pmap_bits_default[PG_V_IDX];
1022                 }
1023         } else {
1024                 /*
1025                  * 1GB pages
1026                  */
1027                 for (i = 0; i < ndmpdp; i++) {
1028                         ((pdp_entry_t *)DMPDPphys)[i] =
1029                                                 (vm_paddr_t)i << PDPSHIFT;
1030                         ((pdp_entry_t *)DMPDPphys)[i] |=
1031                             pmap_bits_default[PG_RW_IDX] |
1032                             pmap_bits_default[PG_V_IDX] |
1033                             pmap_bits_default[PG_PS_IDX] |
1034                             pmap_bits_default[PG_G_IDX] |
1035                             pmap_bits_default[PG_M_IDX] |
1036                             pmap_bits_default[PG_A_IDX];
1037                 }
1038         }
1039
1040         /* And recursively map PML4 to itself in order to get PTmap */
1041         ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
1042         ((pdp_entry_t *)KPML4phys)[PML4PML4I] |=
1043             pmap_bits_default[PG_RW_IDX] |
1044             pmap_bits_default[PG_V_IDX] |
1045             pmap_bits_default[PG_A_IDX];
1046
1047         /*
1048          * Connect the Direct Map slots up to the PML4
1049          */
1050         for (j = 0; j < NDMPML4E; ++j) {
1051                 ((pdp_entry_t *)KPML4phys)[DMPML4I + j] =
1052                     (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
1053                     pmap_bits_default[PG_RW_IDX] |
1054                     pmap_bits_default[PG_V_IDX] |
1055                     pmap_bits_default[PG_A_IDX];
1056         }
1057
1058         /*
1059          * Connect the KVA slot up to the PML4
1060          */
1061         for (j = 0; j < NKPML4E; ++j) {
1062                 ((pdp_entry_t *)KPML4phys)[KPML4I + j] =
1063                     KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT);
1064                 ((pdp_entry_t *)KPML4phys)[KPML4I + j] |=
1065                     pmap_bits_default[PG_RW_IDX] |
1066                     pmap_bits_default[PG_V_IDX] |
1067                     pmap_bits_default[PG_A_IDX];
1068         }
1069         cpu_mfence();
1070         cpu_invltlb();
1071 }
1072
1073 /*
1074  *      Bootstrap the system enough to run with virtual memory.
1075  *
1076  *      On x86_64 this is called after mapping has already been enabled
1077  *      and just syncs the pmap module with what has already been done.
1078  *      [We can't call it easily with mapping off since the kernel is not
1079  *      mapped with PA == VA, hence we would have to relocate every address
1080  *      from the linked base (virtual) address "KERNBASE" to the actual
1081  *      (physical) address starting relative to 0]
1082  */
1083 void
1084 pmap_bootstrap(vm_paddr_t *firstaddr)
1085 {
1086         vm_offset_t va;
1087         pt_entry_t *pte;
1088         int i;
1089
1090         KvaStart = VM_MIN_KERNEL_ADDRESS;
1091         KvaEnd = VM_MAX_KERNEL_ADDRESS;
1092         KvaSize = KvaEnd - KvaStart;
1093
1094         avail_start = *firstaddr;
1095
1096         /*
1097          * Create an initial set of page tables to run the kernel in.
1098          */
1099         create_pagetables(firstaddr);
1100
1101         virtual2_start = KvaStart;
1102         virtual2_end = PTOV_OFFSET;
1103
1104         virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
1105         virtual_start = pmap_kmem_choose(virtual_start);
1106
1107         virtual_end = VM_MAX_KERNEL_ADDRESS;
1108
1109         /* XXX do %cr0 as well */
1110         load_cr4(rcr4() | CR4_PGE | CR4_PSE);
1111         load_cr3(KPML4phys);
1112
1113         /*
1114          * Initialize protection array.
1115          */
1116         x86_64_protection_init();
1117
1118         /*
1119          * The kernel's pmap is statically allocated so we don't have to use
1120          * pmap_create, which is unlikely to work correctly at this part of
1121          * the boot sequence (XXX and which no longer exists).
1122          */
1123         kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
1124         kernel_pmap.pm_count = 1;
1125         CPUMASK_ASSALLONES(kernel_pmap.pm_active);
1126         RB_INIT(&kernel_pmap.pm_pvroot);
1127         spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
1128         for (i = 0; i < PM_PLACEMARKS; ++i)
1129                 kernel_pmap.pm_placemarks[i] = PM_NOPLACEMARK;
1130
1131         /*
1132          * Reserve some special page table entries/VA space for temporary
1133          * mapping of pages.
1134          */
1135 #define SYSMAP(c, p, v, n)      \
1136         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
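
	/*
	 * Informational: each SYSMAP() use below reserves 'n' pages of KVA,
	 * assigning the starting virtual address to 'v' (cast to type 'c')
	 * and a pointer to the first backing kernel PTE to 'p', then
	 * advances the va/pte cursors.
	 */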
1137
1138         va = virtual_start;
1139         pte = vtopte(va);
1140
1141         /*
1142          * CMAP1/CMAP2 are used for zeroing and copying pages.
1143          */
1144         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
1145
1146         /*
1147          * Crashdump maps.
1148          */
1149         SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
1150
1151         /*
1152          * ptvmmap is used for reading arbitrary physical pages via
1153          * /dev/mem.
1154          */
1155         SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
1156
1157         /*
1158          * msgbufp is used to map the system message buffer.
1159          * XXX msgbufmap is not used.
1160          */
1161         SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
1162                atop(round_page(MSGBUF_SIZE)))
1163
1164         virtual_start = va;
1165         virtual_start = pmap_kmem_choose(virtual_start);
1166
1167         *CMAP1 = 0;
1168
1169         /*
1170          * PG_G is terribly broken on SMP because we IPI invltlb's in some
1171  * cases rather than invl1pg.  Actually, I don't even know why it
1172          * works under UP because self-referential page table mappings
1173          */
1174 //      pgeflag = 0;
1175
1176         cpu_invltlb();
1177
1178         /* Initialize the PAT MSR */
1179         pmap_init_pat();
1180         pmap_pinit_defaults(&kernel_pmap);
1181
1182         TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync",
1183                           &pmap_fast_kernel_cpusync);
1184
1185 }
1186
1187 /*
1188  * Setup the PAT MSR.
1189  */
1190 void
1191 pmap_init_pat(void)
1192 {
1193         uint64_t pat_msr;
1194         u_long cr0, cr4;
1195
1196         /*
1197          * Default values mapping PATi,PCD,PWT bits at system reset.
1198          * The default values effectively ignore the PATi bit by
1199          * repeating the encodings for 0-3 in 4-7, and map the PCD
1200          * and PWT bit combinations to the expected PAT types.
1201          */
1202         pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |        /* 000 */
1203                   PAT_VALUE(1, PAT_WRITE_THROUGH) |     /* 001 */
1204                   PAT_VALUE(2, PAT_UNCACHED) |          /* 010 */
1205                   PAT_VALUE(3, PAT_UNCACHEABLE) |       /* 011 */
1206                   PAT_VALUE(4, PAT_WRITE_BACK) |        /* 100 */
1207                   PAT_VALUE(5, PAT_WRITE_THROUGH) |     /* 101 */
1208                   PAT_VALUE(6, PAT_UNCACHED) |          /* 110 */
1209                   PAT_VALUE(7, PAT_UNCACHEABLE);        /* 111 */
1210         pat_pte_index[PAT_WRITE_BACK]   = 0;
1211         pat_pte_index[PAT_WRITE_THROUGH]= 0         | X86_PG_NC_PWT;
1212         pat_pte_index[PAT_UNCACHED]     = X86_PG_NC_PCD;
1213         pat_pte_index[PAT_UNCACHEABLE]  = X86_PG_NC_PCD | X86_PG_NC_PWT;
1214         pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE];
1215         pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE];
1216
1217         if (cpu_feature & CPUID_PAT) {
1218                 /*
1219                  * If we support the PAT then set-up entries for
1220                  * WRITE_PROTECTED and WRITE_COMBINING using bit patterns
1221                  * 5 and 6.
1222                  */
1223                 pat_msr = (pat_msr & ~PAT_MASK(5)) |
1224                           PAT_VALUE(5, PAT_WRITE_PROTECTED);
1225                 pat_msr = (pat_msr & ~PAT_MASK(6)) |
1226                           PAT_VALUE(6, PAT_WRITE_COMBINING);
1227                 pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | X86_PG_NC_PWT;
1228                 pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PCD;
1229
1230                 /*
1231                  * Then enable the PAT
1232                  */
1233
1234                 /* Disable PGE. */
1235                 cr4 = rcr4();
1236                 load_cr4(cr4 & ~CR4_PGE);
1237
1238                 /* Disable caches (CD = 1, NW = 0). */
1239                 cr0 = rcr0();
1240                 load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1241
1242                 /* Flushes caches and TLBs. */
1243                 wbinvd();
1244                 cpu_invltlb();
1245
1246                 /* Update PAT and index table. */
1247                 wrmsr(MSR_PAT, pat_msr);
1248
1249                 /* Flush caches and TLBs again. */
1250                 wbinvd();
1251                 cpu_invltlb();
1252
1253                 /* Restore caches and PGE. */
1254                 load_cr0(cr0);
1255                 load_cr4(cr4);
1256                 PatMsr = pat_msr;
1257         }
1258 }
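
/*
 * Informational example: callers construct PTEs for a given caching mode
 * by OR'ing in pat_pte_index[mode].  With the PAT programmed above, a
 * write-combining terminal PTE carries (X86_PG_PTE_PAT | X86_PG_NC_PCD),
 * i.e. PAT index 6, which was set to PAT_WRITE_COMBINING.
 */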
1259
1260 /*
1261  * Set 4mb pdir for mp startup
1262  */
1263 void
1264 pmap_set_opt(void)
1265 {
1266         if (cpu_feature & CPUID_PSE) {
1267                 load_cr4(rcr4() | CR4_PSE);
1268                 if (mycpu->gd_cpuid == 0)       /* only on BSP */
1269                         cpu_invltlb();
1270         }
1271 }
1272
1273 /*
1274  * Early initialization of the pmap module.
1275  *
1276  * Called by vm_init, to initialize any structures that the pmap
1277  * system needs to map virtual memory.  pmap_init has been enhanced to
1278  * support in a fairly consistant way, discontiguous physical memory.
1279  * support discontiguous physical memory in a fairly consistent way.
1280 void
1281 pmap_init(void)
1282 {
1283         vm_pindex_t initial_pvs;
1284         vm_pindex_t i;
1285
1286         /*
1287          * Allocate memory for random pmap data structures.  Includes the
1288          * pv_head_table.
1289          */
1290         for (i = 0; i < vm_page_array_size; i++) {
1291                 vm_page_t m;
1292
1293                 m = &vm_page_array[i];
1294                 TAILQ_INIT(&m->md.pv_list);
1295         }
1296
1297         /*
1298          * init the pv free list
1299          */
1300         initial_pvs = vm_page_array_size;
1301         if (initial_pvs < MINPV)
1302                 initial_pvs = MINPV;
1303         pvzone = &pvzone_store;
1304         pvinit = (void *)kmem_alloc(&kernel_map,
1305                                     initial_pvs * sizeof (struct pv_entry),
1306                                     VM_SUBSYS_PVENTRY);
1307         zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
1308                   pvinit, initial_pvs);
1309
1310         /*
1311          * Now it is safe to enable pv_table recording.
1312          */
1313         pmap_initialized = TRUE;
1314 }
1315
1316 /*
1317  * Initialize the address space (zone) for the pv_entries.  Set a
1318  * high water mark so that the system can recover from excessive
1319  * numbers of pv entries.
1320  *
1321  * Also create the kernel page table template for isolated user
1322  * pmaps.
1323  */
1324 static void pmap_init_iso_range(vm_offset_t base, size_t bytes);
1325 static void pmap_init2_iso_pmap(void);
1326 #if 0
1327 static void dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base);
1328 #endif
1329
1330 void
1331 pmap_init2(void)
1332 {
1333         vm_pindex_t shpgperproc = PMAP_SHPGPERPROC;
1334         vm_pindex_t entry_max;
1335
1336         TUNABLE_LONG_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1337         pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
1338         TUNABLE_LONG_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1339         pv_entry_high_water = 9 * (pv_entry_max / 10);
1340
1341         /*
1342          * Subtract out pages already installed in the zone (hack)
1343          */
1344         entry_max = pv_entry_max - vm_page_array_size;
1345         if (entry_max <= 0)
1346                 entry_max = 1;
1347
1348         zinitna(pvzone, NULL, 0, entry_max, ZONE_INTERRUPT);
1349
1350         /*
1351          * Enable dynamic deletion of empty higher-level page table pages
1352          * by default only if system memory is < 8GB (use 7GB for slop).
1353          * This can save a little memory, but imposes significant
1354          * performance overhead for things like bulk builds, and for programs
1355          * which do a lot of memory mapping and memory unmapping.
1356          */
1357         if (pmap_dynamic_delete < 0) {
1358                 if (vmstats.v_page_count < 7LL * 1024 * 1024 * 1024 / PAGE_SIZE)
1359                         pmap_dynamic_delete = 1;
1360                 else
1361                         pmap_dynamic_delete = 0;
1362         }
1363
1364         /*
1365          * Automatic detection of Intel meltdown bug requiring user/kernel
1366          * mmap isolation.
1367          *
1368  * Currently there are so many Intel CPUs impacted that it's better
1369  * to whitelist future Intel CPUs.  Most(?) AMD CPUs are not impacted
1370          * so the default is off for AMD.
1371          */
1372         if (meltdown_mitigation < 0) {
1373                 if (cpu_vendor_id == CPU_VENDOR_INTEL)
1374                         meltdown_mitigation = 1;
1375                 else
1376                         meltdown_mitigation = 0;
1377         }
1378         if (meltdown_mitigation) {
1379                 kprintf("machdep.meltdown_mitigation enabled to "
1380                         "protect against (mostly Intel) meltdown bug\n");
1381                 kprintf("system call performance will be impacted\n");
1382         }
1383
1384         pmap_init2_iso_pmap();
1385 }
1386
1387 /*
1388  * Create the isolation pmap template.  Once created, the template
1389  * is static and its PML4e entries are used to populate the
1390  * kernel portion of any isolated user pmaps.
1391  *
1392  * Our isolation pmap must contain:
1393  * (1) trampoline area for all cpus
1394  * (2) common_tss area for all cpus (its part of the trampoline area now)
1395  * (3) IDT for all cpus
1396  * (4) GDT for all cpus
1397  */
1398 static void
1399 pmap_init2_iso_pmap(void)
1400 {
1401         int n;
1402
1403         if (bootverbose)
1404                 kprintf("Initialize isolation pmap\n");
1405
1406         /*
1407          * Try to use our normal API calls to make this easier.  We have
1408          * to scrap the shadowed kernel PDPs pmap_pinit() creates for our
1409          * iso_pmap.
1410          */
1411         pmap_pinit(&iso_pmap);
1412         bzero(iso_pmap.pm_pml4, PAGE_SIZE);
1413
1414         /*
1415          * Install areas needed by the cpu and trampoline.
1416          */
1417         for (n = 0; n < ncpus; ++n) {
1418                 struct privatespace *ps;
1419
1420                 ps = CPU_prvspace[n];
1421                 pmap_init_iso_range((vm_offset_t)&ps->trampoline,
1422                                     sizeof(ps->trampoline));
1423                 pmap_init_iso_range((vm_offset_t)&ps->dblstack,
1424                                     sizeof(ps->dblstack));
1425                 pmap_init_iso_range((vm_offset_t)&ps->dbgstack,
1426                                     sizeof(ps->dbgstack));
1427                 pmap_init_iso_range((vm_offset_t)&ps->common_tss,
1428                                     sizeof(ps->common_tss));
1429                 pmap_init_iso_range(r_idt_arr[n].rd_base,
1430                                     r_idt_arr[n].rd_limit + 1);
1431         }
1432         pmap_init_iso_range((register_t)gdt, sizeof(gdt));
1433         pmap_init_iso_range((vm_offset_t)(int *)btext,
1434                             (vm_offset_t)(int *)etext -
1435                              (vm_offset_t)(int *)btext);
1436
1437 #if 0
1438         kprintf("Dump iso_pmap:\n");
1439         dump_pmap(&iso_pmap, vtophys(iso_pmap.pm_pml4), 0, 0);
1440         kprintf("\nDump kernel_pmap:\n");
1441         dump_pmap(&kernel_pmap, vtophys(kernel_pmap.pm_pml4), 0, 0);
1442 #endif
1443 }
1444
1445 /*
1446  * This adds a kernel virtual address range to the isolation pmap.
1447  */
1448 static void
1449 pmap_init_iso_range(vm_offset_t base, size_t bytes)
1450 {
1451         pv_entry_t pv;
1452         pv_entry_t pvp;
1453         pt_entry_t *ptep;
1454         pt_entry_t pte;
1455         vm_offset_t va;
1456
1457         if (bootverbose) {
1458                 kprintf("isolate %016jx-%016jx (%zd)\n",
1459                         base, base + bytes, bytes);
1460         }
1461         va = base & ~(vm_offset_t)PAGE_MASK;
1462         while (va < base + bytes) {
1463                 if ((va & PDRMASK) == 0 && va + NBPDR <= base + bytes &&
1464                     (ptep = pmap_pt(&kernel_pmap, va)) != NULL &&
1465                     (*ptep & kernel_pmap.pmap_bits[PG_V_IDX]) &&
1466                     (*ptep & kernel_pmap.pmap_bits[PG_PS_IDX])) {
1467                         /*
1468                          * Use 2MB pages if possible
1469                          */
1470                         pte = *ptep;
1471                         pv = pmap_allocpte(&iso_pmap, pmap_pd_pindex(va), &pvp);
1472                         ptep = pv_pte_lookup(pv, (va >> PDRSHIFT) & 511);
1473                         *ptep = pte;
1474                         va += NBPDR;
1475                 } else {
1476                         /*
1477                          * Otherwise use 4KB pages
1478                          */
1479                         pv = pmap_allocpte(&iso_pmap, pmap_pt_pindex(va), &pvp);
1480                         ptep = pv_pte_lookup(pv, (va >> PAGE_SHIFT) & 511);
1481                         *ptep = vtophys(va) | kernel_pmap.pmap_bits[PG_RW_IDX] |
1482                                               kernel_pmap.pmap_bits[PG_V_IDX] |
1483                                               kernel_pmap.pmap_bits[PG_A_IDX] |
1484                                               kernel_pmap.pmap_bits[PG_M_IDX];
1485
1486                         va += PAGE_SIZE;
1487                 }
1488                 pv_put(pv);
1489                 pv_put(pvp);
1490         }
1491 }
1492
1493 #if 0
1494 /*
1495  * Useful debugging pmap dumper, do not remove (#if 0 when not in use)
1496  */
1497 static
1498 void
1499 dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base)
1500 {
1501         pt_entry_t *ptp;
1502         vm_offset_t incr;
1503         int i;
1504
1505         switch(level) {
1506         case 0:                                 /* PML4e page, 512G entries */
1507                 incr = (1LL << 48) / 512;
1508                 break;
1509         case 1:                                 /* PDP page, 1G entries */
1510                 incr = (1LL << 39) / 512;
1511                 break;
1512         case 2:                                 /* PD page, 2MB entries */
1513                 incr = (1LL << 30) / 512;
1514                 break;
1515         case 3:                                 /* PT page, 4KB entries */
1516                 incr = (1LL << 21) / 512;
1517                 break;
1518         default:
1519                 incr = 0;
1520                 break;
1521         }
1522
1523         if (level == 0)
1524                 kprintf("cr3 %016jx @ va=%016jx\n", pte, base);
1525         ptp = (void *)PHYS_TO_DMAP(pte & ~(pt_entry_t)PAGE_MASK);
1526         for (i = 0; i < 512; ++i) {
1527                 if (level == 0 && i == 128)
1528                         base += 0xFFFF000000000000LLU;
1529                 if (ptp[i]) {
1530                         kprintf("%*.*s ", level * 4, level * 4, "");
1531                         if (level == 1 && (ptp[i] & 0x180) == 0x180) {
1532                                 kprintf("va=%016jx %3d term %016jx (1GB)\n",
1533                                         base, i, ptp[i]);
1534                         } else if (level == 2 && (ptp[i] & 0x180) == 0x180) {
1535                                 kprintf("va=%016jx %3d term %016jx (2MB)\n",
1536                                         base, i, ptp[i]);
1537                         } else if (level == 3) {
1538                                 kprintf("va=%016jx %3d term %016jx\n",
1539                                         base, i, ptp[i]);
1540                         } else {
1541                                 kprintf("va=%016jx %3d deep %016jx\n",
1542                                         base, i, ptp[i]);
1543                                 dump_pmap(pmap, ptp[i], level + 1, base);
1544                         }
1545                 }
1546                 base += incr;
1547         }
1548 }
1549
1550 #endif
1551
1552 /*
1553  * Typically used by vm/device_pager.c to initialize a fictitious page.
1554  */
1555 void
1556 pmap_page_init(struct vm_page *m)
1557 {
1558         vm_page_init(m);
1559         TAILQ_INIT(&m->md.pv_list);
1560 }
1561
1562 /***************************************************
1563  * Low level helper routines.....
1564  ***************************************************/
1565
1566 /*
1567  * This routine determines whether the modified bit should be tracked for
1568  * a given address; the clean submap (clean_sva..clean_eva) is not tracked.
1569  */
1570 static __inline
1571 int
1572 pmap_track_modified(vm_pindex_t pindex)
1573 {
1574         vm_offset_t va = (vm_offset_t)pindex << PAGE_SHIFT;
1575         if ((va < clean_sva) || (va >= clean_eva)) 
1576                 return 1;
1577         else
1578                 return 0;
1579 }
1580
1581 /*
1582  * Extract the physical page address associated with the map/VA pair.
1583  * The page must be wired for this to work reliably.
1584  */
1585 vm_paddr_t 
1586 pmap_extract(pmap_t pmap, vm_offset_t va, void **handlep)
1587 {
1588         vm_paddr_t rtval;
1589         pv_entry_t pt_pv;
1590         pt_entry_t *ptep;
1591
1592         rtval = 0;
1593         if (va >= VM_MAX_USER_ADDRESS) {
1594                 /*
1595                  * Kernel page directories might be direct-mapped and
1596                  * there is typically no PV tracking of pte's
1597                  */
1598                 pd_entry_t *pt;
1599
1600                 pt = pmap_pt(pmap, va);
1601                 if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) {
1602                         if (*pt & pmap->pmap_bits[PG_PS_IDX]) {
1603                                 rtval = *pt & PG_PS_FRAME;
1604                                 rtval |= va & PDRMASK;
1605                         } else {
1606                                 ptep = pmap_pt_to_pte(*pt, va);
1607                                 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1608                                         rtval = *ptep & PG_FRAME;
1609                                         rtval |= va & PAGE_MASK;
1610                                 }
1611                         }
1612                 }
1613                 if (handlep)
1614                         *handlep = NULL;
1615         } else {
1616                 /*
1617                  * User pages currently do not direct-map the page directory
1618                  * and some pages might not use managed PVs.  But all PT's
1619                  * will have a PV.
1620                  */
1621                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
1622                 if (pt_pv) {
1623                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1624                         if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1625                                 rtval = *ptep & PG_FRAME;
1626                                 rtval |= va & PAGE_MASK;
1627                         }
1628                         if (handlep)
1629                                 *handlep = pt_pv;       /* locked until done */
1630                         else
1631                                 pv_put (pt_pv);
1632                 } else if (handlep) {
1633                         *handlep = NULL;
1634                 }
1635         }
1636         return rtval;
1637 }
1638
1639 void
1640 pmap_extract_done(void *handle)
1641 {
1642         if (handle)
1643                 pv_put((pv_entry_t)handle);
1644 }
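
/*
 * Usage sketch (hypothetical caller, not part of the original code): when
 * a non-NULL handlep is supplied and a user mapping is found, the backing
 * page table pv is returned locked and must be released with
 * pmap_extract_done() once the caller is finished with the address.
 *
 *	void *handle;
 *	vm_paddr_t pa;
 *
 *	pa = pmap_extract(pmap, va, &handle);
 *	if (pa) {
 *		...use pa while the page table remains locked...
 *	}
 *	pmap_extract_done(handle);	(no-op if handle is NULL)
 */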
1645
1646 /*
1647  * Similar to extract but checks protections, SMP-friendly short-cut for
1648  * vm_fault_page[_quick]().  Can return NULL to cause the caller to
1649  * fall-through to the real fault code.  Does not work with HVM page
1650  * tables.
1651  *
1652  * If busyp is NULL the returned page, if not NULL, is held (and not busied).
1653  *
1654  * If busyp is not NULL and this function sets *busyp non-zero, the returned
1655  * page is busied (and not held).
1656  *
1657  * If busyp is not NULL and this function sets *busyp to zero, the returned
1658  * page is held (and not busied).
1659  *
1660  * If VM_PROT_WRITE is set in prot, and the pte is already writable, the
1661  * returned page will be dirtied.  If the pte is not already writable NULL
1662  * is returned.  In other words, if the bit is set and a vm_page_t is returned,
1663  * any COW will already have happened and that page can be written by the
1664  * caller.
1665  *
1666  * WARNING! THE RETURNED PAGE IS ONLY HELD AND NOT SUITABLE FOR READING
1667  *          OR WRITING AS-IS.
1668  */
1669 vm_page_t
1670 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot, int *busyp)
1671 {
1672         if (pmap &&
1673             va < VM_MAX_USER_ADDRESS &&
1674             (pmap->pm_flags & PMAP_HVM) == 0) {
1675                 pv_entry_t pt_pv;
1676                 pv_entry_t pte_pv;
1677                 pt_entry_t *ptep;
1678                 pt_entry_t req;
1679                 vm_page_t m;
1680                 int error;
1681
1682                 req = pmap->pmap_bits[PG_V_IDX] |
1683                       pmap->pmap_bits[PG_U_IDX];
1684                 if (prot & VM_PROT_WRITE)
1685                         req |= pmap->pmap_bits[PG_RW_IDX];
1686
1687                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
1688                 if (pt_pv == NULL)
1689                         return (NULL);
1690                 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1691                 if ((*ptep & req) != req) {
1692                         pv_put(pt_pv);
1693                         return (NULL);
1694                 }
1695                 pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), NULL, &error);
1696                 if (pte_pv && error == 0) {
1697                         m = pte_pv->pv_m;
1698                         if (prot & VM_PROT_WRITE) {
1699                                 /* interlocked by presence of pv_entry */
1700                                 vm_page_dirty(m);
1701                         }
1702                         if (busyp) {
1703                                 if (prot & VM_PROT_WRITE) {
1704                                         if (vm_page_busy_try(m, TRUE))
1705                                                 m = NULL;
1706                                         *busyp = 1;
1707                                 } else {
1708                                         vm_page_hold(m);
1709                                         *busyp = 0;
1710                                 }
1711                         } else {
1712                                 vm_page_hold(m);
1713                         }
1714                         pv_put(pte_pv);
1715                 } else if (pte_pv) {
1716                         pv_drop(pte_pv);
1717                         m = NULL;
1718                 } else {
1719                         /* error, since we didn't request a placemarker */
1720                         m = NULL;
1721                 }
1722                 pv_put(pt_pv);
1723                 return(m);
1724         } else {
1725                 return(NULL);
1726         }
1727 }
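
/*
 * Caller sketch (hypothetical, for illustration only): the cleanup that
 * matches the busyp contract documented above.  The release calls assume
 * the usual pairings, vm_page_wakeup() for a busied page and
 * vm_page_unhold() for a held page.
 *
 *	int busy;
 *	vm_page_t m;
 *
 *	m = pmap_fault_page_quick(pmap, va, prot, &busy);
 *	if (m) {
 *		...access the page...
 *		if (busy)
 *			vm_page_wakeup(m);
 *		else
 *			vm_page_unhold(m);
 *	}
 */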
1728
1729 /*
1730  * Extract the physical page address associated with a kernel virtual address.
1731  */
1732 vm_paddr_t
1733 pmap_kextract(vm_offset_t va)
1734 {
1735         pd_entry_t pt;          /* pt entry in pd */
1736         vm_paddr_t pa;
1737
1738         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1739                 pa = DMAP_TO_PHYS(va);
1740         } else {
1741                 pt = *vtopt(va);
1742                 if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) {
1743                         pa = (pt & PG_PS_FRAME) | (va & PDRMASK);
1744                 } else {
1745                         /*
1746                          * Beware of a concurrent promotion that changes the
1747                          * PDE at this point!  For example, vtopte() must not
1748                          * be used to access the PTE because it would use the
1749                          * new PDE.  It is, however, safe to use the old PDE
1750                          * because the page table page is preserved by the
1751                          * promotion.
1752                          */
1753                         pa = *pmap_pt_to_pte(pt, va);
1754                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1755                 }
1756         }
1757         return pa;
1758 }
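
/*
 * Illustrative property (informational note, not from the original source):
 * for an address inside the direct map the conversion is pure arithmetic
 * and no page table walk is performed, i.e.
 *
 *	pmap_kextract(PHYS_TO_DMAP(pa)) == pa
 */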
1759
1760 /***************************************************
1761  * Low level mapping routines.....
1762  ***************************************************/
1763
1764 /*
1765  * Routine: pmap_kenter
1766  * Function:
1767  *      Add a wired page to the KVA
1768  *      NOTE! In order for the mapping to take effect, an invltlb
1769  *      should be done after the pmap_kenter().
1770  */
1771 void 
1772 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1773 {
1774         pt_entry_t *ptep;
1775         pt_entry_t npte;
1776
1777         npte = pa |
1778                kernel_pmap.pmap_bits[PG_RW_IDX] |
1779                kernel_pmap.pmap_bits[PG_V_IDX];
1780 //             pgeflag;
1781         ptep = vtopte(va);
1782 #if 1
1783         pmap_inval_smp(&kernel_pmap, va, 1, ptep, npte);
1784 #else
1785         /* FUTURE */
1786         if (*ptep)
1787                 pmap_inval_smp(&kernel_pmap, va, 1, ptep, npte);
1788         else
1789                 *ptep = npte;
1790 #endif
1791 }
1792
1793 /*
1794  * Similar to pmap_kenter(), except we only invalidate the mapping on the
1795  * current CPU.  Returns 0 if the previous pte was 0, 1 if it wasn't
1796  * (caller can conditionalize calling smp_invltlb()).
1797  */
1798 int
1799 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1800 {
1801         pt_entry_t *ptep;
1802         pt_entry_t npte;
1803         int res;
1804
1805         npte = pa | kernel_pmap.pmap_bits[PG_RW_IDX] |
1806                     kernel_pmap.pmap_bits[PG_V_IDX];
1807         // npte |= pgeflag;
1808         ptep = vtopte(va);
1809 #if 1
1810         res = 1;
1811 #else
1812         /* FUTURE */
1813         res = (*ptep != 0);
1814 #endif
1815         atomic_swap_long(ptep, npte);
1816         cpu_invlpg((void *)va);
1817
1818         return res;
1819 }
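
/*
 * Illustrative caller (hypothetical sketch): the return value allows the
 * SMP invalidation to be conditionalized as described above.  Note that
 * with the current "#if 1" code the function always returns 1, so the
 * broadcast ends up unconditional.
 *
 *	if (pmap_kenter_quick(va, pa))
 *		smp_invltlb();
 */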
1820
1821 /*
1822  * Enter addresses into the kernel pmap but don't bother
1823  * doing any tlb invalidations.  Caller will do a rollup
1824  * invalidation via pmap_rollup_inval().
1825  */
1826 int
1827 pmap_kenter_noinval(vm_offset_t va, vm_paddr_t pa)
1828 {
1829         pt_entry_t *ptep;
1830         pt_entry_t npte;
1831         int res;
1832
1833         npte = pa |
1834             kernel_pmap.pmap_bits[PG_RW_IDX] |
1835             kernel_pmap.pmap_bits[PG_V_IDX];
1836 //          pgeflag;
1837         ptep = vtopte(va);
1838 #if 1
1839         res = 1;
1840 #else
1841         /* FUTURE */
1842         res = (*ptep != 0);
1843 #endif
1844         atomic_swap_long(ptep, npte);
1845         cpu_invlpg((void *)va);
1846
1847         return res;
1848 }
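
/*
 * Batching sketch (hypothetical caller): enter a run of pages without
 * per-page SMP invalidations and then issue a single invalidation over
 * the whole range afterwards, e.g. via pmap_invalidate_range() (defined
 * later in this file) or the rollup API mentioned above.
 *
 *	for (i = 0; i < npages; ++i)
 *		pmap_kenter_noinval(va + i * PAGE_SIZE, pa[i]);
 *	pmap_invalidate_range(&kernel_pmap, va, va + npages * PAGE_SIZE);
 */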
1849
1850 /*
1851  * Remove a page from the kernel page tables.
1852  */
1853 void
1854 pmap_kremove(vm_offset_t va)
1855 {
1856         pt_entry_t *ptep;
1857
1858         ptep = vtopte(va);
1859         pmap_inval_smp(&kernel_pmap, va, 1, ptep, 0);
1860 }
1861
1862 void
1863 pmap_kremove_quick(vm_offset_t va)
1864 {
1865         pt_entry_t *ptep;
1866
1867         ptep = vtopte(va);
1868         (void)pte_load_clear(ptep);
1869         cpu_invlpg((void *)va);
1870 }
1871
1872 /*
1873  * Remove addresses from the kernel pmap but don't bother
1874  * doing any tlb invalidations.  Caller will do a rollup
1875  * invalidation via pmap_rollup_inval().
1876  */
1877 void
1878 pmap_kremove_noinval(vm_offset_t va)
1879 {
1880         pt_entry_t *ptep;
1881
1882         ptep = vtopte(va);
1883         (void)pte_load_clear(ptep);
1884 }
1885
1886 /*
1887  * XXX these need to be recoded.  They are not used in any critical path.
1888  */
1889 void
1890 pmap_kmodify_rw(vm_offset_t va)
1891 {
1892         atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]);
1893         cpu_invlpg((void *)va);
1894 }
1895
1896 /* NOT USED
1897 void
1898 pmap_kmodify_nc(vm_offset_t va)
1899 {
1900         atomic_set_long(vtopte(va), PG_N);
1901         cpu_invlpg((void *)va);
1902 }
1903 */
1904
1905 /*
1906  * Used to map a range of physical addresses into kernel virtual
1907  * address space during the low level boot, typically to map the
1908  * dump bitmap, message buffer, and vm_page_array.
1909  *
1910  * These mappings are typically made at some point after the end of the
1911  * kernel text+data.
1912  *
1913  * We could return PHYS_TO_DMAP(start) here and not consume any KVA
1914  * via (*virtp), but then kmem from userland and kernel dumps won't
1915  * have access to the related pointers.
1916  */
1917 vm_offset_t
1918 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
1919 {
1920         vm_offset_t va;
1921         vm_offset_t va_start;
1922
1923         /*return PHYS_TO_DMAP(start);*/
1924
1925         va_start = *virtp;
1926         va = va_start;
1927
1928         while (start < end) {
1929                 pmap_kenter_quick(va, start);
1930                 va += PAGE_SIZE;
1931                 start += PAGE_SIZE;
1932         }
1933         *virtp = va;
1934         return va_start;
1935 }
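
/*
 * Boot-time usage sketch (hypothetical; the variable names below are
 * illustrative only): the caller passes in its KVA allocation cursor,
 * which is advanced past the new mapping, and receives the base of the
 * mapping as the return value.
 *
 *	vm_offset_t va;
 *
 *	va = pmap_map(&kva_cursor, pa_start, pa_end,
 *		      VM_PROT_READ | VM_PROT_WRITE);
 */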
1936
1937 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
1938
1939 /*
1940  * Remove the specified set of pages from the data and instruction caches.
1941  *
1942  * In contrast to pmap_invalidate_cache_range(), this function does not
1943  * rely on the CPU's self-snoop feature, because it is intended for use
1944  * when moving pages into a different cache domain.
1945  */
1946 void
1947 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1948 {
1949         vm_offset_t daddr, eva;
1950         int i;
1951
1952         if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1953             (cpu_feature & CPUID_CLFSH) == 0)
1954                 wbinvd();
1955         else {
1956                 cpu_mfence();
1957                 for (i = 0; i < count; i++) {
1958                         daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1959                         eva = daddr + PAGE_SIZE;
1960                         for (; daddr < eva; daddr += cpu_clflush_line_size)
1961                                 clflush(daddr);
1962                 }
1963                 cpu_mfence();
1964         }
1965 }
1966
1967 void
1968 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
1969 {
1970         KASSERT((sva & PAGE_MASK) == 0,
1971             ("pmap_invalidate_cache_range: sva not page-aligned"));
1972         KASSERT((eva & PAGE_MASK) == 0,
1973             ("pmap_invalidate_cache_range: eva not page-aligned"));
1974
1975         if (cpu_feature & CPUID_SS) {
1976                 ; /* If "Self Snoop" is supported, do nothing. */
1977         } else {
1978                 /* Globally invalidate caches */
1979                 cpu_wbinvd_on_all_cpus();
1980         }
1981 }
1982
1983 /*
1984  * Invalidate the specified range of virtual memory on all cpus associated
1985  * with the pmap.
1986  */
1987 void
1988 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1989 {
1990         pmap_inval_smp(pmap, sva, (eva - sva) >> PAGE_SHIFT, NULL, 0);
1991 }
1992
1993 /*
1994  * Add a list of wired pages to the kva.  This routine is used for temporary
1995  * kernel mappings such as those found in buffer cache buffers.  Page
1996  * modifications and accesses are not tracked or recorded.
1997  *
1998  * NOTE! Old mappings are simply overwritten, and we cannot assume relaxed
1999  *       semantics as previous mappings may have been zeroed without any
2000  *       invalidation.
2001  *
2002  * The page *must* be wired.
2003  */
2004 static __inline void
2005 _pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count, int doinval)
2006 {
2007         vm_offset_t end_va;
2008         vm_offset_t va;
2009
2010         end_va = beg_va + count * PAGE_SIZE;
2011
2012         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2013                 pt_entry_t pte;
2014                 pt_entry_t *ptep;
2015
2016                 ptep = vtopte(va);
2017                 pte = VM_PAGE_TO_PHYS(*m) |
2018                         kernel_pmap.pmap_bits[PG_RW_IDX] |
2019                         kernel_pmap.pmap_bits[PG_V_IDX] |
2020                         kernel_pmap.pmap_cache_bits[(*m)->pat_mode];
2021 //              pgeflag;
2022                 atomic_swap_long(ptep, pte);
2023                 m++;
2024         }
2025         if (doinval)
2026                 pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
2027 }
2028
2029 void
2030 pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count)
2031 {
2032         _pmap_qenter(beg_va, m, count, 1);
2033 }
2034
2035 void
2036 pmap_qenter_noinval(vm_offset_t beg_va, vm_page_t *m, int count)
2037 {
2038         _pmap_qenter(beg_va, m, count, 0);
2039 }
2040
2041 /*
2042  * This routine jerks page mappings from the kernel.  It is meant only
2043  * for temporary mappings such as those found in buffer cache buffers.
2044  * Modified and access status is not recorded.
2045  *
2046  * MPSAFE, INTERRUPT SAFE (cluster callback)
2047  */
2048 void
2049 pmap_qremove(vm_offset_t beg_va, int count)
2050 {
2051         vm_offset_t end_va;
2052         vm_offset_t va;
2053
2054         end_va = beg_va + count * PAGE_SIZE;
2055
2056         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2057                 pt_entry_t *pte;
2058
2059                 pte = vtopte(va);
2060                 (void)pte_load_clear(pte);
2061                 cpu_invlpg((void *)va);
2062         }
2063         pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
2064 }
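
/*
 * Typical pairing (hypothetical sketch): map an array of wired pages into
 * a reserved KVA window, operate on them through the window, then tear
 * the temporary mappings back down.
 *
 *	pmap_qenter(buf_kva, m_array, npages);
 *	...access the pages through buf_kva...
 *	pmap_qremove(buf_kva, npages);
 */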
2065
2066 /*
2067  * This routine removes temporary kernel mappings, only invalidating them
2068  * on the current cpu.  It should only be used under carefully controlled
2069  * conditions.
2070  */
2071 void
2072 pmap_qremove_quick(vm_offset_t beg_va, int count)
2073 {
2074         vm_offset_t end_va;
2075         vm_offset_t va;
2076
2077         end_va = beg_va + count * PAGE_SIZE;
2078
2079         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2080                 pt_entry_t *pte;
2081
2082                 pte = vtopte(va);
2083                 (void)pte_load_clear(pte);
2084                 cpu_invlpg((void *)va);
2085         }
2086 }
2087
2088 /*
2089  * This routine removes temporary kernel mappings *without* invalidating
2090  * the TLB.  It can only be used on permanent kva reservations such as those
2091  * found in buffer cache buffers, under carefully controlled circumstances.
2092  *
2093  * NOTE: Repopulating these KVAs requires unconditional invalidation.
2094  *       (pmap_qenter() does unconditional invalidation).
2095  */
2096 void
2097 pmap_qremove_noinval(vm_offset_t beg_va, int count)
2098 {
2099         vm_offset_t end_va;
2100         vm_offset_t va;
2101
2102         end_va = beg_va + count * PAGE_SIZE;
2103
2104         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2105                 pt_entry_t *pte;
2106
2107                 pte = vtopte(va);
2108                 (void)pte_load_clear(pte);
2109         }
2110 }
2111
2112 /*
2113  * Create a new thread and optionally associate it with a (new) process.
2114  * NOTE! the new thread's cpu may not equal the current cpu.
2115  */
2116 void
2117 pmap_init_thread(thread_t td)
2118 {
2119         /* enforce pcb placement & alignment */
2120         td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
2121         td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
2122         td->td_savefpu = &td->td_pcb->pcb_save;
2123         td->td_sp = (char *)td->td_pcb; /* no -16 */
2124 }
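
/*
 * Resulting layout (informational note, not part of the original source):
 * the pcb occupies the top of the kernel stack, aligned down to a 16-byte
 * boundary, td_savefpu points into it, and td_sp starts at the base of
 * the pcb so the stack grows downward from there.
 */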
2125
2126 /*
2127  * This routine directly affects the fork performance of a process.
2128  */
2129 void
2130 pmap_init_proc(struct proc *p)
2131 {
2132 }
2133
2134 static void
2135 pmap_pinit_defaults(struct pmap *pmap)
2136 {
2137         bcopy(pmap_bits_default, pmap->pmap_bits,
2138               sizeof(pmap_bits_default));
2139         bcopy(protection_codes, pmap->protection_codes,
2140               sizeof(protection_codes));
2141         bcopy(pat_pte_index, pmap->pmap_cache_bits,
2142               sizeof(pat_pte_index));
2143         pmap->pmap_cache_mask = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
2144         pmap->copyinstr = std_copyinstr;
2145         pmap->copyin = std_copyin;
2146         pmap->copyout = std_copyout;
2147         pmap->fubyte = std_fubyte;
2148         pmap->subyte = std_subyte;
2149         pmap->fuword32 = std_fuword32;
2150         pmap->fuword64 = std_fuword64;
2151         pmap->suword32 = std_suword32;
2152         pmap->suword64 = std_suword64;
2153         pmap->swapu32 = std_swapu32;
2154         pmap->swapu64 = std_swapu64;
2155         pmap->fuwordadd32 = std_fuwordadd32;
2156         pmap->fuwordadd64 = std_fuwordadd64;
2157 }
2158 /*
2159  * Initialize pmap0/vmspace0.
2160  *
2161  * On architectures where the kernel pmap is not integrated into the user
2162  * process pmap, this pmap represents the process pmap, not the kernel pmap.
2163  * Use the global kernel_pmap to access the kernel pmap directly.
2164  */
2165 void
2166 pmap_pinit0(struct pmap *pmap)
2167 {
2168         int i;
2169
2170         pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
2171         pmap->pm_count = 1;
2172         CPUMASK_ASSZERO(pmap->pm_active);
2173         pmap->pm_pvhint_pt = NULL;
2174         pmap->pm_pvhint_pte = NULL;
2175         RB_INIT(&pmap->pm_pvroot);
2176         spin_init(&pmap->pm_spin, "pmapinit0");
2177         for (i = 0; i < PM_PLACEMARKS; ++i)
2178                 pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2179         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2180         pmap_pinit_defaults(pmap);
2181 }
2182
2183 /*
2184  * Initialize a preallocated and zeroed pmap structure,
2185  * such as one in a vmspace structure.
2186  */
2187 static void
2188 pmap_pinit_simple(struct pmap *pmap)
2189 {
2190         int i;
2191
2192         /*
2193          * Misc initialization
2194          */
2195         pmap->pm_count = 1;
2196         CPUMASK_ASSZERO(pmap->pm_active);
2197         pmap->pm_pvhint_pt = NULL;
2198         pmap->pm_pvhint_pte = NULL;
2199         pmap->pm_flags = PMAP_FLAG_SIMPLE;
2200
2201         pmap_pinit_defaults(pmap);
2202
2203         /*
2204          * Don't blow up locks/tokens on re-use (XXX fix/use drop code
2205          * for this).
2206          */
2207         if (pmap->pm_pmlpv == NULL) {
2208                 RB_INIT(&pmap->pm_pvroot);
2209                 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2210                 spin_init(&pmap->pm_spin, "pmapinitsimple");
2211                 for (i = 0; i < PM_PLACEMARKS; ++i)
2212                         pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2213         }
2214 }
2215
2216 void
2217 pmap_pinit(struct pmap *pmap)
2218 {
2219         pv_entry_t pv;
2220         int j;
2221
2222         if (pmap->pm_pmlpv) {
2223                 if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
2224                         pmap_puninit(pmap);
2225                 }
2226         }
2227
2228         pmap_pinit_simple(pmap);
2229         pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
2230
2231         /*
2232          * No need to allocate page table space yet but we do need a valid
2233          * page directory table.
2234          */
2235         if (pmap->pm_pml4 == NULL) {
2236                 pmap->pm_pml4 =
2237                     (pml4_entry_t *)kmem_alloc_pageable(&kernel_map,
2238                                                         PAGE_SIZE * 2,
2239                                                         VM_SUBSYS_PML4);
2240                 pmap->pm_pml4_iso = (void *)((char *)pmap->pm_pml4 + PAGE_SIZE);
2241         }
2242
2243         /*
2244          * Allocate the PML4e table, which wires it even though it isn't
2245          * being entered into some higher level page table (it being the
2246          * highest level).  If one is already cached we don't have to do
2247          * anything.
2248          */
2249         if ((pv = pmap->pm_pmlpv) == NULL) {
2250                 pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2251                 pmap->pm_pmlpv = pv;
2252                 pmap_kenter((vm_offset_t)pmap->pm_pml4,
2253                             VM_PAGE_TO_PHYS(pv->pv_m));
2254                 pv_put(pv);
2255
2256                 /*
2257                  * Install DMAP and KMAP.
2258                  */
2259                 for (j = 0; j < NDMPML4E; ++j) {
2260                         pmap->pm_pml4[DMPML4I + j] =
2261                             (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2262                             pmap->pmap_bits[PG_RW_IDX] |
2263                             pmap->pmap_bits[PG_V_IDX] |
2264                             pmap->pmap_bits[PG_A_IDX];
2265                 }
2266                 for (j = 0; j < NKPML4E; ++j) {
2267                         pmap->pm_pml4[KPML4I + j] =
2268                             (KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2269                             pmap->pmap_bits[PG_RW_IDX] |
2270                             pmap->pmap_bits[PG_V_IDX] |
2271                             pmap->pmap_bits[PG_A_IDX];
2272                 }
2273
2274                 /*
2275                  * install self-referential address mapping entry
2276                  */
2277                 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) |
2278                     pmap->pmap_bits[PG_V_IDX] |
2279                     pmap->pmap_bits[PG_RW_IDX] |
2280                     pmap->pmap_bits[PG_A_IDX];
2281         } else {
2282                 KKASSERT(pv->pv_m->flags & PG_MAPPED);
2283                 KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
2284         }
2285         KKASSERT(pmap->pm_pml4[255] == 0);
2286
2287         /*
2288          * When implementing an isolated userland pmap, a second PML4e table
2289          * is needed.  We use pmap_pml4_pindex() + 1 for convenience, but
2290          * note that we do not operate on this table using our API functions
2291          * so handling of the + 1 case is mostly just to prevent implosions.
2292          *
2293          * We install an isolated version of the kernel PDPs into this
2294          * second PML4e table.  The pmap code will mirror all user PDPs
2295          * between the primary and secondary PML4e table.
2296          */
2297         if ((pv = pmap->pm_pmlpv_iso) == NULL && meltdown_mitigation &&
2298             pmap != &iso_pmap) {
2299                 pv = pmap_allocpte(pmap, pmap_pml4_pindex() + 1, NULL);
2300                 pmap->pm_pmlpv_iso = pv;
2301                 pmap_kenter((vm_offset_t)pmap->pm_pml4_iso,
2302                             VM_PAGE_TO_PHYS(pv->pv_m));
2303                 pv_put(pv);
2304
2305                 /*
2306                  * Install an isolated version of the kernel pmap for
2307                  * user consumption, using PDPs constructed in iso_pmap.
2308                  */
2309                 for (j = 0; j < NKPML4E; ++j) {
2310                         pmap->pm_pml4_iso[KPML4I + j] =
2311                                 iso_pmap.pm_pml4[KPML4I + j];
2312                 }
2313         } else if (pv) {
2314                 KKASSERT(pv->pv_m->flags & PG_MAPPED);
2315                 KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
2316         }
2317 }
2318
2319 /*
2320  * Clean up a pmap structure so it can be physically freed.  This routine
2321  * is called by the vmspace dtor function.  A great deal of pmap data is
2322  * left passively mapped to improve vmspace management so we have a bit
2323  * of cleanup work to do here.
2324  */
2325 void
2326 pmap_puninit(pmap_t pmap)
2327 {
2328         pv_entry_t pv;
2329         vm_page_t p;
2330
2331         KKASSERT(CPUMASK_TESTZERO(pmap->pm_active));
2332         if ((pv = pmap->pm_pmlpv) != NULL) {
2333                 if (pv_hold_try(pv) == 0)
2334                         pv_lock(pv);
2335                 KKASSERT(pv == pmap->pm_pmlpv);
2336                 p = pmap_remove_pv_page(pv);
2337                 pv_free(pv, NULL);
2338                 pv = NULL;      /* safety */
2339                 pmap_kremove((vm_offset_t)pmap->pm_pml4);
2340                 vm_page_busy_wait(p, FALSE, "pgpun");
2341                 KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
2342                 vm_page_unwire(p, 0);
2343                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2344                 vm_page_free(p);
2345                 pmap->pm_pmlpv = NULL;
2346         }
2347         if ((pv = pmap->pm_pmlpv_iso) != NULL) {
2348                 if (pv_hold_try(pv) == 0)
2349                         pv_lock(pv);
2350                 KKASSERT(pv == pmap->pm_pmlpv_iso);
2351                 p = pmap_remove_pv_page(pv);
2352                 pv_free(pv, NULL);
2353                 pv = NULL;      /* safety */
2354                 pmap_kremove((vm_offset_t)pmap->pm_pml4_iso);
2355                 vm_page_busy_wait(p, FALSE, "pgpun");
2356                 KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
2357                 vm_page_unwire(p, 0);
2358                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2359                 vm_page_free(p);
2360                 pmap->pm_pmlpv_iso = NULL;
2361         }
2362         if (pmap->pm_pml4) {
2363                 KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
2364                 kmem_free(&kernel_map,
2365                           (vm_offset_t)pmap->pm_pml4, PAGE_SIZE * 2);
2366                 pmap->pm_pml4 = NULL;
2367                 pmap->pm_pml4_iso = NULL;
2368         }
2369         KKASSERT(pmap->pm_stats.resident_count == 0);
2370         KKASSERT(pmap->pm_stats.wired_count == 0);
2371 }
2372
2373 /*
2374  * This function is now unused (it used to add the pmap to the pmap_list).
2375  */
2376 void
2377 pmap_pinit2(struct pmap *pmap)
2378 {
2379 }
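
/*
 * Lifecycle sketch (hypothetical driver of these entry points, mirroring
 * the object-pmap handling in pmap_allocpte_seg() below):
 *
 *	pmap_pinit(pmap);	(or pmap_pinit_simple()/pmap_pinit0())
 *	pmap_pinit2(pmap);	(historical no-op, see above)
 *	...normal pmap operations...
 *	pmap_release(pmap);	(no valid mappings may remain)
 *	pmap_puninit(pmap);	(tears down the PML4 KVA and pv, see above)
 */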
2380
2381 /*
2382  * This routine is called when various levels in the page table need to
2383  * be populated.  This routine cannot fail.
2384  *
2385  * This function returns two locked pv_entry's, one representing the
2386  * requested pv and one representing the requested pv's parent pv.  If
2387  * an intermediate page table does not exist it will be created, mapped,
2388  * wired, and the parent page table will be given an additional hold
2389  * count representing the presence of the child pv_entry.
2390  */
2391 static
2392 pv_entry_t
2393 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
2394 {
2395         pt_entry_t *ptep;
2396         pt_entry_t *ptep_iso;
2397         pv_entry_t pv;
2398         pv_entry_t pvp;
2399         pt_entry_t v;
2400         vm_pindex_t pt_pindex;
2401         vm_page_t m;
2402         int isnew;
2403         int ispt;
2404
2405         /*
2406          * If the pv already exists and we aren't being asked for the
2407          * parent page table page we can just return it.  A locked+held pv
2408          * is returned.  The pv will also have a second hold related to the
2409          * pmap association that we don't have to worry about.
2410          */
2411         ispt = 0;
2412         pv = pv_alloc(pmap, ptepindex, &isnew);
2413         if (isnew == 0 && pvpp == NULL)
2414                 return(pv);
2415
2416         /*
2417          * Special case terminal PVs.  These are not page table pages so
2418          * no vm_page is allocated (the caller supplied the vm_page).  If
2419  * pvpp is non-NULL we are being asked to also return the pt_pv
2420          * for this pv.
2421          *
2422          * Note that pt_pv's are only returned for user VAs. We assert that
2423          * a pt_pv is not being requested for kernel VAs.  The kernel
2424          * pre-wires all higher-level page tables so don't overload managed
2425          * higher-level page tables on top of it!
2426          *
2427  * However, it's convenient for us to allow the case when creating
2428          * iso_pmap.  This is a bit of a hack but it simplifies iso_pmap
2429          * a lot.
2430          */
2431         if (ptepindex < pmap_pt_pindex(0)) {
2432                 if (ptepindex >= NUPTE_USER && pmap != &iso_pmap) {
2433                         /* kernel manages this manually for KVM */
2434                         KKASSERT(pvpp == NULL);
2435                 } else {
2436                         KKASSERT(pvpp != NULL);
2437                         pt_pindex = NUPTE_TOTAL + (ptepindex >> NPTEPGSHIFT);
2438                         pvp = pmap_allocpte(pmap, pt_pindex, NULL);
2439                         if (isnew)
2440                                 vm_page_wire_quick(pvp->pv_m);
2441                         *pvpp = pvp;
2442                 }
2443                 return(pv);
2444         }
2445
2446         /*
2447          * The kernel never uses managed PT/PD/PDP pages.
2448          */
2449         KKASSERT(pmap != &kernel_pmap);
2450
2451         /*
2452          * Non-terminal PVs allocate a VM page to represent the page table,
2453          * so we have to resolve pvp and calculate the ptepindex for the
2454          * pvp, and then the page table entry index within the pvp, before
2455          * falling through.
2456          */
2457         if (ptepindex < pmap_pd_pindex(0)) {
2458                 /*
2459                  * pv is PT, pvp is PD
2460                  */
2461                 ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT;
2462                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL;
2463                 pvp = pmap_allocpte(pmap, ptepindex, NULL);
2464
2465                 /*
2466                  * PT index in PD
2467                  */
2468                 ptepindex = pv->pv_pindex - pmap_pt_pindex(0);
2469                 ptepindex &= ((1ul << NPDEPGSHIFT) - 1);
2470                 ispt = 1;
2471         } else if (ptepindex < pmap_pdp_pindex(0)) {
2472                 /*
2473                  * pv is PD, pvp is PDP
2474                  *
2475                  * SIMPLE PMAP NOTE: Simple pmaps do not allocate above
2476                  *                   the PD.
2477                  */
2478                 ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT;
2479                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
2480
2481                 if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
2482                         KKASSERT(pvpp == NULL);
2483                         pvp = NULL;
2484                 } else {
2485                         pvp = pmap_allocpte(pmap, ptepindex, NULL);
2486                 }
2487
2488                 /*
2489                  * PD index in PDP
2490                  */
2491                 ptepindex = pv->pv_pindex - pmap_pd_pindex(0);
2492                 ptepindex &= ((1ul << NPDPEPGSHIFT) - 1);
2493         } else if (ptepindex < pmap_pml4_pindex()) {
2494                 /*
2495                  * pv is PDP, pvp is the root pml4 table
2496                  */
2497                 pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2498
2499                 /*
2500                  * PDP index in PML4
2501                  */
2502                 ptepindex = pv->pv_pindex - pmap_pdp_pindex(0);
2503                 ptepindex &= ((1ul << NPML4EPGSHIFT) - 1);
2504         } else {
2505                 /*
2506                  * pv represents the top-level PML4, there is no parent.
2507                  */
2508                 pvp = NULL;
2509         }
2510
2511         if (isnew == 0)
2512                 goto notnew;
2513
2514         /*
2515          * (isnew) is TRUE, pv is not terminal.
2516          *
2517          * (1) Add a wire count to the parent page table (pvp).
2518          * (2) Allocate a VM page for the page table.
2519          * (3) Enter the VM page into the parent page table.
2520          *
2521          * page table pages are marked PG_WRITEABLE and PG_MAPPED.
2522          */
2523         if (pvp)
2524                 vm_page_wire_quick(pvp->pv_m);
2525
2526         for (;;) {
2527                 m = vm_page_alloc(NULL, pv->pv_pindex,
2528                                   VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
2529                                   VM_ALLOC_INTERRUPT);
2530                 if (m)
2531                         break;
2532                 vm_wait(0);
2533         }
2534         vm_page_wire(m);        /* wire for mapping in parent */
2535         vm_page_unmanage(m);    /* m must be spinunlocked */
2536         pmap_zero_page(VM_PAGE_TO_PHYS(m));
2537         m->valid = VM_PAGE_BITS_ALL;
2538
2539         vm_page_spin_lock(m);
2540         pmap_page_stats_adding(m);
2541
2542         /*
2543          * PGTABLE pv's only exist in the context of the pmap RB tree
2544          * (pmap->pm_pvroot).
2545          */
2546 #if 0
2547         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2548 #endif
2549         pv->pv_flags |= PV_FLAG_PGTABLE;
2550         pv->pv_m = m;
2551         vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
2552         vm_page_spin_unlock(m);
2553
2554         /*
2555          * (isnew) is TRUE, pv is not terminal.
2556          *
2557          * Wire the page into pvp.  Bump the resident_count for the pmap.
2558          * There is no pvp for the top level, address the pm_pml4[] array
2559          * directly.
2560          *
2561          * If the caller wants the parent we return it, otherwise
2562          * we just put it away.
2563          *
2564          * No interlock is needed for pte 0 -> non-zero.
2565          *
2566          * In the situation where *ptep is valid we might have an unmanaged
2567          * page table page shared from another page table which we need to
2568          * unshare before installing our private page table page.
2569          */
2570         if (pvp) {
2571                 v = VM_PAGE_TO_PHYS(m) |
2572                     (pmap->pmap_bits[PG_RW_IDX] |
2573                      pmap->pmap_bits[PG_V_IDX] |
2574                      pmap->pmap_bits[PG_A_IDX]);
2575                 if (ptepindex < NUPTE_USER)
2576                         v |= pmap->pmap_bits[PG_U_IDX];
2577                 if (ptepindex < pmap_pt_pindex(0))
2578                         v |= pmap->pmap_bits[PG_M_IDX];
2579
2580                 ptep = pv_pte_lookup(pvp, ptepindex);
2581                 if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso)
2582                         ptep_iso = pv_pte_lookup(pmap->pm_pmlpv_iso, ptepindex);
2583                 else
2584                         ptep_iso  = NULL;
2585                 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
2586                         pt_entry_t pte;
2587
2588                         if (ispt == 0) {
2589                                 panic("pmap_allocpte: unexpected pte %p/%d",
2590                                       pvp, (int)ptepindex);
2591                         }
2592                         pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1,
2593                                              ptep, v);
2594                         if (ptep_iso) {
2595                                 pmap_inval_smp(pmap, (vm_offset_t)-1, 1,
2596                                                ptep_iso, v);
2597                         }
2598                         if (vm_page_unwire_quick(
2599                                         PHYS_TO_VM_PAGE(pte & PG_FRAME))) {
2600                                 panic("pmap_allocpte: shared pgtable "
2601                                       "pg bad wirecount");
2602                         }
2603                 } else {
2604                         pt_entry_t pte;
2605
2606                         pte = atomic_swap_long(ptep, v);
2607                         if (ptep_iso)
2608                                 atomic_swap_long(ptep_iso, v);
2609                         if (pte != 0) {
2610                                 kprintf("install pgtbl mixup 0x%016jx "
2611                                         "old/new 0x%016jx/0x%016jx\n",
2612                                         (intmax_t)ptepindex, pte, v);
2613                         }
2614                 }
2615         }
2616         vm_page_wakeup(m);
2617
2618         /*
2619          * (isnew) may be TRUE or FALSE, pv may or may not be terminal.
2620          */
2621 notnew:
2622         if (pvp) {
2623                 KKASSERT(pvp->pv_m != NULL);
2624                 ptep = pv_pte_lookup(pvp, ptepindex);
2625                 v = VM_PAGE_TO_PHYS(pv->pv_m) |
2626                     (pmap->pmap_bits[PG_RW_IDX] |
2627                      pmap->pmap_bits[PG_V_IDX] |
2628                      pmap->pmap_bits[PG_A_IDX]);
2629                 if (ptepindex < NUPTE_USER)
2630                         v |= pmap->pmap_bits[PG_U_IDX];
2631                 if (ptepindex < pmap_pt_pindex(0))
2632                         v |= pmap->pmap_bits[PG_M_IDX];
2633                 if (*ptep != v) {
2634                         kprintf("mismatched upper level pt %016jx/%016jx\n",
2635                                 *ptep, v);
2636                 }
2637         }
2638         if (pvpp)
2639                 *pvpp = pvp;
2640         else if (pvp)
2641                 pv_put(pvp);
2642         return (pv);
2643 }
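
/*
 * Usage sketch (hypothetical caller; compare pmap_init_iso_range() above):
 * both returned pv's come back locked and must be released with pv_put()
 * when the caller is done installing the pte.
 *
 *	pv = pmap_allocpte(pmap, pmap_pt_pindex(va), &pvp);
 *	ptep = pv_pte_lookup(pv, pmap_pte_index(va));
 *	*ptep = ...new pte contents...;
 *	pv_put(pv);
 *	pv_put(pvp);
 */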
2644
2645 /*
2646  * This version of pmap_allocpte() checks for possible segment optimizations
2647  * that would allow page-table sharing.  It can be called for terminal
2648  * page or page table page ptepindex's.
2649  *
2650  * The function is called with page table page ptepindex's for fictitious
2651  * and unmanaged terminal pages.  That is, we don't want to allocate a
2652  * terminal pv, we just want the pt_pv.  pvpp is usually passed as NULL
2653  * for this case.
2654  *
2655  * This function can return a pv and *pvpp associated with the passed-in pmap
2656  * OR a pv and *pvpp associated with the shared pmap.  In the latter case
2657  * an unmanaged page table page will be entered into the passed-in pmap.
2658  */
2659 static
2660 pv_entry_t
2661 pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp,
2662                   vm_map_entry_t entry, vm_offset_t va)
2663 {
2664         vm_object_t object;
2665         pmap_t obpmap;
2666         pmap_t *obpmapp;
2667         vm_pindex_t *pt_placemark;
2668         vm_offset_t b;
2669         pv_entry_t pte_pv;      /* in original or shared pmap */
2670         pv_entry_t pt_pv;       /* in original or shared pmap */
2671         pv_entry_t proc_pd_pv;  /* in original pmap */
2672         pv_entry_t proc_pt_pv;  /* in original pmap */
2673         pv_entry_t xpv;         /* PT in shared pmap */
2674         pd_entry_t *pt;         /* PT entry in PD of original pmap */
2675         pd_entry_t opte;        /* contents of *pt */
2676         pd_entry_t npte;        /* contents of *pt */
2677         vm_page_t m;
2678         int softhold;
2679
2680         /*
2681          * Basic tests, require a non-NULL vm_map_entry, require proper
2682          * alignment and type for the vm_map_entry, require that the
2683          * underlying object already be allocated.
2684          *
2685          * We allow almost any type of object to use this optimization.
2686          * The object itself does NOT have to be sized to a multiple of the
2687          * segment size, but the memory mapping does.
2688          *
2689          * XXX don't handle devices currently, because VM_PAGE_TO_PHYS()
2690          *     won't work as expected.
2691          */
2692         if (entry == NULL ||
2693             pmap_mmu_optimize == 0 ||                   /* not enabled */
2694             (pmap->pm_flags & PMAP_HVM) ||              /* special pmap */
2695             ptepindex >= pmap_pd_pindex(0) ||           /* not terminal or pt */
2696             entry->inheritance != VM_INHERIT_SHARE ||   /* not shared */
2697             entry->maptype != VM_MAPTYPE_NORMAL ||      /* weird map type */
2698             entry->object.vm_object == NULL ||          /* needs VM object */
2699             entry->object.vm_object->type == OBJT_DEVICE ||     /* ick */
2700             entry->object.vm_object->type == OBJT_MGTDEVICE ||  /* ick */
2701             (entry->offset & SEG_MASK) ||               /* must be aligned */
2702             (entry->start & SEG_MASK)) {
2703                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2704         }
2705
2706         /*
2707          * Make sure the full segment can be represented.
2708          */
2709         b = va & ~(vm_offset_t)SEG_MASK;
2710         if (b < entry->start || b + SEG_SIZE > entry->end)
2711                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2712
2713         /*
2714          * If the full segment can be represented dive the VM object's
2715          * shared pmap, allocating as required.
2716          */
2717         object = entry->object.vm_object;
2718
2719         if (entry->protection & VM_PROT_WRITE)
2720                 obpmapp = &object->md.pmap_rw;
2721         else
2722                 obpmapp = &object->md.pmap_ro;
2723
2724 #ifdef PMAP_DEBUG2
2725         if (pmap_enter_debug > 0) {
2726                 --pmap_enter_debug;
2727                 kprintf("pmap_allocpte_seg: va=%jx prot %08x o=%p "
2728                         "obpmapp %p %p\n",
2729                         va, entry->protection, object,
2730                         obpmapp, *obpmapp);
2731                 kprintf("pmap_allocpte_seg: entry %p %jx-%jx\n",
2732                         entry, entry->start, entry->end);
2733         }
2734 #endif
2735
2736         /*
2737          * We allocate what appears to be a normal pmap but because portions
2738          * of this pmap are shared with other unrelated pmaps we have to
2739          * set pm_active to point to all cpus.
2740          *
2741          * XXX Currently using pmap_spin to interlock the update, can't use
2742          *     vm_object_hold/drop because the token might already be held
2743          *     shared OR exclusive and we don't know.
2744          */
2745         while ((obpmap = *obpmapp) == NULL) {
2746                 obpmap = kmalloc(sizeof(*obpmap), M_OBJPMAP, M_WAITOK|M_ZERO);
2747                 pmap_pinit_simple(obpmap);
2748                 pmap_pinit2(obpmap);
2749                 spin_lock(&pmap_spin);
2750                 if (*obpmapp != NULL) {
2751                         /*
2752                          * Handle race
2753                          */
2754                         spin_unlock(&pmap_spin);
2755                         pmap_release(obpmap);
2756                         pmap_puninit(obpmap);
2757                         kfree(obpmap, M_OBJPMAP);
2758                         obpmap = *obpmapp; /* safety */
2759                 } else {
2760                         obpmap->pm_active = smp_active_mask;
2761                         obpmap->pm_flags |= PMAP_SEGSHARED;
2762                         *obpmapp = obpmap;
2763                         spin_unlock(&pmap_spin);
2764                 }
2765         }
2766
2767         /*
2768          * Layering is: PTE, PT, PD, PDP, PML4.  We have to return the
2769          * pte/pt using the shared pmap from the object but also adjust
2770          * the process pmap's page table page as a side effect.
2771          */
2772
2773         /*
2774          * Resolve the terminal PTE and PT in the shared pmap.  This is what
2775          * we will return.  This is true if ptepindex represents a terminal
2776          * page, otherwise pte_pv is actually the PT and pt_pv is actually
2777          * the PD.
2778          */
2779         pt_pv = NULL;
2780         pte_pv = pmap_allocpte(obpmap, ptepindex, &pt_pv);
2781         softhold = 0;
2782 retry:
2783         if (ptepindex >= pmap_pt_pindex(0))
2784                 xpv = pte_pv;
2785         else
2786                 xpv = pt_pv;
2787
2788         /*
2789          * Resolve the PD in the process pmap so we can properly share the
2790          * page table page.  Lock order is bottom-up (leaf first)!
2791          *
2792          * NOTE: proc_pt_pv can be NULL.
2793          */
2794         proc_pt_pv = pv_get(pmap, pmap_pt_pindex(b), &pt_placemark);
2795         proc_pd_pv = pmap_allocpte(pmap, pmap_pd_pindex(b), NULL);
2796 #ifdef PMAP_DEBUG2
2797         if (pmap_enter_debug > 0) {
2798                 --pmap_enter_debug;
2799                 kprintf("proc_pt_pv %p (wc %d) pd_pv %p va=%jx\n",
2800                         proc_pt_pv,
2801                         (proc_pt_pv ? proc_pt_pv->pv_m->wire_count : -1),
2802                         proc_pd_pv,
2803                         va);
2804         }
2805 #endif
2806
2807         /*
2808          * xpv is the page table page pv from the shared object
2809          * (for convenience), from above.
2810          *
2811          * Calculate the pte value for the PT to load into the process PD.
2812          * If we have to change it we must properly dispose of the previous
2813          * entry.
2814          */
2815         pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2816         npte = VM_PAGE_TO_PHYS(xpv->pv_m) |
2817                (pmap->pmap_bits[PG_U_IDX] |
2818                 pmap->pmap_bits[PG_RW_IDX] |
2819                 pmap->pmap_bits[PG_V_IDX] |
2820                 pmap->pmap_bits[PG_A_IDX] |
2821                 pmap->pmap_bits[PG_M_IDX]);
2822
2823         /*
2824          * Dispose of previous page table page if it was local to the
2825          * process pmap.  If the old pt is not empty we cannot dispose of it
2826          * until we clean it out.  This case should not arise very often so
2827          * it is not optimized.
2828          *
2829          * Leave pt_pv and pte_pv (in our object pmap) locked and intact
2830          * for the retry.
2831          */
2832         if (proc_pt_pv) {
2833                 pmap_inval_bulk_t bulk;
2834
2835                 if (proc_pt_pv->pv_m->wire_count != 1) {
2836                         /*
2837                          * The page table has a bunch of stuff in it
2838                          * which we have to scrap.
2839                          */
2840                         if (softhold == 0) {
2841                                 softhold = 1;
2842                                 pmap_softhold(pmap);
2843                         }
2844                         pv_put(proc_pd_pv);
2845                         pv_put(proc_pt_pv);
2846                         pmap_remove(pmap,
2847                                     va & ~(vm_offset_t)SEG_MASK,
2848                                     (va + SEG_SIZE) & ~(vm_offset_t)SEG_MASK);
2849                 } else {
2850                         /*
2851                          * The page table is empty and can be destroyed.
2852                          * However, doing so leaves the pt slot unlocked,
2853                          * so we have to loop back up to handle any races until
2854                          * we get a NULL proc_pt_pv and a proper pt_placemark.
2855                          */
2856                         pmap_inval_bulk_init(&bulk, proc_pt_pv->pv_pmap);
2857                         pmap_release_pv(proc_pt_pv, proc_pd_pv, &bulk);
2858                         pmap_inval_bulk_flush(&bulk);
2859                         pv_put(proc_pd_pv);
2860                 }
2861                 goto retry;
2862         }
2863
2864         /*
2865          * Handle remaining cases.  We are holding pt_placemark to lock
2866          * the page table page in the primary pmap while we manipulate
2867          * it.
2868          */
2869         if (*pt == 0) {
2870                 atomic_swap_long(pt, npte);
2871                 vm_page_wire_quick(xpv->pv_m);          /* shared pt -> proc */
2872                 vm_page_wire_quick(proc_pd_pv->pv_m);   /* proc pd for sh pt */
2873                 atomic_add_long(&pmap->pm_stats.resident_count, 1);
2874         } else if (*pt != npte) {
2875                 opte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, pt, npte);
2876
2877 #if 0
2878                 opte = pte_load_clear(pt);
2879                 KKASSERT(opte && opte != npte);
2880
2881                 *pt = npte;
2882 #endif
2883                 vm_page_wire_quick(xpv->pv_m);          /* shared pt -> proc */
2884
2885                 /*
2886                  * Clean up opte, bump the wire_count for the process
2887                  * PD page representing the new entry if it was
2888                  * previously empty.
2889                  *
2890                  * If the entry was not previously empty and we have
2891                  * a PT in the proc pmap then opte must match that
2892                  * pt.  The proc pt must be retired (this is done
2893                  * later on in this procedure).
2894                  *
2895                  * NOTE: replacing valid pte, wire_count on proc_pd_pv
2896                  * stays the same.
2897                  */
2898                 KKASSERT(opte & pmap->pmap_bits[PG_V_IDX]);
2899                 m = PHYS_TO_VM_PAGE(opte & PG_FRAME);
2900                 if (vm_page_unwire_quick(m)) {
2901                         panic("pmap_allocpte_seg: "
2902                               "bad wire count %p",
2903                               m);
2904                 }
2905         }
2906
2907         if (softhold)
2908                 pmap_softdone(pmap);
2909
2910         /*
2911          * Remove our earmark on the page table page.
2912          */
2913         pv_placemarker_wakeup(pmap, pt_placemark);
2914
2915         /*
2916          * The existing process page table was replaced and must be destroyed
2917          * here.
2918          */
2919         if (proc_pd_pv)
2920                 pv_put(proc_pd_pv);
2921         if (pvpp)
2922                 *pvpp = pt_pv;
2923         else
2924                 pv_put(pt_pv);
2925         return (pte_pv);
2926 }
2927
2928 /*
2929  * Release any resources held by the given physical map.
2930  *
2931  * Called when a pmap initialized by pmap_pinit is being released.  Should
2932  * only be called if the map contains no valid mappings.
2933  */
2934 struct pmap_release_info {
2935         pmap_t  pmap;           /* pmap being released */
2936         int     retry;          /* set when a race forces a rescan */
2937         pv_entry_t pvp;         /* cached parent pv */
2938 };
2939
2940 static int pmap_release_callback(pv_entry_t pv, void *data);
2941
2942 void
2943 pmap_release(struct pmap *pmap)
2944 {
2945         struct pmap_release_info info;
2946
2947         KASSERT(CPUMASK_TESTZERO(pmap->pm_active),
2948                 ("pmap still active! %016jx",
2949                 (uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
2950
2951         /*
2952          * There is no longer a pmap_list; if there were, we would remove the
2953          * pmap from it here.
2954          */
2955
2956         /*
2957          * Pull pv's off the RB tree in order from low to high and release
2958          * each page.
2959          */
2960         info.pmap = pmap;
2961         do {
2962                 info.retry = 0;
2963                 info.pvp = NULL;
2964
2965                 spin_lock(&pmap->pm_spin);
2966                 RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL,
2967                         pmap_release_callback, &info);
2968                 spin_unlock(&pmap->pm_spin);
2969
2970                 if (info.pvp)
2971                         pv_put(info.pvp);
2972         } while (info.retry);
2973
2974
2975         /*
2976          * One resident page (the pml4 page) should remain.  Two if
2977          * the pmap has implemented an isolated userland PML4E table.
2978          * No wired pages should remain.
2979          */
2980         int expected_res = 0;
2981
2982         if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0)
2983                 ++expected_res;
2984         if (pmap->pm_pmlpv_iso)
2985                 ++expected_res;
2986
2987 #if 1
2988         if (pmap->pm_stats.resident_count != expected_res ||
2989             pmap->pm_stats.wired_count != 0) {
2990                 kprintf("fatal pmap problem - pmap %p flags %08x "
2991                         "rescnt=%jd wirecnt=%jd\n",
2992                         pmap,
2993                         pmap->pm_flags,
2994                         pmap->pm_stats.resident_count,
2995                         pmap->pm_stats.wired_count);
2996                 tsleep(pmap, 0, "DEAD", 0);
2997         }
2998 #else
2999         KKASSERT(pmap->pm_stats.resident_count == expected_res);
3000         KKASSERT(pmap->pm_stats.wired_count == 0);
3001 #endif
3002 }
3003
3004 /*
3005  * Called from low to high.  We must cache the proper parent pv so we
3006  * can adjust its wired count.
3007  */
3008 static int
3009 pmap_release_callback(pv_entry_t pv, void *data)
3010 {
3011         struct pmap_release_info *info = data;
3012         pmap_t pmap = info->pmap;
3013         vm_pindex_t pindex;
3014         int r;
3015
3016         /*
3017          * Acquire a held and locked pv, check for release race
3018          */
3019         pindex = pv->pv_pindex;
3020         if (info->pvp == pv) {
3021                 spin_unlock(&pmap->pm_spin);
3022                 info->pvp = NULL;
3023         } else if (pv_hold_try(pv)) {
3024                 spin_unlock(&pmap->pm_spin);
3025         } else {
3026                 spin_unlock(&pmap->pm_spin);
3027                 pv_lock(pv);
3028                 pv_put(pv);
3029                 info->retry = 1;
3030                 spin_lock(&pmap->pm_spin);
3031
3032                 return -1;
3033         }
3034         KKASSERT(pv->pv_pmap == pmap && pindex == pv->pv_pindex);
3035
3036         if (pv->pv_pindex < pmap_pt_pindex(0)) {
3037                 /*
3038                  * I am PTE, parent is PT
3039                  */
3040                 pindex = pv->pv_pindex >> NPTEPGSHIFT;
3041                 pindex += NUPTE_TOTAL;
3042         } else if (pv->pv_pindex < pmap_pd_pindex(0)) {
3043                 /*
3044                  * I am PT, parent is PD
3045                  */
3046                 pindex = (pv->pv_pindex - NUPTE_TOTAL) >> NPDEPGSHIFT;
3047                 pindex += NUPTE_TOTAL + NUPT_TOTAL;
3048         } else if (pv->pv_pindex < pmap_pdp_pindex(0)) {
3049                 /*
3050                  * I am PD, parent is PDP
3051                  */
3052                 pindex = (pv->pv_pindex - NUPTE_TOTAL - NUPT_TOTAL) >>
3053                          NPDPEPGSHIFT;
3054                 pindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
3055         } else if (pv->pv_pindex < pmap_pml4_pindex()) {
3056                 /*
3057                  * I am PDP, parent is PML4.  We always calculate the
3058                  * normal PML4 here, not the isolated PML4.
3059                  */
3060                 pindex = pmap_pml4_pindex();
3061         } else {
3062                 /*
3063                  * parent is NULL
3064                  */
3065                 if (info->pvp) {
3066                         pv_put(info->pvp);
3067                         info->pvp = NULL;
3068                 }
3069                 pindex = 0;
3070         }
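             /*
              * (pindex) now identifies the parent page table page, or is 0
              * if there is no parent.  Cache the parent pv in info->pvp so
              * its wire count can be adjusted as (pv) is disposed of below.
              */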
3071         if (pindex) {
3072                 if (info->pvp && info->pvp->pv_pindex != pindex) {
3073                         pv_put(info->pvp);
3074                         info->pvp = NULL;
3075                 }
3076                 if (info->pvp == NULL)
3077                         info->pvp = pv_get(pmap, pindex, NULL);
3078         } else {
3079                 if (info->pvp) {
3080                         pv_put(info->pvp);
3081                         info->pvp = NULL;
3082                 }
3083         }
3084         r = pmap_release_pv(pv, info->pvp, NULL);
3085         spin_lock(&pmap->pm_spin);
3086
3087         return(r);
3088 }
3089
3090 /*
3091  * Called with held (i.e. also locked) pv.  This function will dispose of
3092  * the lock along with the pv.
3093  *
3094  * If the caller already holds the locked parent page table for pv it
3095  * must pass it as pvp, allowing us to avoid a deadlock, else it can
3096  * pass NULL for pvp.
3097  */
3098 static int
3099 pmap_release_pv(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk)
3100 {
3101         vm_page_t p;
3102
3103         /*
3104          * The pmap is currently not spinlocked, pv is held+locked.
3105          * Remove the pv's page from its parent's page table.  The
3106          * parent's page table page's wire_count will be decremented.
3107          *
3108          * This will clean out the pte at any level of the page table.
3109          * All cpus specified by (bulk) are properly invalidated.
3110          *
3111          * Do not tear down recursively; it's faster to just let the
3112          * release run its course.
3113          */
3114         pmap_remove_pv_pte(pv, pvp, bulk, 0);
3115
3116         /*
3117          * Terminal pvs are unhooked from their vm_pages.  Because
3118          * terminal pages aren't page table pages they aren't wired
3119          * by us, so we have to be sure not to unwire them either.
3120          */
3121         if (pv->pv_pindex < pmap_pt_pindex(0)) {
3122                 pmap_remove_pv_page(pv);
3123                 goto skip;
3124         }
3125
3126         /*
3127          * We leave the top-level page table page cached, wired, and
3128          * mapped in the pmap until the dtor function (pmap_puninit())
3129          * gets called.
3130          *
3131          * Since we are leaving the top-level pv intact we need
3132          * to break out of what would otherwise be an infinite loop.
3133          *
3134          * This covers both the normal and the isolated PML4 page.
3135          */
3136         if (pv->pv_pindex >= pmap_pml4_pindex()) {
3137                 pv_put(pv);
3138                 return(-1);
3139         }
3140
3141         /*
3142          * For page table pages (other than the top-level page),
3143          * remove and free the vm_page.  The representative mapping
3144          * removed above by pmap_remove_pv_pte() did not undo the
3145          * last wire_count so we have to do that as well.
3146          */
3147         p = pmap_remove_pv_page(pv);
3148         vm_page_busy_wait(p, FALSE, "pmaprl");
3149         if (p->wire_count != 1) {
3150                 kprintf("pmap_release_pv: pindex %016lx bad wire_count %d\n",
3151                         pv->pv_pindex, p->wire_count);
3152         }
3153         KKASSERT(p->wire_count == 1);
3154         KKASSERT(p->flags & PG_UNMANAGED);
3155
3156         vm_page_unwire(p, 0);
3157         KKASSERT(p->wire_count == 0);
3158
3159         vm_page_free(p);
3160 skip:
3161         pv_free(pv, pvp);
3162
3163         return 0;
3164 }
3165
3166 /*
3167  * This function will remove the pte associated with a pv from its parent.
3168  * Terminal pv's are supported.  All cpus specified by (bulk) are properly
3169  * invalidated.
3170  *
3171  * The wire count will be dropped on the parent page table.  The wire
3172  * count on the page being removed (pv->pv_m) from the parent page table
3173  * is NOT touched.  Note that terminal pages will not have any additional
3174  * wire counts while page table pages will have at least one representing
3175  * the mapping, plus others representing sub-mappings.
3176  *
3177  * NOTE: Cannot be called on kernel page table pages, only KVM terminal
3178  *       pages and user page table and terminal pages.
3179  *
3180  * NOTE: The pte being removed might be unmanaged, and the pv supplied might
3181  *       be freshly allocated and not imply that the pte is managed.  In this
3182  *       case pv->pv_m should be NULL.
3183  *
3184  * The pv must be locked.  The pvp, if supplied, must be locked.  All
3185  * supplied pv's will remain locked on return.
3186  *
3187  * XXX must lock parent pv's if they exist to remove pte XXX
3188  */
3189 static
3190 void
3191 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk,
3192                    int destroy)
3193 {
3194         vm_pindex_t ptepindex = pv->pv_pindex;
3195         pmap_t pmap = pv->pv_pmap;
3196         vm_page_t p;
3197         int gotpvp = 0;
3198
3199         KKASSERT(pmap);
3200
3201         if (ptepindex >= pmap_pml4_pindex()) {
3202                 /*
3203                  * We are the top level PML4E table, there is no parent.
3204                  *
3205                  * This is either the normal or isolated PML4E table.
3206                  * Only the normal is used in regular operation, the isolated
3207                  * is only passed in when breaking down the whole pmap.
3208                  */
3209                 p = pmap->pm_pmlpv->pv_m;
3210                 KKASSERT(pv->pv_m == p);        /* debugging */
3211         } else if (ptepindex >= pmap_pdp_pindex(0)) {
3212                 /*
3213                  * Remove a PDP page from the PML4E.  This can only occur
3214                  * with user page tables.  We do not have to lock the
3215                  * pml4 PV so just ignore pvp.
3216                  */
3217                 vm_pindex_t pml4_pindex;
3218                 vm_pindex_t pdp_index;
3219                 pml4_entry_t *pdp;
3220                 pml4_entry_t *pdp_iso;
3221
3222                 pdp_index = ptepindex - pmap_pdp_pindex(0);
3223                 if (pvp == NULL) {
3224                         pml4_pindex = pmap_pml4_pindex();
3225                         pvp = pv_get(pv->pv_pmap, pml4_pindex, NULL);
3226                         KKASSERT(pvp);
3227                         gotpvp = 1;
3228                 }
3229
3230                 pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
3231                 KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
3232                 p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
3233                 pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp, 0);
3234
3235                 /*
3236                  * Also remove the PDP from the isolated PML4E if the
3237                  * process uses one.
3238                  */
3239                 if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso) {
3240                         pdp_iso = &pmap->pm_pml4_iso[pdp_index &
3241                                                 ((1ul << NPML4EPGSHIFT) - 1)];
3242                         pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp_iso, 0);
3243                 }
3244                 KKASSERT(pv->pv_m == p);        /* debugging */
3245         } else if (ptepindex >= pmap_pd_pindex(0)) {
3246                 /*
3247                  * Remove a PD page from the PDP
3248                  *
3249                  * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
3250                  *                   of a simple pmap because it stops at
3251                  *                   the PD page.
3252                  */
3253                 vm_pindex_t pdp_pindex;
3254                 vm_pindex_t pd_index;
3255                 pdp_entry_t *pd;
3256
3257                 pd_index = ptepindex - pmap_pd_pindex(0);
3258
3259                 if (pvp == NULL) {
3260                         pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
3261                                      (pd_index >> NPML4EPGSHIFT);
3262                         pvp = pv_get(pv->pv_pmap, pdp_pindex, NULL);
3263                         gotpvp = 1;
3264                 }
3265
3266                 if (pvp) {
3267                         pd = pv_pte_lookup(pvp, pd_index &
3268                                                 ((1ul << NPDPEPGSHIFT) - 1));
3269                         KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
3270                         p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
3271                         pmap_inval_bulk(bulk, (vm_offset_t)-1, pd, 0);
3272                 } else {
3273                         KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
3274                         p = pv->pv_m;           /* degenerate test later */
3275                 }
3276                 KKASSERT(pv->pv_m == p);        /* debugging */
3277         } else if (ptepindex >= pmap_pt_pindex(0)) {
3278                 /*
3279                  *  Remove a PT page from the PD
3280                  */
3281                 vm_pindex_t pd_pindex;
3282                 vm_pindex_t pt_index;
3283                 pd_entry_t *pt;
3284
3285                 pt_index = ptepindex - pmap_pt_pindex(0);
3286
3287                 if (pvp == NULL) {
3288                         pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
3289                                     (pt_index >> NPDPEPGSHIFT);
3290                         pvp = pv_get(pv->pv_pmap, pd_pindex, NULL);
3291                         KKASSERT(pvp);
3292                         gotpvp = 1;
3293                 }
3294
3295                 pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
3296 #if 0
3297                 KASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0,
3298                         ("*pt unexpectedly invalid %016jx "
3299                          "gotpvp=%d ptepindex=%ld ptindex=%ld pv=%p pvp=%p",
3300                         *pt, gotpvp, ptepindex, pt_index, pv, pvp));
3301                 p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3302 #else
3303                 if ((*pt & pmap->pmap_bits[PG_V_IDX]) == 0) {
3304                         kprintf("*pt unexpectedly invalid %016jx "
3305                                 "gotpvp=%d ptepindex=%ld ptindex=%ld "
3306                                 "pv=%p pvp=%p\n",
3307                                 *pt, gotpvp, ptepindex, pt_index, pv, pvp);
3308                         tsleep(pt, 0, "DEAD", 0);
3309                         p = pv->pv_m;
3310                 } else {
3311                         p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3312                 }
3313 #endif
3314                 pmap_inval_bulk(bulk, (vm_offset_t)-1, pt, 0);
3315                 KKASSERT(pv->pv_m == p);        /* debugging */
3316         } else {
3317                 /*
3318                  * Remove a PTE from the PT page.  The PV might exist even if
3319                  * the PTE is not managed, in which case pv->pv_m should be
3320                  * NULL.
3321                  *
3322                  * NOTE: Userland pmaps manage the parent PT/PD/PDP page
3323                  *       table pages but the kernel_pmap does not.
3324                  *
3325                  * NOTE: pv's must be locked bottom-up to avoid deadlocking.
3326                  *       pv is a pte_pv so we can safely lock pt_pv.
3327                  *
3328                  * NOTE: FICTITIOUS pages may have multiple physical mappings
3329                  *       so PHYS_TO_VM_PAGE() will not necessarily work for
3330                  *       terminal ptes.
3331                  */
3332                 vm_pindex_t pt_pindex;
3333                 pt_entry_t *ptep;
3334                 pt_entry_t pte;
3335                 vm_offset_t va;
3336
3337                 pt_pindex = ptepindex >> NPTEPGSHIFT;
3338                 va = (vm_offset_t)ptepindex << PAGE_SHIFT;
3339
3340                 if (ptepindex >= NUPTE_USER) {
3341                         ptep = vtopte(ptepindex << PAGE_SHIFT);
3342                         KKASSERT(pvp == NULL);
3343                         /* pvp remains NULL */
3344                 } else {
3345                         if (pvp == NULL) {
3346                                 pt_pindex = NUPTE_TOTAL +
3347                                             (ptepindex >> NPDPEPGSHIFT);
3348                                 pvp = pv_get(pv->pv_pmap, pt_pindex, NULL);
3349                                 KKASSERT(pvp);
3350                                 gotpvp = 1;
3351                         }
3352                         ptep = pv_pte_lookup(pvp, ptepindex &
3353                                                   ((1ul << NPDPEPGSHIFT) - 1));
3354                 }
3355                 pte = pmap_inval_bulk(bulk, va, ptep, 0);
3356                 if (bulk == NULL)               /* XXX */
3357                         cpu_invlpg((void *)va); /* XXX */
3358
3359                 /*
3360                  * Now update the vm_page_t
3361                  */
3362                 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3363                     (pte & pmap->pmap_bits[PG_V_IDX])) {
3364                         /*
3365                          * Valid managed page, adjust (p).
3366                          */
3367                         if (pte & pmap->pmap_bits[PG_DEVICE_IDX]) {
3368                                 p = pv->pv_m;
3369                         } else {
3370                                 p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3371                                 KKASSERT(pv->pv_m == p);
3372                         }
3373                         if (pte & pmap->pmap_bits[PG_M_IDX]) {
3374                                 if (pmap_track_modified(ptepindex))
3375                                         vm_page_dirty(p);
3376                         }
3377                         if (pte & pmap->pmap_bits[PG_A_IDX]) {
3378                                 vm_page_flag_set(p, PG_REFERENCED);
3379                         }
3380                 } else {
3381                         /*
3382                          * Unmanaged page, do not try to adjust the vm_page_t.
3383                          * pv could be freshly allocated for a pmap_enter(),
3384                          * replacing an unmanaged page with a managed one.
3385                          *
3386                          * pv->pv_m might reflect the new page and not the
3387                          * existing page.
3388                          *
3389                          * We could extract p from the physical address and
3390                          * adjust it but we explicitly do not for unmanaged
3391                          * pages.
3392                          */
3393                         p = NULL;
3394                 }
3395                 if (pte & pmap->pmap_bits[PG_W_IDX])
3396                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
3397                 if (pte & pmap->pmap_bits[PG_G_IDX])
3398                         cpu_invlpg((void *)va);
3399         }
3400
3401         /*
3402          * If requested, scrap the underlying pv->pv_m and the underlying
3403          * pv.  If this is a page-table-page we must also free the page.
3404          *
3405          * pvp must be returned locked.
3406          */
3407         if (destroy == 1) {
3408                 /*
3409                  * page table page (PT, PD, PDP, PML4), caller was responsible
3410                  * for testing wired_count.
3411                  */
3412                 KKASSERT(pv->pv_m->wire_count == 1);
3413                 p = pmap_remove_pv_page(pv);
3414                 pv_free(pv, pvp);
3415                 pv = NULL;
3416
3417                 vm_page_busy_wait(p, FALSE, "pgpun");
3418                 vm_page_unwire(p, 0);
3419                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
3420                 vm_page_free(p);
3421         } else if (destroy == 2) {
3422                 /*
3423                  * Normal page, remove from pmap and leave the underlying
3424                  * page untouched.
3425                  */
3426                 pmap_remove_pv_page(pv);
3427                 pv_free(pv, pvp);
3428                 pv = NULL;              /* safety */
3429         }
3430
3431         /*
3432          * If we acquired pvp ourselves then we are responsible for
3433          * recursively deleting it.
3434          */
3435         if (pvp && gotpvp) {
3436                 /*
3437                  * Recursively destroy higher-level page tables.
3438                  *
3439                  * This is optional.  If we do not, they will still
3440                  * be destroyed when the process exits.
3441                  *
3442                  * NOTE: Do not destroy pv_entry's with extra hold refs,
3443                  *       a caller may have unlocked it and intends to
3444                  *       continue to use it.
3445                  */
3446                 if (pmap_dynamic_delete &&
3447                     pvp->pv_m &&
3448                     pvp->pv_m->wire_count == 1 &&
3449                     (pvp->pv_hold & PV_HOLD_MASK) == 2 &&
3450                     pvp->pv_pindex < pmap_pml4_pindex()) {
3451                         if (pmap_dynamic_delete == 2)
3452                                 kprintf("A %jd %08x\n", pvp->pv_pindex, pvp->pv_hold);
3453                         if (pmap != &kernel_pmap) {
3454                                 pmap_remove_pv_pte(pvp, NULL, bulk, 1);
3455                                 pvp = NULL;     /* safety */
3456                         } else {
3457                                 kprintf("Attempt to remove kernel_pmap pindex "
3458                                         "%jd\n", pvp->pv_pindex);
3459                                 pv_put(pvp);
3460                         }
3461                 } else {
3462                         pv_put(pvp);
3463                 }
3464         }
3465 }
3466
3467 /*
3468  * Remove the vm_page association to a pv.  The pv must be locked.
3469  */
3470 static
3471 vm_page_t
3472 pmap_remove_pv_page(pv_entry_t pv)
3473 {
3474         vm_page_t m;
3475
3476         m = pv->pv_m;
3477         vm_page_spin_lock(m);
3478         KKASSERT(m && m == pv->pv_m);
3479         pv->pv_m = NULL;
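             /*
              * Page table pages are never entered on the vm_page's pv_list,
              * so just clear the mapped/writeable flags.  Terminal pages
              * are on the list; remove the pv and clear the flags only
              * when the list becomes empty.
              */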
3480         if (pv->pv_flags & PV_FLAG_PGTABLE) {
3481                 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3482                 KKASSERT(TAILQ_EMPTY(&m->md.pv_list));
3483         } else {
3484                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3485                 if (TAILQ_EMPTY(&m->md.pv_list))
3486                         vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3487         }
3488         pmap_page_stats_deleting(m);
3489         vm_page_spin_unlock(m);
3490
3491         return(m);
3492 }
3493
3494 /*
3495  * Grow the number of kernel page table entries, if needed.
3496  *
3497  * This routine is always called to validate any address space
3498  * beyond KERNBASE (for kldloads).  kernel_vm_end only governs the address
3499  * space below KERNBASE.
3500  *
3501  * kernel_map must be locked exclusively by the caller.
3502  */
3503 void
3504 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
3505 {
3506         vm_paddr_t paddr;
3507         vm_offset_t ptppaddr;
3508         vm_page_t nkpg;
3509         pd_entry_t *pt, newpt;
3510         pdp_entry_t *pd, newpd;
3511         int update_kernel_vm_end;
3512
3513         /*
3514          * bootstrap kernel_vm_end on first real VM use
3515          */
3516         if (kernel_vm_end == 0) {
3517                 kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
3518
3519                 for (;;) {
3520                         pt = pmap_pt(&kernel_pmap, kernel_vm_end);
3521                         if (pt == NULL)
3522                                 break;
3523                         if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) == 0)
3524                                 break;
3525                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
3526                                         ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3527                         if (kernel_vm_end - 1 >= vm_map_max(&kernel_map)) {
3528                                 kernel_vm_end = vm_map_max(&kernel_map);
3529                                 break;                       
3530                         }
3531                 }
3532         }
3533
3534         /*
3535          * Fill in the gaps.  kernel_vm_end is only adjusted for ranges
3536          * below KERNBASE.  Ranges above KERNBASE are kldloaded and we
3537          * do not want to force-fill 128G worth of page tables.
3538          */
3539         if (kstart < KERNBASE) {
3540                 if (kstart > kernel_vm_end)
3541                         kstart = kernel_vm_end;
3542                 KKASSERT(kend <= KERNBASE);
3543                 update_kernel_vm_end = 1;
3544         } else {
3545                 update_kernel_vm_end = 0;
3546         }
3547
3548         kstart = rounddown2(kstart, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3549         kend = roundup2(kend, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3550
3551         if (kend - 1 >= vm_map_max(&kernel_map))
3552                 kend = vm_map_max(&kernel_map);
3553
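             /*
              * Walk the range in 2MB steps (one page table page worth of
              * KVA at a time), allocating any missing PD and PT pages as
              * we go.
              */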
3554         while (kstart < kend) {
3555                 pt = pmap_pt(&kernel_pmap, kstart);
3556                 if (pt == NULL) {
3557                         /*
3558                          * We need a new PD entry
3559                          */
3560                         nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3561                                              VM_ALLOC_NORMAL |
3562                                              VM_ALLOC_SYSTEM |
3563                                              VM_ALLOC_INTERRUPT);
3564                         if (nkpg == NULL) {
3565                                 panic("pmap_growkernel: no memory to grow "
3566                                       "kernel");
3567                         }
3568                         paddr = VM_PAGE_TO_PHYS(nkpg);
3569                         pmap_zero_page(paddr);
3570                         pd = pmap_pd(&kernel_pmap, kstart);
3571
3572                         newpd = (pdp_entry_t)
3573                             (paddr |
3574                             kernel_pmap.pmap_bits[PG_V_IDX] |
3575                             kernel_pmap.pmap_bits[PG_RW_IDX] |
3576                             kernel_pmap.pmap_bits[PG_A_IDX]);
3577                         atomic_swap_long(pd, newpd);
3578
3579 #if 0
3580                         kprintf("NEWPD pd=%p pde=%016jx phys=%016jx\n",
3581                                 pd, newpd, paddr);
3582 #endif
3583
3584                         continue; /* try again */
3585                 }
3586
3587                 if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
3588                         kstart = (kstart + PAGE_SIZE * NPTEPG) &
3589                                  ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3590                         if (kstart - 1 >= vm_map_max(&kernel_map)) {
3591                                 kstart = vm_map_max(&kernel_map);
3592                                 break;                       
3593                         }
3594                         continue;
3595                 }
3596
3597                 /*
3598                  * We need a new PT
3599                  *
3600                  * This index is bogus, but out of the way
3601                  */
3602                 nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3603                                      VM_ALLOC_NORMAL |
3604                                      VM_ALLOC_SYSTEM |
3605                                      VM_ALLOC_INTERRUPT);
3606                 if (nkpg == NULL)
3607                         panic("pmap_growkernel: no memory to grow kernel");
3608
3609                 vm_page_wire(nkpg);
3610                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
3611                 pmap_zero_page(ptppaddr);
3612                 newpt = (pd_entry_t)(ptppaddr |
3613                                      kernel_pmap.pmap_bits[PG_V_IDX] |
3614                                      kernel_pmap.pmap_bits[PG_RW_IDX] |
3615                                      kernel_pmap.pmap_bits[PG_A_IDX]);
3616                 atomic_swap_long(pt, newpt);
3617
3618                 kstart = (kstart + PAGE_SIZE * NPTEPG) &
3619                           ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3620
3621                 if (kstart - 1 >= vm_map_max(&kernel_map)) {
3622                         kstart = vm_map_max(&kernel_map);
3623                         break;                       
3624                 }
3625         }
3626
3627         /*
3628          * Only update kernel_vm_end for areas below KERNBASE.
3629          */
3630         if (update_kernel_vm_end && kernel_vm_end < kstart)
3631                 kernel_vm_end = kstart;
3632 }
3633
3634 /*
3635  *      Add a reference to the specified pmap.
3636  */
3637 void
3638 pmap_reference(pmap_t pmap)
3639 {
3640         if (pmap != NULL)
3641                 atomic_add_int(&pmap->pm_count, 1);
3642 }
3643
3644 /***************************************************
3645  * page management routines.
3646  ***************************************************/
3647
3648 /*
3649  * Hold a pv without locking it
3650  */
3651 static void
3652 pv_hold(pv_entry_t pv)
3653 {
3654         atomic_add_int(&pv->pv_hold, 1);
3655 }
3656
3657 /*
3658  * Hold a pv_entry, preventing its destruction.  TRUE is returned if the pv
3659  * was successfully locked, FALSE if it wasn't.  The caller must dispose of
3660  * the pv properly.
3661  *
3662  * Either the pmap->pm_spin or the related vm_page_spin (if traversing a
3663  * pv list via its page) must be held by the caller in order to stabilize
3664  * the pv.
3665  */
3666 static int
3667 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL)
3668 {
3669         u_int count;
3670
3671         /*
3672          * Critical path shortcut expects pv to already have one ref
3673          * (for the pv->pv_pmap).
3674          */
3675         count = pv->pv_hold;
3676         cpu_ccfence();
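             /*
              * If the pv is not currently locked we can usually acquire
              * the lock and add our hold in a single atomic op.  If it is
              * already locked we still add a hold but return FALSE so the
              * caller knows the lock attempt failed.
              */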
3677         for (;;) {
3678                 if ((count & PV_HOLD_LOCKED) == 0) {
3679                         if (atomic_fcmpset_int(&pv->pv_hold, &count,
3680                                               (count + 1) | PV_HOLD_LOCKED)) {
3681 #ifdef PMAP_DEBUG
3682                                 pv->pv_func = func;
3683                                 pv->pv_line = lineno;
3684 #endif
3685                                 return TRUE;
3686                         }
3687                 } else {
3688                         if (atomic_fcmpset_int(&pv->pv_hold, &count, count + 1))
3689                                 return FALSE;
3690                 }
3691                 /* retry */
3692         }
3693 }
3694
3695 /*
3696  * Drop a previously held pv_entry which could not be locked, allowing its
3697  * destruction.
3698  *
3699  * Must not be called with a spinlock held as we might zfree() the pv if it
3700  * is no longer associated with a pmap and this was the last hold count.
3701  */
3702 static void
3703 pv_drop(pv_entry_t pv)
3704 {
3705         u_int count;
3706
3707         for (;;) {
3708                 count = pv->pv_hold;
3709                 cpu_ccfence();
3710                 KKASSERT((count & PV_HOLD_MASK) > 0);
3711                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) !=
3712                          (PV_HOLD_LOCKED | 1));
3713                 if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) {
3714                         if ((count & PV_HOLD_MASK) == 1) {
3715 #ifdef PMAP_DEBUG2
3716                                 if (pmap_enter_debug > 0) {
3717                                         --pmap_enter_debug;
3718                                         kprintf("pv_drop: free pv %p\n", pv);
3719                                 }
3720 #endif
3721                                 KKASSERT(count == 1);
3722                                 KKASSERT(pv->pv_pmap == NULL);
3723                                 zfree(pvzone, pv);
3724                         }
3725                         return;
3726                 }
3727                 /* retry */
3728         }
3729 }
3730
3731 /*
3732  * Find or allocate the requested PV entry, returning a locked, held pv.
3733  *
3734  * If (*isnew) is non-zero, the returned pv will have two hold counts, one
3735  * for the caller and one representing the pmap and vm_page association.
3736  *
3737  * If (*isnew) is zero, the returned pv will have only one hold count.
3738  *
3739  * Since both associations can only be adjusted while the pv is locked,
3740  * together they represent just one additional hold.
3741  */
3742 static
3743 pv_entry_t
3744 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
3745 {
3746         struct mdglobaldata *md = mdcpu;
3747         pv_entry_t pv;
3748         pv_entry_t pnew;
3749         int pmap_excl = 0;
3750
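             /*
              * Pre-allocate a pv from the per-cpu cache or the zone before
              * acquiring the pmap spin lock (zalloc(), like zfree() in
              * pv_drop(), is not meant to be called with a spin lock held).
              */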
3751         pnew = NULL;
3752         if (md->gd_newpv) {
3753 #if 1
3754                 pnew = atomic_swap_ptr((void *)&md->gd_newpv, NULL);
3755 #else
3756                 crit_enter();
3757                 pnew = md->gd_newpv;    /* might race NULL */
3758                 md->gd_newpv = NULL;
3759                 crit_exit();
3760 #endif
3761         }
3762         if (pnew == NULL)
3763                 pnew = zalloc(pvzone);
3764
3765         spin_lock_shared(&pmap->pm_spin);
3766         for (;;) {
3767                 /*
3768                  * Shortcut cache
3769                  */
3770                 pv = pv_entry_lookup(pmap, pindex);
3771                 if (pv == NULL) {
3772                         vm_pindex_t *pmark;
3773
3774                         /*
3775                          * Requires exclusive pmap spinlock
3776                          */
3777                         if (pmap_excl == 0) {
3778                                 pmap_excl = 1;
3779                                 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3780                                         spin_unlock_shared(&pmap->pm_spin);
3781                                         spin_lock(&pmap->pm_spin);
3782                                         continue;
3783                                 }
3784                         }
3785
3786                         /*
3787                          * We need to block if someone is holding our
3788                          * placemarker.  As long as we determine the
3789                          * placemarker has not been acquired we do not
3790                          * need to get it, as acquisition also requires
3791                          * the pmap spin lock.
3792                          *
3793                          * However, we can race the wakeup.
3794                          */
3795                         pmark = pmap_placemarker_hash(pmap, pindex);
3796
3797                         if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3798                                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3799                                 tsleep_interlock(pmark, 0);
3800                                 if (((*pmark ^ pindex) &
3801                                      ~PM_PLACEMARK_WAKEUP) == 0) {
3802                                         spin_unlock(&pmap->pm_spin);
3803                                         tsleep(pmark, PINTERLOCKED, "pvplc", 0);
3804                                         spin_lock(&pmap->pm_spin);
3805                                 }
3806                                 continue;
3807                         }
3808
3809                         /*
3810                          * Setup the new entry
3811                          */
3812                         pnew->pv_pmap = pmap;
3813                         pnew->pv_pindex = pindex;
3814                         pnew->pv_hold = PV_HOLD_LOCKED | 2;
3815                         pnew->pv_flags = 0;
3816 #ifdef PMAP_DEBUG
3817                         pnew->pv_func = func;
3818                         pnew->pv_line = lineno;
3819                         if (pnew->pv_line_lastfree > 0) {
3820                                 pnew->pv_line_lastfree =
3821                                                 -pnew->pv_line_lastfree;
3822                         }
3823 #endif
3824                         pv = pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
3825                         atomic_add_long(&pmap->pm_stats.resident_count, 1);
3826                         spin_unlock(&pmap->pm_spin);
3827                         *isnew = 1;
3828
3829                         KASSERT(pv == NULL, ("pv insert failed %p->%p", pnew, pv));
3830                         return(pnew);
3831                 }
3832
3833                 /*
3834                  * We already have an entry, cleanup the staged pnew if
3835                  * we can get the lock, otherwise block and retry.
3836                  */
3837                 if (__predict_true(_pv_hold_try(pv PMAP_DEBUG_COPY))) {
3838                         if (pmap_excl)
3839                                 spin_unlock(&pmap->pm_spin);
3840                         else
3841                                 spin_unlock_shared(&pmap->pm_spin);
3842 #if 1
3843                         pnew = atomic_swap_ptr((void *)&md->gd_newpv, pnew);
3844                         if (pnew)
3845                                 zfree(pvzone, pnew);
3846 #else
3847                         crit_enter();
3848                         if (md->gd_newpv == NULL)
3849                                 md->gd_newpv = pnew;
3850                         else
3851                                 zfree(pvzone, pnew);
3852                         crit_exit();
3853 #endif
3854                         KKASSERT(pv->pv_pmap == pmap &&
3855                                  pv->pv_pindex == pindex);
3856                         *isnew = 0;
3857                         return(pv);
3858                 }
3859                 if (pmap_excl) {
3860                         spin_unlock(&pmap->pm_spin);
3861                         _pv_lock(pv PMAP_DEBUG_COPY);
3862                         pv_put(pv);
3863                         spin_lock(&pmap->pm_spin);
3864                 } else {
3865                         spin_unlock_shared(&pmap->pm_spin);
3866                         _pv_lock(pv PMAP_DEBUG_COPY);
3867                         pv_put(pv);
3868                         spin_lock_shared(&pmap->pm_spin);
3869                 }
3870         }
3871         /* NOT REACHED */
3872 }
3873
3874 /*
3875  * Find the requested PV entry, returning a locked+held pv or NULL
3876  */
3877 static
3878 pv_entry_t
3879 _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp PMAP_DEBUG_DECL)
3880 {
3881         pv_entry_t pv;
3882         int pmap_excl = 0;
3883
3884         spin_lock_shared(&pmap->pm_spin);
3885         for (;;) {
3886                 /*
3887                  * Shortcut cache
3888                  */
3889                 pv = pv_entry_lookup(pmap, pindex);
3890                 if (pv == NULL) {
3891                         /*
3892                          * Block if there is ANY placemarker.  If we are to
3893                          * return it, we must also acquire the spot, so we
3894                          * have to block even if the placemarker is held on
3895                          * a different address.
3896                          *
3897                          * OPTIMIZATION: If pmarkp is passed as NULL the
3898                          * caller is just probing (or looking for a real
3899                          * pv_entry), and in this case we only need to check
3900                          * to see if the placemarker matches pindex.
3901                          */
3902                         vm_pindex_t *pmark;
3903
3904                         /*
3905                          * Requires exclusive pmap spinlock
3906                          */
3907                         if (pmap_excl == 0) {
3908                                 pmap_excl = 1;
3909                                 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3910                                         spin_unlock_shared(&pmap->pm_spin);
3911                                         spin_lock(&pmap->pm_spin);
3912                                         continue;
3913                                 }
3914                         }
3915
3916                         pmark = pmap_placemarker_hash(pmap, pindex);
3917
3918                         if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3919                             ((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3920                                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3921                                 tsleep_interlock(pmark, 0);
3922                                 if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3923                                     ((*pmark ^ pindex) &
3924                                      ~PM_PLACEMARK_WAKEUP) == 0) {
3925                                         spin_unlock(&pmap->pm_spin);
3926                                         tsleep(pmark, PINTERLOCKED, "pvpld", 0);
3927                                         spin_lock(&pmap->pm_spin);
3928                                 }
3929                                 continue;
3930                         }
3931                         if (pmarkp) {
3932                                 if (atomic_swap_long(pmark, pindex) !=
3933                                     PM_NOPLACEMARK) {
3934                                         panic("_pv_get: pmark race");
3935                                 }
3936                                 *pmarkp = pmark;
3937                         }
3938                         spin_unlock(&pmap->pm_spin);
3939                         return NULL;
3940                 }
3941                 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
3942                         if (pmap_excl)
3943                                 spin_unlock(&pmap->pm_spin);
3944                         else
3945                                 spin_unlock_shared(&pmap->pm_spin);
3946                         KKASSERT(pv->pv_pmap == pmap &&
3947                                  pv->pv_pindex == pindex);
3948                         return(pv);
3949                 }
3950                 if (pmap_excl) {
3951                         spin_unlock(&pmap->pm_spin);
3952                         _pv_lock(pv PMAP_DEBUG_COPY);
3953                         pv_put(pv);
3954                         spin_lock(&pmap->pm_spin);
3955                 } else {
3956                         spin_unlock_shared(&pmap->pm_spin);
3957                         _pv_lock(pv PMAP_DEBUG_COPY);
3958                         pv_put(pv);
3959                         spin_lock_shared(&pmap->pm_spin);
3960                 }
3961         }
3962 }
3963
3964 /*
3965  * Lookup, hold, and attempt to lock (pmap,pindex).
3966  *
3967  * If the entry does not exist NULL is returned and *errorp is set to 0
3968  *
3969  * If the entry exists and could be successfully locked it is returned and
3970  * errorp is set to 0.
3971  *
3972  * If the entry exists but could NOT be successfully locked it is returned
3973  * held and *errorp is set to 1.
3974  *
3975  * If the entry is placemarked by someone else NULL is returned and *errorp
3976  * is set to 1.
3977  */
3978 static
3979 pv_entry_t
3980 pv_get_try(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp, int *errorp)
3981 {
3982         pv_entry_t pv;
3983
3984         spin_lock_shared(&pmap->pm_spin);
3985
3986         pv = pv_entry_lookup(pmap, pindex);
3987         if (pv == NULL) {
3988                 vm_pindex_t *pmark;
3989
3990                 pmark = pmap_placemarker_hash(pmap, pindex);
3991
3992                 if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3993                         *errorp = 1;
3994                 } else if (pmarkp &&
3995                            atomic_cmpset_long(pmark, PM_NOPLACEMARK, pindex)) {
3996                         *errorp = 0;
3997                 } else {
3998                         /*
3999                          * Can't set a placemark with a NULL pmarkp, or if
4000                          * pmarkp is non-NULL but we failed to set our
4001                          * placemark.
4002                          */
4003                         *errorp = 1;
4004                 }
4005                 if (pmarkp)
4006                         *pmarkp = pmark;
4007                 spin_unlock_shared(&pmap->pm_spin);
4008
4009                 return NULL;
4010         }
4011
4012         /*
4013          * XXX This has problems if the lock is shared, why?
4014          */
4015         if (pv_hold_try(pv)) {
4016                 spin_unlock_shared(&pmap->pm_spin);
4017                 *errorp = 0;
4018                 KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
4019                 return(pv);     /* lock succeeded */
4020         }
4021         spin_unlock_shared(&pmap->pm_spin);
4022         *errorp = 1;
4023
4024         return (pv);            /* lock failed */
4025 }
4026
4027 /*
4028  * Lock a held pv, keeping the hold count
4029  */
4030 static
4031 void
4032 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
4033 {
4034         u_int count;
4035
4036         for (;;) {
4037                 count = pv->pv_hold;
4038                 cpu_ccfence();
4039                 if ((count & PV_HOLD_LOCKED) == 0) {
4040                         if (atomic_cmpset_int(&pv->pv_hold, count,
4041                                               count | PV_HOLD_LOCKED)) {
4042 #ifdef PMAP_DEBUG
4043                                 pv->pv_func = func;
4044                                 pv->pv_line = lineno;
4045 #endif
4046                                 return;
4047                         }
4048                         continue;
4049                 }
4050                 tsleep_interlock(pv, 0);
4051                 if (atomic_cmpset_int(&pv->pv_hold, count,
4052                                       count | PV_HOLD_WAITING)) {
4053 #ifdef PMAP_DEBUG2
4054                         if (pmap_enter_debug > 0) {
4055                                 --pmap_enter_debug;
4056                                 kprintf("pv waiting on %s:%d\n",
4057                                         pv->pv_func, pv->pv_line);
4058                         }
4059 #endif
4060                         tsleep(pv, PINTERLOCKED, "pvwait", hz);
4061                 }
4062                 /* retry */
4063         }
4064 }
4065
4066 /*
4067  * Unlock a held and locked pv, keeping the hold count.
4068  */
4069 static
4070 void
4071 pv_unlock(pv_entry_t pv)
4072 {
4073         u_int count;
4074
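             /*
              * Atomically clear the LOCKED (and WAITING) bits while
              * retaining the hold count.  If another thread was blocked
              * in _pv_lock() waiting for the lock, wake it up.
              */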
4075         for (;;) {
4076                 count = pv->pv_hold;
4077                 cpu_ccfence();
4078                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
4079                          (PV_HOLD_LOCKED | 1));
4080                 if (atomic_cmpset_int(&pv->pv_hold, count,
4081                                       count &
4082                                       ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
4083                         if (count & PV_HOLD_WAITING)
4084                                 wakeup(pv);
4085                         break;
4086                 }
4087         }
4088 }
4089
4090 /*
4091  * Unlock and drop a pv.  If the pv is no longer associated with a pmap
4092  * and the hold count drops to zero we will free it.
4093  *
4094  * Caller should not hold any spin locks.  We are protected from hold races
4095  * by virtue of holds occurring only with a pmap_spin or vm_page_spin
4096  * lock held.  A pv cannot be located otherwise.
4097  */
4098 static
4099 void
4100 pv_put(pv_entry_t pv)
4101 {
4102 #ifdef PMAP_DEBUG2
4103         if (pmap_enter_debug > 0) {
4104                 --pmap_enter_debug;
4105                 kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold);
4106         }
4107 #endif
4108
4109         /*
4110          * Normal put-aways must have a pv_m associated with the pv,
4111          * but allow the case where the pv has been destructed due
4112          * to pmap_dynamic_delete.
4113          */
4114         KKASSERT(pv->pv_pmap == NULL || pv->pv_m != NULL);
4115
4116         /*
4117          * Fast - shortcut most common condition
4118          */
4119         if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
4120                 return;
4121
4122         /*
4123          * Slow
4124          */
4125         pv_unlock(pv);
4126         pv_drop(pv);
4127 }
4128
4129 /*
4130  * Remove the pmap association from a pv, require that pv_m already be removed,
4131  * then unlock and drop the pv.  Any pte operations must have already been
4132  * completed.  This call may result in a last-drop which will physically free
4133  * the pv.
4134  *
4135  * Removing the pmap association entails an additional drop.
4136  *
4137  * pv must be exclusively locked on call and will be disposed of on return.
4138  */
4139 static
4140 void
4141 _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL)
4142 {
4143         pmap_t pmap;
4144
4145 #ifdef PMAP_DEBUG
4146         pv->pv_func_lastfree = func;
4147         pv->pv_line_lastfree = lineno;
4148 #endif
4149         KKASSERT(pv->pv_m == NULL);
4150         KKASSERT((pv->pv_hold & (PV_HOLD_LOCKED|PV_HOLD_MASK)) >=
4151                   (PV_HOLD_LOCKED|1));
4152         if ((pmap = pv->pv_pmap) != NULL) {
4153                 spin_lock(&pmap->pm_spin);
4154                 KKASSERT(pv->pv_pmap == pmap);
4155                 if (pmap->pm_pvhint_pt == pv)
4156                         pmap->pm_pvhint_pt = NULL;
4157                 if (pmap->pm_pvhint_pte == pv)
4158                         pmap->pm_pvhint_pte = NULL;
4159                 pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
4160                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4161                 pv->pv_pmap = NULL;
4162                 pv->pv_pindex = 0;
4163                 spin_unlock(&pmap->pm_spin);
4164
4165                 /*
4166                  * Try to shortcut three atomic ops, otherwise fall through
4167                  * and do it normally.  Drop two refs and the lock all in
4168                  * one go.
4169                  */
4170                 if (pvp)
4171                         vm_page_unwire_quick(pvp->pv_m);
4172                 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
4173 #ifdef PMAP_DEBUG2
4174                         if (pmap_enter_debug > 0) {
4175                                 --pmap_enter_debug;
4176                                 kprintf("pv_free: free pv %p\n", pv);
4177                         }
4178 #endif
4179                         zfree(pvzone, pv);
4180                         return;
4181                 }
4182                 pv_drop(pv);    /* ref for pv_pmap */
4183         }
4184         pv_unlock(pv);
4185         pv_drop(pv);
4186 }
4187
4188 /*
4189  * This routine is very drastic, but can save the system
4190  * in a pinch.
4191  */
4192 void
4193 pmap_collect(void)
4194 {
4195         int i;
4196         vm_page_t m;
4197         static int warningdone=0;
4198
4199         if (pmap_pagedaemon_waken == 0)
4200                 return;
4201         pmap_pagedaemon_waken = 0;
4202         if (warningdone < 5) {
4203                 kprintf("pmap_collect: collecting pv entries -- "
4204                         "suggest increasing PMAP_SHPGPERPROC\n");
4205                 warningdone++;
4206         }
4207
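             /*
              * Scan all physical pages.  Wired or held pages are in
              * active use and are skipped; otherwise busy the page,
              * re-check, and remove all of its mappings to recover
              * pv entries.
              */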
4208         for (i = 0; i < vm_page_array_size; i++) {
4209                 m = &vm_page_array[i];
4210                 if (m->wire_count || m->hold_count)
4211                         continue;
4212                 if (vm_page_busy_try(m, TRUE) == 0) {
4213                         if (m->wire_count == 0 && m->hold_count == 0) {
4214                                 pmap_remove_all(m);
4215                         }
4216                         vm_page_wakeup(m);
4217                 }
4218         }
4219 }
4220
4221 /*
4222  * Scan the pmap for active page table entries and issue a callback.
4223  * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
4224  * its parent page table.
4225  *
4226  * pte_pv will be NULL if the page or page table is unmanaged.
4227  * pt_pv will point to the page table page containing the pte for the page.
4228  *
4229  * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
4230  *       we pass a NULL pte_pv and we pass a pt_pv pointing to the passed
4231  *       process pmap's PD and page to the callback function.  This can be
4232  *       confusing because the pt_pv is really a pd_pv, and the target page
4233  *       table page is simply aliased by the pmap and not owned by it.
4234  *
4235  * It is assumed that the start and end are properly rounded to the page size.
4236  *
4237  * It is assumed that PD pages and above are managed and thus in the RB tree,
4238  * allowing us to use RB_SCAN from the PD pages down for ranged scans.
4239  */
4240 struct pmap_scan_info {
4241         struct pmap *pmap;
4242         vm_offset_t sva;
4243         vm_offset_t eva;
4244         vm_pindex_t sva_pd_pindex;
4245         vm_pindex_t eva_pd_pindex;
4246         void (*func)(pmap_t, struct pmap_scan_info *,
4247                      pv_entry_t, vm_pindex_t *, pv_entry_t,
4248                      int, vm_offset_t,
4249                      pt_entry_t *, void *);
4250         void *arg;
4251         pmap_inval_bulk_t bulk_core;
4252         pmap_inval_bulk_t *bulk;
4253         int count;
4254         int stop;
4255 };
4256
4257 static int pmap_scan_cmp(pv_entry_t pv, void *data);
4258 static int pmap_scan_callback(pv_entry_t pv, void *data);
4259
4260 static void
4261 pmap_scan(struct pmap_scan_info *info, int smp_inval)
4262 {
4263         struct pmap *pmap = info->pmap;
4264         pv_entry_t pd_pv;       /* A page directory PV */
4265         pv_entry_t pt_pv;       /* A page table PV */
4266         pv_entry_t pte_pv;      /* A page table entry PV */
4267         vm_pindex_t *pte_placemark;
4268         vm_pindex_t *pt_placemark;
4269         pt_entry_t *ptep;
4270         pt_entry_t oldpte;
4271         struct pv_entry dummy_pv;
4272
4273         info->stop = 0;
4274         if (pmap == NULL)
4275                 return;
4276         if (info->sva == info->eva)
4277                 return;
4278         if (smp_inval) {
4279                 info->bulk = &info->bulk_core;
4280                 pmap_inval_bulk_init(&info->bulk_core, pmap);
4281         } else {
4282                 info->bulk = NULL;
4283         }
4284
4285         /*
4286          * Hold the token for stability; if the pmap is empty we have nothing
4287          * to do.
4288          */
4289 #if 0
4290         if (pmap->pm_stats.resident_count == 0) {
4291                 return;
4292         }
4293 #endif
4294
4295         info->count = 0;
4296
4297         /*
4298          * Special handling for scanning one page, which is a very common
4299          * operation (it is?).
4300          *
4301          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
4302          */
4303         if (info->sva + PAGE_SIZE == info->eva) {
4304                 if (info->sva >= VM_MAX_USER_ADDRESS) {
4305                         /*
4306                          * Kernel mappings do not track wire counts on
4307                          * page table pages and only maintain pd_pv and
4308                          * pte_pv levels so pmap_scan() works.
4309                          */
4310                         pt_pv = NULL;
4311                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4312                                         &pte_placemark);
4313                         ptep = vtopte(info->sva);
4314                 } else {
4315                         /*
4316                          * User pages which are unmanaged will not have a
4317                          * pte_pv.  User page table pages which are unmanaged
4318                          * (shared from elsewhere) will also not have a pt_pv.
4319                          * The func() callback will pass both pte_pv and pt_pv
4320                          * as NULL in that case.
4321                          *
4322                          * We hold pte_placemark across the operation for
4323                          * unmanaged pages.
4324                          *
4325                          * WARNING!  We must hold pt_placemark across the
4326                          *           *ptep test to prevent misinterpreting
4327                          *           a non-zero *ptep as a shared page
4328                          *           table page.  Hold it across the function
4329                          *           callback as well for SMP safety.
4330                          */
4331                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4332                                         &pte_placemark);
4333                         pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva),
4334                                         &pt_placemark);
4335                         if (pt_pv == NULL) {
4336                                 KKASSERT(pte_pv == NULL);
4337                                 pd_pv = pv_get(pmap,
4338                                                pmap_pd_pindex(info->sva),
4339                                                NULL);
4340                                 if (pd_pv) {
4341                                         ptep = pv_pte_lookup(pd_pv,
4342                                                     pmap_pt_index(info->sva));
4343                                         if (*ptep) {
4344                                                 info->func(pmap, info,
4345                                                      NULL, pt_placemark,
4346                                                      pd_pv, 1,
4347                                                      info->sva, ptep,
4348                                                      info->arg);
4349                                         } else {
4350                                                 pv_placemarker_wakeup(pmap,
4351                                                                   pt_placemark);
4352                                         }
4353                                         pv_put(pd_pv);
4354                                 } else {
4355                                         pv_placemarker_wakeup(pmap,
4356                                                               pt_placemark);
4357                                 }
4358                                 pv_placemarker_wakeup(pmap, pte_placemark);
4359                                 goto fast_skip;
4360                         }
4361                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva));
4362                 }
4363
4364                 /*
4365                  * NOTE: *ptep can't be ripped out from under us if we hold
4366                  *       pte_pv (or pte_placemark) locked, but bits can
4367                  *       change.
4368                  */
4369                 oldpte = *ptep;
4370                 cpu_ccfence();
4371                 if (oldpte == 0) {
4372                         KKASSERT(pte_pv == NULL);
4373                         pv_placemarker_wakeup(pmap, pte_placemark);
4374                 } else if (pte_pv) {
4375                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
4376                                            pmap->pmap_bits[PG_V_IDX])) ==
4377                                 (pmap->pmap_bits[PG_MANAGED_IDX] |
4378                                  pmap->pmap_bits[PG_V_IDX]),
4379                             ("badA *ptep %016lx/%016lx sva %016lx pte_pv %p",
4380                             *ptep, oldpte, info->sva, pte_pv));
4381                         info->func(pmap, info, pte_pv, NULL, pt_pv, 0,
4382                                    info->sva, ptep, info->arg);
4383                 } else {
4384                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
4385                                            pmap->pmap_bits[PG_V_IDX])) ==
4386                             pmap->pmap_bits[PG_V_IDX],
4387                             ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL",
4388                             *ptep, oldpte, info->sva));
4389                         info->func(pmap, info, NULL, pte_placemark, pt_pv, 0,
4390                                    info->sva, ptep, info->arg);
4391                 }
4392                 if (pt_pv)
4393                         pv_put(pt_pv);
4394 fast_skip:
4395                 pmap_inval_bulk_flush(info->bulk);
4396                 return;
4397         }
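
        /*
         * NOTE: In both the single-page path above and the ranged scan
         *       below, a successfully locked pte_pv or acquired
         *       pte_placemark is always either consumed by the callback or
         *       explicitly released via pv_placemarker_wakeup() before the
         *       scan advances.
         */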
4398
4399         /*
4400          * Nominal scan case, RB_SCAN() for PD pages and iterate from
4401          * there.
4402          *
4403          * WARNING! eva can overflow our standard ((N + mask) >> bits)
4404          *          bounds, resulting in a pd_pindex of 0.  To solve the
4405          *          problem we use an inclusive range.
4406          */
4407         info->sva_pd_pindex = pmap_pd_pindex(info->sva);
4408         info->eva_pd_pindex = pmap_pd_pindex(info->eva - PAGE_SIZE);
4409
4410         if (info->sva >= VM_MAX_USER_ADDRESS) {
4411                 /*
4412                  * The kernel does not currently maintain any pv_entry's for
4413                  * higher-level page tables.
4414                  */
4415                 bzero(&dummy_pv, sizeof(dummy_pv));
4416                 dummy_pv.pv_pindex = info->sva_pd_pindex;
4417                 spin_lock(&pmap->pm_spin);
4418                 while (dummy_pv.pv_pindex <= info->eva_pd_pindex) {
4419                         pmap_scan_callback(&dummy_pv, info);
4420                         ++dummy_pv.pv_pindex;
4421                         if (dummy_pv.pv_pindex < info->sva_pd_pindex) /*wrap*/
4422                                 break;
4423                 }
4424                 spin_unlock(&pmap->pm_spin);
4425         } else {
4426                 /*
4427                  * User page tables maintain local PML4, PDP, and PD
4428                  * pv_entry's at the very least.  PT pv's might be
4429                  * unmanaged and thus not exist.  PTE pv's might be
4430                  * unmanaged and thus not exist.
4431                  */
4432                 spin_lock(&pmap->pm_spin);
4433                 pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot, pmap_scan_cmp,
4434                                          pmap_scan_callback, info);
4435                 spin_unlock(&pmap->pm_spin);
4436         }
4437         pmap_inval_bulk_flush(info->bulk);
4438 }
4439
4440 /*
4441  * WARNING! pmap->pm_spin held
4442  *
4443  * WARNING! eva can overflow our standard ((N + mask) >> bits)
4444  *          bounds, resulting in a pd_pindex of 0.  To solve the
4445  *          problem we use an inclusive range.
4446  */
4447 static int
4448 pmap_scan_cmp(pv_entry_t pv, void *data)
4449 {
4450         struct pmap_scan_info *info = data;
4451         if (pv->pv_pindex < info->sva_pd_pindex)
4452                 return(-1);
4453         if (pv->pv_pindex > info->eva_pd_pindex)
4454                 return(1);
4455         return(0);
4456 }
4457
4458 /*
4459  * pmap_scan() by PDs
4460  *
4461  * WARNING! pmap->pm_spin held
4462  */
4463 static int
4464 pmap_scan_callback(pv_entry_t pv, void *data)
4465 {
4466         struct pmap_scan_info *info = data;
4467         struct pmap *pmap = info->pmap;
4468         pv_entry_t pd_pv;       /* A page directory PV */
4469         pv_entry_t pt_pv;       /* A page table PV */
4470         vm_pindex_t *pt_placemark;
4471         pt_entry_t *ptep;
4472         pt_entry_t oldpte;
4473         vm_offset_t sva;
4474         vm_offset_t eva;
4475         vm_offset_t va_next;
4476         vm_pindex_t pd_pindex;
4477         int error;
4478
4479         /*
4480          * Stop if requested
4481          */
4482         if (info->stop)
4483                 return -1;
4484
4485         /*
4486          * Pull the PD pindex from the pv before releasing the spinlock.
4487          *
4488          * WARNING: pv is faked for kernel pmap scans.
4489          */
4490         pd_pindex = pv->pv_pindex;
4491         spin_unlock(&pmap->pm_spin);
4492         pv = NULL;      /* invalid after spinlock unlocked */
4493
4494         /*
4495          * Calculate the page range within the PD.  SIMPLE pmaps are
4496          * direct-mapped for the entire 2^64 address space.  Normal pmaps
4497          * reflect the user and kernel address space, which requires
4498          * canonicalization with regard to converting pd_pindex's back
4499          * into addresses.
4500          */
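        /*
         * (x86-64 canonical addresses sign-extend bit 47 into bits 63:48.
         * If the sva reconstructed from pd_pindex lies in the kernel half
         * it will have a bit within PML4_SIGNMASK set, and OR-ing the full
         * mask back in below produces the canonical kernel address.)
         */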
4501         sva = (pd_pindex - pmap_pd_pindex(0)) << PDPSHIFT;
4502         if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
4503             (sva & PML4_SIGNMASK)) {
4504                 sva |= PML4_SIGNMASK;
4505         }
4506         eva = sva + NBPDP;      /* can overflow */
4507         if (sva < info->sva)
4508                 sva = info->sva;
4509         if (eva < info->sva || eva > info->eva)
4510                 eva = info->eva;
4511
4512         /*
4513          * NOTE: kernel mappings do not track page table pages, only
4514          *       terminal pages.
4515          *
4516          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
4517          *       However, for the scan to be efficient we try to
4518          *       cache items top-down.
4519          */
4520         pd_pv = NULL;
4521         pt_pv = NULL;
4522
4523         for (; sva < eva; sva = va_next) {
4524                 if (info->stop)
4525                         break;
4526                 if (sva >= VM_MAX_USER_ADDRESS) {
4527                         if (pt_pv) {
4528                                 pv_put(pt_pv);
4529                                 pt_pv = NULL;
4530                         }
4531                         goto kernel_skip;
4532                 }
4533
4534                 /*
4535                  * PD cache, scan shortcut if it doesn't exist.
4536                  */
4537                 if (pd_pv == NULL) {
4538                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4539                 } else if (pd_pv->pv_pmap != pmap ||
4540                            pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
4541                         pv_put(pd_pv);
4542                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4543                 }
4544                 if (pd_pv == NULL) {
4545                         va_next = (sva + NBPDP) & ~PDPMASK;
4546                         if (va_next < sva)
4547                                 va_next = eva;
4548                         continue;
4549                 }
4550
4551                 /*
4552                  * PT cache
4553                  *
4554                  * NOTE: The cached pt_pv can be removed from the pmap when
4555                  *       pmap_dynamic_delete is enabled.
4556                  */
4557                 if (pt_pv && (pt_pv->pv_pmap != pmap ||
4558                               pt_pv->pv_pindex != pmap_pt_pindex(sva))) {
4559                         pv_put(pt_pv);
4560                         pt_pv = NULL;
4561                 }
4562                 if (pt_pv == NULL) {
4563                         pt_pv = pv_get_try(pmap, pmap_pt_pindex(sva),
4564                                            &pt_placemark, &error);
4565                         if (error) {
4566                                 pv_put(pd_pv);  /* lock order */
4567                                 pd_pv = NULL;
4568                                 if (pt_pv) {
4569                                         pv_lock(pt_pv);
4570                                         pv_put(pt_pv);
4571                                         pt_pv = NULL;
4572                                 } else {
4573                                         pv_placemarker_wait(pmap, pt_placemark);
4574                                 }
4575                                 va_next = sva;
4576                                 continue;
4577                         }
4578                         /* may have to re-check later if pt_pv is NULL here */
4579                 }
4580
4581                 /*
4582                  * If pt_pv is NULL we either have a shared page table
4583                  * page and must issue a callback specific to that case,
4584                  * or there is no page table page.
4585                  *
4586                  * Either way we can skip the page table page.
4587                  *
4588                  * WARNING! pt_pv can also be NULL due to a pv creation
4589                  *          race where we find it to be NULL and then
4590                  *          later see a pte_pv.  But it's possible the pt_pv
4591                  *          got created in between the two operations, so
4592                  *          we must check.
4593                  */
4594                 if (pt_pv == NULL) {
4595                         /*
4596                          * Possible unmanaged (shared from another pmap)
4597                          * page table page.
4598                          *
4599                          * WARNING!  We must hold pt_placemark across the
4600                          *           *ptep test to prevent misinterpreting
4601                          *           a non-zero *ptep as a shared page
4602                          *           table page.  Hold it across the function
4603                          *           callback as well for SMP safety.
4604                          */
4605                         ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
4606                         if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
4607                                 info->func(pmap, info, NULL, pt_placemark,
4608                                            pd_pv, 1,
4609                                            sva, ptep, info->arg);
4610                         } else {
4611                                 pv_placemarker_wakeup(pmap, pt_placemark);
4612                         }
4613
4614                         /*
4615                          * Done, move to next page table page.
4616                          */
4617                         va_next = (sva + NBPDR) & ~PDRMASK;
4618                         if (va_next < sva)
4619                                 va_next = eva;
4620                         continue;
4621                 }
4622
4623                 /*
4624                  * From this point in the loop testing pt_pv for non-NULL
4625                  * means we are in UVM, else if it is NULL we are in KVM.
4626                  *
4627                  * Limit our scan to either the end of the va represented
4628                  * by the current page table page, or to the end of the
4629                  * range being removed.
4630                  */
4631 kernel_skip:
4632                 va_next = (sva + NBPDR) & ~PDRMASK;
4633                 if (va_next < sva)
4634                         va_next = eva;
4635                 if (va_next > eva)
4636                         va_next = eva;
4637
4638                 /*
4639                  * Scan the page table for pages.  Some pages may not be
4640                  * managed (might not have a pv_entry).
4641                  *
4642                  * There is no page table management for kernel pages so
4643                  * pt_pv will be NULL in that case, but otherwise pt_pv
4644                  * is non-NULL, locked, and referenced.
4645                  */
4646
4647                 /*
4648                  * At this point a non-NULL pt_pv means a UVA, and a NULL
4649                  * pt_pv means a KVA.
4650                  */
4651                 if (pt_pv)
4652                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
4653                 else
4654                         ptep = vtopte(sva);
4655
4656                 while (sva < va_next) {
4657                         pv_entry_t pte_pv;
4658                         vm_pindex_t *pte_placemark;
4659
4660                         /*
4661                          * Yield every 64 pages, stop if requested.
4662                          */
4663                         if ((++info->count & 63) == 0)
4664                                 lwkt_user_yield();
4665                         if (info->stop)
4666                                 break;
4667
4668                         /*
4669                          * We can shortcut our scan if *ptep == 0.  This is
4670                          * an unlocked check.
4671                          */
4672                         if (*ptep == 0) {
4673                                 sva += PAGE_SIZE;
4674                                 ++ptep;
4675                                 continue;
4676                         }
4677                         cpu_ccfence();
4678
4679                         /*
4680                          * Acquire the related pte_pv, if any.  If *ptep == 0
4681                          * the related pte_pv should not exist, but if *ptep
4682                          * is not zero the pte_pv may or may not exist (e.g.
4683                          * will not exist for an unmanaged page).
4684                          *
4685                          * However a multitude of races are possible here
4686                          * so if we cannot lock definite state we clean out
4687                          * our cache and break the inner while() loop to
4688                          * force a loop up to the top of the for().
4689                          *
4690                          * XXX unlock/relock pd_pv, pt_pv, and re-test their
4691                          *     validity instead of looping up?
4692                          */
4693                         pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
4694                                             &pte_placemark, &error);
4695                         if (error) {
4696                                 if (pd_pv) {
4697                                         pv_put(pd_pv);  /* lock order */
4698                                         pd_pv = NULL;
4699                                 }
4700                                 if (pt_pv) {
4701                                         pv_put(pt_pv);  /* lock order */
4702                                         pt_pv = NULL;
4703                                 }
4704                                 if (pte_pv) {           /* block */
4705                                         pv_lock(pte_pv);
4706                                         pv_put(pte_pv);
4707                                         pte_pv = NULL;
4708                                 } else {
4709                                         pv_placemarker_wait(pmap,
4710                                                         pte_placemark);
4711                                 }
4712                                 va_next = sva;          /* retry */
4713                                 break;
4714                         }
4715
4716                         /*
4717                          * Reload *ptep after successfully locking the
4718                          * pindex.  If *ptep == 0 we had better NOT have a
4719                          * pte_pv.
4720                          */
4721                         cpu_ccfence();
4722                         oldpte = *ptep;
4723                         if (oldpte == 0) {
4724                                 if (pte_pv) {
4725                                         kprintf("Unexpected non-NULL pte_pv "
4726                                                 "%p pt_pv %p "
4727                                                 "*ptep = %016lx/%016lx\n",
4728                                                 pte_pv, pt_pv, *ptep, oldpte);
4729                                         panic("Unexpected non-NULL pte_pv");
4730                                 } else {
4731                                         pv_placemarker_wakeup(pmap, pte_placemark);
4732                                 }
4733                                 sva += PAGE_SIZE;
4734                                 ++ptep;
4735                                 continue;
4736                         }
4737
4738                         /*
4739                          * We can't hold pd_pv across the callback (because
4740                          * we don't pass it to the callback and the callback
4741                          * might deadlock)
4742                          */
4743                         if (pd_pv) {
4744                                 vm_page_wire_quick(pd_pv->pv_m);
4745                                 pv_unlock(pd_pv);
4746                         }
4747
4748                         /*
4749                          * Ready for the callback.  The locked pte_pv (if any)
4750                          * is consumed by the callback.  pte_pv will exist if
4751                          * the page is managed, and will not exist if it
4752                          * isn't.
4753                          */
4754                         if (oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) {
4755                                 /*
4756                                  * Managed pte
4757                                  */
4758                                 KASSERT(pte_pv &&
4759                                          (oldpte & pmap->pmap_bits[PG_V_IDX]),
4760                                     ("badC *ptep %016lx/%016lx sva %016lx "
4761                                     "pte_pv %p",
4762                                     *ptep, oldpte, sva, pte_pv));
4763                                 /*
4764                                  * We must unlock pd_pv across the callback
4765                                  * to avoid deadlocks on any recursive
4766                                  * disposal.  Re-check that it still exists
4767                                  * after re-locking.
4768                                  *
4769                                  * Call target disposes of pte_pv and may
4770                                  * destroy but will not dispose of pt_pv.
4771                                  */
4772                                 info->func(pmap, info, pte_pv, NULL,
4773                                            pt_pv, 0,
4774                                            sva, ptep, info->arg);
4775                         } else {
4776                                 /*
4777                                  * Unmanaged pte
4778                                  *
4779                                  * We must unlock pd_pv across the callback
4780                                  * to avoid deadlocks on any recursive
4781                                  * disposal.  Re-check that it still exists
4782                                  * after re-locking.
4783                                  *
4784                                  * Call target disposes of pte_pv or
4785                                  * pte_placemark and may destroy but will
4786                                  * not dispose of pt_pv.
4787                                  */
4788                                 KASSERT(pte_pv == NULL &&
4789                                         (oldpte & pmap->pmap_bits[PG_V_IDX]),
4790                                     ("badD *ptep %016lx/%016lx sva %016lx "
4791                                     "pte_pv %p pte_pv->pv_m %p ",
4792                                      *ptep, oldpte, sva,
4793                                      pte_pv, (pte_pv ? pte_pv->pv_m : NULL)));
4794                                 if (pte_pv)
4795                                         kprintf("RaceD\n");
4796                                 if (pte_pv) {
4797                                         info->func(pmap, info,
4798                                                    pte_pv, NULL,
4799                                                    pt_pv, 0,
4800                                                    sva, ptep, info->arg);
4801                                 } else {
4802                                         info->func(pmap, info,
4803                                                    NULL, pte_placemark,
4804                                                    pt_pv, 0,
4805                                                    sva, ptep, info->arg);
4806                                 }
4807                         }
4808                         if (pd_pv) {
4809                                 pv_lock(pd_pv);
4810                                 vm_page_unwire_quick(pd_pv->pv_m);
4811                                 if (pd_pv->pv_pmap == NULL) {
4812                                         va_next = sva;          /* retry */
4813                                         break;
4814                                 }
4815                         }
4816
4817                         /*
4818                          * NOTE: The cached pt_pv can be removed from the
4819                          *       pmap when pmap_dynamic_delete is enabled,
4820                          *       which will cause ptep to become stale.
4821                          *
4822                          *       This also means that no pages remain under
4823                          *       the PT, so we can just break out of the inner
4824                          *       loop and let the outer loop clean everything
4825                          *       up.
4826                          */
4827                         if (pt_pv && pt_pv->pv_pmap != pmap)
4828                                 break;
4829                         pte_pv = NULL;
4830                         sva += PAGE_SIZE;
4831                         ++ptep;
4832                 }
4833         }
4834         if (pd_pv) {
4835                 pv_put(pd_pv);
4836                 pd_pv = NULL;
4837         }
4838         if (pt_pv) {
4839                 pv_put(pt_pv);
4840                 pt_pv = NULL;
4841         }
4842         if ((++info->count & 7) == 0)
4843                 lwkt_user_yield();
4844
4845         /*
4846          * Relock before returning.
4847          */
4848         spin_lock(&pmap->pm_spin);
4849         return (0);
4850 }
4851
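/*
 * Remove all pages within the requested virtual address range from the pmap.
 */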
4852 void
4853 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4854 {
4855         struct pmap_scan_info info;
4856
4857         info.pmap = pmap;
4858         info.sva = sva;
4859         info.eva = eva;
4860         info.func = pmap_remove_callback;
4861         info.arg = NULL;
4862         pmap_scan(&info, 1);
4863 #if 0
4864         cpu_invltlb();
4865         if (eva - sva < 1024*1024) {
4866                 while (sva < eva) {
4867                         cpu_invlpg((void *)sva);
4868                         sva += PAGE_SIZE;
4869                 }
4870         }
4871 #endif
4872 }
4873
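/*
 * Identical to pmap_remove() except that no SMP TLB invalidations are
 * issued (pmap_scan() is called with smp_inval == 0, so no bulk
 * invalidation state is set up).  Presumably the caller arranges any
 * invalidation that is still required.
 */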
4874 static void
4875 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4876 {
4877         struct pmap_scan_info info;
4878
4879         info.pmap = pmap;
4880         info.sva = sva;
4881         info.eva = eva;
4882         info.func = pmap_remove_callback;
4883         info.arg = NULL;
4884         pmap_scan(&info, 0);
4885 }
4886
4887 static void
4888 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
4889                      pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
4890                      pv_entry_t pt_pv, int sharept,
4891                      vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
4892 {
4893         pt_entry_t pte;
4894
4895         if (pte_pv) {
4896                 /*
4897                  * Managed entry
4898                  *
4899                  * This will also drop pt_pv's wire_count. Note that
4900                  * terminal pages are not wired based on mmu presence.
4901                  *
4902                  * NOTE: If this is the kernel_pmap, pt_pv can be NULL.
4903                  */
4904                 KKASSERT(pte_pv->pv_m != NULL);
4905                 pmap_remove_pv_pte(pte_pv, pt_pv, info->bulk, 2);
4906                 pte_pv = NULL;  /* safety */
4907
4908                 /*
4909                  * Recursively destroy higher-level page tables.
4910                  *
4911                  * This is optional.  If we do not, they will still
4912                  * be destroyed when the process exits.
4913                  *
4914                  * NOTE: Do not destroy pv_entry's with extra hold refs,
4915                  *       a caller may have unlocked it and intends to
4916                  *       continue to use it.
4917                  */
4918                 if (pmap_dynamic_delete &&
4919                     pt_pv &&
4920                     pt_pv->pv_m &&
4921                     pt_pv->pv_m->wire_count == 1 &&
4922                     (pt_pv->pv_hold & PV_HOLD_MASK) == 2 &&
4923                     pt_pv->pv_pindex < pmap_pml4_pindex()) {
4924                         if (pmap_dynamic_delete == 2)
4925                                 kprintf("B %jd %08x\n", pt_pv->pv_pindex, pt_pv->pv_hold);
4926                         pv_hold(pt_pv); /* extra hold */
4927                         pmap_remove_pv_pte(pt_pv, NULL, info->bulk, 1);
4928                         pv_lock(pt_pv); /* prior extra hold + relock */
4929                 }
4930         } else if (sharept == 0) {
4931                 /*
4932                  * Unmanaged pte (pte_placemark is non-NULL)
4933                  *
4934                  * pt_pv's wire_count is still bumped by unmanaged pages
4935                  * so we must decrement it manually.
4936                  *
4937                  * We have to unwire the target page table page.
4938                  */
4939                 pte = pmap_inval_bulk(info->bulk, va, ptep, 0);
4940                 if (pte & pmap->pmap_bits[PG_W_IDX])
4941                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
4942                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4943                 if (vm_page_unwire_quick(pt_pv->pv_m))
4944                         panic("pmap_remove: insufficient wirecount");
4945                 pv_placemarker_wakeup(pmap, pte_placemark);
4946         } else {
4947                 /*
4948                  * Unmanaged page table (pt, pd, or pdp; not pte) for
4949                  * a shared page table.
4950                  *
4951                  * pt_pv is actually the pd_pv for our pmap (not the shared
4952                  * object pmap).
4953                  *
4954                  * We have to unwire the target page table page and we
4955                  * have to unwire our page directory page.
4956                  *
4957                  * It is unclear how we can invalidate a segment, so we
4958                  * invalidate -1, which invalidates the entire TLB.
4959                  */
4960                 pte = pmap_inval_bulk(info->bulk, (vm_offset_t)-1, ptep, 0);
4961                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4962                 KKASSERT((pte & pmap->pmap_bits[PG_DEVICE_IDX]) == 0);
4963                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
4964                         panic("pmap_remove: shared pgtable1 bad wirecount");
4965                 if (vm_page_unwire_quick(pt_pv->pv_m))
4966                         panic("pmap_remove: shared pgtable2 bad wirecount");
4967                 pv_placemarker_wakeup(pmap, pte_placemark);
4968         }
4969 }
4970
4971 /*
4972  * Removes this physical page from all physical maps in which it resides.
4973  * Reflects back modify bits to the pager.
4974  *
4975  * This routine may not be called from an interrupt.
4976  */
4977 static
4978 void
4979 pmap_remove_all(vm_page_t m)
4980 {
4981         pv_entry_t pv;
4982         pmap_inval_bulk_t bulk;
4983
4984         if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
4985                 return;
4986
4987         vm_page_spin_lock(m);
4988         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4989                 if (pv->pv_m != m) {
4990                         kprintf("pmap_remove_all FAILURE\n");
4991                         kprintf("pv %p pv->pv_m %p m %p\n", pv, pv->pv_m, m);
4992                         kprintf("pvflags %08x\n", pv->pv_flags);
4993                 }
4994
4995                 KKASSERT(pv->pv_m == m);
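                /*
                 * Try to hold the pv without blocking.  If that fails,
                 * drop the page spinlock, block on the pv lock so the
                 * current owner can finish, then retry from the head of
                 * the list.
                 */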
4996                 if (pv_hold_try(pv)) {
4997                         vm_page_spin_unlock(m);
4998                 } else {
4999                         vm_page_spin_unlock(m);
5000                         pv_lock(pv);
5001                         pv_put(pv);
5002                         vm_page_spin_lock(m);
5003                         continue;
5004                 }
5005                 KKASSERT(pv->pv_pmap && pv->pv_m == m);
5006
5007                 /*
5008                  * Holding no spinlocks, pv is locked.  Once we scrap
5009                  * pv we can no longer use it as a list iterator (but
5010                  * we are doing a TAILQ_FIRST() so we are ok).
5011                  */
5012                 pmap_inval_bulk_init(&bulk, pv->pv_pmap);
5013                 pmap_remove_pv_pte(pv, NULL, &bulk, 2);
5014                 pv = NULL;      /* safety */
5015                 pmap_inval_bulk_flush(&bulk);
5016                 vm_page_spin_lock(m);
5017         }
5018         KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
5019         vm_page_spin_unlock(m);
5020 }
5021
5022 /*
5023  * Removes the page from a particular pmap
5024  */
5025 void
5026 pmap_remove_specific(pmap_t pmap, vm_page_t m)
5027 {
5028         pv_entry_t pv;
5029         pmap_inval_bulk_t bulk;
5030
5031         if (!pmap_initialized)
5032                 return;
5033
5034 again:
5035         vm_page_spin_lock(m);
5036         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5037                 if (pv->pv_pmap != pmap)
5038                         continue;
5039                 KKASSERT(pv->pv_m == m);
5040                 if (pv_hold_try(pv)) {
5041                         vm_page_spin_unlock(m);
5042                 } else {
5043                         vm_page_spin_unlock(m);
5044                         pv_lock(pv);
5045                         pv_put(pv);
5046                         goto again;
5047                 }
5048                 KKASSERT(pv->pv_pmap == pmap && pv->pv_m == m);
5049
5050                 /*
5051                  * Holding no spinlocks, pv is locked.  Once gone it can't
5052                  * be used as an iterator.  In fact, because we couldn't
5053                  * necessarily lock it atomically, it may have moved within
5054                  * the list and ALSO cannot be used as an iterator.
5055                  */
5056                 pmap_inval_bulk_init(&bulk, pv->pv_pmap);
5057                 pmap_remove_pv_pte(pv, NULL, &bulk, 2);
5058                 pv = NULL;      /* safety */
5059                 pmap_inval_bulk_flush(&bulk);
5060                 goto again;
5061         }
5062         vm_page_spin_unlock(m);
5063 }
5064
5065 /*
5066  * Set the physical protection on the specified range of this map
5067  * as requested.  This function is typically only used for debug watchpoints
5068  * and COW pages.
5069  *
5070  * This function may not be called from an interrupt if the map is
5071  * not the kernel_pmap.
5072  *
5073  * NOTE!  For shared page table pages we just unmap the page.
5074  */
5075 void
5076 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
5077 {
5078         struct pmap_scan_info info;
5079         /* JG review for NX */
5080
5081         if (pmap == NULL)
5082                 return;
5083         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == VM_PROT_NONE) {
5084                 pmap_remove(pmap, sva, eva);
5085                 return;
5086         }
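        /*
         * This function only ever removes permissions; a protection
         * request that includes VM_PROT_WRITE is a no-op.
         */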
5087         if (prot & VM_PROT_WRITE)
5088                 return;
5089         info.pmap = pmap;
5090         info.sva = sva;
5091         info.eva = eva;
5092         info.func = pmap_protect_callback;
5093         info.arg = &prot;
5094         pmap_scan(&info, 1);
5095 }
5096
5097 static
5098 void
5099 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
5100                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
5101                       pv_entry_t pt_pv, int sharept,
5102                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
5103 {
5104         pt_entry_t pbits;
5105         pt_entry_t cbits;
5106         pt_entry_t pte;
5107         vm_page_t m;
5108
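        /*
         * For a normal pte: snapshot *ptep, compute the new bits (PG_A and
         * PG_M are harvested into the vm_page and cleared, and PG_RW is
         * cleared), then install them with a cmpset.  If the pte changed
         * underneath us we loop back and recompute.
         */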
5109 again:
5110         pbits = *ptep;
5111         cbits = pbits;
5112         if (pte_pv) {
5113                 KKASSERT(pte_pv->pv_m != NULL);
5114                 m = NULL;
5115                 if (pbits & pmap->pmap_bits[PG_A_IDX]) {
5116                         if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
5117                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
5118                                 KKASSERT(m == pte_pv->pv_m);
5119                                 vm_page_flag_set(m, PG_REFERENCED);
5120                         }
5121                         cbits &= ~pmap->pmap_bits[PG_A_IDX];
5122                 }
5123                 if (pbits & pmap->pmap_bits[PG_M_IDX]) {
5124                         if (pmap_track_modified(pte_pv->pv_pindex)) {
5125                                 if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
5126                                         if (m == NULL) {
5127                                                 m = PHYS_TO_VM_PAGE(pbits &
5128                                                                     PG_FRAME);
5129                                         }
5130                                         vm_page_dirty(m);
5131                                 }
5132                                 cbits &= ~pmap->pmap_bits[PG_M_IDX];
5133                         }
5134                 }
5135         } else if (sharept) {
5136                 /*
5137                  * Unmanaged page table, pt_pv is actually the pd_pv
5138                  * for our pmap (not the object's shared pmap).
5139                  *
5140                  * When asked to protect something in a shared page table
5141                  * page we just unmap the page table page.  We have to
5142                  * invalidate the tlb in this situation.
5143                  *
5144                  * XXX Warning, shared page tables will not be used for
5145                  * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings
5146                  * so PHYS_TO_VM_PAGE() should be safe here.
5147                  */
5148                 pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, ptep, 0);
5149                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
5150                         panic("pmap_protect: pgtable1 pg bad wirecount");
5151                 if (vm_page_unwire_quick(pt_pv->pv_m))
5152                         panic("pmap_protect: pgtable2 pg bad wirecount");
5153                 ptep = NULL;
5154         }
5155         /* else unmanaged page, adjust bits, no wire changes */
5156
5157         if (ptep) {
5158                 cbits &= ~pmap->pmap_bits[PG_RW_IDX];
5159 #ifdef PMAP_DEBUG2
5160                 if (pmap_enter_debug > 0) {
5161                         --pmap_enter_debug;
5162                         kprintf("pmap_protect va=%lx ptep=%p pte_pv=%p "
5163                                 "pt_pv=%p cbits=%08lx\n",
5164                                 va, ptep, pte_pv,
5165                                 pt_pv, cbits
5166                         );
5167                 }
5168 #endif
5169                 if (pbits != cbits) {
5170                         vm_offset_t xva;
5171
5172                         xva = (sharept) ? (vm_offset_t)-1 : va;
5173                         if (!pmap_inval_smp_cmpset(pmap, xva,
5174                                                    ptep, pbits, cbits)) {
5175                                 goto again;
5176                         }
5177                 }
5178         }
5179         if (pte_pv)
5180                 pv_put(pte_pv);
5181         else
5182                 pv_placemarker_wakeup(pmap, pte_placemark);
5183 }
5184
5185 /*
5186  * Insert the vm_page (m) at the virtual address (va), replacing any prior
5187  * mapping at that address.  Set protection and wiring as requested.
5188  *
5189  * If entry is non-NULL we check to see if the SEG_SIZE optimization is
5190  * possible.  If it is we enter the page into the appropriate shared pmap
5191  * hanging off the related VM object instead of the passed pmap, then we
5192  * share the page table page from the VM object's pmap into the current pmap.
5193  *
5194  * NOTE: This routine MUST insert the page into the pmap now, it cannot
5195  *       lazy-evaluate.
5196  *
5197  * NOTE: If (m) is PG_UNMANAGED it may also be a temporary fake vm_page_t;
5198  *       we never record it.
5199  */
5200 void
5201 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
5202            boolean_t wired, vm_map_entry_t entry)
5203 {
5204         pv_entry_t pt_pv;       /* page table */
5205         pv_entry_t pte_pv;      /* page table entry */
5206         vm_pindex_t *pte_placemark;
5207         pt_entry_t *ptep;
5208         vm_paddr_t opa;
5209         pt_entry_t origpte, newpte;
5210         vm_paddr_t pa;
5211
5212         if (pmap == NULL)
5213                 return;
5214         va = trunc_page(va);
5215 #ifdef PMAP_DIAGNOSTIC
5216         if (va >= KvaEnd)
5217                 panic("pmap_enter: toobig");
5218         if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
5219                 panic("pmap_enter: invalid to pmap_enter page table "
5220                       "pages (va: 0x%lx)", va);
5221 #endif
5222         if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
5223                 kprintf("Warning: pmap_enter called on UVA with "
5224                         "kernel_pmap\n");
5225 #ifdef DDB
5226                 db_print_backtrace();
5227 #endif
5228         }
5229         if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
5230                 kprintf("Warning: pmap_enter called on KVA without "
5231                         "kernel_pmap\n");
5232 #ifdef DDB
5233                 db_print_backtrace();
5234 #endif
5235         }
5236
5237         /*
5238          * Get locked PV entries for our new page table entry (pte_pv or
5239          * pte_placemark) and for its parent page table (pt_pv).  We need
5240          * the parent so we can resolve the location of the ptep.
5241          *
5242          * Only hardware MMU actions can modify the ptep out from
5243          * under us.
5244          *
5245          * if (m) is fictitious or unmanaged we do not create a managing
5246          * pte_pv for it.  Any pre-existing page's management state must
5247          * match (avoiding code complexity).
5248          *
5249          * If the pmap is still being initialized we assume existing
5250          * page tables.
5251          *
5252          * Kernel mappings do not track page table pages (i.e. pt_pv).
5253          *
5254          * WARNING! If replacing a managed mapping with an unmanaged mapping
5255          *          pte_pv will wind up being non-NULL and must be handled
5256          *          below.
5257          */
5258         if (pmap_initialized == FALSE) {
5259                 pte_pv = NULL;
5260                 pt_pv = NULL;
5261                 pte_placemark = NULL;
5262                 ptep = vtopte(va);
5263                 origpte = *ptep;
5264         } else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */
5265                 pmap_softwait(pmap);
5266                 pte_pv = pv_get(pmap, pmap_pte_pindex(va), &pte_placemark);
5267                 KKASSERT(pte_pv == NULL);
5268                 if (va >= VM_MAX_USER_ADDRESS) {
5269                         pt_pv = NULL;
5270                         ptep = vtopte(va);
5271                 } else {
5272                         pt_pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va),
5273                                                   NULL, entry, va);
5274                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5275                 }
5276                 origpte = *ptep;
5277                 cpu_ccfence();
5278                 KASSERT(origpte == 0 ||
5279                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0,
5280                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
5281         } else {
5282                 pmap_softwait(pmap);
5283                 if (va >= VM_MAX_USER_ADDRESS) {
5284                         /*
5285                          * Kernel map, pv_entry-tracked.
5286                          */
5287                         pt_pv = NULL;
5288                         pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), NULL);
5289                         ptep = vtopte(va);
5290                 } else {
5291                         /*
5292                          * User map
5293                          */
5294                         pte_pv = pmap_allocpte_seg(pmap, pmap_pte_pindex(va),
5295                                                    &pt_pv, entry, va);
5296                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5297                 }
5298                 pte_placemark = NULL;   /* safety */
5299                 origpte = *ptep;
5300                 cpu_ccfence();
5301                 KASSERT(origpte == 0 ||
5302                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]),
5303                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
5304         }
5305
5306         pa = VM_PAGE_TO_PHYS(m);
5307         opa = origpte & PG_FRAME;
5308
5309         /*
5310          * Calculate the new PTE.  Note that pte_pv alone does not mean
5311          * the new pte_pv is managed, it could exist because the old pte
5312          * was managed even if the new one is not.
5313          */
5314         newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
5315                  pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
5316         if (wired)
5317                 newpte |= pmap->pmap_bits[PG_W_IDX];
5318         if (va < VM_MAX_USER_ADDRESS)
5319                 newpte |= pmap->pmap_bits[PG_U_IDX];
5320         if (pte_pv && (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) == 0)
5321                 newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
5322 //      if (pmap == &kernel_pmap)
5323 //              newpte |= pgeflag;
5324         newpte |= pmap->pmap_cache_bits[m->pat_mode];
5325         if (m->flags & PG_FICTITIOUS)
5326                 newpte |= pmap->pmap_bits[PG_DEVICE_IDX];
5327
5328         /*
5329          * It is possible for multiple faults to occur in threaded
5330          * environments; the existing pte might be correct.
5331          */
5332         if (((origpte ^ newpte) &
5333             ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
5334                           pmap->pmap_bits[PG_A_IDX])) == 0) {
5335                 goto done;
5336         }
5337
5338         /*
5339          * Ok, either the address changed or the protection or wiring
5340          * changed.
5341          *
5342          * Clear the current entry, interlocking the removal.  For managed
5343          * pte's this will also flush the modified state to the vm_page.
5344          * Atomic ops are mandatory in order to ensure that PG_M events are
5345          * not lost during any transition.
5346          *
5347          * WARNING: The caller has busied the new page but not the original
5348          *          vm_page which we are trying to replace.  Because we hold
5349          *          the pte_pv lock, but have not busied the page, PG bits
5350          *          can be cleared out from under us.
5351          */
5352         if (opa) {
5353                 if (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) {
5354                         /*
5355                          * Old page was managed.  Expect pte_pv to exist.
5356                          * (it might also exist if the old page was unmanaged).
5357                          *
5358                          * NOTE: pt_pv won't exist for a kernel page
5359                          *       (managed or otherwise).
5360                          *
5361                          * NOTE: We may be reusing the pte_pv so we do not
5362                          *       destroy it in pmap_remove_pv_pte().
5363                          */
5364                         KKASSERT(pte_pv && pte_pv->pv_m);
5365                         if (prot & VM_PROT_NOSYNC) {
5366                                 pmap_remove_pv_pte(pte_pv, pt_pv, NULL, 0);
5367                         } else {
5368                                 pmap_inval_bulk_t bulk;
5369
5370                                 pmap_inval_bulk_init(&bulk, pmap);
5371                                 pmap_remove_pv_pte(pte_pv, pt_pv, &bulk, 0);
5372                                 pmap_inval_bulk_flush(&bulk);
5373                         }
5374                         pmap_remove_pv_page(pte_pv);
5375                         /* will either set pte_pv->pv_m or pv_free() later */
5376                 } else {
5377                         /*
5378                          * Old page was not managed.  If we have a pte_pv
5379                          * it better not have a pv_m assigned to it.  If the
5380                          * new page is managed the pte_pv will be destroyed
5381                          * near the end (we need its interlock).
5382                          *
5383                          * NOTE: We leave the wire count on the PT page
5384                          *       intact for the followup enter, but adjust
5385                          *       the wired-pages count on the pmap.
5386                          */
5387                         KKASSERT(pte_pv == NULL);
5388                         if (prot & VM_PROT_NOSYNC) {
5389                                 /*
5390                                  * NOSYNC (no mmu sync) requested.
5391                                  */
5392                                 (void)pte_load_clear(ptep);
5393                                 cpu_invlpg((void *)va);
5394                         } else {
5395                                 /*
5396                                  * Nominal SYNC
5397                                  */
5398                                 pmap_inval_smp(pmap, va, 1, ptep, 0);
5399                         }
5400
5401                         /*
5402                          * We must adjust pm_stats manually for unmanaged
5403                          * pages.
5404                          */
5405                         if (pt_pv) {
5406                                 atomic_add_long(&pmap->pm_stats.
5407                                                 resident_count, -1);
5408                         }
5409                         if (origpte & pmap->pmap_bits[PG_W_IDX]) {
5410                                 atomic_add_long(&pmap->pm_stats.
5411                                                 wired_count, -1);
5412                         }
5413                 }
5414                 KKASSERT(*ptep == 0);
5415         }
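
        /*
         * If there was a prior mapping it has now been removed and *ptep
         * is 0.  If the old page was managed, pte_pv is still held and
         * locked so it can be reused for the new mapping or freed below.
         */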
5416
5417 #ifdef PMAP_DEBUG2
5418         if (pmap_enter_debug > 0) {
5419                 --pmap_enter_debug;
5420                 kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p"
5421                         " pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n",
5422                         va, m,
5423                         origpte, newpte, ptep,
5424                         pte_pv, pt_pv, opa, prot);
5425         }
5426 #endif
5427
5428         if ((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5429                 /*
5430                  * Entering an unmanaged page.  We must wire the pt_pv unless
5431                  * we retained the wiring from an unmanaged page we had
5432                  * removed (if we retained it via pte_pv that will go away
5433                  * soon).
5434                  */
5435                 if (pt_pv && (opa == 0 ||
5436                               (origpte & pmap->pmap_bits[PG_MANAGED_IDX]))) {
5437                         vm_page_wire_quick(pt_pv->pv_m);
5438                 }
5439                 if (wired)
5440                         atomic_add_long(&pmap->pm_stats.wired_count, 1);
5441
5442                 /*
5443                  * Unmanaged pages need manual resident_count tracking.
5444                  */
5445                 if (pt_pv) {
5446                         atomic_add_long(&pt_pv->pv_pmap->pm_stats.
5447                                         resident_count, 1);
5448                 }
5449                 if (newpte & pmap->pmap_bits[PG_RW_IDX])
5450                         vm_page_flag_set(m, PG_WRITEABLE);
5451         } else {
5452                 /*
5453                  * Entering a managed page.  Our pte_pv takes care of the
5454                  * PT wiring, so if we had removed an unmanaged page before
5455                  * we must adjust.
5456                  *
5457                  * We have to take care of the pmap wired count ourselves.
5458                  *
5459                  * Enter on the PV list if part of our managed memory.
5460                  */
5461
5462                 if (m->object == NULL && pmap_pv_debug > 0) {
5463                         --pmap_pv_debug;
5464                         kprintf("pte_m %p pv_entry %p NOOBJ\n", m, pte_pv);
5465                         print_backtrace(16);
5466                 }
5467
5468                 KKASSERT(pte_pv && (pte_pv->pv_m == NULL || pte_pv->pv_m == m));
5469                 vm_page_spin_lock(m);
5470                 pte_pv->pv_m = m;
5471                 pmap_page_stats_adding(m);
5472                 TAILQ_INSERT_TAIL(&m->md.pv_list, pte_pv, pv_list);
5473
5474                 /*
5475                  * Set vm_page flags.  Avoid a cache mastership change if
5476                  * the bits are already set.
5477                  */
5478                 if ((m->flags & PG_MAPPED) == 0)
5479                         vm_page_flag_set(m, PG_MAPPED);
5480                 if ((newpte & pmap->pmap_bits[PG_RW_IDX]) &&
5481                     (m->flags & PG_WRITEABLE) == 0) {
5482                         vm_page_flag_set(m, PG_WRITEABLE);
5483                 }
5484                 vm_page_spin_unlock(m);
5485
5486                 if (pt_pv && opa &&
5487                     (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5488                         vm_page_unwire_quick(pt_pv->pv_m);
5489                 }
5490
5491                 /*
5492                  * Adjust pmap wired pages count for new entry.
5493                  */
5494                 if (wired) {
5495                         atomic_add_long(&pte_pv->pv_pmap->pm_stats.
5496                                         wired_count, 1);
5497                 }
5498         }
5499
5500         /*
5501          * Kernel VMAs (pt_pv == NULL) require pmap invalidation interlocks.
5502          *
5503          * User VMAs do not because those will be zero->non-zero, so no
5504          * stale entries to worry about at this point.
5505          *
5506          * For KVM there still appear to be issues.  Theoretically we
5507          * should be able to scrap the interlocks entirely, but we
5508          * get crashes.
5509          */
5510         if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL) {
5511                 pmap_inval_smp(pmap, va, 1, ptep, newpte);
5512         } else {
5513                 origpte = atomic_swap_long(ptep, newpte);
5514                 if (origpte & pmap->pmap_bits[PG_M_IDX]) {
5515                         kprintf("pmap [M] race @ %016jx\n", va);
5516                         atomic_set_long(ptep, pmap->pmap_bits[PG_M_IDX]);
5517                 }
5518                 if (pt_pv == NULL)
5519                         cpu_invlpg((void *)va);
5520         }
5521
5522         /*
5523          * Cleanup
5524          */
5525 done:
5526         KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 ||
5527                  (m->flags & PG_MAPPED));
5528
5529         /*
5530          * Cleanup the pv entry, allowing other accessors.  If the new page
5531          * is not managed but we have a pte_pv (which was locking our
5532          * operation), we can free it now.  pte_pv->pv_m should be NULL.
5533          */
5534         if (pte_pv && (newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5535                 pv_free(pte_pv, pt_pv);
5536         } else if (pte_pv) {
5537                 pv_put(pte_pv);
5538         } else if (pte_placemark) {
5539                 pv_placemarker_wakeup(pmap, pte_placemark);
5540         }
5541         if (pt_pv)
5542                 pv_put(pt_pv);
5543 }
5544
5545 /*
5546  * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
5547  * This code also assumes that the pmap has no pre-existing entry for this
5548  * VA.
5549  *
5550  * This code currently may only be used on user pmaps, not kernel_pmap.
5551  */
5552 void
5553 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
5554 {
5555         pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, NULL);
5556 }
5557
5558 /*
5559  * Make a temporary mapping for a physical address.  This is only intended
5560  * to be used for panic dumps.
5561  *
5562  * The caller is responsible for calling smp_invltlb().
5563  */
5564 void *
5565 pmap_kenter_temporary(vm_paddr_t pa, long i)
5566 {
5567         pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
5568         return ((void *)crashdumpmap);
5569 }
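
/*
 * Example (a minimal sketch, not taken from the actual dump code): a panic
 * dump writer maps each physical page through crashdumpmap and invalidates
 * the TLBs once at the end, as required by the comment above.
 *
 *	for (i = 0; i < npages; ++i) {
 *		base = pmap_kenter_temporary(dump_pa[i], i);
 *		(copy PAGE_SIZE bytes from (char *)base + i * PAGE_SIZE)
 *	}
 *	smp_invltlb();
 *
 * npages and dump_pa[] are hypothetical; pmap_kenter_temporary() always
 * returns the base of crashdumpmap, so page i lives at base + i * PAGE_SIZE.
 */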
5570
5571 #define MAX_INIT_PT (96)
5572
5573 /*
5574  * This routine preloads the ptes for a given object into the specified pmap.
5575  * This eliminates the blast of soft faults on process startup and
5576  * immediately after an mmap.
5577  */
5578 static int pmap_object_init_pt_callback(vm_page_t p, void *data);
5579
5580 void
5581 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
5582                     vm_object_t object, vm_pindex_t pindex,
5583                     vm_size_t size, int limit)
5584 {
5585         struct rb_vm_page_scan_info info;
5586         struct lwp *lp;
5587         vm_size_t psize;
5588
5589         /*
5590          * We can't preinit if read access isn't set or there is no pmap
5591          * or object.
5592          */
5593         if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
5594                 return;
5595
5596         /*
5597          * We can't preinit if the pmap is not the current pmap
5598          */
5599         lp = curthread->td_lwp;
5600         if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
5601                 return;
5602
5603         /*
5604          * Misc additional checks
5605          */
5606         psize = x86_64_btop(size);
5607
5608         if ((object->type != OBJT_VNODE) ||
5609                 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
5610                         (object->resident_page_count > MAX_INIT_PT))) {
5611                 return;
5612         }
5613
5614         if (pindex + psize > object->size) {
5615                 if (object->size < pindex)
5616                         return;           
5617                 psize = object->size - pindex;
5618         }
5619
5620         if (psize == 0)
5621                 return;
5622
5623         /*
5624          * If everything is segment-aligned do not pre-init here.  Instead
5625          * allow the normal vm_fault path to pass a segment hint to
5626          * pmap_enter() which will then use an object-referenced shared
5627          * page table page.
5628          */
5629         if ((addr & SEG_MASK) == 0 &&
5630             (ctob(psize) & SEG_MASK) == 0 &&
5631             (ctob(pindex) & SEG_MASK) == 0) {
5632                 return;
5633         }
5634
5635         /*
5636          * Use a red-black scan to traverse the requested range and load
5637          * any valid pages found into the pmap.
5638          *
5639          * We cannot safely scan the object's memq without holding the
5640          * object token.
5641          */
5642         info.start_pindex = pindex;
5643         info.end_pindex = pindex + psize - 1;
5644         info.limit = limit;
5645         info.mpte = NULL;
5646         info.addr = addr;
5647         info.pmap = pmap;
5648         info.object = object;
5649
5650         /*
5651          * By using the NOLK scan, the callback function must be sure
5652          * to return -1 if the VM page falls out of the object.
5653          */
5654         vm_object_hold_shared(object);
5655         vm_page_rb_tree_RB_SCAN_NOLK(&object->rb_memq, rb_vm_page_scancmp,
5656                                      pmap_object_init_pt_callback, &info);
5657         vm_object_drop(object);
5658 }
5659
5660 static
5661 int
5662 pmap_object_init_pt_callback(vm_page_t p, void *data)
5663 {
5664         struct rb_vm_page_scan_info *info = data;
5665         vm_pindex_t rel_index;
5666         int hard_busy;
5667
5668         /*
5669          * Don't allow an madvise to blow away our really
5670          * free pages by allocating pv entries.
5671          */
5672         if ((info->limit & MAP_PREFAULT_MADVISE) &&
5673                 vmstats.v_free_count < vmstats.v_free_reserved) {
5674                     return(-1);
5675         }
5676
5677         /*
5678          * Ignore list markers and ignore pages we cannot instantly
5679          * busy (while holding the object token).
5680          */
5681         if (p->flags & PG_MARKER)
5682                 return 0;
5683         hard_busy = 0;
5684 again:
5685         if (hard_busy) {
5686                 if (vm_page_busy_try(p, TRUE))
5687                         return 0;
5688         } else {
5689                 if (vm_page_sbusy_try(p))
5690                         return 0;
5691         }
5692         if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
5693             (p->flags & PG_FICTITIOUS) == 0) {
5694                 if ((p->queue - p->pc) == PQ_CACHE) {
5695                         if (hard_busy == 0) {
5696                                 vm_page_sbusy_drop(p);
5697                                 hard_busy = 1;
5698                                 goto again;
5699                         }
5700                         vm_page_deactivate(p);
5701                 }
5702                 rel_index = p->pindex - info->start_pindex;
5703                 pmap_enter_quick(info->pmap,
5704                                  info->addr + x86_64_ptob(rel_index), p);
5705         }
5706         if (hard_busy)
5707                 vm_page_wakeup(p);
5708         else
5709                 vm_page_sbusy_drop(p);
5710
5711         /*
5712          * We are using an unlocked scan (that is, the scan expects its
5713          * current element to remain in the tree on return).  So we have
5714          * to check here and abort the scan if it isn't.
5715          */
5716         if (p->object != info->object)
5717                 return -1;
5718         lwkt_yield();
5719         return(0);
5720 }
5721
5722 /*
5723  * Return TRUE if the pmap is in shape to trivially pre-fault the specified
5724  * address.
5725  *
5726  * Returns FALSE if it would be non-trivial or if a pte is already loaded
5727  * into the slot.
5728  *
5729  * XXX This is safe only because page table pages are not freed.
5730  */
5731 int
5732 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
5733 {
5734         pt_entry_t *pte;
5735
5736         /*spin_lock(&pmap->pm_spin);*/
5737         if ((pte = pmap_pte(pmap, addr)) != NULL) {
5738                 if (*pte & pmap->pmap_bits[PG_V_IDX]) {
5739                         /*spin_unlock(&pmap->pm_spin);*/
5740                         return FALSE;
5741                 }
5742         }
5743         /*spin_unlock(&pmap->pm_spin);*/
5744         return TRUE;
5745 }
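
/*
 * Example (a sketch of a hypothetical prefault caller, not code from the
 * vm_fault path itself): this routine is designed to pair with
 * pmap_enter_quick() above, since pmap_prefault_ok() returns FALSE when a
 * pte is already loaded and pmap_enter_quick() assumes no pre-existing entry:
 *
 *	if (pmap_prefault_ok(pmap, addr))
 *		pmap_enter_quick(pmap, addr, m);
 */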
5746
5747 /*
5748  * Change the wiring attribute for a pmap/va pair.  The mapping must already
5749  * exist in the pmap.  The mapping may or may not be managed.  The wiring in
5750  * the page is not changed; the page is returned so the caller can adjust
5751  * its wiring (the page is not locked in any way).
5752  *
5753  * Wiring is not a hardware characteristic so there is no need to invalidate
5754  * the TLB.  However, in an SMP environment we must use a locked bus cycle
5755  * to update the pte (that is, if we are not using the pmap_inval_*() API);
5756  * it's ok to do this for simple wiring changes.
5757  */
5758 vm_page_t
5759 pmap_unwire(pmap_t pmap, vm_offset_t va)
5760 {
5761         pt_entry_t *ptep;
5762         pv_entry_t pt_pv;
5763         vm_paddr_t pa;
5764         vm_page_t m;
5765
5766         if (pmap == NULL)
5767                 return NULL;
5768
5769         /*
5770          * Assume elements in the kernel pmap are stable
5771          */
5772         if (pmap == &kernel_pmap) {
5773                 if (pmap_pt(pmap, va) == 0)
5774                         return NULL;
5775                 ptep = pmap_pte_quick(pmap, va);
5776                 if (pmap_pte_v(pmap, ptep)) {
5777                         if (pmap_pte_w(pmap, ptep))
5778                                 atomic_add_long(&pmap->pm_stats.wired_count,-1);
5779                         atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5780                         pa = *ptep & PG_FRAME;
5781                         m = PHYS_TO_VM_PAGE(pa);
5782                 } else {
5783                         m = NULL;
5784                 }
5785         } else {
5786                 /*
5787                  * We can only [un]wire pmap-local pages (we cannot wire
5788                  * shared pages)
5789                  */
5790                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
5791                 if (pt_pv == NULL)
5792                         return NULL;
5793
5794                 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5795                 if ((*ptep & pmap->pmap_bits[PG_V_IDX]) == 0) {
5796                         pv_put(pt_pv);
5797                         return NULL;
5798                 }
5799
5800                 if (pmap_pte_w(pmap, ptep)) {
5801                         atomic_add_long(&pt_pv->pv_pmap->pm_stats.wired_count,
5802                                         -1);
5803                 }
5804                 /* XXX else return NULL so caller doesn't unwire m ? */
5805
5806                 atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5807
5808                 pa = *ptep & PG_FRAME;
5809                 m = PHYS_TO_VM_PAGE(pa);        /* held by wired count */
5810                 pv_put(pt_pv);
5811         }
5812         return m;
5813 }
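
/*
 * Example (caller-side sketch, assuming the usual vm_page wiring API):
 * pmap_unwire() only clears the pte's wired bit and returns the page, so
 * the caller is expected to adjust the page's own wire count itself:
 *
 *	m = pmap_unwire(pmap, va);
 *	if (m)
 *		vm_page_unwire(m, 0);
 */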
5814
5815 /*
5816  * Copy the range specified by src_addr/len from the source map to
5817  * the range dst_addr/len in the destination map.
5818  *
5819  * This routine is only advisory and need not do anything.
5820  */
5821 void
5822 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 
5823           vm_size_t len, vm_offset_t src_addr)
5824 {
5825 }       
5826
5827 /*
5828  * pmap_zero_page:
5829  *
5830  *      Zero the specified physical page.
5831  *
5832  *      This function may be called from an interrupt and no locking is
5833  *      required.
5834  */
5835 void
5836 pmap_zero_page(vm_paddr_t phys)
5837 {
5838         vm_offset_t va = PHYS_TO_DMAP(phys);
5839
5840         pagezero((void *)va);
5841 }
5842
5843 /*
5844  * pmap_zero_page_area:
5845  *
5846  *      Zero part of a physical page by mapping it into memory and clearing
5847  *      its contents with bzero.
5848  *
5849  *      off and size may not cover an area beyond a single hardware page.
5850  */
5851 void
5852 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
5853 {
5854         vm_offset_t virt = PHYS_TO_DMAP(phys);
5855
5856         bzero((char *)virt + off, size);
5857 }
5858
5859 /*
5860  * pmap_copy_page:
5861  *
5862  *      Copy the physical page from the source PA to the target PA.
5863  *      This function may be called from an interrupt.  No locking
5864  *      is required.
5865  */
5866 void
5867 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
5868 {
5869         vm_offset_t src_virt, dst_virt;
5870
5871         src_virt = PHYS_TO_DMAP(src);
5872         dst_virt = PHYS_TO_DMAP(dst);
5873         bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
5874 }
5875
5876 /*
5877  * pmap_copy_page_frag:
5878  *
5879  *      Copy a fragment of a physical page from the source PA to the
5880  *      target PA; the intra-page offsets are taken from the low bits of
5881  *      src and dst.  May be called from an interrupt.  No locking is required.
5882  */
5883 void
5884 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
5885 {
5886         vm_offset_t src_virt, dst_virt;
5887
5888         src_virt = PHYS_TO_DMAP(src);
5889         dst_virt = PHYS_TO_DMAP(dst);
5890
5891         bcopy((char *)src_virt + (src & PAGE_MASK),
5892               (char *)dst_virt + (dst & PAGE_MASK),
5893               bytes);
5894 }
5895
5896 /*
5897  * Returns true if the pmap's pv is one of the first 16 pvs linked to from
5898  * this page.  This count may be changed upwards or downwards in the future;
5899  * it is only necessary that true be returned for a small subset of pmaps
5900  * for proper page aging.
5901  */
5902 boolean_t
5903 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
5904 {
5905         pv_entry_t pv;
5906         int loops = 0;
5907
5908         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
5909                 return FALSE;
5910
5911         vm_page_spin_lock(m);
5912         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5913                 if (pv->pv_pmap == pmap) {
5914                         vm_page_spin_unlock(m);
5915                         return TRUE;
5916                 }
5917                 loops++;
5918                 if (loops >= 16)
5919                         break;
5920         }
5921         vm_page_spin_unlock(m);
5922         return (FALSE);
5923 }
5924
5925 /*
5926  * Remove all pages from the specified address space; this aids process exit
5927  * speeds.  Also, this code may be special cased for the current process
5928  * only.
5929  */
5930 void
5931 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5932 {
5933         pmap_remove_noinval(pmap, sva, eva);
5934         cpu_invltlb();
5935 }
5936
5937 /*
5938  * pmap_testbit tests bits in ptes.  Note that the testbit/clearbit
5939  * routines are inline, so a lot of things compile-time evaluate.
5940  */
5941
5942 static
5943 boolean_t
5944 pmap_testbit(vm_page_t m, int bit)
5945 {
5946         pv_entry_t pv;
5947         pt_entry_t *pte;
5948         pmap_t pmap;
5949
5950         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
5951                 return FALSE;
5952
5953         if (TAILQ_FIRST(&m->md.pv_list) == NULL)
5954                 return FALSE;
5955         vm_page_spin_lock(m);
5956         if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
5957                 vm_page_spin_unlock(m);
5958                 return FALSE;
5959         }
5960
5961         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5962 #if defined(PMAP_DIAGNOSTIC)
5963                 if (pv->pv_pmap == NULL) {
5964                         kprintf("Null pmap (tb) at pindex: %"PRIu64"\n",
5965                             pv->pv_pindex);
5966                         continue;
5967                 }
5968 #endif
5969                 pmap = pv->pv_pmap;
5970
5971                 /*
5972                  * If the bit being tested is the accessed or modified
5973                  * bit, skip ptes which do not track modifications
5974                  * (mappings in the clean submap).
5975                  *
5976                  * WARNING!  Because we do not lock the pv, *pte can be in a
5977                  *           state of flux.  Despite this the value of *pte
5978                  *           will still be related to the vm_page in some way
5979                  *           because the pv cannot be destroyed as long as we
5980                  *           hold the vm_page spin lock.
5981                  */
5982                 if (bit == PG_A_IDX || bit == PG_M_IDX) {
5983                                 //& (pmap->pmap_bits[PG_A_IDX] | pmap->pmap_bits[PG_M_IDX])) {
5984                         if (!pmap_track_modified(pv->pv_pindex))
5985                                 continue;
5986                 }
5987
5988                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
5989                 if (*pte & pmap->pmap_bits[bit]) {
5990                         vm_page_spin_unlock(m);
5991                         return TRUE;
5992                 }
5993         }
5994         vm_page_spin_unlock(m);
5995         return (FALSE);
5996 }
5997
5998 /*
5999  * This routine is used to modify bits in ptes.  Only one bit should be
6000  * specified.  PG_RW requires special handling.
6001  *
6002  * Caller must NOT hold any spin locks
6003  */
6004 static __inline
6005 void
6006 pmap_clearbit(vm_page_t m, int bit_index)
6007 {
6008         pv_entry_t pv;
6009         pt_entry_t *pte;
6010         pt_entry_t pbits;
6011         pmap_t pmap;
6012
6013         if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
6014                 if (bit_index == PG_RW_IDX)
6015                         vm_page_flag_clear(m, PG_WRITEABLE);
6016                 return;
6017         }
6018
6019         /*
6020          * PG_M or PG_A case
6021          *
6022          * Loop over all current mappings, setting/clearing as appropriate.  If
6023          * setting RO do we need to clear the VAC?
6024          *
6025          * NOTE: When clearing PG_M we could also (not implemented) drop
6026          *       through to the PG_RW code and clear PG_RW too, forcing
6027          *       a fault on write to redetect PG_M for virtual kernels, but
6028          *       it isn't necessary since virtual kernels invalidate the
6029          *       pte when they clear the VPTE_M bit in their virtual page
6030          *       tables.
6031          *
6032          * NOTE: Does not re-dirty the page when clearing only PG_M.
6033          *
6034          * NOTE: Because we do not lock the pv, *pte can be in a state of
6035          *       flux.  Despite this the value of *pte is still somewhat
6036          *       related while we hold the vm_page spin lock.
6037          *
6038          *       *pte can be zero due to this race.  Since we are clearing
6039          *       bits we basically do no harm when this race occurs.
6040          */
6041         if (bit_index != PG_RW_IDX) {
6042                 vm_page_spin_lock(m);
6043                 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
6044 #if defined(PMAP_DIAGNOSTIC)
6045                         if (pv->pv_pmap == NULL) {
6046                                 kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
6047                                     pv->pv_pindex);
6048                                 continue;
6049                         }
6050 #endif
6051                         pmap = pv->pv_pmap;
6052                         pte = pmap_pte_quick(pv->pv_pmap,
6053                                              pv->pv_pindex << PAGE_SHIFT);
6054                         pbits = *pte;
6055                         if (pbits & pmap->pmap_bits[bit_index])
6056                                 atomic_clear_long(pte, pmap->pmap_bits[bit_index]);
6057                 }
6058                 vm_page_spin_unlock(m);
6059                 return;
6060         }
6061
6062         /*
6063          * Clear PG_RW.  Also clears PG_M and marks the page dirty if PG_M
6064          * was set.
6065          */
6066 restart:
6067         vm_page_spin_lock(m);
6068         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
6069                 /*
6070                  * don't write protect pager mappings
6071                  */
6072                 if (!pmap_track_modified(pv->pv_pindex))
6073                         continue;
6074
6075 #if defined(PMAP_DIAGNOSTIC)
6076                 if (pv->pv_pmap == NULL) {
6077                         kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
6078                                 pv->pv_pindex);
6079                         continue;
6080                 }
6081 #endif
6082                 pmap = pv->pv_pmap;
6083
6084                 /*
6085                  * Skip pages which do not have PG_RW set.
6086                  */
6087                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
6088                 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0)
6089                         continue;
6090
6091                 /*
6092                  * We must lock the PV to be able to safely test the pte.
6093                  */
6094                 if (pv_hold_try(pv)) {
6095                         vm_page_spin_unlock(m);
6096                 } else {
6097                         vm_page_spin_unlock(m);
6098                         pv_lock(pv);    /* held, now do a blocking lock */
6099                         pv_put(pv);
6100                         goto restart;
6101                 }
6102
6103                 /*
6104                  * Reload pte after acquiring pv.
6105                  */
6106                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
6107 #if 0
6108                 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0) {
6109                         pv_put(pv);
6110                         goto restart;
6111                 }
6112 #endif
6113
6114                 KKASSERT(pv->pv_pmap == pmap && pv->pv_m == m);
6115                 for (;;) {
6116                         pt_entry_t nbits;
6117
6118                         pbits = *pte;
6119                         cpu_ccfence();
6120                         nbits = pbits & ~(pmap->pmap_bits[PG_RW_IDX] |
6121                                           pmap->pmap_bits[PG_M_IDX]);
6122                         if (pmap_inval_smp_cmpset(pmap,
6123                                      ((vm_offset_t)pv->pv_pindex << PAGE_SHIFT),
6124                                      pte, pbits, nbits)) {
6125                                 break;
6126                         }
6127                         cpu_pause();
6128                 }
6129
6130                 /*
6131                  * If PG_M was found to be set while we were clearing PG_RW
6132                  * we also clear PG_M (done above) and mark the page dirty.
6133                  * Callers expect this behavior.
6134                  *
6135                  * We dropped pv so it cannot be used as an iterator.  In fact,
6136                  * because we couldn't necessarily lock it atomically it may
6137                  * have moved within the list, which also rules it out as an
6138                  * iterator.
6139                  */
6140                 vm_page_spin_lock(m);
6141                 if (pbits & pmap->pmap_bits[PG_M_IDX])
6142                         vm_page_dirty(m);
6143                 vm_page_spin_unlock(m);
6144                 pv_put(pv);
6145                 goto restart;
6146         }
6147         if (bit_index == PG_RW_IDX)
6148                 vm_page_flag_clear(m, PG_WRITEABLE);
6149         vm_page_spin_unlock(m);
6150 }
6151
6152 /*
6153  * Lower the permission for all mappings to a given page.
6154  *
6155  * Page must be busied by caller.  Because page is busied by caller this
6156  * should not be able to race a pmap_enter().
6157  */
6158 void
6159 pmap_page_protect(vm_page_t m, vm_prot_t prot)
6160 {
6161         /* JG NX support? */
6162         if ((prot & VM_PROT_WRITE) == 0) {
6163                 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
6164                         /*
6165                          * NOTE: pmap_clearbit(.. PG_RW) also clears
6166                          *       the PG_WRITEABLE flag in (m).
6167                          */
6168                         pmap_clearbit(m, PG_RW_IDX);
6169                 } else {
6170                         pmap_remove_all(m);
6171                 }
6172         }
6173 }
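
/*
 * Example (illustrative only): callers holding the busied page typically use
 *
 *	pmap_page_protect(m, VM_PROT_READ);
 *
 * to write-protect every mapping of the page (clearing PG_WRITEABLE via
 * pmap_clearbit()), or
 *
 *	pmap_page_protect(m, VM_PROT_NONE);
 *
 * to remove all of its mappings entirely.
 */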
6174
6175 vm_paddr_t
6176 pmap_phys_address(vm_pindex_t ppn)
6177 {
6178         return (x86_64_ptob(ppn));
6179 }
6180
6181 /*
6182  * Return a count of reference bits for a page, clearing those bits.
6183  * It is not necessary for every reference bit to be cleared, but it
6184  * is necessary that 0 only be returned when there are truly no
6185  * reference bits set.
6186  *
6187  * XXX: The exact number of bits to check and clear is a matter that
6188  * should be tested and standardized at some point in the future for
6189  * optimal aging of shared pages.
6190  *
6191  * This routine may not block.
6192  */
6193 int
6194 pmap_ts_referenced(vm_page_t m)
6195 {
6196         pv_entry_t pv;
6197         pt_entry_t *pte;
6198         pmap_t pmap;
6199         int rtval = 0;
6200
6201         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
6202                 return (rtval);
6203
6204         vm_page_spin_lock(m);
6205         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
6206                 if (!pmap_track_modified(pv->pv_pindex))
6207                         continue;
6208                 pmap = pv->pv_pmap;
6209                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
6210                 if (pte && (*pte & pmap->pmap_bits[PG_A_IDX])) {
6211                         atomic_clear_long(pte, pmap->pmap_bits[PG_A_IDX]);
6212                         rtval++;
6213                         if (rtval > 4)
6214                                 break;
6215                 }
6216         }
6217         vm_page_spin_unlock(m);
6218         return (rtval);
6219 }
6220
6221 /*
6222  *      pmap_is_modified:
6223  *
6224  *      Return whether or not the specified physical page was modified
6225  *      in any physical maps.
6226  */
6227 boolean_t
6228 pmap_is_modified(vm_page_t m)
6229 {
6230         boolean_t res;
6231
6232         res = pmap_testbit(m, PG_M_IDX);
6233         return (res);
6234 }
6235
6236 /*
6237  *      Clear the modify bits on the specified physical page.
6238  */
6239 void
6240 pmap_clear_modify(vm_page_t m)
6241 {
6242         pmap_clearbit(m, PG_M_IDX);
6243 }
6244
6245 /*
6246  *      pmap_clear_reference:
6247  *
6248  *      Clear the reference bit on the specified physical page.
6249  */
6250 void
6251 pmap_clear_reference(vm_page_t m)
6252 {
6253         pmap_clearbit(m, PG_A_IDX);
6254 }
6255
6256 /*
6257  * Miscellaneous support routines follow
6258  */
6259
6260 static
6261 void
6262 x86_64_protection_init(void)
6263 {
6264         uint64_t *kp;
6265         int prot;
6266
6267         /*
6268          * NX supported? (boot time loader.conf override only)
6269          *
6270          * -1   Automatic (sets mode 1)
6271          *  0   Disabled
6272          *  1   NX implemented, differentiates PROT_READ vs PROT_READ|PROT_EXEC
6273          *  2   NX implemented for all cases
6274          */
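        /*
         * For example, a loader.conf line (illustrative setting only):
         *
         *	machdep.pmap_nx_enable="2"
         *
         * requests NX for all mappings lacking PROT_EXEC, subject to the
         * CPU actually advertising NX support in the check below.
         */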
6275         TUNABLE_INT_FETCH("machdep.pmap_nx_enable", &pmap_nx_enable);
6276         if ((amd_feature & AMDID_NX) == 0) {
6277                 pmap_bits_default[PG_NX_IDX] = 0;
6278                 pmap_nx_enable = 0;
6279         } else if (pmap_nx_enable < 0) {
6280                 pmap_nx_enable = 1;             /* default to mode 1 (READ) */
6281         }
6282
6283         /*
6284          * 0 is basically read-only access, but also set the NX (no-execute)
6285          * bit when VM_PROT_EXECUTE is not specified.
6286          */
6287         kp = protection_codes;
6288         for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) {
6289                 switch (prot) {
6290                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
6291                         /*
6292                          * This case handled elsewhere
6293                          */
6294                         *kp = 0;
6295                         break;
6296                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
6297                         /*
6298                          * Read-only is 0|NX    (pmap_nx_enable mode >= 1)
6299                          */
6300                         if (pmap_nx_enable >= 1)
6301                                 *kp = pmap_bits_default[PG_NX_IDX];
6302                         break;
6303                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
6304                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
6305                         /*
6306                          * Execute requires read access
6307                          */
6308                         *kp = 0;
6309                         break;
6310                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
6311                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
6312                         /*
6313                          * Write without execute is RW|NX
6314                          *                      (pmap_nx_enable mode >= 2)
6315                          */
6316                         *kp = pmap_bits_default[PG_RW_IDX];
6317                         if (pmap_nx_enable >= 2)
6318                                 *kp |= pmap_bits_default[PG_NX_IDX];
6319                         break;
6320                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
6321                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
6322                         /*
6323                          * Write with execute is RW
6324                          */
6325                         *kp = pmap_bits_default[PG_RW_IDX];
6326                         break;
6327                 }
6328                 ++kp;
6329         }
6330 }
6331
6332 /*
6333  * Map a set of physical memory pages into the kernel virtual
6334  * address space. Return a pointer to where it is mapped. This
6335  * routine is intended to be used for mapping device memory,
6336  * NOT real memory.
6337  *
6338  * NOTE: We can't use pgeflag unless we invalidate the pages one at
6339  *       a time.
6340  *
6341  * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE}
6342  *       work whether the cpu supports PAT or not.  The remaining PAT
6343  *       attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu
6344  *       supports PAT.
6345  */
6346 void *
6347 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
6348 {
6349         return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6350 }
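
/*
 * Example (a minimal sketch of a driver-style mapping using only functions
 * from this file; bar_pa and bar_size are hypothetical):
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev_uncacheable(bar_pa, bar_size);
 *	(access the device registers through regs)
 *	pmap_unmapdev((vm_offset_t)regs, bar_size);
 *
 * pmap_mapdev() itself defaults to PAT_WRITE_BACK; register windows normally
 * want PAT_UNCACHEABLE as shown.
 */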
6351
6352 void *
6353 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
6354 {
6355         return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
6356 }
6357
6358 void *
6359 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
6360 {
6361         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6362 }
6363
6364 /*
6365  * Map a set of physical memory pages into the kernel virtual
6366  * address space. Return a pointer to where it is mapped. This
6367  * routine is intended to be used for mapping device memory,
6368  * NOT real memory.
6369  */
6370 void *
6371 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
6372 {
6373         vm_offset_t va, tmpva, offset;
6374         pt_entry_t *pte;
6375         vm_size_t tmpsize;
6376
6377         offset = pa & PAGE_MASK;
6378         size = roundup(offset + size, PAGE_SIZE);
6379
6380         va = kmem_alloc_nofault(&kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE);
6381         if (va == 0)
6382                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
6383
6384         pa = pa & ~PAGE_MASK;
6385         for (tmpva = va, tmpsize = size; tmpsize > 0;) {
6386                 pte = vtopte(tmpva);
6387                 *pte = pa |
6388                     kernel_pmap.pmap_bits[PG_RW_IDX] |
6389                     kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */
6390                     kernel_pmap.pmap_cache_bits[mode];
6391                 tmpsize -= PAGE_SIZE;
6392                 tmpva += PAGE_SIZE;
6393                 pa += PAGE_SIZE;
6394         }
6395         pmap_invalidate_range(&kernel_pmap, va, va + size);
6396         pmap_invalidate_cache_range(va, va + size);
6397
6398         return ((void *)(va + offset));
6399 }
6400
6401 void
6402 pmap_unmapdev(vm_offset_t va, vm_size_t size)
6403 {
6404         vm_offset_t base, offset;
6405
6406         base = va & ~PAGE_MASK;
6407         offset = va & PAGE_MASK;
6408         size = roundup(offset + size, PAGE_SIZE);
6409         pmap_qremove(va, size >> PAGE_SHIFT);
6410         kmem_free(&kernel_map, base, size);
6411 }
6412
6413 /*
6414  * Sets the memory attribute for the specified page.
6415  */
6416 void
6417 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
6418 {
6419
6420     m->pat_mode = ma;
6421
6422     /*
6423      * If "m" is a normal page, update its direct mapping.  This update
6424      * can be relied upon to perform any cache operations that are
6425      * required for data coherence.
6426      */
6427     if ((m->flags & PG_FICTITIOUS) == 0)
6428         pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode);
6429 }
6430
6431 /*
6432  * Change the PAT attribute on an existing kernel memory map.  Caller
6433  * must ensure that the virtual memory in question is not accessed
6434  * during the adjustment.
6435  */
6436 void
6437 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode)
6438 {
6439         pt_entry_t *pte;
6440         vm_offset_t base;
6441         int changed = 0;
6442
6443         if (va == 0)
6444                 panic("pmap_change_attr: va is NULL");
6445         base = trunc_page(va);
6446
6447         while (count) {
6448                 pte = vtopte(va);
6449                 *pte = (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask)) |
6450                        kernel_pmap.pmap_cache_bits[mode];
6451                 --count;
6452                 va += PAGE_SIZE;
6453         }
6454
6455         changed = 1;    /* XXX: not optimal */
6456
6457         /*
6458          * Flush CPU caches if required, to make sure no data remains cached
6459          * that shouldn't be.
6460          */
6461         if (changed) {
6462                 pmap_invalidate_range(&kernel_pmap, base, va);
6463                 pmap_invalidate_cache_range(base, va);
6464         }
6465 }
6466
6467 /*
6468  * perform the pmap work for mincore
6469  */
6470 int
6471 pmap_mincore(pmap_t pmap, vm_offset_t addr)
6472 {
6473         pt_entry_t *ptep, pte;
6474         vm_page_t m;
6475         int val = 0;
6476         
6477         ptep = pmap_pte(pmap, addr);
6478
6479         if (ptep && (pte = *ptep) != 0) {
6480                 vm_offset_t pa;
6481
6482                 val = MINCORE_INCORE;
6483                 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0)
6484                         goto done;
6485
6486                 pa = pte & PG_FRAME;
6487
6488                 if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
6489                         m = NULL;
6490                 else
6491                         m = PHYS_TO_VM_PAGE(pa);
6492
6493                 /*
6494                  * Modified by us
6495                  */
6496                 if (pte & pmap->pmap_bits[PG_M_IDX])
6497                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
6498                 /*
6499                  * Modified by someone
6500                  */
6501                 else if (m && (m->dirty || pmap_is_modified(m)))
6502                         val |= MINCORE_MODIFIED_OTHER;
6503                 /*
6504                  * Referenced by us
6505                  */
6506                 if (pte & pmap->pmap_bits[PG_A_IDX])
6507                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
6508
6509                 /*
6510                  * Referenced by someone
6511                  */
6512                 else if (m && ((m->flags & PG_REFERENCED) ||
6513                                 pmap_ts_referenced(m))) {
6514                         val |= MINCORE_REFERENCED_OTHER;
6515                         vm_page_flag_set(m, PG_REFERENCED);
6516                 }
6517         } 
6518 done:
6519
6520         return val;
6521 }
6522
6523 /*
6524  * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
6525  * vmspace will be ref'd and the old one will be deref'd.
6526  *
6527  * The vmspace for all lwps associated with the process will be adjusted
6528  * and cr3 will be reloaded if any lwp is the current lwp.
6529  *
6530  * The process must hold the vmspace->vm_map.token for oldvm and newvm
6531  */
6532 void
6533 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
6534 {
6535         struct vmspace *oldvm;
6536         struct lwp *lp;
6537
6538         oldvm = p->p_vmspace;
6539         if (oldvm != newvm) {
6540                 if (adjrefs)
6541                         vmspace_ref(newvm);
6542                 p->p_vmspace = newvm;
6543                 KKASSERT(p->p_nthreads == 1);
6544                 lp = RB_ROOT(&p->p_lwp_tree);
6545                 pmap_setlwpvm(lp, newvm);
6546                 if (adjrefs)
6547                         vmspace_rel(oldvm);
6548         }
6549 }
6550
6551 /*
6552  * Set the vmspace for a LWP.  The vmspace is almost universally set the
6553  * same as the process vmspace, but virtual kernels need to swap out contexts
6554  * on a per-lwp basis.
6555  *
6556  * Caller does not necessarily hold any vmspace tokens.  Caller must control
6557  * the lwp (typically be in the context of the lwp).  We use a critical
6558  * section to protect against statclock and hardclock (statistics collection).
6559  */
6560 void
6561 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
6562 {
6563         struct vmspace *oldvm;
6564         struct pmap *pmap;
6565         thread_t td;
6566
6567         oldvm = lp->lwp_vmspace;
6568
6569         if (oldvm != newvm) {
6570                 crit_enter();
6571                 td = curthread;
6572                 KKASSERT((newvm->vm_refcnt & VM_REF_DELETED) == 0);
6573                 lp->lwp_vmspace = newvm;
6574                 if (td->td_lwp == lp) {
6575                         pmap = vmspace_pmap(newvm);
6576                         ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
6577                         if (pmap->pm_active_lock & CPULOCK_EXCL)
6578                                 pmap_interlock_wait(newvm);
6579 #if defined(SWTCH_OPTIM_STATS)
6580                         tlb_flush_count++;
6581 #endif
6582                         if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
6583                                 td->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
6584                                 if (meltdown_mitigation && pmap->pm_pmlpv_iso) {
6585                                         td->td_pcb->pcb_cr3_iso =
6586                                                 vtophys(pmap->pm_pml4_iso);
6587                                         td->td_pcb->pcb_flags |= PCB_ISOMMU;
6588                                 } else {
6589                                         td->td_pcb->pcb_cr3_iso = 0;
6590                                         td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
6591                                 }
6592                         } else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
6593                                 td->td_pcb->pcb_cr3 = KPML4phys;
6594                                 td->td_pcb->pcb_cr3_iso = 0;
6595                                 td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
6596                         } else {
6597                                 panic("pmap_setlwpvm: unknown pmap type\n");
6598                         }
6599
6600                         /*
6601                          * The MMU separation fields need to be updated
6602                          * (the trampoline can't access the pcb directly
6603                          * from the restricted user pmap).
6604                          */
6605                         {
6606                                 struct trampframe *tramp;
6607
6608                                 tramp = &pscpu->trampoline;
6609                                 tramp->tr_pcb_cr3 = td->td_pcb->pcb_cr3;
6610                                 tramp->tr_pcb_cr3_iso = td->td_pcb->pcb_cr3_iso;
6611                                 tramp->tr_pcb_flags = td->td_pcb->pcb_flags;
6612                                 tramp->tr_pcb_rsp = (register_t)td->td_pcb;
6613                                 /* tr_pcb_rsp doesn't change */
6614                         }
6615
6616                         /*
6617                          * In kernel-land we always use the normal PML4E
6618                          * so the kernel is fully mapped and can also access
6619                          * user memory.
6620                          */
6621                         load_cr3(td->td_pcb->pcb_cr3);
6622                         pmap = vmspace_pmap(oldvm);
6623                         ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
6624                                                mycpu->gd_cpuid);
6625                 }
6626                 crit_exit();
6627         }
6628 }
6629
6630 /*
6631  * Called when switching to a locked pmap, used to interlock against pmaps
6632  * undergoing modifications to prevent us from activating the MMU for the
6633  * target pmap until all such modifications have completed.  We have to do
6634  * this because the thread making the modifications has already set up its
6635  * SMP synchronization mask.
6636  *
6637  * This function cannot sleep!
6638  *
6639  * No requirements.
6640  */
6641 void
6642 pmap_interlock_wait(struct vmspace *vm)
6643 {
6644         struct pmap *pmap = &vm->vm_pmap;
6645
6646         if (pmap->pm_active_lock & CPULOCK_EXCL) {
6647                 crit_enter();
6648                 KKASSERT(curthread->td_critcount >= 2);
6649                 DEBUG_PUSH_INFO("pmap_interlock_wait");
6650                 while (pmap->pm_active_lock & CPULOCK_EXCL) {
6651                         cpu_ccfence();
6652                         lwkt_process_ipiq();
6653                 }
6654                 DEBUG_POP_INFO();
6655                 crit_exit();
6656         }
6657 }
6658
6659 vm_offset_t
6660 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
6661 {
6662
6663         if ((obj == NULL) || (size < NBPDR) ||
6664             ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
6665                 return addr;
6666         }
6667
6668         addr = roundup2(addr, NBPDR);
6669         return addr;
6670 }
6671
6672 /*
6673  * Used by kmalloc/kfree, page already exists at va
6674  */
6675 vm_page_t
6676 pmap_kvtom(vm_offset_t va)
6677 {
6678         pt_entry_t *ptep = vtopte(va);
6679
6680         KKASSERT((*ptep & kernel_pmap.pmap_bits[PG_DEVICE_IDX]) == 0);
6681         return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
6682 }
6683
6684 /*
6685  * Initialize machine-specific shared page directory support.  This
6686  * is executed when a VM object is created.
6687  */
6688 void
6689 pmap_object_init(vm_object_t object)
6690 {
6691         object->md.pmap_rw = NULL;
6692         object->md.pmap_ro = NULL;
6693 }
6694
6695 /*
6696  * Clean up machine-specific shared page directory support.  This
6697  * is executed when a VM object is destroyed.
6698  */
6699 void
6700 pmap_object_free(vm_object_t object)
6701 {
6702         pmap_t pmap;
6703
6704         if ((pmap = object->md.pmap_rw) != NULL) {
6705                 object->md.pmap_rw = NULL;
6706                 pmap_remove_noinval(pmap,
6707                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
6708                 CPUMASK_ASSZERO(pmap->pm_active);
6709                 pmap_release(pmap);
6710                 pmap_puninit(pmap);
6711                 kfree(pmap, M_OBJPMAP);
6712         }
6713         if ((pmap = object->md.pmap_ro) != NULL) {
6714                 object->md.pmap_ro = NULL;
6715                 pmap_remove_noinval(pmap,
6716                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
6717                 CPUMASK_ASSZERO(pmap->pm_active);
6718                 pmap_release(pmap);
6719                 pmap_puninit(pmap);
6720                 kfree(pmap, M_OBJPMAP);
6721         }
6722 }
6723
6724 /*
6725  * pmap_pgscan_callback - Used by pmap_pgscan to acquire the related
6726  * VM page and issue a pginfo->callback.
6727  *
6728  * We are expected to dispose of any non-NULL pte_pv.
6729  */
6730 static
6731 void
6732 pmap_pgscan_callback(pmap_t pmap, struct pmap_scan_info *info,
6733                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
6734                       pv_entry_t pt_pv, int sharept,
6735                       vm_offset_t va, pt_entry_t *ptep, void *arg)
6736 {
6737         struct pmap_pgscan_info *pginfo = arg;
6738         vm_page_t m;
6739
6740         if (pte_pv) {
6741                 /*
6742                  * Try to busy the page while we hold the pte_pv locked.
6743                  */
6744                 KKASSERT(pte_pv->pv_m);
6745                 m = PHYS_TO_VM_PAGE(*ptep & PG_FRAME);
6746                 if (vm_page_busy_try(m, TRUE) == 0) {
6747                         if (m == PHYS_TO_VM_PAGE(*ptep & PG_FRAME)) {
6748                                 /*
6749                                  * The callback is issued with the pte_pv
6750                                  * unlocked and put away, and the pt_pv
6751                                  * unlocked.
6752                                  */
6753                                 pv_put(pte_pv);
6754                                 if (pt_pv) {
6755                                         vm_page_wire_quick(pt_pv->pv_m);
6756                                         pv_unlock(pt_pv);
6757                                 }
6758                                 if (pginfo->callback(pginfo, va, m) < 0)
6759                                         info->stop = 1;
6760                                 if (pt_pv) {
6761                                         pv_lock(pt_pv);
6762                                         vm_page_unwire_quick(pt_pv->pv_m);
6763                                 }
6764                         } else {
6765                                 vm_page_wakeup(m);
6766                                 pv_put(pte_pv);
6767                         }
6768                 } else {
6769                         ++pginfo->busycount;
6770                         pv_put(pte_pv);
6771                 }
6772         } else {
6773                 /*
6774                  * Shared page table or unmanaged page (sharept or !sharept)
6775                  */
6776                 pv_placemarker_wakeup(pmap, pte_placemark);
6777         }
6778 }
6779
6780 void
6781 pmap_pgscan(struct pmap_pgscan_info *pginfo)
6782 {
6783         struct pmap_scan_info info;
6784
6785         pginfo->offset = pginfo->beg_addr;
6786         info.pmap = pginfo->pmap;
6787         info.sva = pginfo->beg_addr;
6788         info.eva = pginfo->end_addr;
6789         info.func = pmap_pgscan_callback;
6790         info.arg = pginfo;
6791         pmap_scan(&info, 0);
6792         if (info.stop == 0)
6793                 pginfo->offset = pginfo->end_addr;
6794 }
6795
6796 /*
6797  * Wait for a placemarker that we do not own to clear.  The placemarker
6798  * in question is not necessarily set to the pindex we want; we may have
6799  * to wait on the element because we want to reserve it ourselves.
6800  *
6801  * NOTE: PM_PLACEMARK_WAKEUP sets a bit which is already set in
6802  *       PM_NOPLACEMARK, so it does not interfere with placemarks
6803  *       which have already been woken up.
6804  */
6805 static
6806 void
6807 pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark)
6808 {
6809         if (*pmark != PM_NOPLACEMARK) {
6810                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
6811                 tsleep_interlock(pmark, 0);
6812                 if (*pmark != PM_NOPLACEMARK)
6813                         tsleep(pmark, PINTERLOCKED, "pvplw", 0);
6814         }
6815 }
6816
6817 /*
6818  * Wakeup a placemarker that we own.  Replace the entry with
6819  * PM_NOPLACEMARK and issue a wakeup() if necessary.
6820  */
6821 static
6822 void
6823 pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark)
6824 {
6825         vm_pindex_t pindex;
6826
6827         pindex = atomic_swap_long(pmark, PM_NOPLACEMARK);
6828         KKASSERT(pindex != PM_NOPLACEMARK);
6829         if (pindex & PM_PLACEMARK_WAKEUP)
6830                 wakeup(pmark);
6831 }