kernel - Fix rare pmap_scan_callback() panic
[dragonfly.git] / sys / platform / pc64 / x86_64 / pmap.c
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1994 David Greenman
5  * Copyright (c) 2003 Peter Wemm
6  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7  * Copyright (c) 2008, 2009 The DragonFly Project.
8  * Copyright (c) 2008, 2009 Jordan Gordeev.
9  * Copyright (c) 2011-2017 Matthew Dillon
10  * All rights reserved.
11  *
12  * This code is derived from software contributed to Berkeley by
13  * the Systems Programming Group of the University of Utah Computer
14  * Science Department and William Jolitz of UUNET Technologies Inc.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  */
44 /*
45  * Manage physical address maps for x86-64 systems.
46  *
47  * Some notes:
48  *      - The 'M'odified bit is only applicable to terminal PTEs.
49  *
50  *      - The 'U'ser access bit can be set for higher-level PTEs as
51  *        long as it isn't set for terminal PTEs for pages we don't
52  *        want user access to.
53  */
54
55 #if 0 /* JG */
56 #include "opt_pmap.h"
57 #endif
58 #include "opt_msgbuf.h"
59
60 #include <sys/param.h>
61 #include <sys/kernel.h>
62 #include <sys/proc.h>
63 #include <sys/msgbuf.h>
64 #include <sys/vmmeter.h>
65 #include <sys/mman.h>
66 #include <sys/systm.h>
67
68 #include <vm/vm.h>
69 #include <vm/vm_param.h>
70 #include <sys/sysctl.h>
71 #include <sys/lock.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_object.h>
76 #include <vm/vm_extern.h>
77 #include <vm/vm_pageout.h>
78 #include <vm/vm_pager.h>
79 #include <vm/vm_zone.h>
80
81 #include <sys/user.h>
82 #include <sys/thread2.h>
83 #include <sys/spinlock2.h>
84 #include <vm/vm_page2.h>
85
86 #include <machine/cputypes.h>
87 #include <machine/cpu.h>
88 #include <machine/md_var.h>
89 #include <machine/specialreg.h>
90 #include <machine/smp.h>
91 #include <machine_base/apic/apicreg.h>
92 #include <machine/globaldata.h>
93 #include <machine/pmap.h>
94 #include <machine/pmap_inval.h>
95 #include <machine/inttypes.h>
96
97 #include <ddb/ddb.h>
98
99 #define PMAP_KEEP_PDIRS
100 #ifndef PMAP_SHPGPERPROC
101 #define PMAP_SHPGPERPROC 2000
102 #endif
103
104 #if defined(DIAGNOSTIC)
105 #define PMAP_DIAGNOSTIC
106 #endif
107
108 #define MINPV 2048
109
110 /*
111  * pmap debugging will report who owns a pv lock when blocking.
112  */
113 #ifdef PMAP_DEBUG
114
115 #define PMAP_DEBUG_DECL         ,const char *func, int lineno
116 #define PMAP_DEBUG_ARGS         , __func__, __LINE__
117 #define PMAP_DEBUG_COPY         , func, lineno
118
119 #define pv_get(pmap, pindex, pmarkp)    _pv_get(pmap, pindex, pmarkp    \
120                                                         PMAP_DEBUG_ARGS)
121 #define pv_lock(pv)                     _pv_lock(pv                     \
122                                                         PMAP_DEBUG_ARGS)
123 #define pv_hold_try(pv)                 _pv_hold_try(pv                 \
124                                                         PMAP_DEBUG_ARGS)
125 #define pv_alloc(pmap, pindex, isnewp)  _pv_alloc(pmap, pindex, isnewp  \
126                                                         PMAP_DEBUG_ARGS)
127
128 #define pv_free(pv, pvp)                _pv_free(pv, pvp PMAP_DEBUG_ARGS)
129
130 #else
131
132 #define PMAP_DEBUG_DECL
133 #define PMAP_DEBUG_ARGS
134 #define PMAP_DEBUG_COPY
135
136 #define pv_get(pmap, pindex, pmarkp)            _pv_get(pmap, pindex, pmarkp)
137 #define pv_lock(pv)                     _pv_lock(pv)
138 #define pv_hold_try(pv)                 _pv_hold_try(pv)
139 #define pv_alloc(pmap, pindex, isnewp)  _pv_alloc(pmap, pindex, isnewp)
140 #define pv_free(pv, pvp)                _pv_free(pv, pvp)
141
142 #endif
143
144 /*
145  * Get PDEs and PTEs for user/kernel address space
146  */
147 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
148
149 #define pmap_pde_v(pmap, pte)           ((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
150 #define pmap_pte_w(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0)
151 #define pmap_pte_m(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0)
152 #define pmap_pte_u(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0)
153 #define pmap_pte_v(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
154
155 /*
156  * Given a map and a machine independent protection code,
157  * convert to an x86-64 protection code.
158  */
159 #define pte_prot(m, p)          \
160         (m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
161 static uint64_t protection_codes[PROTECTION_CODES_SIZE];
162
163 struct pmap kernel_pmap;
164 struct pmap iso_pmap;
165
166 MALLOC_DEFINE(M_OBJPMAP, "objpmap", "pmaps associated with VM objects");
167
168 vm_paddr_t avail_start;         /* PA of first available physical page */
169 vm_paddr_t avail_end;           /* PA of last available physical page */
170 vm_offset_t virtual2_start;     /* cutout free area prior to kernel start */
171 vm_offset_t virtual2_end;
172 vm_offset_t virtual_start;      /* VA of first avail page (after kernel bss) */
173 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
174 vm_offset_t KvaStart;           /* VA start of KVA space */
175 vm_offset_t KvaEnd;             /* VA end of KVA space (non-inclusive) */
176 vm_offset_t KvaSize;            /* max size of kernel virtual address space */
177 static boolean_t pmap_initialized = FALSE;      /* Has pmap_init completed? */
178 //static int pgeflag;           /* PG_G or-in */
179 uint64_t PatMsr;
180
181 static int ndmpdp;
182 static vm_paddr_t dmaplimit;
183 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
184
185 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE];        /* PAT -> PG_ bits */
186 /*static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];*/    /* PAT -> PG_ bits */
187
188 static uint64_t KPTbase;
189 static uint64_t KPTphys;
190 static uint64_t KPDphys;        /* phys addr of kernel level 2 */
191 static uint64_t KPDbase;        /* phys addr of kernel level 2 @ KERNBASE */
192 uint64_t KPDPphys;              /* phys addr of kernel level 3 */
193 uint64_t KPML4phys;             /* phys addr of kernel level 4 */
194
195 static uint64_t DMPDphys;       /* phys addr of direct mapped level 2 */
196 static uint64_t DMPDPphys;      /* phys addr of direct mapped level 3 */
197
198 /*
199  * Data for the pv entry allocation mechanism
200  */
201 static vm_zone_t pvzone;
202 static struct vm_zone pvzone_store;
203 static vm_pindex_t pv_entry_max=0, pv_entry_high_water=0;
204 static int pmap_pagedaemon_waken = 0;
205 static struct pv_entry *pvinit;
206
207 /*
208  * All those kernel PT submaps that BSD is so fond of
209  */
210 pt_entry_t *CMAP1 = NULL, *ptmmap;
211 caddr_t CADDR1 = NULL, ptvmmap = NULL;
212 static pt_entry_t *msgbufmap;
213 struct msgbuf *msgbufp=NULL;
214
215 /*
216  * PMAP default PG_* bits. Needed to be able to add
217  * EPT/NPT pagetable pmap_bits for the VMM module
218  */
219 uint64_t pmap_bits_default[] = {
220                 REGULAR_PMAP,                   /* TYPE_IDX             0 */
221                 X86_PG_V,                       /* PG_V_IDX             1 */
222                 X86_PG_RW,                      /* PG_RW_IDX            2 */
223                 X86_PG_U,                       /* PG_U_IDX             3 */
224                 X86_PG_A,                       /* PG_A_IDX             4 */
225                 X86_PG_M,                       /* PG_M_IDX             5 */
226                 X86_PG_PS,                      /* PG_PS_IDX            6 */
227                 X86_PG_G,                       /* PG_G_IDX             7 */
228                 X86_PG_AVAIL1,                  /* PG_AVAIL1_IDX        8 */
229                 X86_PG_AVAIL2,                  /* PG_AVAIL2_IDX        9 */
230                 X86_PG_AVAIL3,                  /* PG_AVAIL3_IDX        10 */
231                 X86_PG_NC_PWT | X86_PG_NC_PCD,  /* PG_N_IDX             11 */
232                 X86_PG_NX,                      /* PG_NX_IDX            12 */
233 };
234 /*
235  * Crashdump maps.
236  */
237 static pt_entry_t *pt_crashdumpmap;
238 static caddr_t crashdumpmap;
239
240 static int pmap_debug = 0;
241 SYSCTL_INT(_machdep, OID_AUTO, pmap_debug, CTLFLAG_RW,
242     &pmap_debug, 0, "Debug pmaps");
243 #ifdef PMAP_DEBUG2
244 static int pmap_enter_debug = 0;
245 SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW,
246     &pmap_enter_debug, 0, "Debug pmap_enter() calls");
247 #endif
248 static int pmap_yield_count = 64;
249 SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
250     &pmap_yield_count, 0, "Yield during init_pt/release");
251 static int pmap_mmu_optimize = 0;
252 SYSCTL_INT(_machdep, OID_AUTO, pmap_mmu_optimize, CTLFLAG_RW,
253     &pmap_mmu_optimize, 0, "Share page table pages when possible");
254 int pmap_fast_kernel_cpusync = 0;
255 SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW,
256     &pmap_fast_kernel_cpusync, 0, "Allow fast kernel pmap cpusyncs");
257 int pmap_dynamic_delete = 0;
258 SYSCTL_INT(_machdep, OID_AUTO, pmap_dynamic_delete, CTLFLAG_RW,
259     &pmap_dynamic_delete, 0, "Dynamically delete PT/PD/PDPs");
260 int pmap_lock_delay = 100;
261 SYSCTL_INT(_machdep, OID_AUTO, pmap_lock_delay, CTLFLAG_RW,
262     &pmap_lock_delay, 0, "Spin loops");
263 static int meltdown_mitigation = -1;
264 TUNABLE_INT("machdep.meltdown_mitigation", &meltdown_mitigation);
265 SYSCTL_INT(_machdep, OID_AUTO, meltdown_mitigation, CTLFLAG_RW,
266     &meltdown_mitigation, 0, "Userland pmap isolation");
267
268 static int pmap_nx_enable = 0;
269 /* needs manual TUNABLE in early probe, see below */
270
271 /* Standard user access functions */
272 extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
273     size_t *lencopied);
274 extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
275 extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
276 extern int std_fubyte (const uint8_t *base);
277 extern int std_subyte (uint8_t *base, uint8_t byte);
278 extern int32_t std_fuword32 (const uint32_t *base);
279 extern int64_t std_fuword64 (const uint64_t *base);
280 extern int std_suword64 (uint64_t *base, uint64_t word);
281 extern int std_suword32 (uint32_t *base, int word);
282 extern uint32_t std_swapu32 (volatile uint32_t *base, uint32_t v);
283 extern uint64_t std_swapu64 (volatile uint64_t *base, uint64_t v);
284
285 static void pv_hold(pv_entry_t pv);
286 static int _pv_hold_try(pv_entry_t pv
287                                 PMAP_DEBUG_DECL);
288 static void pv_drop(pv_entry_t pv);
289 static void _pv_lock(pv_entry_t pv
290                                 PMAP_DEBUG_DECL);
291 static void pv_unlock(pv_entry_t pv);
292 static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
293                                 PMAP_DEBUG_DECL);
294 static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp
295                                 PMAP_DEBUG_DECL);
296 static void _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL);
297 static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex,
298                                 vm_pindex_t **pmarkp, int *errorp);
299 static void pv_put(pv_entry_t pv);
300 static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
301 static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
302                       pv_entry_t *pvpp);
303 static pv_entry_t pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex,
304                       pv_entry_t *pvpp, vm_map_entry_t entry, vm_offset_t va);
305 static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
306                         pmap_inval_bulk_t *bulk, int destroy);
307 static vm_page_t pmap_remove_pv_page(pv_entry_t pv);
308 static int pmap_release_pv(pv_entry_t pv, pv_entry_t pvp,
309                         pmap_inval_bulk_t *bulk);
310
311 struct pmap_scan_info;
312 static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
313                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
314                       pv_entry_t pt_pv, int sharept,
315                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
316 static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
317                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
318                       pv_entry_t pt_pv, int sharept,
319                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
320
321 static void x86_64_protection_init (void);
322 static void create_pagetables(vm_paddr_t *firstaddr);
323 static void pmap_remove_all (vm_page_t m);
324 static boolean_t pmap_testbit (vm_page_t m, int bit);
325
326 static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
327 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
328
329 static void pmap_pinit_defaults(struct pmap *pmap);
330 static void pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark);
331 static void pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark);
332
333 static int
334 pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
335 {
336         if (pv1->pv_pindex < pv2->pv_pindex)
337                 return(-1);
338         if (pv1->pv_pindex > pv2->pv_pindex)
339                 return(1);
340         return(0);
341 }
342
343 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry,
344              pv_entry_compare, vm_pindex_t, pv_pindex);
345
346 static __inline
347 void
348 pmap_page_stats_adding(vm_page_t m)
349 {
350         globaldata_t gd = mycpu;
351
352         if (TAILQ_EMPTY(&m->md.pv_list)) {
353                 ++gd->gd_vmtotal.t_arm;
354         } else if (TAILQ_FIRST(&m->md.pv_list) ==
355                    TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
356                 ++gd->gd_vmtotal.t_armshr;
357                 ++gd->gd_vmtotal.t_avmshr;
358         } else {
359                 ++gd->gd_vmtotal.t_avmshr;
360         }
361 }
362
363 static __inline
364 void
365 pmap_page_stats_deleting(vm_page_t m)
366 {
367         globaldata_t gd = mycpu;
368
369         if (TAILQ_EMPTY(&m->md.pv_list)) {
370                 --gd->gd_vmtotal.t_arm;
371         } else if (TAILQ_FIRST(&m->md.pv_list) ==
372                    TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
373                 --gd->gd_vmtotal.t_armshr;
374                 --gd->gd_vmtotal.t_avmshr;
375         } else {
376                 --gd->gd_vmtotal.t_avmshr;
377         }
378 }
379
380 /*
381  * This is an inelegant crowbar to prevent heavily threaded programs
382  * from creating long live-locks in the pmap code when pmap_mmu_optimize
383  * is enabled.  Without it a pmap-local page table page can wind up being
384  * constantly created and destroyed (without injury, but also without
385  * progress) as the optimization tries to switch to the object's shared page
386  * table page.
387  */
388 static __inline void
389 pmap_softwait(pmap_t pmap)
390 {
391         while (pmap->pm_softhold) {
392                 tsleep_interlock(&pmap->pm_softhold, 0);
393                 if (pmap->pm_softhold)
394                         tsleep(&pmap->pm_softhold, PINTERLOCKED, "mmopt", 0);
395         }
396 }
397
398 static __inline void
399 pmap_softhold(pmap_t pmap)
400 {
401         while (atomic_swap_int(&pmap->pm_softhold, 1) == 1) {
402                 tsleep_interlock(&pmap->pm_softhold, 0);
403                 if (atomic_swap_int(&pmap->pm_softhold, 1) == 1)
404                         tsleep(&pmap->pm_softhold, PINTERLOCKED, "mmopt", 0);
405         }
406 }
407
408 static __inline void
409 pmap_softdone(pmap_t pmap)
410 {
411         atomic_swap_int(&pmap->pm_softhold, 0);
412         wakeup(&pmap->pm_softhold);
413 }
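
/*
 * Usage sketch (not taken verbatim from this file): the thread attempting
 * the shared-page-table switch brackets the operation with pmap_softhold()
 * and pmap_softdone(), while competing threads call pmap_softwait() before
 * re-instantiating a pmap-local page table page, e.g.:
 *
 *      pmap_softhold(pmap);
 *      ... attempt to install the object's shared page table page ...
 *      pmap_softdone(pmap);
 *
 *      (elsewhere, before re-creating a local page table page)
 *      pmap_softwait(pmap);
 */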
414
415 /*
416  * Move the kernel virtual free pointer to the next
417  * 2MB.  This is used to help improve performance
418  * by using a large (2MB) page for much of the kernel
419  * (.text, .data, .bss)
420  */
421 static
422 vm_offset_t
423 pmap_kmem_choose(vm_offset_t addr)
424 {
425         vm_offset_t newaddr = addr;
426
427         newaddr = roundup2(addr, NBPDR);
428         return newaddr;
429 }
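
/*
 * Example: with NBPDR == 2MB, pmap_kmem_choose(0x601000) returns 0x800000,
 * while an already 2MB-aligned address is returned unchanged, so subsequent
 * KVA allocations can be backed by a large page.
 */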
430
431 /*
432  * Returns the pindex of a page table entry (representing a terminal page).
433  * There are NUPTE_TOTAL page table entries possible (a huge number)
434  *
435  * x86-64 has a 48-bit address space, where bit 47 is sign-extended out.
436  * We want to properly translate negative KVAs.
437  */
438 static __inline
439 vm_pindex_t
440 pmap_pte_pindex(vm_offset_t va)
441 {
442         return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1));
443 }
444
445 /*
446  * Returns the pindex of a page table.
447  */
448 static __inline
449 vm_pindex_t
450 pmap_pt_pindex(vm_offset_t va)
451 {
452         return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1)));
453 }
454
455 /*
456  * Returns the pindex of a page directory.
457  */
458 static __inline
459 vm_pindex_t
460 pmap_pd_pindex(vm_offset_t va)
461 {
462         return (NUPTE_TOTAL + NUPT_TOTAL +
463                 ((va >> PDPSHIFT) & (NUPD_TOTAL - 1)));
464 }
465
466 static __inline
467 vm_pindex_t
468 pmap_pdp_pindex(vm_offset_t va)
469 {
470         return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
471                 ((va >> PML4SHIFT) & (NUPDP_TOTAL - 1)));
472 }
473
474 static __inline
475 vm_pindex_t
476 pmap_pml4_pindex(void)
477 {
478         return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL);
479 }
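
/*
 * Summary of the PV pindex layout implied by the functions above.  The
 * constants here are illustrative; with the usual 48-bit, 4-level layout
 * NUPTE_TOTAL = 2^36, NUPT_TOTAL = 2^27, NUPD_TOTAL = 2^18 and
 * NUPDP_TOTAL = 2^9 (see machine/pmap.h for the authoritative values):
 *
 *      [0, NUPTE_TOTAL)                                terminal PTEs
 *      [NUPTE_TOTAL, +NUPT_TOTAL)                      page table (PT) pages
 *      [NUPTE_TOTAL+NUPT_TOTAL, +NUPD_TOTAL)           page directory (PD) pages
 *      [NUPTE_TOTAL+NUPT_TOTAL+NUPD_TOTAL, +NUPDP_TOTAL)  PDP pages
 *      NUPTE_TOTAL+NUPT_TOTAL+NUPD_TOTAL+NUPDP_TOTAL   the single PML4 pindex
 */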
480
481 /*
482  * Return various clipped indexes for a given VA
483  *
484  * Returns the index of a pt in a page directory, representing a page
485  * table.
486  */
487 static __inline
488 vm_pindex_t
489 pmap_pt_index(vm_offset_t va)
490 {
491         return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
492 }
493
494 /*
495  * Returns the index of a pd in a page directory page, representing a page
496  * directory.
497  */
498 static __inline
499 vm_pindex_t
500 pmap_pd_index(vm_offset_t va)
501 {
502         return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
503 }
504
505 /*
506  * Returns the index of a pdp in the pml4 table, representing a page
507  * directory page.
508  */
509 static __inline
510 vm_pindex_t
511 pmap_pdp_index(vm_offset_t va)
512 {
513         return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
514 }
515
516 /*
517  * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
518  * the PT layer.  This will speed up core pmap operations considerably.
519  * We also cache the PTE layer to (hopefully) improve relative lookup
520  * speeds.
521  *
522  * NOTE: The pmap spinlock does not need to be held but the passed-in pv
523  *       must be in a known associated state (typically by being locked when
524  *       the pmap spinlock isn't held).  We allow the race for that case.
525  *
526  * NOTE: pm_pvhint* is only accessed (read) with the spin-lock held, using
527  *       cpu_ccfence() to prevent compiler optimizations from reloading the
528  *       field.
529  */
530 static __inline
531 void
532 pv_cache(pmap_t pmap, pv_entry_t pv, vm_pindex_t pindex)
533 {
534         if (pindex < pmap_pt_pindex(0)) {
535                 pmap->pm_pvhint_pte = pv;
536         } else if (pindex < pmap_pd_pindex(0)) {
537                 pmap->pm_pvhint_pt = pv;
538         }
539 }
540
541 /*
542  * Locate the requested pt_entry
543  */
544 static __inline
545 pv_entry_t
546 pv_entry_lookup(pmap_t pmap, vm_pindex_t pindex)
547 {
548         pv_entry_t pv;
549
550 #if 1
551         if (pindex < pmap_pt_pindex(0))
552                 pv = pmap->pm_pvhint_pte;
553         else if (pindex < pmap_pd_pindex(0))
554                 pv = pmap->pm_pvhint_pt;
555         else
556                 pv = NULL;
557         cpu_ccfence();
558         if (pv == NULL || pv->pv_pmap != pmap) {
559                 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
560                 if (pv)
561                         pv_cache(pmap, pv, pindex);
562         } else if (pv->pv_pindex != pindex) {
563                 pv = pv_entry_rb_tree_RB_LOOKUP_REL(&pmap->pm_pvroot,
564                                                     pindex, pv);
565                 if (pv)
566                         pv_cache(pmap, pv, pindex);
567         }
568 #else
569         pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
570 #endif
571         return pv;
572 }
573
574 /*
575  * pmap_pte_quick:
576  *
577  *      Super fast pmap_pte routine best used when scanning the pv lists.
578  *      This eliminates many coarse-grained invltlb calls.  Note that many of
579  *      the pv list scans are across different pmaps and it is very wasteful
580  *      to do an entire invltlb when checking a single mapping.
581  */
582 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
583
584 static
585 pt_entry_t *
586 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
587 {
588         return pmap_pte(pmap, va);
589 }
590
591 /*
592  * The placemarker hash must be broken up into four zones so lock
593  * ordering semantics continue to work (e.g. pte, pt, pd, then pdp).
594  *
595  * Placemarkers are used to 'lock' page table indices that do not have
596  * a pv_entry.  This allows the pmap to support managed and unmanaged
597  * pages and shared page tables.
598  */
599 #define PM_PLACE_BASE   (PM_PLACEMARKS >> 2)
600
601 static __inline
602 vm_pindex_t *
603 pmap_placemarker_hash(pmap_t pmap, vm_pindex_t pindex)
604 {
605         int hi;
606
607         if (pindex < pmap_pt_pindex(0))         /* zone 0 - PTE */
608                 hi = 0;
609         else if (pindex < pmap_pd_pindex(0))    /* zone 1 - PT */
610                 hi = PM_PLACE_BASE;
611         else if (pindex < pmap_pdp_pindex(0))   /* zone 2 - PD */
612                 hi = PM_PLACE_BASE << 1;
613         else                                    /* zone 3 - PDP (and PML4E) */
614                 hi = PM_PLACE_BASE | (PM_PLACE_BASE << 1);
615         hi += pindex & (PM_PLACE_BASE - 1);
616
617         return (&pmap->pm_placemarks[hi]);
618 }
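
/*
 * Illustrative example (PM_PLACEMARKS value assumed, see pmap.h): if
 * PM_PLACEMARKS were 64 then PM_PLACE_BASE would be 16, and a PTE-zone
 * pindex would hash into slots [0,15], a PT-zone pindex into [16,31],
 * a PD-zone pindex into [32,47] and a PDP/PML4-zone pindex into [48,63],
 * preserving the pte < pt < pd < pdp lock ordering between zones.
 */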
619
620
621 /*
622  * Generic procedure to index a pte from a pt, pd, or pdp.
623  *
624  * NOTE: Normally passed pindex as pmap_xx_index().  pmap_xx_pindex() is NOT
625  *       a page table page index but is instead a PV lookup index.
626  */
627 static
628 void *
629 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex)
630 {
631         pt_entry_t *pte;
632
633         pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m));
634         return(&pte[pindex]);
635 }
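
/*
 * Example: pv_pte_lookup(pt_pv, pmap_pte_index(va)) returns the PTE slot
 * for va within the page table page held by pt_pv, via the DMAP.  The same
 * routine indexes PD and PDP pages when passed pmap_pt_index() or
 * pmap_pd_index() respectively.
 */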
636
637 /*
638  * Return pointer to PDP slot in the PML4
639  */
640 static __inline
641 pml4_entry_t *
642 pmap_pdp(pmap_t pmap, vm_offset_t va)
643 {
644         return (&pmap->pm_pml4[pmap_pdp_index(va)]);
645 }
646
647 /*
648  * Return pointer to PD slot in the PDP given a pointer to the PDP
649  */
650 static __inline
651 pdp_entry_t *
652 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va)
653 {
654         pdp_entry_t *pd;
655
656         pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME);
657         return (&pd[pmap_pd_index(va)]);
658 }
659
660 /*
661  * Return pointer to PD slot in the PDP.
662  */
663 static __inline
664 pdp_entry_t *
665 pmap_pd(pmap_t pmap, vm_offset_t va)
666 {
667         pml4_entry_t *pdp;
668
669         pdp = pmap_pdp(pmap, va);
670         if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0)
671                 return NULL;
672         return (pmap_pdp_to_pd(*pdp, va));
673 }
674
675 /*
676  * Return pointer to PT slot in the PD given a pointer to the PD
677  */
678 static __inline
679 pd_entry_t *
680 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va)
681 {
682         pd_entry_t *pt;
683
684         pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME);
685         return (&pt[pmap_pt_index(va)]);
686 }
687
688 /*
689  * Return pointer to PT slot in the PD
690  *
691  * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs,
692  *                   so we cannot lookup the PD via the PDP.  Instead we
693  *                   must look it up via the pmap.
694  */
695 static __inline
696 pd_entry_t *
697 pmap_pt(pmap_t pmap, vm_offset_t va)
698 {
699         pdp_entry_t *pd;
700         pv_entry_t pv;
701         vm_pindex_t pd_pindex;
702         vm_paddr_t phys;
703
704         if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
705                 pd_pindex = pmap_pd_pindex(va);
706                 spin_lock_shared(&pmap->pm_spin);
707                 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
708                 if (pv == NULL || pv->pv_m == NULL) {
709                         spin_unlock_shared(&pmap->pm_spin);
710                         return NULL;
711                 }
712                 phys = VM_PAGE_TO_PHYS(pv->pv_m);
713                 spin_unlock_shared(&pmap->pm_spin);
714                 return (pmap_pd_to_pt(phys, va));
715         } else {
716                 pd = pmap_pd(pmap, va);
717                 if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
718                          return NULL;
719                 return (pmap_pd_to_pt(*pd, va));
720         }
721 }
722
723 /*
724  * Return pointer to PTE slot in the PT given a pointer to the PT
725  */
726 static __inline
727 pt_entry_t *
728 pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
729 {
730         pt_entry_t *pte;
731
732         pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
733         return (&pte[pmap_pte_index(va)]);
734 }
735
736 /*
737  * Return pointer to PTE slot in the PT
738  */
739 static __inline
740 pt_entry_t *
741 pmap_pte(pmap_t pmap, vm_offset_t va)
742 {
743         pd_entry_t *pt;
744
745         pt = pmap_pt(pmap, va);
746         if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
747                  return NULL;
748         if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
749                 return ((pt_entry_t *)pt);
750         return (pmap_pt_to_pte(*pt, va));
751 }
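
/*
 * pmap_pte() thus performs a full software walk (PML4 slot -> PDP slot ->
 * PD slot -> PTE slot), returning NULL if any intermediate level is not
 * valid.  If the PD entry maps a 2MB page (PG_PS set), a pointer to that
 * PD entry is returned in lieu of a 4KB PTE pointer.
 */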
752
753 /*
754  * Return address of PT slot in PD (KVM only)
755  *
756  * Cannot be used for user page tables because it might interfere with
757  * the shared page-table-page optimization (pmap_mmu_optimize).
758  */
759 static __inline
760 pd_entry_t *
761 vtopt(vm_offset_t va)
762 {
763         uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
764                                   NPML4EPGSHIFT)) - 1);
765
766         return (PDmap + ((va >> PDRSHIFT) & mask));
767 }
768
769 /*
770  * KVM - return address of PTE slot in PT
771  */
772 static __inline
773 pt_entry_t *
774 vtopte(vm_offset_t va)
775 {
776         uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
777                                   NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
778
779         return (PTmap + ((va >> PAGE_SHIFT) & mask));
780 }
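
/*
 * Both vtopt() and vtopte() rely on the recursive PML4 slot installed by
 * create_pagetables() (PML4PML4I points back at KPML4phys), which exposes
 * every PTE of the current address space as a linear array at PTmap and
 * every PD entry at PDmap.  For example, the PTE mapping a kernel va is
 * simply PTmap[(va >> PAGE_SHIFT) & mask].
 */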
781
782 /*
783  * Returns the physical address translation from va for a user address.
784  * (vm_paddr_t)-1 is returned on failure.
785  */
786 vm_paddr_t
787 uservtophys(vm_offset_t va)
788 {
789         uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
790                                   NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
791         vm_paddr_t pa;
792         pt_entry_t pte;
793         pmap_t pmap;
794
795         pmap = vmspace_pmap(mycpu->gd_curthread->td_lwp->lwp_vmspace);
796         pa = (vm_paddr_t)-1;
797         if (va < VM_MAX_USER_ADDRESS) {
798                 pte = kreadmem64(PTmap + ((va >> PAGE_SHIFT) & mask));
799                 if (pte & pmap->pmap_bits[PG_V_IDX])
800                         pa = (pte & PG_FRAME) | (va & PAGE_MASK);
801         }
802         return pa;
803 }
804
805 static uint64_t
806 allocpages(vm_paddr_t *firstaddr, long n)
807 {
808         uint64_t ret;
809
810         ret = *firstaddr;
811         bzero((void *)ret, n * PAGE_SIZE);
812         *firstaddr += n * PAGE_SIZE;
813         return (ret);
814 }
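
/*
 * Note: at this stage we are still running (mostly) with physical ==
 * virtual addressing (see create_pagetables() below), so the physical
 * address returned here is also directly dereferenceable; the pages are
 * zeroed before being handed out.
 */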
815
816 static
817 void
818 create_pagetables(vm_paddr_t *firstaddr)
819 {
820         long i;         /* must be 64 bits */
821         long nkpt_base;
822         long nkpt_phys;
823         long nkpd_phys;
824         int j;
825
826         /*
827          * We are running (mostly) V=P at this point
828          *
829          * Calculate how many 1GB PD entries in our PDP pages are needed
830          * for the DMAP.  This is only allocated if the system does not
831          * support 1GB pages.  Otherwise ndmpdp is simply a count of
832          * the number of 1G terminal entries needed in our PDP pages.
833          *
834          * NOTE: Maxmem is in pages
835          */
836         ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
837         if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
838                 ndmpdp = 4;
839         KKASSERT(ndmpdp <= NDMPML4E * NPML4EPG);
840
841         /*
842          * Starting at KERNBASE - map all 2G worth of page table pages.
843          * KERNBASE is offset -2G from the end of kvm.  This will accommodate
844          * all KVM allocations above KERNBASE, including the SYSMAPs below.
845          *
846          * We do this by allocating 2*512 PT pages.  Each PT page can map
847          * 2MB, for 2GB total.
848          */
849         nkpt_base = (NPDPEPG - KPDPI) * NPTEPG; /* typically 2 x 512 */
850
851         /*
852          * Starting at the beginning of kvm (VM_MIN_KERNEL_ADDRESS),
853          * Calculate how many page table pages we need to preallocate
854          * for early vm_map allocations.
855          *
856          * A few extra won't hurt, they will get used up in the running
857          * system.
858          *
859          * vm_page array
860          * initial pventry's
861          */
862         nkpt_phys = (Maxmem * sizeof(struct vm_page) + NBPDR - 1) / NBPDR;
863         nkpt_phys += (Maxmem * sizeof(struct pv_entry) + NBPDR - 1) / NBPDR;
864         nkpt_phys += 128;       /* a few extra */
865
866         /*
867          * The highest value nkpd_phys can be set to is
868          * NKPDPE - (NPDPEPG - KPDPI) (i.e. NKPDPE - 2).
869          *
870          * Doing so would cause all PD pages to be pre-populated for
871          * a maximal KVM space (approximately 16*512 pages, or 32MB).
872          * We can save memory by not doing this.
873          */
874         nkpd_phys = (nkpt_phys + NPDPEPG - 1) / NPDPEPG;
875
876         /*
877          * Allocate pages
878          *
879          * Normally NKPML4E=1-16 (1-16 kernel PDP pages)
880          * Normally NKPDPE=NKPML4E*512-1 (511 kernel PD pages minimum)
881          *
882          * Only allocate enough PD pages
883          * NOTE: We allocate all kernel PD pages up-front, typically
884          *       ~511G of KVM, requiring 511 PD pages.
885          */
886         KPTbase = allocpages(firstaddr, nkpt_base);     /* KERNBASE to end */
887         KPTphys = allocpages(firstaddr, nkpt_phys);     /* KVA start */
888         KPML4phys = allocpages(firstaddr, 1);           /* recursive PML4 map */
889         KPDPphys = allocpages(firstaddr, NKPML4E);      /* kernel PDP pages */
890         KPDphys = allocpages(firstaddr, nkpd_phys);     /* kernel PD pages */
891
892         /*
893          * Alloc PD pages for the area starting at KERNBASE.
894          */
895         KPDbase = allocpages(firstaddr, NPDPEPG - KPDPI);
896
897         /*
898          * Stuff for our DMAP
899          */
900         DMPDPphys = allocpages(firstaddr, NDMPML4E);
901         if ((amd_feature & AMDID_PAGE1GB) == 0)
902                 DMPDphys = allocpages(firstaddr, ndmpdp);
903         dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
904
905         /*
906          * Fill in the underlying page table pages for the area around
907          * KERNBASE.  This remaps low physical memory to KERNBASE.
908          *
909          * Read-only from zero to physfree
910          * XXX not fully used, underneath 2M pages
911          */
912         for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
913                 ((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT;
914                 ((pt_entry_t *)KPTbase)[i] |=
915                     pmap_bits_default[PG_RW_IDX] |
916                     pmap_bits_default[PG_V_IDX] |
917                     pmap_bits_default[PG_G_IDX];
918         }
919
920         /*
921          * Now map the initial kernel page tables.  One block of page
922          * tables is placed at the beginning of kernel virtual memory,
923          * and another block is placed at KERNBASE to map the kernel binary,
924          * data, bss, and initial pre-allocations.
925          */
926         for (i = 0; i < nkpt_base; i++) {
927                 ((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT);
928                 ((pd_entry_t *)KPDbase)[i] |=
929                     pmap_bits_default[PG_RW_IDX] |
930                     pmap_bits_default[PG_V_IDX];
931         }
932         for (i = 0; i < nkpt_phys; i++) {
933                 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
934                 ((pd_entry_t *)KPDphys)[i] |=
935                     pmap_bits_default[PG_RW_IDX] |
936                     pmap_bits_default[PG_V_IDX];
937         }
938
939         /*
940          * Map from zero to end of allocations using 2M pages as an
941          * optimization.  This will bypass some of the KPTBase pages
942          * above in the KERNBASE area.
943          */
944         for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
945                 ((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT;
946                 ((pd_entry_t *)KPDbase)[i] |=
947                     pmap_bits_default[PG_RW_IDX] |
948                     pmap_bits_default[PG_V_IDX] |
949                     pmap_bits_default[PG_PS_IDX] |
950                     pmap_bits_default[PG_G_IDX];
951         }
952
953         /*
954          * Load PD addresses into the PDP pages for primary KVA space to
955          * cover existing page tables.  PD's for KERNBASE are handled in
956          * the next loop.
957          *
958          * expected to pre-populate all of its PDs.  See NKPDPE in vmparam.h.
959          * The kernel PDP is expected to pre-populate all of its PDs.  See NKPDPE in vmparam.h.
960         for (i = 0; i < nkpd_phys; i++) {
961                 ((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] =
962                                 KPDphys + (i << PAGE_SHIFT);
963                 ((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] |=
964                     pmap_bits_default[PG_RW_IDX] |
965                     pmap_bits_default[PG_V_IDX] |
966                     pmap_bits_default[PG_A_IDX];
967         }
968
969         /*
970          * Load PDs for KERNBASE to the end
971          */
972         i = (NKPML4E - 1) * NPDPEPG + KPDPI;
973         for (j = 0; j < NPDPEPG - KPDPI; ++j) {
974                 ((pdp_entry_t *)KPDPphys)[i + j] =
975                                 KPDbase + (j << PAGE_SHIFT);
976                 ((pdp_entry_t *)KPDPphys)[i + j] |=
977                     pmap_bits_default[PG_RW_IDX] |
978                     pmap_bits_default[PG_V_IDX] |
979                     pmap_bits_default[PG_A_IDX];
980         }
981
982         /*
983          * Now set up the direct map space using either 2MB or 1GB pages.
984          * Preset PG_M and PG_A because demotion expects it.
985          *
986          * When filling in entries in the PD pages, make sure any excess
987          * entries are set to zero, as we allocated enough PD pages.
988          */
989         if ((amd_feature & AMDID_PAGE1GB) == 0) {
990                 /*
991                  * Use 2MB pages
992                  */
993                 for (i = 0; i < NPDEPG * ndmpdp; i++) {
994                         ((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT;
995                         ((pd_entry_t *)DMPDphys)[i] |=
996                             pmap_bits_default[PG_RW_IDX] |
997                             pmap_bits_default[PG_V_IDX] |
998                             pmap_bits_default[PG_PS_IDX] |
999                             pmap_bits_default[PG_G_IDX] |
1000                             pmap_bits_default[PG_M_IDX] |
1001                             pmap_bits_default[PG_A_IDX];
1002                 }
1003
1004                 /*
1005                  * And the direct map space's PDP
1006                  */
1007                 for (i = 0; i < ndmpdp; i++) {
1008                         ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
1009                                                         (i << PAGE_SHIFT);
1010                         ((pdp_entry_t *)DMPDPphys)[i] |=
1011                             pmap_bits_default[PG_RW_IDX] |
1012                             pmap_bits_default[PG_V_IDX];
1013                 }
1014         } else {
1015                 /*
1016                  * 1GB pages
1017                  */
1018                 for (i = 0; i < ndmpdp; i++) {
1019                         ((pdp_entry_t *)DMPDPphys)[i] =
1020                                                 (vm_paddr_t)i << PDPSHIFT;
1021                         ((pdp_entry_t *)DMPDPphys)[i] |=
1022                             pmap_bits_default[PG_RW_IDX] |
1023                             pmap_bits_default[PG_V_IDX] |
1024                             pmap_bits_default[PG_PS_IDX] |
1025                             pmap_bits_default[PG_G_IDX] |
1026                             pmap_bits_default[PG_M_IDX] |
1027                             pmap_bits_default[PG_A_IDX];
1028                 }
1029         }
1030
1031         /* And recursively map PML4 to itself in order to get PTmap */
1032         ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
1033         ((pdp_entry_t *)KPML4phys)[PML4PML4I] |=
1034             pmap_bits_default[PG_RW_IDX] |
1035             pmap_bits_default[PG_V_IDX] |
1036             pmap_bits_default[PG_A_IDX];
1037
1038         /*
1039          * Connect the Direct Map slots up to the PML4
1040          */
1041         for (j = 0; j < NDMPML4E; ++j) {
1042                 ((pdp_entry_t *)KPML4phys)[DMPML4I + j] =
1043                     (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
1044                     pmap_bits_default[PG_RW_IDX] |
1045                     pmap_bits_default[PG_V_IDX] |
1046                     pmap_bits_default[PG_A_IDX];
1047         }
1048
1049         /*
1050          * Connect the KVA slot up to the PML4
1051          */
1052         for (j = 0; j < NKPML4E; ++j) {
1053                 ((pdp_entry_t *)KPML4phys)[KPML4I + j] =
1054                     KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT);
1055                 ((pdp_entry_t *)KPML4phys)[KPML4I + j] |=
1056                     pmap_bits_default[PG_RW_IDX] |
1057                     pmap_bits_default[PG_V_IDX] |
1058                     pmap_bits_default[PG_A_IDX];
1059         }
1060         cpu_mfence();
1061         cpu_invltlb();
1062 }
1063
1064 /*
1065  *      Bootstrap the system enough to run with virtual memory.
1066  *
1067  *      On x86_64 this is called after mapping has already been enabled
1068  *      and just syncs the pmap module with what has already been done.
1069  *      [We can't call it easily with mapping off since the kernel is not
1070  *      mapped with PA == VA, hence we would have to relocate every address
1071  *      from the linked base (virtual) address "KERNBASE" to the actual
1072  *      (physical) address starting relative to 0]
1073  */
1074 void
1075 pmap_bootstrap(vm_paddr_t *firstaddr)
1076 {
1077         vm_offset_t va;
1078         pt_entry_t *pte;
1079         int i;
1080
1081         KvaStart = VM_MIN_KERNEL_ADDRESS;
1082         KvaEnd = VM_MAX_KERNEL_ADDRESS;
1083         KvaSize = KvaEnd - KvaStart;
1084
1085         avail_start = *firstaddr;
1086
1087         /*
1088          * Create an initial set of page tables to run the kernel in.
1089          */
1090         create_pagetables(firstaddr);
1091
1092         virtual2_start = KvaStart;
1093         virtual2_end = PTOV_OFFSET;
1094
1095         virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
1096         virtual_start = pmap_kmem_choose(virtual_start);
1097
1098         virtual_end = VM_MAX_KERNEL_ADDRESS;
1099
1100         /* XXX do %cr0 as well */
1101         load_cr4(rcr4() | CR4_PGE | CR4_PSE);
1102         load_cr3(KPML4phys);
1103
1104         /*
1105          * Initialize protection array.
1106          */
1107         x86_64_protection_init();
1108
1109         /*
1110          * The kernel's pmap is statically allocated so we don't have to use
1111          * pmap_create, which is unlikely to work correctly at this part of
1112          * the boot sequence (XXX and which no longer exists).
1113          */
1114         kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
1115         kernel_pmap.pm_count = 1;
1116         CPUMASK_ASSALLONES(kernel_pmap.pm_active);
1117         RB_INIT(&kernel_pmap.pm_pvroot);
1118         spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
1119         for (i = 0; i < PM_PLACEMARKS; ++i)
1120                 kernel_pmap.pm_placemarks[i] = PM_NOPLACEMARK;
1121
1122         /*
1123          * Reserve some special page table entries/VA space for temporary
1124          * mapping of pages.
1125          */
1126 #define SYSMAP(c, p, v, n)      \
1127         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
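
        /*
         * For reference, SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands to:
         *
         *      CADDR1 = (caddr_t)va; va += ((1)*PAGE_SIZE);
         *      CMAP1 = pte; pte += (1);
         *
         * i.e. it carves one page of KVA out of 'va' and records the
         * matching PTE pointer, advancing both cursors.
         */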
1128
1129         va = virtual_start;
1130         pte = vtopte(va);
1131
1132         /*
1133          * CMAP1/CMAP2 are used for zeroing and copying pages.
1134          */
1135         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
1136
1137         /*
1138          * Crashdump maps.
1139          */
1140         SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
1141
1142         /*
1143          * ptvmmap is used for reading arbitrary physical pages via
1144          * /dev/mem.
1145          */
1146         SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
1147
1148         /*
1149          * msgbufp is used to map the system message buffer.
1150          * XXX msgbufmap is not used.
1151          */
1152         SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
1153                atop(round_page(MSGBUF_SIZE)))
1154
1155         virtual_start = va;
1156         virtual_start = pmap_kmem_choose(virtual_start);
1157
1158         *CMAP1 = 0;
1159
1160         /*
1161          * PG_G is terribly broken on SMP because we IPI invltlb's in some
1162          * cases rather then invl1pg.  Actually, I don't even know why it
1163          * works under UP because self-referential page table mappings
1164          */
1165 //      pgeflag = 0;
1166
1167         cpu_invltlb();
1168
1169         /* Initialize the PAT MSR */
1170         pmap_init_pat();
1171         pmap_pinit_defaults(&kernel_pmap);
1172
1173         TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync",
1174                           &pmap_fast_kernel_cpusync);
1175
1176 }
1177
1178 /*
1179  * Setup the PAT MSR.
1180  */
1181 void
1182 pmap_init_pat(void)
1183 {
1184         uint64_t pat_msr;
1185         u_long cr0, cr4;
1186
1187         /*
1188          * Default values mapping PATi,PCD,PWT bits at system reset.
1189          * The default values effectively ignore the PATi bit by
1190          * repeating the encodings for 0-3 in 4-7, and map the PCD
1191          * and PWT bit combinations to the expected PAT types.
1192          */
1193         pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |        /* 000 */
1194                   PAT_VALUE(1, PAT_WRITE_THROUGH) |     /* 001 */
1195                   PAT_VALUE(2, PAT_UNCACHED) |          /* 010 */
1196                   PAT_VALUE(3, PAT_UNCACHEABLE) |       /* 011 */
1197                   PAT_VALUE(4, PAT_WRITE_BACK) |        /* 100 */
1198                   PAT_VALUE(5, PAT_WRITE_THROUGH) |     /* 101 */
1199                   PAT_VALUE(6, PAT_UNCACHED) |          /* 110 */
1200                   PAT_VALUE(7, PAT_UNCACHEABLE);        /* 111 */
1201         pat_pte_index[PAT_WRITE_BACK]   = 0;
1202         pat_pte_index[PAT_WRITE_THROUGH]= 0         | X86_PG_NC_PWT;
1203         pat_pte_index[PAT_UNCACHED]     = X86_PG_NC_PCD;
1204         pat_pte_index[PAT_UNCACHEABLE]  = X86_PG_NC_PCD | X86_PG_NC_PWT;
1205         pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE];
1206         pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE];
1207
1208         if (cpu_feature & CPUID_PAT) {
1209                 /*
1210                  * If we support the PAT then set-up entries for
1211                  * WRITE_PROTECTED and WRITE_COMBINING using bit patterns
1212                  * 5 and 6.
1213                  */
1214                 pat_msr = (pat_msr & ~PAT_MASK(5)) |
1215                           PAT_VALUE(5, PAT_WRITE_PROTECTED);
1216                 pat_msr = (pat_msr & ~PAT_MASK(6)) |
1217                           PAT_VALUE(6, PAT_WRITE_COMBINING);
1218                 pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | X86_PG_NC_PWT;
1219                 pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PCD;
1220
1221                 /*
1222                  * Then enable the PAT
1223                  */
1224
1225                 /* Disable PGE. */
1226                 cr4 = rcr4();
1227                 load_cr4(cr4 & ~CR4_PGE);
1228
1229                 /* Disable caches (CD = 1, NW = 0). */
1230                 cr0 = rcr0();
1231                 load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1232
1233                 /* Flushes caches and TLBs. */
1234                 wbinvd();
1235                 cpu_invltlb();
1236
1237                 /* Update PAT and index table. */
1238                 wrmsr(MSR_PAT, pat_msr);
1239
1240                 /* Flush caches and TLBs again. */
1241                 wbinvd();
1242                 cpu_invltlb();
1243
1244                 /* Restore caches and PGE. */
1245                 load_cr0(cr0);
1246                 load_cr4(cr4);
1247                 PatMsr = pat_msr;
1248         }
1249 }
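
/*
 * The pat_pte_index[] values set up above are the PTE control bits (PWT,
 * PCD and, when the cpu supports PAT, the PTE PAT bit) that select the PAT
 * entry programmed for each caching mode; callers OR them into PTEs to
 * obtain a specific memory type.
 */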
1250
1251 /*
1252  * Set 4mb pdir for mp startup
1253  */
1254 void
1255 pmap_set_opt(void)
1256 {
1257         if (cpu_feature & CPUID_PSE) {
1258                 load_cr4(rcr4() | CR4_PSE);
1259                 if (mycpu->gd_cpuid == 0)       /* only on BSP */
1260                         cpu_invltlb();
1261         }
1262 }
1263
1264 /*
1265  * Early initialization of the pmap module.
1266  *
1267  * Called by vm_init, to initialize any structures that the pmap
1268  * system needs to map virtual memory.  pmap_init has been enhanced to
1269  * support, in a fairly consistent way, discontiguous physical memory.
1270  */
1271 void
1272 pmap_init(void)
1273 {
1274         vm_pindex_t initial_pvs;
1275         vm_pindex_t i;
1276
1277         /*
1278          * Allocate memory for random pmap data structures.  Includes the
1279          * pv_head_table.
1280          */
1281         for (i = 0; i < vm_page_array_size; i++) {
1282                 vm_page_t m;
1283
1284                 m = &vm_page_array[i];
1285                 TAILQ_INIT(&m->md.pv_list);
1286         }
1287
1288         /*
1289          * init the pv free list
1290          */
1291         initial_pvs = vm_page_array_size;
1292         if (initial_pvs < MINPV)
1293                 initial_pvs = MINPV;
1294         pvzone = &pvzone_store;
1295         pvinit = (void *)kmem_alloc(&kernel_map,
1296                                     initial_pvs * sizeof (struct pv_entry),
1297                                     VM_SUBSYS_PVENTRY);
1298         zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
1299                   pvinit, initial_pvs);
1300
1301         /*
1302          * Now it is safe to enable pv_table recording.
1303          */
1304         pmap_initialized = TRUE;
1305 }
1306
1307 /*
1308  * Initialize the address space (zone) for the pv_entries.  Set a
1309  * high water mark so that the system can recover from excessive
1310  * numbers of pv entries.
1311  *
1312  * Also create the kernel page table template for isolated user
1313  * pmaps.
1314  */
1315 static void pmap_init_iso_range(vm_offset_t base, size_t bytes);
1316 static void pmap_init2_iso_pmap(void);
1317 #if 0
1318 static void dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base);
1319 #endif
1320
1321 void
1322 pmap_init2(void)
1323 {
1324         vm_pindex_t shpgperproc = PMAP_SHPGPERPROC;
1325         vm_pindex_t entry_max;
1326
1327         TUNABLE_LONG_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1328         pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
1329         TUNABLE_LONG_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1330         pv_entry_high_water = 9 * (pv_entry_max / 10);
1331
1332         /*
1333          * Subtract out pages already installed in the zone (hack)
1334          */
1335         entry_max = pv_entry_max - vm_page_array_size;
1336         if (entry_max <= 0)
1337                 entry_max = 1;
1338
1339         zinitna(pvzone, NULL, 0, entry_max, ZONE_INTERRUPT);
1340
1341         /*
1342          * Enable dynamic deletion of empty higher-level page table pages
1343          * by default only if system memory is < 8GB (use 7GB for slop).
1344          * This can save a little memory, but imposes significant
1345          * performance overhead for things like bulk builds, and for programs
1346          * which do a lot of memory mapping and memory unmapping.
1347          */
1348         if (pmap_dynamic_delete < 0) {
1349                 if (vmstats.v_page_count < 7LL * 1024 * 1024 * 1024 / PAGE_SIZE)
1350                         pmap_dynamic_delete = 1;
1351                 else
1352                         pmap_dynamic_delete = 0;
1353         }
1354
1355         /*
1356          * Automatic detection of Intel meltdown bug requiring user/kernel
1357          * mmap isolation.
1358          *
1359          * Currently there are so many Intel CPUs impacted that it's better
1360          * to whitelist future Intel CPUs.  Most AMD CPUs appear not to be
1361          * impacted, so the default is off for AMD.
1362          */
1363         if (meltdown_mitigation < 0) {
1364                 if (cpu_vendor_id == CPU_VENDOR_INTEL)
1365                         meltdown_mitigation = 1;
1366                 else
1367                         meltdown_mitigation = 0;
1368         }
1369         if (meltdown_mitigation) {
1370                 kprintf("machdep.meltdown_mitigation enabled to "
1371                         "protect against (mostly Intel) meltdown bug\n");
1372                 kprintf("system call performance will be impacted\n");
1373         }
1374
1375         pmap_init2_iso_pmap();
1376 }
1377
1378 /*
1379  * Create the isolation pmap template.  Once created, the template
1380  * is static and its PML4e entries are used to populate the
1381  * kernel portion of any isolated user pmaps.
1382  *
1383  * Our isolation pmap must contain:
1384  * (1) trampoline area for all cpus
1385  * (2) common_tss area for all cpus (its part of the trampoline area now)
1386  * (3) IDT for all cpus
1387  * (4) GDT for all cpus
1388  */
1389 static void
1390 pmap_init2_iso_pmap(void)
1391 {
1392         int n;
1393
1394         if (bootverbose)
1395                 kprintf("Initialize isolation pmap\n");
1396
1397         /*
1398          * Try to use our normal API calls to make this easier.  We have
1399          * to scrap the shadowed kernel PDPs pmap_pinit() creates for our
1400          * iso_pmap.
1401          */
1402         pmap_pinit(&iso_pmap);
1403         bzero(iso_pmap.pm_pml4, PAGE_SIZE);
1404
1405         /*
1406          * Install areas needed by the cpu and trampoline.
1407          */
1408         for (n = 0; n < ncpus; ++n) {
1409                 struct privatespace *ps;
1410
1411                 ps = CPU_prvspace[n];
1412                 pmap_init_iso_range((vm_offset_t)&ps->trampoline,
1413                                     sizeof(ps->trampoline));
1414                 pmap_init_iso_range((vm_offset_t)&ps->dblstack,
1415                                     sizeof(ps->dblstack));
1416                 pmap_init_iso_range((vm_offset_t)&ps->dbgstack,
1417                                     sizeof(ps->dbgstack));
1418                 pmap_init_iso_range((vm_offset_t)&ps->common_tss,
1419                                     sizeof(ps->common_tss));
1420                 pmap_init_iso_range(r_idt_arr[n].rd_base,
1421                                     r_idt_arr[n].rd_limit + 1);
1422         }
1423         pmap_init_iso_range((register_t)gdt, sizeof(gdt));
1424         pmap_init_iso_range((vm_offset_t)(int *)btext,
1425                             (vm_offset_t)(int *)etext -
1426                              (vm_offset_t)(int *)btext);
1427
1428 #if 0
1429         kprintf("Dump iso_pmap:\n");
1430         dump_pmap(&iso_pmap, vtophys(iso_pmap.pm_pml4), 0, 0);
1431         kprintf("\nDump kernel_pmap:\n");
1432         dump_pmap(&kernel_pmap, vtophys(kernel_pmap.pm_pml4), 0, 0);
1433 #endif
1434 }
1435
1436 /*
1437  * This adds a kernel virtual address range to the isolation pmap.
1438  */
1439 static void
1440 pmap_init_iso_range(vm_offset_t base, size_t bytes)
1441 {
1442         pv_entry_t pv;
1443         pv_entry_t pvp;
1444         pt_entry_t *ptep;
1445         pt_entry_t pte;
1446         vm_offset_t va;
1447
1448         if (bootverbose) {
1449                 kprintf("isolate %016jx-%016jx (%zd)\n",
1450                         base, base + bytes, bytes);
1451         }
1452         va = base & ~(vm_offset_t)PAGE_MASK;
1453         while (va < base + bytes) {
1454                 if ((va & PDRMASK) == 0 && va + NBPDR <= base + bytes &&
1455                     (ptep = pmap_pt(&kernel_pmap, va)) != NULL &&
1456                     (*ptep & kernel_pmap.pmap_bits[PG_V_IDX]) &&
1457                     (*ptep & kernel_pmap.pmap_bits[PG_PS_IDX])) {
1458                         /*
1459                          * Use 2MB pages if possible
1460                          */
1461                         pte = *ptep;
1462                         pv = pmap_allocpte(&iso_pmap, pmap_pd_pindex(va), &pvp);
1463                         ptep = pv_pte_lookup(pv, (va >> PDRSHIFT) & 511);
1464                         *ptep = pte;
1465                         va += NBPDR;
1466                 } else {
1467                         /*
1468                          * Otherwise use 4KB pages
1469                          */
1470                         pv = pmap_allocpte(&iso_pmap, pmap_pt_pindex(va), &pvp);
1471                         ptep = pv_pte_lookup(pv, (va >> PAGE_SHIFT) & 511);
1472                         *ptep = vtophys(va) | kernel_pmap.pmap_bits[PG_RW_IDX] |
1473                                               kernel_pmap.pmap_bits[PG_V_IDX] |
1474                                               kernel_pmap.pmap_bits[PG_A_IDX] |
1475                                               kernel_pmap.pmap_bits[PG_M_IDX];
1476
1477                         va += PAGE_SIZE;
1478                 }
1479                 pv_put(pv);
1480                 pv_put(pvp);
1481         }
1482 }
1483
1484 #if 0
1485 /*
1486  * Useful debugging pmap dumper, do not remove (#if 0 when not in use)
1487  */
1488 static
1489 void
1490 dump_pmap(pmap_t pmap, pt_entry_t pte, int level, vm_offset_t base)
1491 {
1492         pt_entry_t *ptp;
1493         vm_offset_t incr;
1494         int i;
1495
1496         switch(level) {
1497         case 0:                                 /* PML4e page, 512G entries */
1498                 incr = (1LL << 48) / 512;
1499                 break;
1500         case 1:                                 /* PDP page, 1G entries */
1501                 incr = (1LL << 39) / 512;
1502                 break;
1503         case 2:                                 /* PD page, 2MB entries */
1504                 incr = (1LL << 30) / 512;
1505                 break;
1506         case 3:                                 /* PT page, 4KB entries */
1507                 incr = (1LL << 21) / 512;
1508                 break;
1509         default:
1510                 incr = 0;
1511                 break;
1512         }
1513
1514         if (level == 0)
1515                 kprintf("cr3 %016jx @ va=%016jx\n", pte, base);
1516         ptp = (void *)PHYS_TO_DMAP(pte & ~(pt_entry_t)PAGE_MASK);
1517         for (i = 0; i < 512; ++i) {
1518                 if (level == 0 && i == 128)
1519                         base += 0xFFFF000000000000LLU;
1520                 if (ptp[i]) {
1521                         kprintf("%*.*s ", level * 4, level * 4, "");
1522                         if (level == 1 && (ptp[i] & 0x180) == 0x180) {
1523                                 kprintf("va=%016jx %3d term %016jx (1GB)\n",
1524                                         base, i, ptp[i]);
1525                         } else if (level == 2 && (ptp[i] & 0x180) == 0x180) {
1526                                 kprintf("va=%016jx %3d term %016jx (2MB)\n",
1527                                         base, i, ptp[i]);
1528                         } else if (level == 3) {
1529                                 kprintf("va=%016jx %3d term %016jx\n",
1530                                         base, i, ptp[i]);
1531                         } else {
1532                                 kprintf("va=%016jx %3d deep %016jx\n",
1533                                         base, i, ptp[i]);
1534                                 dump_pmap(pmap, ptp[i], level + 1, base);
1535                         }
1536                 }
1537                 base += incr;
1538         }
1539 }
1540
1541 #endif
1542
1543 /*
1544  * Typically used by vm/device_pager.c to initialize a fictitious page.
1545  */
1546 void
1547 pmap_page_init(struct vm_page *m)
1548 {
1549         vm_page_init(m);
1550         TAILQ_INIT(&m->md.pv_list);
1551 }
1552
1553 /***************************************************
1554  * Low level helper routines.....
1555  ***************************************************/
1556
1557 /*
1558  * This routine defines the region(s) of memory that should not be
1559  * tested for the modified bit (returns 1 outside [clean_sva, clean_eva)).
1560  */
1561 static __inline
1562 int
1563 pmap_track_modified(vm_pindex_t pindex)
1564 {
1565         vm_offset_t va = (vm_offset_t)pindex << PAGE_SHIFT;
1566         if ((va < clean_sva) || (va >= clean_eva)) 
1567                 return 1;
1568         else
1569                 return 0;
1570 }
1571
1572 /*
1573  * Extract the physical page address associated with the map/VA pair.
1574  * The page must be wired for this to work reliably.
1575  */
1576 vm_paddr_t 
1577 pmap_extract(pmap_t pmap, vm_offset_t va, void **handlep)
1578 {
1579         vm_paddr_t rtval;
1580         pv_entry_t pt_pv;
1581         pt_entry_t *ptep;
1582
1583         rtval = 0;
1584         if (va >= VM_MAX_USER_ADDRESS) {
1585                 /*
1586                  * Kernel page directories might be direct-mapped and
1587                  * there is typically no PV tracking of pte's
1588                  */
1589                 pd_entry_t *pt;
1590
1591                 pt = pmap_pt(pmap, va);
1592                 if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) {
1593                         if (*pt & pmap->pmap_bits[PG_PS_IDX]) {
1594                                 rtval = *pt & PG_PS_FRAME;
1595                                 rtval |= va & PDRMASK;
1596                         } else {
1597                                 ptep = pmap_pt_to_pte(*pt, va);
1598                                 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1599                                         rtval = *ptep & PG_FRAME;
1600                                         rtval |= va & PAGE_MASK;
1601                                 }
1602                         }
1603                 }
1604                 if (handlep)
1605                         *handlep = NULL;
1606         } else {
1607                 /*
1608                  * User pages currently do not direct-map the page directory
1609                  * and some pages might not use managed PVs.  But all PT's
1610                  * will have a PV.
1611                  */
1612                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
1613                 if (pt_pv) {
1614                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1615                         if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1616                                 rtval = *ptep & PG_FRAME;
1617                                 rtval |= va & PAGE_MASK;
1618                         }
1619                         if (handlep)
1620                                 *handlep = pt_pv;       /* locked until done */
1621                         else
1622                                 pv_put(pt_pv);
1623                 } else if (handlep) {
1624                         *handlep = NULL;
1625                 }
1626         }
1627         return rtval;
1628 }
1629
1630 void
1631 pmap_extract_done(void *handle)
1632 {
1633         if (handle)
1634                 pv_put((pv_entry_t)handle);
1635 }
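
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this
 * file): pairing pmap_extract() with pmap_extract_done() when the caller
 * wants the backing pt_pv to stay locked while it uses the physical
 * address.  Variable names are made up for the example.
 *
 *      void *handle;
 *      vm_paddr_t pa;
 *
 *      pa = pmap_extract(pmap, va, &handle);
 *      if (pa) {
 *              ... use pa; the pt_pv (if any) remains locked ...
 *      }
 *      pmap_extract_done(handle);      (handle may be NULL)
 */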
1636
1637 /*
1638  * Similar to extract but checks protections, SMP-friendly short-cut for
1639  * vm_fault_page[_quick]().  Can return NULL to cause the caller to
1640  * fall-through to the real fault code.  Does not work with HVM page
1641  * tables.
1642  *
1643  * If busyp is NULL the returned page, if not NULL, is held (and not busied).
1644  *
1645  * If busyp is not NULL and this function sets *busyp non-zero, the returned
1646  * page is busied (and not held).
1647  *
1648  * If busyp is not NULL and this function sets *busyp to zero, the returned
1649  * page is held (and not busied).
1650  *
1651  * If VM_PROT_WRITE is set in prot, and the pte is already writable, the
1652  * returned page will be dirtied.  If the pte is not already writable NULL
1653  * is returned.  In other words, if the bit is set and a vm_page_t is returned,
1654  * any COW will already have happened and that page can be written by the
1655  * caller.
1656  *
1657  * WARNING! THE RETURNED PAGE IS ONLY HELD AND NOT SUITABLE FOR READING
1658  *          OR WRITING AS-IS.
1659  */
1660 vm_page_t
1661 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot, int *busyp)
1662 {
1663         if (pmap &&
1664             va < VM_MAX_USER_ADDRESS &&
1665             (pmap->pm_flags & PMAP_HVM) == 0) {
1666                 pv_entry_t pt_pv;
1667                 pv_entry_t pte_pv;
1668                 pt_entry_t *ptep;
1669                 pt_entry_t req;
1670                 vm_page_t m;
1671                 int error;
1672
1673                 req = pmap->pmap_bits[PG_V_IDX] |
1674                       pmap->pmap_bits[PG_U_IDX];
1675                 if (prot & VM_PROT_WRITE)
1676                         req |= pmap->pmap_bits[PG_RW_IDX];
1677
1678                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
1679                 if (pt_pv == NULL)
1680                         return (NULL);
1681                 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1682                 if ((*ptep & req) != req) {
1683                         pv_put(pt_pv);
1684                         return (NULL);
1685                 }
1686                 pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), NULL, &error);
1687                 if (pte_pv && error == 0) {
1688                         m = pte_pv->pv_m;
1689                         if (prot & VM_PROT_WRITE) {
1690                                 /* interlocked by presence of pv_entry */
1691                                 vm_page_dirty(m);
1692                         }
1693                         if (busyp) {
1694                                 if (prot & VM_PROT_WRITE) {
1695                                         if (vm_page_busy_try(m, TRUE))
1696                                                 m = NULL;
1697                                         *busyp = 1;
1698                                 } else {
1699                                         vm_page_hold(m);
1700                                         *busyp = 0;
1701                                 }
1702                         } else {
1703                                 vm_page_hold(m);
1704                         }
1705                         pv_put(pte_pv);
1706                 } else if (pte_pv) {
1707                         pv_drop(pte_pv);
1708                         m = NULL;
1709                 } else {
1710                         /* error, since we didn't request a placemarker */
1711                         m = NULL;
1712                 }
1713                 pv_put(pt_pv);
1714                 return(m);
1715         } else {
1716                 return(NULL);
1717         }
1718 }
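
/*
 * Illustrative caller pattern (hypothetical, not taken from this file)
 * honoring the busyp contract described above.  It assumes
 * vm_page_wakeup() is the release for a busied page and vm_page_unhold()
 * the release for a held page.
 *
 *      int busy;
 *      vm_page_t m;
 *
 *      m = pmap_fault_page_quick(pmap, va, prot, &busy);
 *      if (m == NULL) {
 *              ... fall through to the normal vm_fault path ...
 *      } else {
 *              ... use the page ...
 *              if (busy)
 *                      vm_page_wakeup(m);
 *              else
 *                      vm_page_unhold(m);
 *      }
 */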
1719
1720 /*
1721  * Extract the physical page address associated with a kernel virtual address.
1722  */
1723 vm_paddr_t
1724 pmap_kextract(vm_offset_t va)
1725 {
1726         pd_entry_t pt;          /* pt entry in pd */
1727         vm_paddr_t pa;
1728
1729         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1730                 pa = DMAP_TO_PHYS(va);
1731         } else {
1732                 pt = *vtopt(va);
1733                 if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) {
1734                         pa = (pt & PG_PS_FRAME) | (va & PDRMASK);
1735                 } else {
1736                         /*
1737                          * Beware of a concurrent promotion that changes the
1738                          * PDE at this point!  For example, vtopte() must not
1739                          * be used to access the PTE because it would use the
1740                          * new PDE.  It is, however, safe to use the old PDE
1741                          * because the page table page is preserved by the
1742                          * promotion.
1743                          */
1744                         pa = *pmap_pt_to_pte(pt, va);
1745                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1746                 }
1747         }
1748         return pa;
1749 }
1750
1751 /***************************************************
1752  * Low level mapping routines.....
1753  ***************************************************/
1754
1755 /*
1756  * Routine: pmap_kenter
1757  * Function:
1758  *      Add a wired page to the KVA
1759  *      NOTE! In order for the mapping to take effect you should do
1760  *      an invltlb after doing the pmap_kenter().
1761  */
1762 void 
1763 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1764 {
1765         pt_entry_t *ptep;
1766         pt_entry_t npte;
1767
1768         npte = pa |
1769                kernel_pmap.pmap_bits[PG_RW_IDX] |
1770                kernel_pmap.pmap_bits[PG_V_IDX];
1771 //             pgeflag;
1772         ptep = vtopte(va);
1773 #if 1
1774         pmap_inval_smp(&kernel_pmap, va, 1, ptep, npte);
1775 #else
1776         /* FUTURE */
1777         if (*ptep)
1778                 pmap_inval_smp(&kernel_pmap, va, ptep, npte);
1779         else
1780                 *ptep = npte;
1781 #endif
1782 }
1783
1784 /*
1785  * Similar to pmap_kenter(), except we only invalidate the mapping on the
1786  * current CPU.  Returns 0 if the previous pte was 0, 1 if it wasn't
1787  * (caller can conditionalize calling smp_invltlb()).
1788  */
1789 int
1790 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1791 {
1792         pt_entry_t *ptep;
1793         pt_entry_t npte;
1794         int res;
1795
1796         npte = pa | kernel_pmap.pmap_bits[PG_RW_IDX] |
1797                     kernel_pmap.pmap_bits[PG_V_IDX];
1798         // npte |= pgeflag;
1799         ptep = vtopte(va);
1800 #if 1
1801         res = 1;
1802 #else
1803         /* FUTURE */
1804         res = (*ptep != 0);
1805 #endif
1806         atomic_swap_long(ptep, npte);
1807         cpu_invlpg((void *)va);
1808
1809         return res;
1810 }
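
/*
 * Illustrative use of the return value (hypothetical loop, not taken from
 * this file): batch a series of local mappings and only issue a single
 * system-wide invalidation if any pre-existing pte was replaced.
 *
 *      int need_inval = 0;
 *
 *      for (i = 0; i < n; ++i)
 *              need_inval |= pmap_kenter_quick(va + i * PAGE_SIZE, pa[i]);
 *      if (need_inval)
 *              smp_invltlb();
 */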
1811
1812 /*
1813  * Enter addresses into the kernel pmap but don't bother
1814  * doing any tlb invalidations.  Caller will do a rollup
1815  * invalidation via pmap_rollup_inval().
1816  */
1817 int
1818 pmap_kenter_noinval(vm_offset_t va, vm_paddr_t pa)
1819 {
1820         pt_entry_t *ptep;
1821         pt_entry_t npte;
1822         int res;
1823
1824         npte = pa |
1825             kernel_pmap.pmap_bits[PG_RW_IDX] |
1826             kernel_pmap.pmap_bits[PG_V_IDX];
1827 //          pgeflag;
1828         ptep = vtopte(va);
1829 #if 1
1830         res = 1;
1831 #else
1832         /* FUTURE */
1833         res = (*ptep != 0);
1834 #endif
1835         atomic_swap_long(ptep, npte);
1836
1837
1838         return res;
1839 }
1840
1841 /*
1842  * Remove a page from the kernel page tables.
1843  */
1844 void
1845 pmap_kremove(vm_offset_t va)
1846 {
1847         pt_entry_t *ptep;
1848
1849         ptep = vtopte(va);
1850         pmap_inval_smp(&kernel_pmap, va, 1, ptep, 0);
1851 }
1852
1853 void
1854 pmap_kremove_quick(vm_offset_t va)
1855 {
1856         pt_entry_t *ptep;
1857
1858         ptep = vtopte(va);
1859         (void)pte_load_clear(ptep);
1860         cpu_invlpg((void *)va);
1861 }
1862
1863 /*
1864  * Remove addresses from the kernel pmap but don't bother
1865  * doing any tlb invalidations.  Caller will do a rollup
1866  * invalidation via pmap_rollup_inval().
1867  */
1868 void
1869 pmap_kremove_noinval(vm_offset_t va)
1870 {
1871         pt_entry_t *ptep;
1872
1873         ptep = vtopte(va);
1874         (void)pte_load_clear(ptep);
1875 }
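
/*
 * Illustrative pairing for the *_noinval variants (hypothetical, not taken
 * from this file): enter or remove a run of pages without per-page
 * invalidation, then issue one rollup invalidation over the whole range.
 * The exact pmap_rollup_inval() signature is assumed here.
 *
 *      for (i = 0; i < n; ++i)
 *              pmap_kenter_noinval(va + i * PAGE_SIZE, pa[i]);
 *      pmap_rollup_inval(&kernel_pmap, va, va + n * PAGE_SIZE);
 */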
1876
1877 /*
1878  * XXX these need to be recoded.  They are not used in any critical path.
1879  */
1880 void
1881 pmap_kmodify_rw(vm_offset_t va)
1882 {
1883         atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]);
1884         cpu_invlpg((void *)va);
1885 }
1886
1887 /* NOT USED
1888 void
1889 pmap_kmodify_nc(vm_offset_t va)
1890 {
1891         atomic_set_long(vtopte(va), PG_N);
1892         cpu_invlpg((void *)va);
1893 }
1894 */
1895
1896 /*
1897  * Used to map a range of physical addresses into kernel virtual
1898  * address space during the low level boot, typically to map the
1899  * dump bitmap, message buffer, and vm_page_array.
1900  *
1901  * These mappings are typically made at virtual addresses just past the
1902  * end of the kernel text+data.
1903  *
1904  * We could return PHYS_TO_DMAP(start) here and not allocate any KVA
1905  * via (*virtp), but then kmem accesses from userland and kernel dumps
1906  * would not have access to the related pointers.
1907  */
1908 vm_offset_t
1909 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
1910 {
1911         vm_offset_t va;
1912         vm_offset_t va_start;
1913
1914         /*return PHYS_TO_DMAP(start);*/
1915
1916         va_start = *virtp;
1917         va = va_start;
1918
1919         while (start < end) {
1920                 pmap_kenter_quick(va, start);
1921                 va += PAGE_SIZE;
1922                 start += PAGE_SIZE;
1923         }
1924         *virtp = va;
1925         return va_start;
1926 }
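
/*
 * Illustrative boot-time call (hypothetical names/values, not taken from
 * this file): carve KVA for a physically contiguous structure and obtain
 * its base virtual address; *virtp is advanced past the new mappings.
 *
 *      va = pmap_map(&virtual_start, pa_start, pa_end,
 *                    VM_PROT_READ | VM_PROT_WRITE);
 */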
1927
1928 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
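
/*
 * Heuristic cutoff: beyond roughly 2MB it is presumably cheaper to flush
 * the entire cache with wbinvd() than to clflush every line individually;
 * see pmap_invalidate_cache_pages() below.
 */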
1929
1930 /*
1931  * Remove the specified set of pages from the data and instruction caches.
1932  *
1933  * In contrast to pmap_invalidate_cache_range(), this function does not
1934  * rely on the CPU's self-snoop feature, because it is intended for use
1935  * when moving pages into a different cache domain.
1936  */
1937 void
1938 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1939 {
1940         vm_offset_t daddr, eva;
1941         int i;
1942
1943         if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1944             (cpu_feature & CPUID_CLFSH) == 0)
1945                 wbinvd();
1946         else {
1947                 cpu_mfence();
1948                 for (i = 0; i < count; i++) {
1949                         daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1950                         eva = daddr + PAGE_SIZE;
1951                         for (; daddr < eva; daddr += cpu_clflush_line_size)
1952                                 clflush(daddr);
1953                 }
1954                 cpu_mfence();
1955         }
1956 }
1957
1958 void
1959 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
1960 {
1961         KASSERT((sva & PAGE_MASK) == 0,
1962             ("pmap_invalidate_cache_range: sva not page-aligned"));
1963         KASSERT((eva & PAGE_MASK) == 0,
1964             ("pmap_invalidate_cache_range: eva not page-aligned"));
1965
1966         if (cpu_feature & CPUID_SS) {
1967                 ; /* If "Self Snoop" is supported, do nothing. */
1968         } else {
1969                 /* Globally invalidate caches */
1970                 cpu_wbinvd_on_all_cpus();
1971         }
1972 }
1973
1974 /*
1975  * Invalidate the specified range of virtual memory on all cpus associated
1976  * with the pmap.
1977  */
1978 void
1979 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1980 {
1981         pmap_inval_smp(pmap, sva, (eva - sva) >> PAGE_SHIFT, NULL, 0);
1982 }
1983
1984 /*
1985  * Add a list of wired pages to the kva.  This routine is used for temporary
1986  * kernel mappings such as those found in buffer cache buffers.  Page
1987  * modifications and accesses are not tracked or recorded.
1988  *
1989  * NOTE! Old mappings are simply overwritten, and we cannot assume relaxed
1990  *       semantics as previous mappings may have been zeroed without any
1991  *       invalidation.
1992  *
1993  * The page *must* be wired.
1994  */
1995 static __inline void
1996 _pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count, int doinval)
1997 {
1998         vm_offset_t end_va;
1999         vm_offset_t va;
2000
2001         end_va = beg_va + count * PAGE_SIZE;
2002
2003         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2004                 pt_entry_t pte;
2005                 pt_entry_t *ptep;
2006
2007                 ptep = vtopte(va);
2008                 pte = VM_PAGE_TO_PHYS(*m) |
2009                         kernel_pmap.pmap_bits[PG_RW_IDX] |
2010                         kernel_pmap.pmap_bits[PG_V_IDX] |
2011                         kernel_pmap.pmap_cache_bits[(*m)->pat_mode];
2012 //              pgeflag;
2013                 atomic_swap_long(ptep, pte);
2014                 m++;
2015         }
2016         if (doinval)
2017                 pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
2018 }
2019
2020 void
2021 pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count)
2022 {
2023         _pmap_qenter(beg_va, m, count, 1);
2024 }
2025
2026 void
2027 pmap_qenter_noinval(vm_offset_t beg_va, vm_page_t *m, int count)
2028 {
2029         _pmap_qenter(beg_va, m, count, 0);
2030 }
2031
2032 /*
2033  * This routine jerks page mappings from the kernel -- it is meant only
2034  * for temporary mappings such as those found in buffer cache buffers.
2035  * No recording of modified or access status occurs.
2036  *
2037  * MPSAFE, INTERRUPT SAFE (cluster callback)
2038  */
2039 void
2040 pmap_qremove(vm_offset_t beg_va, int count)
2041 {
2042         vm_offset_t end_va;
2043         vm_offset_t va;
2044
2045         end_va = beg_va + count * PAGE_SIZE;
2046
2047         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2048                 pt_entry_t *pte;
2049
2050                 pte = vtopte(va);
2051                 (void)pte_load_clear(pte);
2052                 cpu_invlpg((void *)va);
2053         }
2054         pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
2055 }
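
/*
 * Illustrative pairing (hypothetical, not taken from this file):
 * temporarily map an array of wired pages at a reserved kva, access them,
 * then tear the mappings down again.
 *
 *      pmap_qenter(kva, marray, count);
 *      ... access the pages through kva ...
 *      pmap_qremove(kva, count);
 */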
2056
2057 /*
2058  * This routine removes temporary kernel mappings, only invalidating them
2059  * on the current cpu.  It should only be used under carefully controlled
2060  * conditions.
2061  */
2062 void
2063 pmap_qremove_quick(vm_offset_t beg_va, int count)
2064 {
2065         vm_offset_t end_va;
2066         vm_offset_t va;
2067
2068         end_va = beg_va + count * PAGE_SIZE;
2069
2070         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2071                 pt_entry_t *pte;
2072
2073                 pte = vtopte(va);
2074                 (void)pte_load_clear(pte);
2075                 cpu_invlpg((void *)va);
2076         }
2077 }
2078
2079 /*
2080  * This routine removes temporary kernel mappings *without* invalidating
2081  * the TLB.  It can only be used on permanent kva reservations such as those
2082  * found in buffer cache buffers, under carefully controlled circumstances.
2083  *
2084  * NOTE: Repopulating these KVAs requires unconditional invalidation.
2085  *       (pmap_qenter() does unconditional invalidation).
2086  */
2087 void
2088 pmap_qremove_noinval(vm_offset_t beg_va, int count)
2089 {
2090         vm_offset_t end_va;
2091         vm_offset_t va;
2092
2093         end_va = beg_va + count * PAGE_SIZE;
2094
2095         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
2096                 pt_entry_t *pte;
2097
2098                 pte = vtopte(va);
2099                 (void)pte_load_clear(pte);
2100         }
2101 }
2102
2103 /*
2104  * Create a new thread and optionally associate it with a (new) process.
2105  * NOTE! the new thread's cpu may not equal the current cpu.
2106  */
2107 void
2108 pmap_init_thread(thread_t td)
2109 {
2110         /* enforce pcb placement & alignment */
2111         td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
2112         td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
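        /*
         * The mask above rounds the pcb down to a 16-byte boundary,
         * presumably to satisfy the alignment required by the FPU save
         * area (td_savefpu) placed within it.
         */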
2113         td->td_savefpu = &td->td_pcb->pcb_save;
2114         td->td_sp = (char *)td->td_pcb; /* no -16 */
2115 }
2116
2117 /*
2118  * This routine directly affects the fork perf for a process.
2119  */
2120 void
2121 pmap_init_proc(struct proc *p)
2122 {
2123 }
2124
2125 static void
2126 pmap_pinit_defaults(struct pmap *pmap)
2127 {
2128         bcopy(pmap_bits_default, pmap->pmap_bits,
2129               sizeof(pmap_bits_default));
2130         bcopy(protection_codes, pmap->protection_codes,
2131               sizeof(protection_codes));
2132         bcopy(pat_pte_index, pmap->pmap_cache_bits,
2133               sizeof(pat_pte_index));
2134         pmap->pmap_cache_mask = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
2135         pmap->copyinstr = std_copyinstr;
2136         pmap->copyin = std_copyin;
2137         pmap->copyout = std_copyout;
2138         pmap->fubyte = std_fubyte;
2139         pmap->subyte = std_subyte;
2140         pmap->fuword32 = std_fuword32;
2141         pmap->fuword64 = std_fuword64;
2142         pmap->suword32 = std_suword32;
2143         pmap->suword64 = std_suword64;
2144         pmap->swapu32 = std_swapu32;
2145         pmap->swapu64 = std_swapu64;
2146 }
2147 /*
2148  * Initialize pmap0/vmspace0.
2149  *
2150  * On architectures where the kernel pmap is not integrated into the user
2151  * process pmap, this pmap represents the process pmap, not the kernel pmap.
2152  * kernel_pmap should be used to access the kernel pmap directly.
2153  */
2154 void
2155 pmap_pinit0(struct pmap *pmap)
2156 {
2157         int i;
2158
2159         pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
2160         pmap->pm_count = 1;
2161         CPUMASK_ASSZERO(pmap->pm_active);
2162         pmap->pm_pvhint_pt = NULL;
2163         pmap->pm_pvhint_pte = NULL;
2164         RB_INIT(&pmap->pm_pvroot);
2165         spin_init(&pmap->pm_spin, "pmapinit0");
2166         for (i = 0; i < PM_PLACEMARKS; ++i)
2167                 pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2168         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2169         pmap_pinit_defaults(pmap);
2170 }
2171
2172 /*
2173  * Initialize a preallocated and zeroed pmap structure,
2174  * such as one in a vmspace structure.
2175  */
2176 static void
2177 pmap_pinit_simple(struct pmap *pmap)
2178 {
2179         int i;
2180
2181         /*
2182          * Misc initialization
2183          */
2184         pmap->pm_count = 1;
2185         CPUMASK_ASSZERO(pmap->pm_active);
2186         pmap->pm_pvhint_pt = NULL;
2187         pmap->pm_pvhint_pte = NULL;
2188         pmap->pm_flags = PMAP_FLAG_SIMPLE;
2189
2190         pmap_pinit_defaults(pmap);
2191
2192         /*
2193          * Don't blow up locks/tokens on re-use (XXX fix/use drop code
2194          * for this).
2195          */
2196         if (pmap->pm_pmlpv == NULL) {
2197                 RB_INIT(&pmap->pm_pvroot);
2198                 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2199                 spin_init(&pmap->pm_spin, "pmapinitsimple");
2200                 for (i = 0; i < PM_PLACEMARKS; ++i)
2201                         pmap->pm_placemarks[i] = PM_NOPLACEMARK;
2202         }
2203 }
2204
2205 void
2206 pmap_pinit(struct pmap *pmap)
2207 {
2208         pv_entry_t pv;
2209         int j;
2210
2211         if (pmap->pm_pmlpv) {
2212                 if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
2213                         pmap_puninit(pmap);
2214                 }
2215         }
2216
2217         pmap_pinit_simple(pmap);
2218         pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
2219
2220         /*
2221          * No need to allocate page table space yet but we do need a valid
2222          * page directory table.
2223          */
2224         if (pmap->pm_pml4 == NULL) {
2225                 pmap->pm_pml4 =
2226                     (pml4_entry_t *)kmem_alloc_pageable(&kernel_map,
2227                                                         PAGE_SIZE * 2,
2228                                                         VM_SUBSYS_PML4);
2229                 pmap->pm_pml4_iso = (void *)((char *)pmap->pm_pml4 + PAGE_SIZE);
2230         }
2231
2232         /*
2233          * Allocate the PML4e table, which wires it even though it isn't
2234          * being entered into some higher level page table (it being the
2235          * highest level).  If one is already cached we don't have to do
2236          * anything.
2237          */
2238         if ((pv = pmap->pm_pmlpv) == NULL) {
2239                 pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2240                 pmap->pm_pmlpv = pv;
2241                 pmap_kenter((vm_offset_t)pmap->pm_pml4,
2242                             VM_PAGE_TO_PHYS(pv->pv_m));
2243                 pv_put(pv);
2244
2245                 /*
2246                  * Install DMAP and KMAP.
2247                  */
2248                 for (j = 0; j < NDMPML4E; ++j) {
2249                         pmap->pm_pml4[DMPML4I + j] =
2250                             (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2251                             pmap->pmap_bits[PG_RW_IDX] |
2252                             pmap->pmap_bits[PG_V_IDX] |
2253                             pmap->pmap_bits[PG_A_IDX];
2254                 }
2255                 for (j = 0; j < NKPML4E; ++j) {
2256                         pmap->pm_pml4[KPML4I + j] =
2257                             (KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2258                             pmap->pmap_bits[PG_RW_IDX] |
2259                             pmap->pmap_bits[PG_V_IDX] |
2260                             pmap->pmap_bits[PG_A_IDX];
2261                 }
2262
2263                 /*
2264                  * install self-referential address mapping entry
2265                  */
2266                 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) |
2267                     pmap->pmap_bits[PG_V_IDX] |
2268                     pmap->pmap_bits[PG_RW_IDX] |
2269                     pmap->pmap_bits[PG_A_IDX];
2270         } else {
2271                 KKASSERT(pv->pv_m->flags & PG_MAPPED);
2272                 KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
2273         }
2274         KKASSERT(pmap->pm_pml4[255] == 0);
2275
2276         /*
2277          * When implementing an isolated userland pmap, a second PML4e table
2278          * is needed.  We use pmap_pml4_pindex() + 1 for convenience, but
2279          * note that we do not operate on this table using our API functions
2280          * so handling of the + 1 case is mostly just to prevent implosions.
2281          *
2282          * We install an isolated version of the kernel PDPs into this
2283          * second PML4e table.  The pmap code will mirror all user PDPs
2284          * between the primary and secondary PML4e table.
2285          */
2286         if ((pv = pmap->pm_pmlpv_iso) == NULL && meltdown_mitigation &&
2287             pmap != &iso_pmap) {
2288                 pv = pmap_allocpte(pmap, pmap_pml4_pindex() + 1, NULL);
2289                 pmap->pm_pmlpv_iso = pv;
2290                 pmap_kenter((vm_offset_t)pmap->pm_pml4_iso,
2291                             VM_PAGE_TO_PHYS(pv->pv_m));
2292                 pv_put(pv);
2293
2294                 /*
2295                  * Install an isolated version of the kernel pmap for
2296                  * user consumption, using PDPs constructed in iso_pmap.
2297                  */
2298                 for (j = 0; j < NKPML4E; ++j) {
2299                         pmap->pm_pml4_iso[KPML4I + j] =
2300                                 iso_pmap.pm_pml4[KPML4I + j];
2301                 }
2302         } else if (pv) {
2303                 KKASSERT(pv->pv_m->flags & PG_MAPPED);
2304                 KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
2305         }
2306 }
2307
2308 /*
2309  * Clean up a pmap structure so it can be physically freed.  This routine
2310  * is called by the vmspace dtor function.  A great deal of pmap data is
2311  * left passively mapped to improve vmspace management so we have a bit
2312  * of cleanup work to do here.
2313  */
2314 void
2315 pmap_puninit(pmap_t pmap)
2316 {
2317         pv_entry_t pv;
2318         vm_page_t p;
2319
2320         KKASSERT(CPUMASK_TESTZERO(pmap->pm_active));
2321         if ((pv = pmap->pm_pmlpv) != NULL) {
2322                 if (pv_hold_try(pv) == 0)
2323                         pv_lock(pv);
2324                 KKASSERT(pv == pmap->pm_pmlpv);
2325                 p = pmap_remove_pv_page(pv);
2326                 pv_free(pv, NULL);
2327                 pv = NULL;      /* safety */
2328                 pmap_kremove((vm_offset_t)pmap->pm_pml4);
2329                 vm_page_busy_wait(p, FALSE, "pgpun");
2330                 KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
2331                 vm_page_unwire(p, 0);
2332                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2333                 vm_page_free(p);
2334                 pmap->pm_pmlpv = NULL;
2335         }
2336         if ((pv = pmap->pm_pmlpv_iso) != NULL) {
2337                 if (pv_hold_try(pv) == 0)
2338                         pv_lock(pv);
2339                 KKASSERT(pv == pmap->pm_pmlpv_iso);
2340                 p = pmap_remove_pv_page(pv);
2341                 pv_free(pv, NULL);
2342                 pv = NULL;      /* safety */
2343                 pmap_kremove((vm_offset_t)pmap->pm_pml4_iso);
2344                 vm_page_busy_wait(p, FALSE, "pgpun");
2345                 KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
2346                 vm_page_unwire(p, 0);
2347                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2348                 vm_page_free(p);
2349                 pmap->pm_pmlpv_iso = NULL;
2350         }
2351         if (pmap->pm_pml4) {
2352                 KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
2353                 kmem_free(&kernel_map,
2354                           (vm_offset_t)pmap->pm_pml4, PAGE_SIZE * 2);
2355                 pmap->pm_pml4 = NULL;
2356                 pmap->pm_pml4_iso = NULL;
2357         }
2358         KKASSERT(pmap->pm_stats.resident_count == 0);
2359         KKASSERT(pmap->pm_stats.wired_count == 0);
2360 }
2361
2362 /*
2363  * This function is now unused (used to add the pmap to the pmap_list)
2364  */
2365 void
2366 pmap_pinit2(struct pmap *pmap)
2367 {
2368 }
2369
2370 /*
2371  * This routine is called when various levels in the page table need to
2372  * be populated.  This routine cannot fail.
2373  *
2374  * This function returns two locked pv_entry's, one representing the
2375  * requested pv and one representing the requested pv's parent pv.  If
2376  * an intermediate page table does not exist it will be created, mapped,
2377  * wired, and the parent page table will be given an additional hold
2378  * count representing the presence of the child pv_entry.
2379  */
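/*
 * Rough pindex layout assumed by the ptepindex comparisons below (derived
 * from the pmap_pt/pd/pdp/pml4_pindex() tests in this function): terminal
 * PTE indices come first, followed by PT page indices offset by
 * NUPTE_TOTAL, then PD pages, then PDP pages, and finally the single
 * PML4 page at pmap_pml4_pindex().
 */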
2380 static
2381 pv_entry_t
2382 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
2383 {
2384         pt_entry_t *ptep;
2385         pt_entry_t *ptep_iso;
2386         pv_entry_t pv;
2387         pv_entry_t pvp;
2388         pt_entry_t v;
2389         vm_pindex_t pt_pindex;
2390         vm_page_t m;
2391         int isnew;
2392         int ispt;
2393
2394         /*
2395          * If the pv already exists and we aren't being asked for the
2396          * parent page table page we can just return it.  A locked+held pv
2397          * is returned.  The pv will also have a second hold related to the
2398          * pmap association that we don't have to worry about.
2399          */
2400         ispt = 0;
2401         pv = pv_alloc(pmap, ptepindex, &isnew);
2402         if (isnew == 0 && pvpp == NULL)
2403                 return(pv);
2404
2405         /*
2406          * Special case terminal PVs.  These are not page table pages so
2407          * no vm_page is allocated (the caller supplied the vm_page).  If
2408  * pvpp is non-NULL we are being asked to also return the pt_pv
2409          * for this pv.
2410          *
2411          * Note that pt_pv's are only returned for user VAs. We assert that
2412          * a pt_pv is not being requested for kernel VAs.  The kernel
2413          * pre-wires all higher-level page tables so don't overload managed
2414          * higher-level page tables on top of it!
2415          *
2416  * However, it's convenient for us to allow the case when creating
2417          * iso_pmap.  This is a bit of a hack but it simplifies iso_pmap
2418          * a lot.
2419          */
2420         if (ptepindex < pmap_pt_pindex(0)) {
2421                 if (ptepindex >= NUPTE_USER && pmap != &iso_pmap) {
2422                         /* kernel manages this manually for KVM */
2423                         KKASSERT(pvpp == NULL);
2424                 } else {
2425                         KKASSERT(pvpp != NULL);
2426                         pt_pindex = NUPTE_TOTAL + (ptepindex >> NPTEPGSHIFT);
2427                         pvp = pmap_allocpte(pmap, pt_pindex, NULL);
2428                         if (isnew)
2429                                 vm_page_wire_quick(pvp->pv_m);
2430                         *pvpp = pvp;
2431                 }
2432                 return(pv);
2433         }
2434
2435         /*
2436          * The kernel never uses managed PT/PD/PDP pages.
2437          */
2438         KKASSERT(pmap != &kernel_pmap);
2439
2440         /*
2441          * Non-terminal PVs allocate a VM page to represent the page table,
2442  * so we have to resolve the pvp and calculate the ptepindex for the
2443  * pvp, and then the page table entry index within the pvp, for the
2444  * fall-through code below.
2445          */
2446         if (ptepindex < pmap_pd_pindex(0)) {
2447                 /*
2448                  * pv is PT, pvp is PD
2449                  */
2450                 ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT;
2451                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL;
2452                 pvp = pmap_allocpte(pmap, ptepindex, NULL);
2453
2454                 /*
2455                  * PT index in PD
2456                  */
2457                 ptepindex = pv->pv_pindex - pmap_pt_pindex(0);
2458                 ptepindex &= ((1ul << NPDEPGSHIFT) - 1);
2459                 ispt = 1;
2460         } else if (ptepindex < pmap_pdp_pindex(0)) {
2461                 /*
2462                  * pv is PD, pvp is PDP
2463                  *
2464                  * SIMPLE PMAP NOTE: Simple pmaps do not allocate above
2465                  *                   the PD.
2466                  */
2467                 ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT;
2468                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
2469
2470                 if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
2471                         KKASSERT(pvpp == NULL);
2472                         pvp = NULL;
2473                 } else {
2474                         pvp = pmap_allocpte(pmap, ptepindex, NULL);
2475                 }
2476
2477                 /*
2478                  * PD index in PDP
2479                  */
2480                 ptepindex = pv->pv_pindex - pmap_pd_pindex(0);
2481                 ptepindex &= ((1ul << NPDPEPGSHIFT) - 1);
2482         } else if (ptepindex < pmap_pml4_pindex()) {
2483                 /*
2484                  * pv is PDP, pvp is the root pml4 table
2485                  */
2486                 pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2487
2488                 /*
2489                  * PDP index in PML4
2490                  */
2491                 ptepindex = pv->pv_pindex - pmap_pdp_pindex(0);
2492                 ptepindex &= ((1ul << NPML4EPGSHIFT) - 1);
2493         } else {
2494                 /*
2495                  * pv represents the top-level PML4, there is no parent.
2496                  */
2497                 pvp = NULL;
2498         }
2499
2500         if (isnew == 0)
2501                 goto notnew;
2502
2503         /*
2504          * (isnew) is TRUE, pv is not terminal.
2505          *
2506          * (1) Add a wire count to the parent page table (pvp).
2507          * (2) Allocate a VM page for the page table.
2508          * (3) Enter the VM page into the parent page table.
2509          *
2510          * page table pages are marked PG_WRITEABLE and PG_MAPPED.
2511          */
2512         if (pvp)
2513                 vm_page_wire_quick(pvp->pv_m);
2514
2515         for (;;) {
2516                 m = vm_page_alloc(NULL, pv->pv_pindex,
2517                                   VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
2518                                   VM_ALLOC_INTERRUPT);
2519                 if (m)
2520                         break;
2521                 vm_wait(0);
2522         }
2523         vm_page_wire(m);        /* wire for mapping in parent */
2524         vm_page_unmanage(m);    /* m must be spinunlocked */
2525         pmap_zero_page(VM_PAGE_TO_PHYS(m));
2526         m->valid = VM_PAGE_BITS_ALL;
2527
2528         vm_page_spin_lock(m);
2529         pmap_page_stats_adding(m);
2530         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2531         pv->pv_m = m;
2532         vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
2533         vm_page_spin_unlock(m);
2534
2535         /*
2536          * (isnew) is TRUE, pv is not terminal.
2537          *
2538          * Wire the page into pvp.  Bump the resident_count for the pmap.
2539          * There is no pvp for the top level, address the pm_pml4[] array
2540          * directly.
2541          *
2542          * If the caller wants the parent we return it, otherwise
2543          * we just put it away.
2544          *
2545          * No interlock is needed for pte 0 -> non-zero.
2546          *
2547          * In the situation where *ptep is valid we might have an unmanaged
2548          * page table page shared from another page table which we need to
2549          * unshare before installing our private page table page.
2550          */
2551         if (pvp) {
2552                 v = VM_PAGE_TO_PHYS(m) |
2553                     (pmap->pmap_bits[PG_RW_IDX] |
2554                      pmap->pmap_bits[PG_V_IDX] |
2555                      pmap->pmap_bits[PG_A_IDX]);
2556                 if (ptepindex < NUPTE_USER)
2557                         v |= pmap->pmap_bits[PG_U_IDX];
2558                 if (ptepindex < pmap_pt_pindex(0))
2559                         v |= pmap->pmap_bits[PG_M_IDX];
2560
2561                 ptep = pv_pte_lookup(pvp, ptepindex);
2562                 if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso)
2563                         ptep_iso = pv_pte_lookup(pmap->pm_pmlpv_iso, ptepindex);
2564                 else
2565                         ptep_iso = NULL;
2566                 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
2567                         pt_entry_t pte;
2568
2569                         if (ispt == 0) {
2570                                 panic("pmap_allocpte: unexpected pte %p/%d",
2571                                       pvp, (int)ptepindex);
2572                         }
2573                         pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1,
2574                                              ptep, v);
2575                         if (ptep_iso) {
2576                                 pmap_inval_smp(pmap, (vm_offset_t)-1, 1,
2577                                                ptep_iso, v);
2578                         }
2579                         if (vm_page_unwire_quick(
2580                                         PHYS_TO_VM_PAGE(pte & PG_FRAME))) {
2581                                 panic("pmap_allocpte: shared pgtable "
2582                                       "pg bad wirecount");
2583                         }
2584                 } else {
2585                         pt_entry_t pte;
2586
2587                         pte = atomic_swap_long(ptep, v);
2588                         if (ptep_iso)
2589                                 atomic_swap_long(ptep_iso, v);
2590                         if (pte != 0) {
2591                                 kprintf("install pgtbl mixup 0x%016jx "
2592                                         "old/new 0x%016jx/0x%016jx\n",
2593                                         (intmax_t)ptepindex, pte, v);
2594                         }
2595                 }
2596         }
2597         vm_page_wakeup(m);
2598
2599         /*
2600          * (isnew) may be TRUE or FALSE, pv may or may not be terminal.
2601          */
2602 notnew:
2603         if (pvp) {
2604                 KKASSERT(pvp->pv_m != NULL);
2605                 ptep = pv_pte_lookup(pvp, ptepindex);
2606                 v = VM_PAGE_TO_PHYS(pv->pv_m) |
2607                     (pmap->pmap_bits[PG_RW_IDX] |
2608                      pmap->pmap_bits[PG_V_IDX] |
2609                      pmap->pmap_bits[PG_A_IDX]);
2610                 if (ptepindex < NUPTE_USER)
2611                         v |= pmap->pmap_bits[PG_U_IDX];
2612                 if (ptepindex < pmap_pt_pindex(0))
2613                         v |= pmap->pmap_bits[PG_M_IDX];
2614                 if (*ptep != v) {
2615                         kprintf("mismatched upper level pt %016jx/%016jx\n",
2616                                 *ptep, v);
2617                 }
2618         }
2619         if (pvpp)
2620                 *pvpp = pvp;
2621         else if (pvp)
2622                 pv_put(pvp);
2623         return (pv);
2624 }
2625
2626 /*
2627  * This version of pmap_allocpte() checks for possible segment optimizations
2628  * that would allow page-table sharing.  It can be called for terminal
2629  * page or page table page ptepindex's.
2630  *
2631  * The function is called with page table page ptepindex's for fictitious
2632  * and unmanaged terminal pages.  That is, we don't want to allocate a
2633  * terminal pv, we just want the pt_pv.  pvpp is usually passed as NULL
2634  * for this case.
2635  *
2636  * This function can return a pv and *pvpp associated with the passed in pmap
2637  * OR a pv and *pvpp associated with the shared pmap.  In the latter case
2638  * an unmanaged page table page will be entered into the passed-in pmap.
2639  */
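/*
 * Informal summary: when a SEG_SIZE-aligned window of a shared object is
 * mapped, the object carries its own pmap (md.pmap_rw or md.pmap_ro) and
 * the process PD entry is pointed directly at that shared pmap's page
 * table page, so every process mapping the same window shares one PT page
 * instead of populating a private one.
 */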
2640 static
2641 pv_entry_t
2642 pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp,
2643                   vm_map_entry_t entry, vm_offset_t va)
2644 {
2645         vm_object_t object;
2646         pmap_t obpmap;
2647         pmap_t *obpmapp;
2648         vm_pindex_t *pt_placemark;
2649         vm_offset_t b;
2650         pv_entry_t pte_pv;      /* in original or shared pmap */
2651         pv_entry_t pt_pv;       /* in original or shared pmap */
2652         pv_entry_t proc_pd_pv;  /* in original pmap */
2653         pv_entry_t proc_pt_pv;  /* in original pmap */
2654         pv_entry_t xpv;         /* PT in shared pmap */
2655         pd_entry_t *pt;         /* PT entry in PD of original pmap */
2656         pd_entry_t opte;        /* contents of *pt */
2657         pd_entry_t npte;        /* new contents for *pt */
2658         vm_page_t m;
2659         int softhold;
2660
2661         /*
2662          * Basic tests, require a non-NULL vm_map_entry, require proper
2663          * alignment and type for the vm_map_entry, require that the
2664          * underlying object already be allocated.
2665          *
2666          * We allow almost any type of object to use this optimization.
2667          * The object itself does NOT have to be sized to a multiple of the
2668          * segment size, but the memory mapping does.
2669          *
2670          * XXX don't handle devices currently, because VM_PAGE_TO_PHYS()
2671          *     won't work as expected.
2672          */
2673         if (entry == NULL ||
2674             pmap_mmu_optimize == 0 ||                   /* not enabled */
2675             (pmap->pm_flags & PMAP_HVM) ||              /* special pmap */
2676             ptepindex >= pmap_pd_pindex(0) ||           /* not terminal or pt */
2677             entry->inheritance != VM_INHERIT_SHARE ||   /* not shared */
2678             entry->maptype != VM_MAPTYPE_NORMAL ||      /* weird map type */
2679             entry->object.vm_object == NULL ||          /* needs VM object */
2680             entry->object.vm_object->type == OBJT_DEVICE ||     /* ick */
2681             entry->object.vm_object->type == OBJT_MGTDEVICE ||  /* ick */
2682             (entry->offset & SEG_MASK) ||               /* must be aligned */
2683             (entry->start & SEG_MASK)) {
2684                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2685         }
2686
2687         /*
2688          * Make sure the full segment can be represented.
2689          */
2690         b = va & ~(vm_offset_t)SEG_MASK;
2691         if (b < entry->start || b + SEG_SIZE > entry->end)
2692                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2693
2694         /*
2695          * If the full segment can be represented dive the VM object's
2696          * shared pmap, allocating as required.
2697          */
2698         object = entry->object.vm_object;
2699
2700         if (entry->protection & VM_PROT_WRITE)
2701                 obpmapp = &object->md.pmap_rw;
2702         else
2703                 obpmapp = &object->md.pmap_ro;
2704
2705 #ifdef PMAP_DEBUG2
2706         if (pmap_enter_debug > 0) {
2707                 --pmap_enter_debug;
2708                 kprintf("pmap_allocpte_seg: va=%jx prot %08x o=%p "
2709                         "obpmapp %p %p\n",
2710                         va, entry->protection, object,
2711                         obpmapp, *obpmapp);
2712                 kprintf("pmap_allocpte_seg: entry %p %jx-%jx\n",
2713                         entry, entry->start, entry->end);
2714         }
2715 #endif
2716
2717         /*
2718          * We allocate what appears to be a normal pmap but because portions
2719          * of this pmap are shared with other unrelated pmaps we have to
2720          * set pm_active to point to all cpus.
2721          *
2722          * XXX Currently using pmap_spin to interlock the update, can't use
2723          *     vm_object_hold/drop because the token might already be held
2724          *     shared OR exclusive and we don't know.
2725          */
2726         while ((obpmap = *obpmapp) == NULL) {
2727                 obpmap = kmalloc(sizeof(*obpmap), M_OBJPMAP, M_WAITOK|M_ZERO);
2728                 pmap_pinit_simple(obpmap);
2729                 pmap_pinit2(obpmap);
2730                 spin_lock(&pmap_spin);
2731                 if (*obpmapp != NULL) {
2732                         /*
2733                          * Handle race
2734                          */
2735                         spin_unlock(&pmap_spin);
2736                         pmap_release(obpmap);
2737                         pmap_puninit(obpmap);
2738                         kfree(obpmap, M_OBJPMAP);
2739                         obpmap = *obpmapp; /* safety */
2740                 } else {
2741                         obpmap->pm_active = smp_active_mask;
2742                         obpmap->pm_flags |= PMAP_SEGSHARED;
2743                         *obpmapp = obpmap;
2744                         spin_unlock(&pmap_spin);
2745                 }
2746         }
2747
2748         /*
2749          * Layering is: PTE, PT, PD, PDP, PML4.  We have to return the
2750          * pte/pt using the shared pmap from the object but also adjust
2751          * the process pmap's page table page as a side effect.
2752          */
2753
2754         /*
2755          * Resolve the terminal PTE and PT in the shared pmap.  This is what
2756          * we will return.  This is true if ptepindex represents a terminal
2757          * page, otherwise pte_pv is actually the PT and pt_pv is actually
2758          * the PD.
2759          */
2760         pt_pv = NULL;
2761         pte_pv = pmap_allocpte(obpmap, ptepindex, &pt_pv);
2762         softhold = 0;
2763 retry:
2764         if (ptepindex >= pmap_pt_pindex(0))
2765                 xpv = pte_pv;
2766         else
2767                 xpv = pt_pv;
2768
2769         /*
2770          * Resolve the PD in the process pmap so we can properly share the
2771          * page table page.  Lock order is bottom-up (leaf first)!
2772          *
2773          * NOTE: proc_pt_pv can be NULL.
2774          */
2775         proc_pt_pv = pv_get(pmap, pmap_pt_pindex(b), &pt_placemark);
2776         proc_pd_pv = pmap_allocpte(pmap, pmap_pd_pindex(b), NULL);
2777 #ifdef PMAP_DEBUG2
2778         if (pmap_enter_debug > 0) {
2779                 --pmap_enter_debug;
2780                 kprintf("proc_pt_pv %p (wc %d) pd_pv %p va=%jx\n",
2781                         proc_pt_pv,
2782                         (proc_pt_pv ? proc_pt_pv->pv_m->wire_count : -1),
2783                         proc_pd_pv,
2784                         va);
2785         }
2786 #endif
2787
2788         /*
2789          * xpv is the page table page pv from the shared object
2790          * (for convenience), from above.
2791          *
2792          * Calculate the pte value for the PT to load into the process PD.
2793          * If we have to change it we must properly dispose of the previous
2794          * entry.
2795          */
2796         pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2797         npte = VM_PAGE_TO_PHYS(xpv->pv_m) |
2798                (pmap->pmap_bits[PG_U_IDX] |
2799                 pmap->pmap_bits[PG_RW_IDX] |
2800                 pmap->pmap_bits[PG_V_IDX] |
2801                 pmap->pmap_bits[PG_A_IDX] |
2802                 pmap->pmap_bits[PG_M_IDX]);
2803
2804         /*
2805          * Dispose of previous page table page if it was local to the
2806          * process pmap.  If the old pt is not empty we cannot dispose of it
2807          * until we clean it out.  This case should not arise very often so
2808          * it is not optimized.
2809          *
2810          * Leave pt_pv and pte_pv (in our object pmap) locked and intact
2811          * for the retry.
2812          */
2813         if (proc_pt_pv) {
2814                 pmap_inval_bulk_t bulk;
2815
2816                 if (proc_pt_pv->pv_m->wire_count != 1) {
2817                         /*
2818                          * The page table has a bunch of stuff in it
2819                          * which we have to scrap.
2820                          */
2821                         if (softhold == 0) {
2822                                 softhold = 1;
2823                                 pmap_softhold(pmap);
2824                         }
2825                         pv_put(proc_pd_pv);
2826                         pv_put(proc_pt_pv);
2827                         pmap_remove(pmap,
2828                                     va & ~(vm_offset_t)SEG_MASK,
2829                                     (va + SEG_SIZE) & ~(vm_offset_t)SEG_MASK);
2830                 } else {
2831                         /*
2832                          * The page table is empty and can be destroyed.
2833                          * However, doing so leaves the pt slot unlocked,
2834                          * so we have to loop back up to handle any races until
2835                          * we get a NULL proc_pt_pv and a proper pt_placemark.
2836                          */
2837                         pmap_inval_bulk_init(&bulk, proc_pt_pv->pv_pmap);
2838                         pmap_release_pv(proc_pt_pv, proc_pd_pv, &bulk);
2839                         pmap_inval_bulk_flush(&bulk);
2840                         pv_put(proc_pd_pv);
2841                 }
2842                 goto retry;
2843         }
2844
2845         /*
2846          * Handle remaining cases.  We are holding pt_placemark to lock
2847          * the page table page in the primary pmap while we manipulate
2848          * it.
2849          */
2850         if (*pt == 0) {
2851                 atomic_swap_long(pt, npte);
2852                 vm_page_wire_quick(xpv->pv_m);          /* shared pt -> proc */
2853                 vm_page_wire_quick(proc_pd_pv->pv_m);   /* proc pd for sh pt */
2854                 atomic_add_long(&pmap->pm_stats.resident_count, 1);
2855         } else if (*pt != npte) {
2856                 opte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, pt, npte);
2857
2858 #if 0
2859                 opte = pte_load_clear(pt);
2860                 KKASSERT(opte && opte != npte);
2861
2862                 *pt = npte;
2863 #endif
2864                 vm_page_wire_quick(xpv->pv_m);          /* shared pt -> proc */
2865
2866                 /*
2867                  * Clean up opte, bump the wire_count for the process
2868                  * PD page representing the new entry if it was
2869                  * previously empty.
2870                  *
2871                  * If the entry was not previously empty and we have
2872                  * a PT in the proc pmap then opte must match that
2873                  * pt.  The proc pt must be retired (this is done
2874                  * later on in this procedure).
2875                  *
2876                  * NOTE: replacing valid pte, wire_count on proc_pd_pv
2877                  * stays the same.
2878                  */
2879                 KKASSERT(opte & pmap->pmap_bits[PG_V_IDX]);
2880                 m = PHYS_TO_VM_PAGE(opte & PG_FRAME);
2881                 if (vm_page_unwire_quick(m)) {
2882                         panic("pmap_allocpte_seg: "
2883                               "bad wire count %p",
2884                               m);
2885                 }
2886         }
2887
2888         if (softhold)
2889                 pmap_softdone(pmap);
2890
2891         /*
2892          * Remove our earmark on the page table page.
2893          */
2894         pv_placemarker_wakeup(pmap, pt_placemark);
2895
2896         /*
2897          * Release the process PD pv.  Any pre-existing private process
2898          * page table was already replaced or destroyed above.
2899          */
2900         if (proc_pd_pv)
2901                 pv_put(proc_pd_pv);
2902         if (pvpp)
2903                 *pvpp = pt_pv;
2904         else
2905                 pv_put(pt_pv);
2906         return (pte_pv);
2907 }
2908
2909 /*
2910  * Release any resources held by the given physical map.
2911  *
2912  * Called when a pmap initialized by pmap_pinit is being released.  Should
2913  * only be called if the map contains no valid mappings.
2914  */
2915 struct pmap_release_info {
2916         pmap_t  pmap;
2917         int     retry;
2918         pv_entry_t pvp;
2919 };
2920
2921 static int pmap_release_callback(pv_entry_t pv, void *data);
2922
2923 void
2924 pmap_release(struct pmap *pmap)
2925 {
2926         struct pmap_release_info info;
2927
2928         KASSERT(CPUMASK_TESTZERO(pmap->pm_active),
2929                 ("pmap still active! %016jx",
2930                 (uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
2931
2932         /*
2933          * There is no longer a pmap_list; if there were, we would remove the
2934          * pmap from it here.
2935          */
2936
2937         /*
2938          * Pull pv's off the RB tree in order from low to high and release
2939          * each page.
2940          */
2941         info.pmap = pmap;
2942         do {
2943                 info.retry = 0;
2944                 info.pvp = NULL;
2945
2946                 spin_lock(&pmap->pm_spin);
2947                 RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL,
2948                         pmap_release_callback, &info);
2949                 spin_unlock(&pmap->pm_spin);
2950
2951                 if (info.pvp)
2952                         pv_put(info.pvp);
2953         } while (info.retry);
2954
2955
2956         /*
2957          * One resident page (the pml4 page) should remain.  Two if
2958          * the pmap has implemented an isolated userland PML4E table.
2959          * No wired pages should remain.
2960          */
2961         int expected_res = 0;
2962
2963         if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0)
2964                 ++expected_res;
2965         if (pmap->pm_pmlpv_iso)
2966                 ++expected_res;
2967
2968 #if 1
2969         if (pmap->pm_stats.resident_count != expected_res ||
2970             pmap->pm_stats.wired_count != 0) {
2971                 kprintf("fatal pmap problem - pmap %p flags %08x "
2972                         "rescnt=%jd wirecnt=%jd\n",
2973                         pmap,
2974                         pmap->pm_flags,
2975                         pmap->pm_stats.resident_count,
2976                         pmap->pm_stats.wired_count);
2977                 tsleep(pmap, 0, "DEAD", 0);
2978         }
2979 #else
2980         KKASSERT(pmap->pm_stats.resident_count == expected_res);
2981         KKASSERT(pmap->pm_stats.wired_count == 0);
2982 #endif
2983 }
2984
2985 /*
2986  * Called from low to high.  We must cache the proper parent pv so we
2987  * can adjust its wired count.
2988  */
2989 static int
2990 pmap_release_callback(pv_entry_t pv, void *data)
2991 {
2992         struct pmap_release_info *info = data;
2993         pmap_t pmap = info->pmap;
2994         vm_pindex_t pindex;
2995         int r;
2996
2997         /*
2998          * Acquire a held and locked pv, check for release race
2999          */
3000         pindex = pv->pv_pindex;
3001         if (info->pvp == pv) {
3002                 spin_unlock(&pmap->pm_spin);
3003                 info->pvp = NULL;
3004         } else if (pv_hold_try(pv)) {
3005                 spin_unlock(&pmap->pm_spin);
3006         } else {
3007                 spin_unlock(&pmap->pm_spin);
3008                 pv_lock(pv);
3009                 pv_put(pv);
3010                 info->retry = 1;
3011                 spin_lock(&pmap->pm_spin);
3012
3013                 return -1;
3014         }
3015         KKASSERT(pv->pv_pmap == pmap && pindex == pv->pv_pindex);
3016
3017         if (pv->pv_pindex < pmap_pt_pindex(0)) {
3018                 /*
3019                  * I am PTE, parent is PT
3020                  */
3021                 pindex = pv->pv_pindex >> NPTEPGSHIFT;
3022                 pindex += NUPTE_TOTAL;
3023         } else if (pv->pv_pindex < pmap_pd_pindex(0)) {
3024                 /*
3025                  * I am PT, parent is PD
3026                  */
3027                 pindex = (pv->pv_pindex - NUPTE_TOTAL) >> NPDEPGSHIFT;
3028                 pindex += NUPTE_TOTAL + NUPT_TOTAL;
3029         } else if (pv->pv_pindex < pmap_pdp_pindex(0)) {
3030                 /*
3031                  * I am PD, parent is PDP
3032                  */
3033                 pindex = (pv->pv_pindex - NUPTE_TOTAL - NUPT_TOTAL) >>
3034                          NPDPEPGSHIFT;
3035                 pindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
3036         } else if (pv->pv_pindex < pmap_pml4_pindex()) {
3037                 /*
3038                  * I am PDP, parent is PML4.  We always calculate the
3039                  * normal PML4 here, not the isolated PML4.
3040                  */
3041                 pindex = pmap_pml4_pindex();
3042         } else {
3043                 /*
3044                  * parent is NULL
3045                  */
3046                 if (info->pvp) {
3047                         pv_put(info->pvp);
3048                         info->pvp = NULL;
3049                 }
3050                 pindex = 0;
3051         }
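        /*
         * Ensure info->pvp caches the locked parent pv for the computed
         * parent pindex (or NULL) so pmap_release_pv() can adjust the
         * parent's wire count.
         */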
3052         if (pindex) {
3053                 if (info->pvp && info->pvp->pv_pindex != pindex) {
3054                         pv_put(info->pvp);
3055                         info->pvp = NULL;
3056                 }
3057                 if (info->pvp == NULL)
3058                         info->pvp = pv_get(pmap, pindex, NULL);
3059         } else {
3060                 if (info->pvp) {
3061                         pv_put(info->pvp);
3062                         info->pvp = NULL;
3063                 }
3064         }
3065         r = pmap_release_pv(pv, info->pvp, NULL);
3066         spin_lock(&pmap->pm_spin);
3067
3068         return(r);
3069 }
3070
3071 /*
3072  * Called with held (i.e. also locked) pv.  This function will dispose of
3073  * the lock along with the pv.
3074  *
3075  * If the caller already holds the locked parent page table for pv it
3076  * must pass it as pvp, allowing us to avoid a deadlock, else it can
3077  * pass NULL for pvp.
3078  */
3079 static int
3080 pmap_release_pv(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk)
3081 {
3082         vm_page_t p;
3083
3084         /*
3085          * The pmap is currently not spinlocked, pv is held+locked.
3086          * Remove the pv's page from its parent's page table.  The
3087          * parent's page table page's wire_count will be decremented.
3088          *
3089          * This will clean out the pte at any level of the page table.
3090          * All cpus specified by (bulk) are properly invalidated.
3091          *
3092          * Do not tear down recursively, it's faster to just let the
3093          * release run its course.
3094          */
3095         pmap_remove_pv_pte(pv, pvp, bulk, 0);
3096
3097         /*
3098          * Terminal pvs are unhooked from their vm_pages.  Because
3099          * terminal pages aren't page table pages they aren't wired
3100          * by us, so we have to be sure not to unwire them either.
3101          */
3102         if (pv->pv_pindex < pmap_pt_pindex(0)) {
3103                 pmap_remove_pv_page(pv);
3104                 goto skip;
3105         }
3106
3107         /*
3108          * We leave the top-level page table page cached, wired, and
3109          * mapped in the pmap until the dtor function (pmap_puninit())
3110          * gets called.
3111          *
3112          * Since we are leaving the top-level pv intact we need
3113          * to break out of what would otherwise be an infinite loop.
3114          *
3115          * This covers both the normal and the isolated PML4 page.
3116          */
3117         if (pv->pv_pindex >= pmap_pml4_pindex()) {
3118                 pv_put(pv);
3119                 return(-1);
3120         }
3121
3122         /*
3123          * For page table pages (other than the top-level page),
3124          * remove and free the vm_page.  The representative mapping
3125          * removed above by pmap_remove_pv_pte() did not undo the
3126          * last wire_count so we have to do that as well.
3127          */
3128         p = pmap_remove_pv_page(pv);
3129         vm_page_busy_wait(p, FALSE, "pmaprl");
3130         if (p->wire_count != 1) {
3131                 kprintf("pmap_release_pv: pindex %016lx wire_count %d\n",
3132                         pv->pv_pindex, p->wire_count);
3133         }
3134         KKASSERT(p->wire_count == 1);
3135         KKASSERT(p->flags & PG_UNMANAGED);
3136
3137         vm_page_unwire(p, 0);
3138         KKASSERT(p->wire_count == 0);
3139
3140         vm_page_free(p);
3141 skip:
3142         pv_free(pv, pvp);
3143
3144         return 0;
3145 }
3146
3147 /*
3148  * This function will remove the pte associated with a pv from its parent.
3149  * Terminal pv's are supported.  All cpus specified by (bulk) are properly
3150  * invalidated.
3151  *
3152  * The wire count will be dropped on the parent page table.  The wire
3153  * count on the page being removed (pv->pv_m) from the parent page table
3154  * is NOT touched.  Note that terminal pages will not have any additional
3155  * wire counts while page table pages will have at least one representing
3156  * the mapping, plus others representing sub-mappings.
3157  *
3158  * NOTE: Cannot be called on kernel page table pages; only on KVM
3159  *       terminal pages and on user page table and terminal pages.
3160  *
3161  * NOTE: The pte being removed might be unmanaged, and the pv supplied might
3162  *       be freshly allocated and not imply that the pte is managed.  In this
3163  *       case pv->pv_m should be NULL.
3164  *
3165  * The pv must be locked.  The pvp, if supplied, must be locked.  All
3166  * supplied pv's will remain locked on return.
3167  *
3168  * XXX must lock parent pv's if they exist to remove pte XXX
3169  */
3170 static
3171 void
3172 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk,
3173                    int destroy)
3174 {
3175         vm_pindex_t ptepindex = pv->pv_pindex;
3176         pmap_t pmap = pv->pv_pmap;
3177         vm_page_t p;
3178         int gotpvp = 0;
3179
3180         KKASSERT(pmap);
3181
3182         if (ptepindex >= pmap_pml4_pindex()) {
3183                 /*
3184                  * We are the top level PML4E table, there is no parent.
3185                  *
3186                  * This is either the normal or isolated PML4E table.
3187                  * Only the normal is used in regular operation, the isolated
3188                  * is only passed in when breaking down the whole pmap.
3189                  */
3190                 p = pmap->pm_pmlpv->pv_m;
3191                 KKASSERT(pv->pv_m == p);        /* debugging */
3192         } else if (ptepindex >= pmap_pdp_pindex(0)) {
3193                 /*
3194                  * Remove a PDP page from the PML4E.  This can only occur
3195                  * with user page tables.  We do not have to lock the
3196                  * pml4 PV so just ignore pvp.
3197                  */
3198                 vm_pindex_t pml4_pindex;
3199                 vm_pindex_t pdp_index;
3200                 pml4_entry_t *pdp;
3201                 pml4_entry_t *pdp_iso;
3202
3203                 pdp_index = ptepindex - pmap_pdp_pindex(0);
3204                 if (pvp == NULL) {
3205                         pml4_pindex = pmap_pml4_pindex();
3206                         pvp = pv_get(pv->pv_pmap, pml4_pindex, NULL);
3207                         KKASSERT(pvp);
3208                         gotpvp = 1;
3209                 }
3210
3211                 pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
3212                 KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
3213                 p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
3214                 pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp, 0);
3215
3216                 /*
3217                  * Also remove the PDP from the isolated PML4E if the
3218                  * process uses one.
3219                  */
3220                 if (pvp == pmap->pm_pmlpv && pmap->pm_pmlpv_iso) {
3221                         pdp_iso = &pmap->pm_pml4_iso[pdp_index &
3222                                                 ((1ul << NPML4EPGSHIFT) - 1)];
3223                         pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp_iso, 0);
3224                 }
3225                 KKASSERT(pv->pv_m == p);        /* debugging */
3226         } else if (ptepindex >= pmap_pd_pindex(0)) {
3227                 /*
3228                  * Remove a PD page from the PDP
3229                  *
3230                  * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
3231                  *                   of a simple pmap because it stops at
3232                  *                   the PD page.
3233                  */
3234                 vm_pindex_t pdp_pindex;
3235                 vm_pindex_t pd_index;
3236                 pdp_entry_t *pd;
3237
3238                 pd_index = ptepindex - pmap_pd_pindex(0);
3239
3240                 if (pvp == NULL) {
3241                         pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
3242                                      (pd_index >> NPML4EPGSHIFT);
3243                         pvp = pv_get(pv->pv_pmap, pdp_pindex, NULL);
3244                         gotpvp = 1;
3245                 }
3246
3247                 if (pvp) {
3248                         pd = pv_pte_lookup(pvp, pd_index &
3249                                                 ((1ul << NPDPEPGSHIFT) - 1));
3250                         KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
3251                         p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
3252                         pmap_inval_bulk(bulk, (vm_offset_t)-1, pd, 0);
3253                 } else {
3254                         KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
3255                         p = pv->pv_m;           /* degenerate test later */
3256                 }
3257                 KKASSERT(pv->pv_m == p);        /* debugging */
3258         } else if (ptepindex >= pmap_pt_pindex(0)) {
3259                 /*
3260                  * Remove a PT page from the PD
3261                  */
3262                 vm_pindex_t pd_pindex;
3263                 vm_pindex_t pt_index;
3264                 pd_entry_t *pt;
3265
3266                 pt_index = ptepindex - pmap_pt_pindex(0);
3267
3268                 if (pvp == NULL) {
3269                         pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
3270                                     (pt_index >> NPDPEPGSHIFT);
3271                         pvp = pv_get(pv->pv_pmap, pd_pindex, NULL);
3272                         KKASSERT(pvp);
3273                         gotpvp = 1;
3274                 }
3275
3276                 pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
3277 #if 0
3278                 KASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0,
3279                         ("*pt unexpectedly invalid %016jx "
3280                          "gotpvp=%d ptepindex=%ld ptindex=%ld pv=%p pvp=%p",
3281                         *pt, gotpvp, ptepindex, pt_index, pv, pvp));
3282                 p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3283 #else
3284                 if ((*pt & pmap->pmap_bits[PG_V_IDX]) == 0) {
3285                         kprintf("*pt unexpectedly invalid %016jx "
3286                                 "gotpvp=%d ptepindex=%ld ptindex=%ld "
3287                                 "pv=%p pvp=%p\n",
3288                                 *pt, gotpvp, ptepindex, pt_index, pv, pvp);
3289                         tsleep(pt, 0, "DEAD", 0);
3290                         p = pv->pv_m;
3291                 } else {
3292                         p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
3293                 }
3294 #endif
3295                 pmap_inval_bulk(bulk, (vm_offset_t)-1, pt, 0);
3296                 KKASSERT(pv->pv_m == p);        /* debugging */
3297         } else {
3298                 /*
3299                  * Remove a PTE from the PT page.  The PV might exist even if
3300                  * the PTE is not managed, in which case pv->pv_m should be
3301                  * NULL.
3302                  *
3303                  * NOTE: Userland pmaps manage the parent PT/PD/PDP page
3304                  *       table pages but the kernel_pmap does not.
3305                  *
3306                  * NOTE: pv's must be locked bottom-up to avoid deadlocking.
3307                  *       pv is a pte_pv so we can safely lock pt_pv.
3308                  *
3309                  * NOTE: FICTITIOUS pages may have multiple physical mappings
3310                  *       so PHYS_TO_VM_PAGE() will not necessarily work for
3311                  *       terminal ptes.
3312                  */
3313                 vm_pindex_t pt_pindex;
3314                 pt_entry_t *ptep;
3315                 pt_entry_t pte;
3316                 vm_offset_t va;
3317
3318                 pt_pindex = ptepindex >> NPTEPGSHIFT;
3319                 va = (vm_offset_t)ptepindex << PAGE_SHIFT;
3320
3321                 if (ptepindex >= NUPTE_USER) {
3322                         ptep = vtopte(ptepindex << PAGE_SHIFT);
3323                         KKASSERT(pvp == NULL);
3324                         /* pvp remains NULL */
3325                 } else {
3326                         if (pvp == NULL) {
3327                                 pt_pindex = NUPTE_TOTAL +
3328                                             (ptepindex >> NPDPEPGSHIFT);
3329                                 pvp = pv_get(pv->pv_pmap, pt_pindex, NULL);
3330                                 KKASSERT(pvp);
3331                                 gotpvp = 1;
3332                         }
3333                         ptep = pv_pte_lookup(pvp, ptepindex &
3334                                                   ((1ul << NPDPEPGSHIFT) - 1));
3335                 }
3336                 pte = pmap_inval_bulk(bulk, va, ptep, 0);
3337                 if (bulk == NULL)               /* XXX */
3338                         cpu_invlpg((void *)va); /* XXX */
3339
3340                 /*
3341                  * Now update the vm_page_t
3342                  */
3343                 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3344                     (pte & pmap->pmap_bits[PG_V_IDX])) {
3345                         /*
3346                          * Valid managed page, adjust (p).
3347                          */
3348                         if (pte & pmap->pmap_bits[PG_DEVICE_IDX]) {
3349                                 p = pv->pv_m;
3350                         } else {
3351                                 p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3352                                 KKASSERT(pv->pv_m == p);
3353                         }
3354                         if (pte & pmap->pmap_bits[PG_M_IDX]) {
3355                                 if (pmap_track_modified(ptepindex))
3356                                         vm_page_dirty(p);
3357                         }
3358                         if (pte & pmap->pmap_bits[PG_A_IDX]) {
3359                                 vm_page_flag_set(p, PG_REFERENCED);
3360                         }
3361                 } else {
3362                         /*
3363                          * Unmanaged page, do not try to adjust the vm_page_t.
3364                          * pv could be freshly allocated for a pmap_enter(),
3365                          * replacing an unmanaged page with a managed one.
3366                          *
3367                          * pv->pv_m might reflect the new page and not the
3368                          * existing page.
3369                          *
3370                          * We could extract p from the physical address and
3371                          * adjust it but we explicitly do not for unmanaged
3372                          * pages.
3373                          */
3374                         p = NULL;
3375                 }
3376                 if (pte & pmap->pmap_bits[PG_W_IDX])
3377                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
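                /*
                 * Global (PG_G) mappings are not flushed by a CR3 reload,
                 * so explicitly invalidate the TLB entry for this va.
                 */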
3378                 if (pte & pmap->pmap_bits[PG_G_IDX])
3379                         cpu_invlpg((void *)va);
3380         }
3381
3382         /*
3383          * If requested, scrap the underlying pv->pv_m and the underlying
3384          * pv.  If this is a page-table-page we must also free the page.
3385          *
3386          * pvp must be returned locked.
3387          */
3388         if (destroy == 1) {
3389                 /*
3390                  * page table page (PT, PD, PDP, PML4), caller was responsible
3391                  * for testing wired_count.
3392                  */
3393                 KKASSERT(pv->pv_m->wire_count == 1);
3394                 p = pmap_remove_pv_page(pv);
3395                 pv_free(pv, pvp);
3396                 pv = NULL;
3397
3398                 vm_page_busy_wait(p, FALSE, "pgpun");
3399                 vm_page_unwire(p, 0);
3400                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
3401                 vm_page_free(p);
3402         } else if (destroy == 2) {
3403                 /*
3404                  * Normal page, remove from pmap and leave the underlying
3405                  * page untouched.
3406                  */
3407                 pmap_remove_pv_page(pv);
3408                 pv_free(pv, pvp);
3409                 pv = NULL;              /* safety */
3410         }
3411
3412         /*
3413          * If we acquired pvp ourselves then we are responsible for
3414          * recursively deleting it.
3415          */
3416         if (pvp && gotpvp) {
3417                 /*
3418                  * Recursively destroy higher-level page tables.
3419                  *
3420                  * This is optional.  If we do not, they will still
3421                  * be destroyed when the process exits.
3422                  *
3423                  * NOTE: Do not destroy pv_entry's with extra hold refs,
3424                  *       a caller may have unlocked it and intends to
3425                  *       continue to use it.
3426                  */
3427                 if (pmap_dynamic_delete &&
3428                     pvp->pv_m &&
3429                     pvp->pv_m->wire_count == 1 &&
3430                     (pvp->pv_hold & PV_HOLD_MASK) == 2 &&
3431                     pvp->pv_pindex < pmap_pml4_pindex()) {
3432                         if (pmap_dynamic_delete == 2)
3433                                 kprintf("A %jd %08x\n", pvp->pv_pindex, pvp->pv_hold);
3434                         if (pmap != &kernel_pmap) {
3435                                 pmap_remove_pv_pte(pvp, NULL, bulk, 1);
3436                                 pvp = NULL;     /* safety */
3437                         } else {
3438                                 kprintf("Attempt to remove kernel_pmap pindex "
3439                                         "%jd\n", pvp->pv_pindex);
3440                                 pv_put(pvp);
3441                         }
3442                 } else {
3443                         pv_put(pvp);
3444                 }
3445         }
3446 }
3447
3448 /*
3449  * Remove the vm_page association to a pv.  The pv must be locked.
3450  */
3451 static
3452 vm_page_t
3453 pmap_remove_pv_page(pv_entry_t pv)
3454 {
3455         vm_page_t m;
3456
3457         m = pv->pv_m;
3458         vm_page_spin_lock(m);
3459         KKASSERT(m && m == pv->pv_m);
3460         pv->pv_m = NULL;
3461         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3462         pmap_page_stats_deleting(m);
3463         if (TAILQ_EMPTY(&m->md.pv_list))
3464                 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3465         vm_page_spin_unlock(m);
3466
3467         return(m);
3468 }
3469
3470 /*
3471  * Grow the number of kernel page table entries, if needed.
3472  *
3473  * This routine is always called to validate any address space
3474  * beyond KERNBASE (for kldloads).  kernel_vm_end only governs the address
3475  * space below KERNBASE.
3476  *
3477  * kernel_map must be locked exclusively by the caller.
3478  */
3479 void
3480 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
3481 {
3482         vm_paddr_t paddr;
3483         vm_offset_t ptppaddr;
3484         vm_page_t nkpg;
3485         pd_entry_t *pt, newpt;
3486         pdp_entry_t *pd, newpd;
3487         int update_kernel_vm_end;
3488
3489         /*
3490          * bootstrap kernel_vm_end on first real VM use
3491          */
3492         if (kernel_vm_end == 0) {
3493                 kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
3494
3495                 for (;;) {
3496                         pt = pmap_pt(&kernel_pmap, kernel_vm_end);
3497                         if (pt == NULL)
3498                                 break;
3499                         if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) == 0)
3500                                 break;
3501                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
3502                                         ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3503                         if (kernel_vm_end - 1 >= kernel_map.max_offset) {
3504                                 kernel_vm_end = kernel_map.max_offset;
3505                                 break;                       
3506                         }
3507                 }
3508         }
3509
3510         /*
3511          * Fill in the gaps.  kernel_vm_end is only adjusted for ranges
3512          * below KERNBASE.  Ranges above KERNBASE are kldloaded and we
3513          * do not want to force-fill 128G worth of page tables.
3514          */
3515         if (kstart < KERNBASE) {
3516                 if (kstart > kernel_vm_end)
3517                         kstart = kernel_vm_end;
3518                 KKASSERT(kend <= KERNBASE);
3519                 update_kernel_vm_end = 1;
3520         } else {
3521                 update_kernel_vm_end = 0;
3522         }
3523
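        /*
         * Round the requested range out to whole page-table pages
         * (PAGE_SIZE * NPTEPG bytes, i.e. 2MB with 4K pages).
         */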
3524         kstart = rounddown2(kstart, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3525         kend = roundup2(kend, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3526
3527         if (kend - 1 >= kernel_map.max_offset)
3528                 kend = kernel_map.max_offset;
3529
3530         while (kstart < kend) {
3531                 pt = pmap_pt(&kernel_pmap, kstart);
3532                 if (pt == NULL) {
3533                         /*
3534                          * We need a new PD entry
3535                          */
3536                         nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3537                                              VM_ALLOC_NORMAL |
3538                                              VM_ALLOC_SYSTEM |
3539                                              VM_ALLOC_INTERRUPT);
3540                         if (nkpg == NULL) {
3541                                 panic("pmap_growkernel: no memory to grow "
3542                                       "kernel");
3543                         }
3544                         paddr = VM_PAGE_TO_PHYS(nkpg);
3545                         pmap_zero_page(paddr);
3546                         pd = pmap_pd(&kernel_pmap, kstart);
3547
3548                         newpd = (pdp_entry_t)
3549                             (paddr |
3550                             kernel_pmap.pmap_bits[PG_V_IDX] |
3551                             kernel_pmap.pmap_bits[PG_RW_IDX] |
3552                             kernel_pmap.pmap_bits[PG_A_IDX]);
3553                         atomic_swap_long(pd, newpd);
3554
3555 #if 0
3556                         kprintf("NEWPD pd=%p pde=%016jx phys=%016jx\n",
3557                                 pd, newpd, paddr);
3558 #endif
3559
3560                         continue; /* try again */
3561                 }
3562
3563                 if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
3564                         kstart = (kstart + PAGE_SIZE * NPTEPG) &
3565                                  ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3566                         if (kstart - 1 >= kernel_map.max_offset) {
3567                                 kstart = kernel_map.max_offset;
3568                                 break;                       
3569                         }
3570                         continue;
3571                 }
3572
3573                 /*
3574                  * We need a new PT
3575                  *
3576                  * This index is bogus, but out of the way
3577                  */
3578                 nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3579                                      VM_ALLOC_NORMAL |
3580                                      VM_ALLOC_SYSTEM |
3581                                      VM_ALLOC_INTERRUPT);
3582                 if (nkpg == NULL)
3583                         panic("pmap_growkernel: no memory to grow kernel");
3584
3585                 vm_page_wire(nkpg);
3586                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
3587                 pmap_zero_page(ptppaddr);
3588                 newpt = (pd_entry_t)(ptppaddr |
3589                                      kernel_pmap.pmap_bits[PG_V_IDX] |
3590                                      kernel_pmap.pmap_bits[PG_RW_IDX] |
3591                                      kernel_pmap.pmap_bits[PG_A_IDX]);
3592                 atomic_swap_long(pt, newpt);
3593
3594                 kstart = (kstart + PAGE_SIZE * NPTEPG) &
3595                           ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3596
3597                 if (kstart - 1 >= kernel_map.max_offset) {
3598                         kstart = kernel_map.max_offset;
3599                         break;                       
3600                 }
3601         }
3602
3603         /*
3604          * Only update kernel_vm_end for areas below KERNBASE.
3605          */
3606         if (update_kernel_vm_end && kernel_vm_end < kstart)
3607                 kernel_vm_end = kstart;
3608 }
3609
3610 /*
3611  *      Add a reference to the specified pmap.
3612  */
3613 void
3614 pmap_reference(pmap_t pmap)
3615 {
3616         if (pmap != NULL)
3617                 atomic_add_int(&pmap->pm_count, 1);
3618 }
3619
3620 /***************************************************
3621  * page management routines.
3622  ***************************************************/
3623
3624 /*
3625  * Hold a pv without locking it
3626  */
3627 static void
3628 pv_hold(pv_entry_t pv)
3629 {
3630         atomic_add_int(&pv->pv_hold, 1);
3631 }
3632
3633 /*
3634  * Hold a pv_entry, preventing its destruction.  TRUE is returned if the pv
3635  * was successfully locked, FALSE if it wasn't.  The caller must dispose of
3636  * the pv properly.
3637  *
3638  * Either the pmap->pm_spin or the related vm_page_spin (if traversing a
3639  * pv list via its page) must be held by the caller in order to stabilize
3640  * the pv.
3641  */
3642 static int
3643 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL)
3644 {
3645         u_int count;
3646
3647         /*
3648          * Critical path shortcut expects pv to already have one ref
3649          * (for the pv->pv_pmap).
3650          */
3651         count = pv->pv_hold;
3652         cpu_ccfence();
3653         for (;;) {
3654                 if ((count & PV_HOLD_LOCKED) == 0) {
3655                         if (atomic_fcmpset_int(&pv->pv_hold, &count,
3656                                               (count + 1) | PV_HOLD_LOCKED)) {
3657 #ifdef PMAP_DEBUG
3658                                 pv->pv_func = func;
3659                                 pv->pv_line = lineno;
3660 #endif
3661                                 return TRUE;
3662                         }
3663                 } else {
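                        /*
                         * Locked by someone else.  Add a hold reference
                         * for the caller anyway and report that the lock
                         * attempt failed.
                         */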
3664                         if (atomic_fcmpset_int(&pv->pv_hold, &count, count + 1))
3665                                 return FALSE;
3666                 }
3667                 /* retry */
3668         }
3669 }
3670
3671 /*
3672  * Drop a previously held pv_entry which could not be locked, allowing its
3673  * destruction.
3674  *
3675  * Must not be called with a spinlock held as we might zfree() the pv if it
3676  * is no longer associated with a pmap and this was the last hold count.
3677  */
3678 static void
3679 pv_drop(pv_entry_t pv)
3680 {
3681         u_int count;
3682
3683         for (;;) {
3684                 count = pv->pv_hold;
3685                 cpu_ccfence();
3686                 KKASSERT((count & PV_HOLD_MASK) > 0);
3687                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) !=
3688                          (PV_HOLD_LOCKED | 1));
3689                 if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) {
3690                         if ((count & PV_HOLD_MASK) == 1) {
3691 #ifdef PMAP_DEBUG2
3692                                 if (pmap_enter_debug > 0) {
3693                                         --pmap_enter_debug;
3694                                         kprintf("pv_drop: free pv %p\n", pv);
3695                                 }
3696 #endif
3697                                 KKASSERT(count == 1);
3698                                 KKASSERT(pv->pv_pmap == NULL);
3699                                 zfree(pvzone, pv);
3700                         }
3701                         return;
3702                 }
3703                 /* retry */
3704         }
3705 }
3706
3707 /*
3708  * Find or allocate the requested PV entry, returning a locked, held pv.
3709  *
3710  * If (*isnew) is non-zero, the returned pv will have two hold counts, one
3711  * for the caller and one representing the pmap and vm_page association.
3712  *
3713  * If (*isnew) is zero, the returned pv will have only one hold count.
3714  *
3715  * Since both associations can only be adjusted while the pv is locked,
3716  * together they represent just one additional hold.
3717  */
3718 static
3719 pv_entry_t
3720 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
3721 {
3722         struct mdglobaldata *md = mdcpu;
3723         pv_entry_t pv;
3724         pv_entry_t pnew;
3725         int pmap_excl = 0;
3726
3727         pnew = NULL;
3728         if (md->gd_newpv) {
3729 #if 1
3730                 pnew = atomic_swap_ptr((void *)&md->gd_newpv, NULL);
3731 #else
3732                 crit_enter();
3733                 pnew = md->gd_newpv;    /* might race NULL */
3734                 md->gd_newpv = NULL;
3735                 crit_exit();
3736 #endif
3737         }
3738         if (pnew == NULL)
3739                 pnew = zalloc(pvzone);
3740
3741         spin_lock_shared(&pmap->pm_spin);
3742         for (;;) {
3743                 /*
3744                  * Shortcut cache
3745                  */
3746                 pv = pv_entry_lookup(pmap, pindex);
3747                 if (pv == NULL) {
3748                         vm_pindex_t *pmark;
3749
3750                         /*
3751                          * Requires exclusive pmap spinlock
3752                          */
3753                         if (pmap_excl == 0) {
3754                                 pmap_excl = 1;
3755                                 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3756                                         spin_unlock_shared(&pmap->pm_spin);
3757                                         spin_lock(&pmap->pm_spin);
3758                                         continue;
3759                                 }
3760                         }
3761
3762                         /*
3763                          * We need to block if someone is holding our
3764                          * placemarker.  As long as we determine the
3765                          * placemarker has not been acquired we do not
3766                          * need to get it, as acquisition also requires
3767                          * the pmap spin lock.
3768                          *
3769                          * However, we can race the wakeup.
3770                          */
3771                         pmark = pmap_placemarker_hash(pmap, pindex);
3772
3773                         if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3774                                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3775                                 tsleep_interlock(pmark, 0);
3776                                 if (((*pmark ^ pindex) &
3777                                      ~PM_PLACEMARK_WAKEUP) == 0) {
3778                                         spin_unlock(&pmap->pm_spin);
3779                                         tsleep(pmark, PINTERLOCKED, "pvplc", 0);
3780                                         spin_lock(&pmap->pm_spin);
3781                                 }
3782                                 continue;
3783                         }
3784
3785                         /*
3786                          * Setup the new entry
3787                          */
3788                         pnew->pv_pmap = pmap;
3789                         pnew->pv_pindex = pindex;
3790                         pnew->pv_hold = PV_HOLD_LOCKED | 2;
3791 #ifdef PMAP_DEBUG
3792                         pnew->pv_func = func;
3793                         pnew->pv_line = lineno;
3794                         if (pnew->pv_line_lastfree > 0) {
3795                                 pnew->pv_line_lastfree =
3796                                                 -pnew->pv_line_lastfree;
3797                         }
3798 #endif
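                        /*
                         * Insert the staged pv into the RB tree.  RB_INSERT
                         * returns any colliding entry; the KASSERT below
                         * verifies the insert did not collide.
                         */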
3799                         pv = pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
3800                         atomic_add_long(&pmap->pm_stats.resident_count, 1);
3801                         spin_unlock(&pmap->pm_spin);
3802                         *isnew = 1;
3803
3804                         KASSERT(pv == NULL, ("pv insert failed %p->%p", pnew, pv));
3805                         return(pnew);
3806                 }
3807
3808                 /*
3809                  * We already have an entry; clean up the staged pnew if
3810                  * we can get the lock, otherwise block and retry.
3811                  */
3812                 if (__predict_true(_pv_hold_try(pv PMAP_DEBUG_COPY))) {
3813                         if (pmap_excl)
3814                                 spin_unlock(&pmap->pm_spin);
3815                         else
3816                                 spin_unlock_shared(&pmap->pm_spin);
3817 #if 1
3818                         pnew = atomic_swap_ptr((void *)&md->gd_newpv, pnew);
3819                         if (pnew)
3820                                 zfree(pvzone, pnew);
3821 #else
3822                         crit_enter();
3823                         if (md->gd_newpv == NULL)
3824                                 md->gd_newpv = pnew;
3825                         else
3826                                 zfree(pvzone, pnew);
3827                         crit_exit();
3828 #endif
3829                         KKASSERT(pv->pv_pmap == pmap &&
3830                                  pv->pv_pindex == pindex);
3831                         *isnew = 0;
3832                         return(pv);
3833                 }
3834                 if (pmap_excl) {
3835                         spin_unlock(&pmap->pm_spin);
3836                         _pv_lock(pv PMAP_DEBUG_COPY);
3837                         pv_put(pv);
3838                         spin_lock(&pmap->pm_spin);
3839                 } else {
3840                         spin_unlock_shared(&pmap->pm_spin);
3841                         _pv_lock(pv PMAP_DEBUG_COPY);
3842                         pv_put(pv);
3843                         spin_lock_shared(&pmap->pm_spin);
3844                 }
3845         }
3846         /* NOT REACHED */
3847 }
3848
3849 /*
3850  * Find the requested PV entry, returning a locked+held pv or NULL
3851  */
3852 static
3853 pv_entry_t
3854 _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp PMAP_DEBUG_DECL)
3855 {
3856         pv_entry_t pv;
3857         int pmap_excl = 0;
3858
3859         spin_lock_shared(&pmap->pm_spin);
3860         for (;;) {
3861                 /*
3862                  * Shortcut cache
3863                  */
3864                 pv = pv_entry_lookup(pmap, pindex);
3865                 if (pv == NULL) {
3866                         /*
3867                          * Block if there is ANY placemarker.  If we are to
3868                          * return it, we must also acquire the spot, so we
3869                          * have to block even if the placemarker is held on
3870                          * a different address.
3871                          *
3872                          * OPTIMIZATION: If pmarkp is passed as NULL the
3873                          * caller is just probing (or looking for a real
3874                          * pv_entry), and in this case we only need to check
3875                          * to see if the placemarker matches pindex.
3876                          */
3877                         vm_pindex_t *pmark;
3878
3879                         /*
3880                          * Requires exclusive pmap spinlock
3881                          */
3882                         if (pmap_excl == 0) {
3883                                 pmap_excl = 1;
3884                                 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3885                                         spin_unlock_shared(&pmap->pm_spin);
3886                                         spin_lock(&pmap->pm_spin);
3887                                         continue;
3888                                 }
3889                         }
3890
3891                         pmark = pmap_placemarker_hash(pmap, pindex);
3892
3893                         if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3894                             ((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3895                                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3896                                 tsleep_interlock(pmark, 0);
3897                                 if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3898                                     ((*pmark ^ pindex) &
3899                                      ~PM_PLACEMARK_WAKEUP) == 0) {
3900                                         spin_unlock(&pmap->pm_spin);
3901                                         tsleep(pmark, PINTERLOCKED, "pvpld", 0);
3902                                         spin_lock(&pmap->pm_spin);
3903                                 }
3904                                 continue;
3905                         }
3906                         if (pmarkp) {
3907                                 if (atomic_swap_long(pmark, pindex) !=
3908                                     PM_NOPLACEMARK) {
3909                                         panic("_pv_get: pmark race");
3910                                 }
3911                                 *pmarkp = pmark;
3912                         }
3913                         spin_unlock(&pmap->pm_spin);
3914                         return NULL;
3915                 }
3916                 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
3917                         if (pmap_excl)
3918                                 spin_unlock(&pmap->pm_spin);
3919                         else
3920                                 spin_unlock_shared(&pmap->pm_spin);
3921                         KKASSERT(pv->pv_pmap == pmap &&
3922                                  pv->pv_pindex == pindex);
3923                         return(pv);
3924                 }
3925                 if (pmap_excl) {
3926                         spin_unlock(&pmap->pm_spin);
3927                         _pv_lock(pv PMAP_DEBUG_COPY);
3928                         pv_put(pv);
3929                         spin_lock(&pmap->pm_spin);
3930                 } else {
3931                         spin_unlock_shared(&pmap->pm_spin);
3932                         _pv_lock(pv PMAP_DEBUG_COPY);
3933                         pv_put(pv);
3934                         spin_lock_shared(&pmap->pm_spin);
3935                 }
3936         }
3937 }
3938
3939 /*
3940  * Lookup, hold, and attempt to lock (pmap,pindex).
3941  *
3942  * If the entry does not exist NULL is returned and *errorp is set to 0
3943  *
3944  * If the entry exists and could be successfully locked it is returned and
3945  * *errorp is set to 0.
3946  *
3947  * If the entry exists but could NOT be successfully locked it is returned
3948  * held and *errorp is set to 1.
3949  *
3950  * If the entry is placemarked by someone else NULL is returned and *errorp
3951  * is set to 1.
3952  */
3953 static
3954 pv_entry_t
3955 pv_get_try(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp, int *errorp)
3956 {
3957         pv_entry_t pv;
3958
3959         spin_lock_shared(&pmap->pm_spin);
3960
3961         pv = pv_entry_lookup(pmap, pindex);
3962         if (pv == NULL) {
3963                 vm_pindex_t *pmark;
3964
3965                 pmark = pmap_placemarker_hash(pmap, pindex);
3966
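                /*
                 * A placemark matching our pindex means someone else has
                 * this slot placemarked; treat it as a lock failure.
                 */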
3967                 if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3968                         *errorp = 1;
3969                 } else if (pmarkp &&
3970                            atomic_cmpset_long(pmark, PM_NOPLACEMARK, pindex)) {
3971                         *errorp = 0;
3972                 } else {
3973                         /*
3974                          * Can't set a placemark with a NULL pmarkp, or if
3975                          * pmarkp is non-NULL but we failed to set our
3976                          * placemark.
3977                          */
3978                         *errorp = 1;
3979                 }
3980                 if (pmarkp)
3981                         *pmarkp = pmark;
3982                 spin_unlock_shared(&pmap->pm_spin);
3983
3984                 return NULL;
3985         }
3986
3987         /*
3988          * XXX This has problems if the lock is shared, why?
3989          */
3990         if (pv_hold_try(pv)) {
3991                 spin_unlock_shared(&pmap->pm_spin);
3992                 *errorp = 0;
3993                 KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
3994                 return(pv);     /* lock succeeded */
3995         }
3996         spin_unlock_shared(&pmap->pm_spin);
3997         *errorp = 1;
3998
3999         return (pv);            /* lock failed */
4000 }
4001
4002 /*
4003  * Lock a held pv, keeping the hold count
4004  */
4005 static
4006 void
4007 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
4008 {
4009         u_int count;
4010
4011         for (;;) {
4012                 count = pv->pv_hold;
4013                 cpu_ccfence();
4014                 if ((count & PV_HOLD_LOCKED) == 0) {
4015                         if (atomic_cmpset_int(&pv->pv_hold, count,
4016                                               count | PV_HOLD_LOCKED)) {
4017 #ifdef PMAP_DEBUG
4018                                 pv->pv_func = func;
4019                                 pv->pv_line = lineno;
4020 #endif
4021                                 return;
4022                         }
4023                         continue;
4024                 }
4025                 tsleep_interlock(pv, 0);
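                /*
                 * Locked by someone else.  Flag that we are waiting and
                 * sleep; pv_unlock() issues the wakeup when it sees
                 * PV_HOLD_WAITING.
                 */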
4026                 if (atomic_cmpset_int(&pv->pv_hold, count,
4027                                       count | PV_HOLD_WAITING)) {
4028 #ifdef PMAP_DEBUG2
4029                         if (pmap_enter_debug > 0) {
4030                                 --pmap_enter_debug;
4031                                 kprintf("pv waiting on %s:%d\n",
4032                                         pv->pv_func, pv->pv_line);
4033                         }
4034 #endif
4035                         tsleep(pv, PINTERLOCKED, "pvwait", hz);
4036                 }
4037                 /* retry */
4038         }
4039 }
4040
4041 /*
4042  * Unlock a held and locked pv, keeping the hold count.
4043  */
4044 static
4045 void
4046 pv_unlock(pv_entry_t pv)
4047 {
4048         u_int count;
4049
4050         for (;;) {
4051                 count = pv->pv_hold;
4052                 cpu_ccfence();
4053                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
4054                          (PV_HOLD_LOCKED | 1));
4055                 if (atomic_cmpset_int(&pv->pv_hold, count,
4056                                       count &
4057                                       ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
4058                         if (count & PV_HOLD_WAITING)
4059                                 wakeup(pv);
4060                         break;
4061                 }
4062         }
4063 }
4064
4065 /*
4066  * Unlock and drop a pv.  If the pv is no longer associated with a pmap
4067  * and the hold count drops to zero we will free it.
4068  *
4069  * Caller should not hold any spin locks.  We are protected from hold races
4070  * by virtue of holds occurring only with a pmap_spin or vm_page_spin
4071  * lock held.  A pv cannot be located otherwise.
4072  */
4073 static
4074 void
4075 pv_put(pv_entry_t pv)
4076 {
4077 #ifdef PMAP_DEBUG2
4078         if (pmap_enter_debug > 0) {
4079                 --pmap_enter_debug;
4080                 kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold);
4081         }
4082 #endif
4083
4084         /*
4085          * Normal put-aways must have a pv_m associated with the pv,
4086          * but allow the case where the pv has been destructed due
4087          * to pmap_dynamic_delete.
4088          */
4089         KKASSERT(pv->pv_pmap == NULL || pv->pv_m != NULL);
4090
4091         /*
4092          * Fast - shortcut most common condition
4093          */
4094         if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
4095                 return;
4096
4097         /*
4098          * Slow
4099          */
4100         pv_unlock(pv);
4101         pv_drop(pv);
4102 }
4103
4104 /*
4105  * Remove the pmap association from a pv, require that pv_m already be removed,
4106  * then unlock and drop the pv.  Any pte operations must have already been
4107  * completed.  This call may result in a last-drop which will physically free
4108  * the pv.
4109  *
4110  * Removing the pmap association entails an additional drop.
4111  *
4112  * pv must be exclusively locked on call and will be disposed of on return.
4113  */
4114 static
4115 void
4116 _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL)
4117 {
4118         pmap_t pmap;
4119
4120 #ifdef PMAP_DEBUG
4121         pv->pv_func_lastfree = func;
4122         pv->pv_line_lastfree = lineno;
4123 #endif
4124         KKASSERT(pv->pv_m == NULL);
4125         KKASSERT((pv->pv_hold & (PV_HOLD_LOCKED|PV_HOLD_MASK)) >=
4126                   (PV_HOLD_LOCKED|1));
4127         if ((pmap = pv->pv_pmap) != NULL) {
4128                 spin_lock(&pmap->pm_spin);
4129                 KKASSERT(pv->pv_pmap == pmap);
4130                 if (pmap->pm_pvhint_pt == pv)
4131                         pmap->pm_pvhint_pt = NULL;
4132                 if (pmap->pm_pvhint_pte == pv)
4133                         pmap->pm_pvhint_pte = NULL;
4134                 pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
4135                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4136                 pv->pv_pmap = NULL;
4137                 pv->pv_pindex = 0;
4138                 spin_unlock(&pmap->pm_spin);
4139
4140                 /*
4141                  * Try to shortcut three atomic ops, otherwise fall through
4142                  * and do it normally.  Drop two refs and the lock all in
4143                  * one go.
4144                  */
4145                 if (pvp)
4146                         vm_page_unwire_quick(pvp->pv_m);
4147                 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
4148 #ifdef PMAP_DEBUG2
4149                         if (pmap_enter_debug > 0) {
4150                                 --pmap_enter_debug;
4151                                 kprintf("pv_free: free pv %p\n", pv);
4152                         }
4153 #endif
4154                         zfree(pvzone, pv);
4155                         return;
4156                 }
4157                 pv_drop(pv);    /* ref for pv_pmap */
4158         }
4159         pv_unlock(pv);
4160         pv_drop(pv);
4161 }
4162
4163 /*
4164  * This routine is very drastic, but can save the system
4165  * in a pinch.
4166  */
4167 void
4168 pmap_collect(void)
4169 {
4170         int i;
4171         vm_page_t m;
4172         static int warningdone=0;
4173
4174         if (pmap_pagedaemon_waken == 0)
4175                 return;
4176         pmap_pagedaemon_waken = 0;
4177         if (warningdone < 5) {
4178                 kprintf("pmap_collect: collecting pv entries -- "
4179                         "suggest increasing PMAP_SHPGPERPROC\n");
4180                 warningdone++;
4181         }
4182
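        /*
         * Brute-force scan of the entire vm_page array, removing all
         * pmap mappings from pages which are neither wired nor held.
         */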
4183         for (i = 0; i < vm_page_array_size; i++) {
4184                 m = &vm_page_array[i];
4185                 if (m->wire_count || m->hold_count)
4186                         continue;
4187                 if (vm_page_busy_try(m, TRUE) == 0) {
4188                         if (m->wire_count == 0 && m->hold_count == 0) {
4189                                 pmap_remove_all(m);
4190                         }
4191                         vm_page_wakeup(m);
4192                 }
4193         }
4194 }
4195
4196 /*
4197  * Scan the pmap for active page table entries and issue a callback.
4198  * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
4199  * its parent page table.
4200  *
4201  * pte_pv will be NULL if the page or page table is unmanaged.
4202  * pt_pv will point to the page table page containing the pte for the page.
4203  *
4204  * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
4205  *       we pass a NULL pte_pv and we pass a pt_pv pointing to the passed
4206  *       process pmap's PD and page to the callback function.  This can be
4207  *       confusing because the pt_pv is really a pd_pv, and the target page
4208  *       table page is simply aliased by the pmap and not owned by it.
4209  *
4210  * It is assumed that the start and end are properly rounded to the page size.
4211  *
4212  * It is assumed that PD pages and above are managed and thus in the RB tree,
4213  * allowing us to use RB_SCAN from the PD pages down for ranged scans.
4214  */
4215 struct pmap_scan_info {
4216         struct pmap *pmap;
4217         vm_offset_t sva;
4218         vm_offset_t eva;
4219         vm_pindex_t sva_pd_pindex;
4220         vm_pindex_t eva_pd_pindex;
4221         void (*func)(pmap_t, struct pmap_scan_info *,
4222                      pv_entry_t, vm_pindex_t *, pv_entry_t,
4223                      int, vm_offset_t,
4224                      pt_entry_t *, void *);
4225         void *arg;
4226         pmap_inval_bulk_t bulk_core;
4227         pmap_inval_bulk_t *bulk;
4228         int count;
4229         int stop;
4230 };
4231
4232 static int pmap_scan_cmp(pv_entry_t pv, void *data);
4233 static int pmap_scan_callback(pv_entry_t pv, void *data);
4234
4235 static void
4236 pmap_scan(struct pmap_scan_info *info, int smp_inval)
4237 {
4238         struct pmap *pmap = info->pmap;
4239         pv_entry_t pd_pv;       /* A page directory PV */
4240         pv_entry_t pt_pv;       /* A page table PV */
4241         pv_entry_t pte_pv;      /* A page table entry PV */
4242         vm_pindex_t *pte_placemark;
4243         vm_pindex_t *pt_placemark;
4244         pt_entry_t *ptep;
4245         pt_entry_t oldpte;
4246         struct pv_entry dummy_pv;
4247
4248         info->stop = 0;
4249         if (pmap == NULL)
4250                 return;
4251         if (info->sva == info->eva)
4252                 return;
4253         if (smp_inval) {
4254                 info->bulk = &info->bulk_core;
4255                 pmap_inval_bulk_init(&info->bulk_core, pmap);
4256         } else {
4257                 info->bulk = NULL;
4258         }
4259
4260         /*
4261          * Hold the token for stability; if the pmap is empty we have nothing
4262          * to do.
4263          */
4264 #if 0
4265         if (pmap->pm_stats.resident_count == 0) {
4266                 return;
4267         }
4268 #endif
4269
4270         info->count = 0;
4271
4272         /*
4273          * Special handling for scanning one page, which is a very common
4274          * operation (it is?).
4275          *
4276          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
4277          */
4278         if (info->sva + PAGE_SIZE == info->eva) {
4279                 if (info->sva >= VM_MAX_USER_ADDRESS) {
4280                         /*
4281                          * Kernel mappings do not track wire counts on
4282                          * page table pages and only maintain pd_pv and
4283                          * pte_pv levels so pmap_scan() works.
4284                          */
4285                         pt_pv = NULL;
4286                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4287                                         &pte_placemark);
4288                         ptep = vtopte(info->sva);
4289                 } else {
4290                         /*
4291                          * User pages which are unmanaged will not have a
4292                          * pte_pv.  User page table pages which are unmanaged
4293                          * (shared from elsewhere) will also not have a pt_pv.
4294                          * The func() callback will pass both pte_pv and pt_pv
4295                          * as NULL in that case.
4296                          *
4297                          * We hold pte_placemark across the operation for
4298                          * unmanaged pages.
4299                          *
4300                          * WARNING!  We must hold pt_placemark across the
4301                          *           *ptep test to prevent misinterpreting
4302                          *           a non-zero *ptep as a shared page
4303                          *           table page.  Hold it across the function
4304                          *           callback as well for SMP safety.
4305                          */
4306                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4307                                         &pte_placemark);
4308                         pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva),
4309                                         &pt_placemark);
4310                         if (pt_pv == NULL) {
4311                                 KKASSERT(pte_pv == NULL);
4312                                 pd_pv = pv_get(pmap,
4313                                                pmap_pd_pindex(info->sva),
4314                                                NULL);
4315                                 if (pd_pv) {
4316                                         ptep = pv_pte_lookup(pd_pv,
4317                                                     pmap_pt_index(info->sva));
4318                                         if (*ptep) {
4319                                                 info->func(pmap, info,
4320                                                      NULL, pt_placemark,
4321                                                      pd_pv, 1,
4322                                                      info->sva, ptep,
4323                                                      info->arg);
4324                                         } else {
4325                                                 pv_placemarker_wakeup(pmap,
4326                                                                   pt_placemark);
4327                                         }
4328                                         pv_put(pd_pv);
4329                                 } else {
4330                                         pv_placemarker_wakeup(pmap,
4331                                                               pt_placemark);
4332                                 }
4333                                 pv_placemarker_wakeup(pmap, pte_placemark);
4334                                 goto fast_skip;
4335                         }
4336                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva));
4337                 }
4338
4339                 /*
4340                  * NOTE: *ptep can't be ripped out from under us if we hold
4341                  *       pte_pv (or pte_placemark) locked, but bits can
4342                  *       change.
4343                  */
4344                 oldpte = *ptep;
4345                 cpu_ccfence();
4346                 if (oldpte == 0) {
4347                         KKASSERT(pte_pv == NULL);
4348                         pv_placemarker_wakeup(pmap, pte_placemark);
4349                 } else if (pte_pv) {
4350                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
4351                                            pmap->pmap_bits[PG_V_IDX])) ==
4352                                 (pmap->pmap_bits[PG_MANAGED_IDX] |
4353                                  pmap->pmap_bits[PG_V_IDX]),
4354                             ("badA *ptep %016lx/%016lx sva %016lx pte_pv %p",
4355                             *ptep, oldpte, info->sva, pte_pv));
4356                         info->func(pmap, info, pte_pv, NULL, pt_pv, 0,
4357                                    info->sva, ptep, info->arg);
4358                 } else {
4359                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
4360                                            pmap->pmap_bits[PG_V_IDX])) ==
4361                             pmap->pmap_bits[PG_V_IDX],
4362                             ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL",
4363                             *ptep, oldpte, info->sva));
4364                         info->func(pmap, info, NULL, pte_placemark, pt_pv, 0,
4365                                    info->sva, ptep, info->arg);
4366                 }
4367                 if (pt_pv)
4368                         pv_put(pt_pv);
4369 fast_skip:
4370                 pmap_inval_bulk_flush(info->bulk);
4371                 return;
4372         }
4373
4374         /*
4375          * Nominal scan case, RB_SCAN() for PD pages and iterate from
4376          * there.
4377          *
4378          * WARNING! eva can overflow our standard ((N + mask) >> bits)
4379          *          bounds, resulting in a pd_pindex of 0.  To solve the
4380          *          problem we use an inclusive range.
4381          */
4382         info->sva_pd_pindex = pmap_pd_pindex(info->sva);
4383         info->eva_pd_pindex = pmap_pd_pindex(info->eva - PAGE_SIZE);
4384
4385         if (info->sva >= VM_MAX_USER_ADDRESS) {
4386                 /*
4387                  * The kernel does not currently maintain any pv_entry's for
4388                  * higher-level page tables.
4389                  */
4390                 bzero(&dummy_pv, sizeof(dummy_pv));
4391                 dummy_pv.pv_pindex = info->sva_pd_pindex;
4392                 spin_lock(&pmap->pm_spin);
4393                 while (dummy_pv.pv_pindex <= info->eva_pd_pindex) {
4394                         pmap_scan_callback(&dummy_pv, info);
4395                         ++dummy_pv.pv_pindex;
4396                         if (dummy_pv.pv_pindex < info->sva_pd_pindex) /*wrap*/
4397                                 break;
4398                 }
4399                 spin_unlock(&pmap->pm_spin);
4400         } else {
4401                 /*
4402                  * User page tables maintain local PML4, PDP, and PD
4403                  * pv_entry's at the very least.  PT pv's might be
4404                  * unmanaged and thus not exist.  PTE pv's might be
4405                  * unmanaged and thus not exist.
4406                  */
4407                 spin_lock(&pmap->pm_spin);
4408                 pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot, pmap_scan_cmp,
4409                                          pmap_scan_callback, info);
4410                 spin_unlock(&pmap->pm_spin);
4411         }
4412         pmap_inval_bulk_flush(info->bulk);
4413 }
4414
4415 /*
4416  * WARNING! pmap->pm_spin held
4417  *
4418  * WARNING! eva can overflow our standard ((N + mask) >> bits)
4419  *          bounds, resulting in a pd_pindex of 0.  To solve the
4420  *          problem we use an inclusive range.
4421  */
4422 static int
4423 pmap_scan_cmp(pv_entry_t pv, void *data)
4424 {
4425         struct pmap_scan_info *info = data;
4426         if (pv->pv_pindex < info->sva_pd_pindex)
4427                 return(-1);
4428         if (pv->pv_pindex > info->eva_pd_pindex)
4429                 return(1);
4430         return(0);
4431 }
4432
4433 /*
4434  * pmap_scan() by PDs
4435  *
4436  * WARNING! pmap->pm_spin held
4437  */
4438 static int
4439 pmap_scan_callback(pv_entry_t pv, void *data)
4440 {
4441         struct pmap_scan_info *info = data;
4442         struct pmap *pmap = info->pmap;
4443         pv_entry_t pd_pv;       /* A page directory PV */
4444         pv_entry_t pt_pv;       /* A page table PV */
4445         vm_pindex_t *pt_placemark;
4446         pt_entry_t *ptep;
4447         pt_entry_t oldpte;
4448         vm_offset_t sva;
4449         vm_offset_t eva;
4450         vm_offset_t va_next;
4451         vm_pindex_t pd_pindex;
4452         int error;
4453
4454         /*
4455          * Stop if requested
4456          */
4457         if (info->stop)
4458                 return -1;
4459
4460         /*
4461          * Pull the PD pindex from the pv before releasing the spinlock.
4462          *
4463          * WARNING: pv is faked for kernel pmap scans.
4464          */
4465         pd_pindex = pv->pv_pindex;
4466         spin_unlock(&pmap->pm_spin);
4467         pv = NULL;      /* invalid after spinlock unlocked */
4468
4469         /*
4470          * Calculate the page range within the PD.  SIMPLE pmaps are
4471          * direct-mapped for the entire 2^64 address space.  Normal pmaps
4472          * reflect the user and kernel address space which requires
4473          * canonicalization with regard to converting pd_pindex's back
4474          * into addresses.
4475          */
4476         sva = (pd_pindex - pmap_pd_pindex(0)) << PDPSHIFT;
4477         if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
4478             (sva & PML4_SIGNMASK)) {
4479                 sva |= PML4_SIGNMASK;
4480         }
4481         eva = sva + NBPDP;      /* can overflow */
4482         if (sva < info->sva)
4483                 sva = info->sva;
4484         if (eva < info->sva || eva > info->eva)
4485                 eva = info->eva;
4486
4487         /*
4488          * NOTE: kernel mappings do not track page table pages, only
4489          *       terminal pages.
4490          *
4491          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
4492          *       However, for the scan to be efficient we try to
4493          *       cache items top-down.
4494          */
4495         pd_pv = NULL;
4496         pt_pv = NULL;
4497
4498         for (; sva < eva; sva = va_next) {
4499                 if (info->stop)
4500                         break;
4501                 if (sva >= VM_MAX_USER_ADDRESS) {
4502                         if (pt_pv) {
4503                                 pv_put(pt_pv);
4504                                 pt_pv = NULL;
4505                         }
4506                         goto kernel_skip;
4507                 }
4508
4509                 /*
4510                  * PD cache, scan shortcut if it doesn't exist.
4511                  */
4512                 if (pd_pv == NULL) {
4513                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4514                 } else if (pd_pv->pv_pmap != pmap ||
4515                            pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
4516                         pv_put(pd_pv);
4517                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4518                 }
4519                 if (pd_pv == NULL) {
4520                         va_next = (sva + NBPDP) & ~PDPMASK;
4521                         if (va_next < sva)
4522                                 va_next = eva;
4523                         continue;
4524                 }
4525
4526                 /*
4527                  * PT cache
4528                  *
4529                  * NOTE: The cached pt_pv can be removed from the pmap when
4530                  *       pmap_dynamic_delete is enabled.
4531                  */
4532                 if (pt_pv && (pt_pv->pv_pmap != pmap ||
4533                               pt_pv->pv_pindex != pmap_pt_pindex(sva))) {
4534                         pv_put(pt_pv);
4535                         pt_pv = NULL;
4536                 }
4537                 if (pt_pv == NULL) {
4538                         pt_pv = pv_get_try(pmap, pmap_pt_pindex(sva),
4539                                            &pt_placemark, &error);
4540                         if (error) {
4541                                 pv_put(pd_pv);  /* lock order */
4542                                 pd_pv = NULL;
4543                                 if (pt_pv) {
4544                                         pv_lock(pt_pv);
4545                                         pv_put(pt_pv);
4546                                         pt_pv = NULL;
4547                                 } else {
4548                                         pv_placemarker_wait(pmap, pt_placemark);
4549                                 }
4550                                 va_next = sva;          /* retry */
4551                                 continue;
4552                         }
4553                         /* may have to re-check later if pt_pv is NULL here */
4554                 }
4555
4556                 /*
4557                  * If pt_pv is NULL we either have a shared page table
4558                  * page and must issue a callback specific to that case,
4559                  * or there is no page table page.
4560                  *
4561                  * Either way we can skip the page table page.
4562                  *
4563                  * WARNING! pt_pv can also be NULL due to a pv creation
4564                  *          race where we find it to be NULL and then
4565                  *          later see a pte_pv.  But it's possible the pt_pv
4566                  *          got created in between the two operations, so
4567                  *          we must check.
4568                  */
4569                 if (pt_pv == NULL) {
4570                         /*
4571                          * Possible unmanaged (shared from another pmap)
4572                          * page table page.
4573                          *
4574                          * WARNING!  We must hold pt_placemark across the
4575                          *           *ptep test to prevent misinterpreting
4576                          *           a non-zero *ptep as a shared page
4577                          *           table page.  Hold it across the function
4578                          *           callback as well for SMP safety.
4579                          */
4580                         ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
4581                         if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
4582                                 info->func(pmap, info, NULL, pt_placemark,
4583                                            pd_pv, 1,
4584                                            sva, ptep, info->arg);
4585                         } else {
4586                                 pv_placemarker_wakeup(pmap, pt_placemark);
4587                         }
4588
4589                         /*
4590                          * Done, move to next page table page.
4591                          */
4592                         va_next = (sva + NBPDR) & ~PDRMASK;
4593                         if (va_next < sva)
4594                                 va_next = eva;
4595                         continue;
4596                 }
4597
4598                 /*
4599                  * From this point in the loop a non-NULL pt_pv means we are
4600                  * in UVM and a NULL pt_pv means we are in KVM.
4601                  *
4602                  * Limit our scan to either the end of the va represented
4603                  * by the current page table page, or to the end of the
4604                  * range being removed.
4605                  */
4606 kernel_skip:
4607                 va_next = (sva + NBPDR) & ~PDRMASK;
4608                 if (va_next < sva)
4609                         va_next = eva;
4610                 if (va_next > eva)
4611                         va_next = eva;
4612
4613                 /*
4614                  * Scan the page table for pages.  Some pages may not be
4615                  * managed (might not have a pv_entry).
4616                  *
4617                  * There is no page table management for kernel pages so
4618                  * pt_pv will be NULL in that case, but otherwise pt_pv
4619                  * is non-NULL, locked, and referenced.
4620                  */
4621
4622                 /*
4623                  * At this point a non-NULL pt_pv means a UVA, and a NULL
4624                  * pt_pv means a KVA.
4625                  */
4626                 if (pt_pv)
4627                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
4628                 else
4629                         ptep = vtopte(sva);
4630
4631                 while (sva < va_next) {
4632                         pv_entry_t pte_pv;
4633                         vm_pindex_t *pte_placemark;
4634
4635                         /*
4636                          * Yield every 64 pages, stop if requested.
4637                          */
4638                         if ((++info->count & 63) == 0)
4639                                 lwkt_user_yield();
4640                         if (info->stop)
4641                                 break;
4642
4643                         /*
4644                          * We can shortcut our scan if *ptep == 0.  This is
4645                          * an unlocked check.
4646                          */
4647                         if (*ptep == 0) {
4648                                 sva += PAGE_SIZE;
4649                                 ++ptep;
4650                                 continue;
4651                         }
4652                         cpu_ccfence();
4653
4654                         /*
4655                          * Acquire the related pte_pv, if any.  If *ptep == 0
4656                          * the related pte_pv should not exist, but if *ptep
4657                          * is not zero the pte_pv may or may not exist (e.g.
4658                          * will not exist for an unmanaged page).
4659                          *
4660                          * However a multitude of races are possible here
4661                          * so if we cannot lock definite state we clean out
4662                          * our cache and break the inner while() loop to
4663                          * force a loop up to the top of the for().
4664                          *
4665                          * XXX unlock/relock pd_pv, pt_pv, and re-test their
4666                          *     validity instead of looping up?
4667                          */
4668                         pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
4669                                             &pte_placemark, &error);
4670                         if (error) {
4671                                 if (pd_pv) {
4672                                         pv_put(pd_pv);  /* lock order */
4673                                         pd_pv = NULL;
4674                                 }
4675                                 if (pt_pv) {
4676                                         pv_put(pt_pv);  /* lock order */
4677                                         pt_pv = NULL;
4678                                 }
4679                                 if (pte_pv) {           /* block */
4680                                         pv_lock(pte_pv);
4681                                         pv_put(pte_pv);
4682                                         pte_pv = NULL;
4683                                 } else {
4684                                         pv_placemarker_wait(pmap,
4685                                                         pte_placemark);
4686                                 }
4687                                 va_next = sva;          /* retry */
4688                                 break;
4689                         }
4690
4691                         /*
4692                          * Reload *ptep after successfully locking the
4693                          * pindex.  If *ptep == 0 we had better NOT have a
4694                          * pte_pv.
4695                          */
4696                         cpu_ccfence();
4697                         oldpte = *ptep;
4698                         if (oldpte == 0) {
4699                                 if (pte_pv) {
4700                                         kprintf("Unexpected non-NULL pte_pv "
4701                                                 "%p pt_pv %p "
4702                                                 "*ptep = %016lx/%016lx\n",
4703                                                 pte_pv, pt_pv, *ptep, oldpte);
4704                                         panic("Unexpected non-NULL pte_pv");
4705                                 } else {
4706                                         pv_placemarker_wakeup(pmap, pte_placemark);
4707                                 }
4708                                 sva += PAGE_SIZE;
4709                                 ++ptep;
4710                                 continue;
4711                         }
4712
4713                         /*
4714                          * We can't hold pd_pv across the callback (because
4715                          * we don't pass it to the callback and the callback
4716                          * might deadlock)
4717                          */
4718                         if (pd_pv) {
4719                                 vm_page_wire_quick(pd_pv->pv_m);
4720                                 pv_unlock(pd_pv);
4721                         }
4722
4723                         /*
4724                          * Ready for the callback.  The locked pte_pv (if any)
4725                          * is consumed by the callback.  pte_pv will exist if
4726                          * the page is managed, and will not exist if it
4727                          * isn't.
4728                          */
4729                         if (oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) {
4730                                 /*
4731                                  * Managed pte
4732                                  */
4733                                 KASSERT(pte_pv &&
4734                                          (oldpte & pmap->pmap_bits[PG_V_IDX]),
4735                                     ("badC *ptep %016lx/%016lx sva %016lx "
4736                                     "pte_pv %p",
4737                                     *ptep, oldpte, sva, pte_pv));
4738                                 /*
4739                                  * We must unlock pd_pv across the callback
4740                                  * to avoid deadlocks on any recursive
4741                                  * disposal.  Re-check that it still exists
4742                                  * after re-locking.
4743                                  *
4744                                  * Call target disposes of pte_pv and may
4745                                  * destroy but will not dispose of pt_pv.
4746                                  */
4747                                 info->func(pmap, info, pte_pv, NULL,
4748                                            pt_pv, 0,
4749                                            sva, ptep, info->arg);
4750                         } else {
4751                                 /*
4752                                  * Unmanaged pte
4753                                  *
4754                                  * We must unlock pd_pv across the callback
4755                                  * to avoid deadlocks on any recursive
4756                                  * disposal.  Re-check that it still exists
4757                                  * after re-locking.
4758                                  *
4759                                  * Call target disposes of pte_pv or
4760                                  * pte_placemark and may destroy but will
4761                                  * not dispose of pt_pv.
4762                                  */
4763                                 KASSERT(pte_pv == NULL &&
4764                                         (oldpte & pmap->pmap_bits[PG_V_IDX]),
4765                                     ("badD *ptep %016lx/%016lx sva %016lx "
4766                                     "pte_pv %p pte_pv->pv_m %p ",
4767                                      *ptep, oldpte, sva,
4768                                      pte_pv, (pte_pv ? pte_pv->pv_m : NULL)));
4769                                 if (pte_pv)
4770                                         kprintf("RaceD\n");
4771                                 if (pte_pv) {
4772                                         info->func(pmap, info,
4773                                                    pte_pv, NULL,
4774                                                    pt_pv, 0,
4775                                                    sva, ptep, info->arg);
4776                                 } else {
4777                                         info->func(pmap, info,
4778                                                    NULL, pte_placemark,
4779                                                    pt_pv, 0,
4780                                                    sva, ptep, info->arg);
4781                                 }
4782                         }
4783                         if (pd_pv) {
4784                                 pv_lock(pd_pv);
4785                                 vm_page_unwire_quick(pd_pv->pv_m);
4786                                 if (pd_pv->pv_pmap == NULL) {
4787                                         va_next = sva;          /* retry */
4788                                         break;
4789                                 }
4790                         }
4791
4792                         /*
4793                          * NOTE: The cached pt_pv can be removed from the
4794                          *       pmap when pmap_dynamic_delete is enabled,
4795                          *       which will cause ptep to become stale.
4796                          *
4797                          *       This also means that no pages remain under
4798                          *       the PT, so we can just break out of the inner
4799                          *       loop and let the outer loop clean everything
4800                          *       up.
4801                          */
4802                         if (pt_pv && pt_pv->pv_pmap != pmap)
4803                                 break;
4804                         pte_pv = NULL;
4805                         sva += PAGE_SIZE;
4806                         ++ptep;
4807                 }
4808         }
4809         if (pd_pv) {
4810                 pv_put(pd_pv);
4811                 pd_pv = NULL;
4812         }
4813         if (pt_pv) {
4814                 pv_put(pt_pv);
4815                 pt_pv = NULL;
4816         }
4817         if ((++info->count & 7) == 0)
4818                 lwkt_user_yield();
4819
4820         /*
4821          * Relock before returning.
4822          */
4823         spin_lock(&pmap->pm_spin);
4824         return (0);
4825 }
4826
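/*
 * Remove the specified range of addresses from the pmap.
 *
 * This runs pmap_scan() with SMP invalidation enabled and uses
 * pmap_remove_callback() to dispose of each pte.
 */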
4827 void
4828 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4829 {
4830         struct pmap_scan_info info;
4831
4832         info.pmap = pmap;
4833         info.sva = sva;
4834         info.eva = eva;
4835         info.func = pmap_remove_callback;
4836         info.arg = NULL;
4837         pmap_scan(&info, 1);
4838 #if 0
4839         cpu_invltlb();
4840         if (eva - sva < 1024*1024) {
4841                 while (sva < eva) {
4842                         cpu_invlpg((void *)sva);
4843                         sva += PAGE_SIZE;
4844                 }
4845         }
4846 #endif
4847 }
4848
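/*
 * Same as pmap_remove() but the scan is run without SMP invalidation
 * (pmap_scan() is called with smp_inval = 0).
 */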
4849 static void
4850 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4851 {
4852         struct pmap_scan_info info;
4853
4854         info.pmap = pmap;
4855         info.sva = sva;
4856         info.eva = eva;
4857         info.func = pmap_remove_callback;
4858         info.arg = NULL;
4859         pmap_scan(&info, 0);
4860 }
4861
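/*
 * pmap_scan() callback for pmap_remove() and pmap_remove_noinval().
 * Disposes of the pte and its pv_entry or placemarker, handling the
 * managed, unmanaged, and shared page table cases separately.
 */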
4862 static void
4863 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
4864                      pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
4865                      pv_entry_t pt_pv, int sharept,
4866                      vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
4867 {
4868         pt_entry_t pte;
4869
4870         if (pte_pv) {
4871                 /*
4872                  * Managed entry
4873                  *
4874                  * This will also drop pt_pv's wire_count. Note that
4875                  * terminal pages are not wired based on mmu presence.
4876                  *
4877                  * NOTE: If this is the kernel_pmap, pt_pv can be NULL.
4878                  */
4879                 KKASSERT(pte_pv->pv_m != NULL);
4880                 pmap_remove_pv_pte(pte_pv, pt_pv, info->bulk, 2);
4881                 pte_pv = NULL;  /* safety */
4882
4883                 /*
4884                  * Recursively destroy higher-level page tables.
4885                  *
4886                  * This is optional.  If we do not, they will still
4887                  * be destroyed when the process exits.
4888                  *
4889                  * NOTE: Do not destroy pv_entry's with extra hold refs,
4890                  *       a caller may have unlocked it and intends to
4891                  *       continue to use it.
4892                  */
4893                 if (pmap_dynamic_delete &&
4894                     pt_pv &&
4895                     pt_pv->pv_m &&
4896                     pt_pv->pv_m->wire_count == 1 &&
4897                     (pt_pv->pv_hold & PV_HOLD_MASK) == 2 &&
4898                     pt_pv->pv_pindex < pmap_pml4_pindex()) {
4899                         if (pmap_dynamic_delete == 2)
4900                                 kprintf("B %jd %08x\n", pt_pv->pv_pindex, pt_pv->pv_hold);
4901                         pv_hold(pt_pv); /* extra hold */
4902                         pmap_remove_pv_pte(pt_pv, NULL, info->bulk, 1);
4903                         pv_lock(pt_pv); /* prior extra hold + relock */
4904                 }
4905         } else if (sharept == 0) {
4906                 /*
4907                  * Unmanaged pte (pte_placemark is non-NULL)
4908                  *
4909                  * pt_pv's wire_count is still bumped by unmanaged pages
4910                  * so we must decrement it manually.
4911                  *
4912                  * We have to unwire the target page table page.
4913                  */
4914                 pte = pmap_inval_bulk(info->bulk, va, ptep, 0);
4915                 if (pte & pmap->pmap_bits[PG_W_IDX])
4916                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
4917                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4918                 if (vm_page_unwire_quick(pt_pv->pv_m))
4919                         panic("pmap_remove: insufficient wirecount");
4920                 pv_placemarker_wakeup(pmap, pte_placemark);
4921         } else {
4922                 /*
4923                  * Unmanaged page table (pt, pd, or pdp; not pte) for
4924                  * a shared page table.
4925                  *
4926                  * pt_pv is actually the pd_pv for our pmap (not the shared
4927                  * object pmap).
4928                  *
4929                  * We have to unwire the target page table page and we
4930                  * have to unwire our page directory page.
4931                  *
4932                  * It is unclear how we can invalidate a segment so we
4933                  * invalidate (vm_offset_t)-1, which invalidates the tlb.
4934                  */
4935                 pte = pmap_inval_bulk(info->bulk, (vm_offset_t)-1, ptep, 0);
4936                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4937                 KKASSERT((pte & pmap->pmap_bits[PG_DEVICE_IDX]) == 0);
4938                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
4939                         panic("pmap_remove: shared pgtable1 bad wirecount");
4940                 if (vm_page_unwire_quick(pt_pv->pv_m))
4941                         panic("pmap_remove: shared pgtable2 bad wirecount");
4942                 pv_placemarker_wakeup(pmap, pte_placemark);
4943         }
4944 }
4945
4946 /*
4947  * Removes this physical page from all physical maps in which it resides.
4948  * Reflects back modify bits to the pager.
4949  *
4950  * This routine may not be called from an interrupt.
4951  */
4952 static
4953 void
4954 pmap_remove_all(vm_page_t m)
4955 {
4956         pv_entry_t pv;
4957         pmap_inval_bulk_t bulk;
4958
4959         if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
4960                 return;
4961
4962         vm_page_spin_lock(m);
4963         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4964                 KKASSERT(pv->pv_m == m);
4965                 if (pv_hold_try(pv)) {
4966                         vm_page_spin_unlock(m);
4967                 } else {
4968                         vm_page_spin_unlock(m);
4969                         pv_lock(pv);
4970                         pv_put(pv);
4971                         vm_page_spin_lock(m);
4972                         continue;
4973                 }
4974                 KKASSERT(pv->pv_pmap && pv->pv_m == m);
4975
4976                 /*
4977                  * Holding no spinlocks, pv is locked.  Once we scrap
4978                  * pv we can no longer use it as a list iterator (but
4979                  * we are doing a TAILQ_FIRST() so we are ok).
4980                  */
4981                 pmap_inval_bulk_init(&bulk, pv->pv_pmap);
4982                 pmap_remove_pv_pte(pv, NULL, &bulk, 2);
4983                 pv = NULL;      /* safety */
4984                 pmap_inval_bulk_flush(&bulk);
4985                 vm_page_spin_lock(m);
4986         }
4987         KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
4988         vm_page_spin_unlock(m);
4989 }
4990
4991 /*
4992  * Removes the page from a particular pmap
4993  */
4994 void
4995 pmap_remove_specific(pmap_t pmap, vm_page_t m)
4996 {
4997         pv_entry_t pv;
4998         pmap_inval_bulk_t bulk;
4999
5000         if (!pmap_initialized)
5001                 return;
5002
5003 again:
5004         vm_page_spin_lock(m);
5005         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5006                 if (pv->pv_pmap != pmap)
5007                         continue;
5008                 KKASSERT(pv->pv_m == m);
5009                 if (pv_hold_try(pv)) {
5010                         vm_page_spin_unlock(m);
5011                 } else {
5012                         vm_page_spin_unlock(m);
5013                         pv_lock(pv);
5014                         pv_put(pv);
5015                         goto again;
5016                 }
5017                 KKASSERT(pv->pv_pmap == pmap && pv->pv_m == m);
5018
5019                 /*
5020                  * Holding no spinlocks, pv is locked.  Once gone it can't
5021                  * be used as an iterator.  In fact, because we couldn't
5022                  * necessarily lock it atomically it may have moved within
5023                  * the list and ALSO cannot be used as an iterator.
5024                  */
5025                 pmap_inval_bulk_init(&bulk, pv->pv_pmap);
5026                 pmap_remove_pv_pte(pv, NULL, &bulk, 2);
5027                 pv = NULL;      /* safety */
5028                 pmap_inval_bulk_flush(&bulk);
5029                 goto again;
5030         }
5031         vm_page_spin_unlock(m);
5032 }
5033
5034 /*
5035  * Set the physical protection on the specified range of this map
5036  * as requested.  This function is typically only used for debug watchpoints
5037  * and COW pages.
5038  *
5039  * This function may not be called from an interrupt if the map is
5040  * not the kernel_pmap.
5041  *
5042  * NOTE!  For shared page table pages we just unmap the page.
5043  */
5044 void
5045 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
5046 {
5047         struct pmap_scan_info info;
5048         /* JG review for NX */
5049
5050         if (pmap == NULL)
5051                 return;
5052         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == VM_PROT_NONE) {
5053                 pmap_remove(pmap, sva, eva);
5054                 return;
5055         }
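        /*
         * Nothing to do if write access is being retained; this routine
         * only removes access, it never grants it.
         */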
5056         if (prot & VM_PROT_WRITE)
5057                 return;
5058         info.pmap = pmap;
5059         info.sva = sva;
5060         info.eva = eva;
5061         info.func = pmap_protect_callback;
5062         info.arg = &prot;
5063         pmap_scan(&info, 1);
5064 }
5065
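/*
 * pmap_scan() callback for pmap_protect().  Synchronizes the A/M bits
 * back to the vm_page for managed pages and removes write access from
 * the pte; shared page table pages are unmapped entirely.
 */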
5066 static
5067 void
5068 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
5069                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
5070                       pv_entry_t pt_pv, int sharept,
5071                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
5072 {
5073         pt_entry_t pbits;
5074         pt_entry_t cbits;
5075         pt_entry_t pte;
5076         vm_page_t m;
5077
5078 again:
5079         pbits = *ptep;
5080         cbits = pbits;
5081         if (pte_pv) {
5082                 KKASSERT(pte_pv->pv_m != NULL);
5083                 m = NULL;
5084                 if (pbits & pmap->pmap_bits[PG_A_IDX]) {
5085                         if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
5086                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
5087                                 KKASSERT(m == pte_pv->pv_m);
5088                                 vm_page_flag_set(m, PG_REFERENCED);
5089                         }
5090                         cbits &= ~pmap->pmap_bits[PG_A_IDX];
5091                 }
5092                 if (pbits & pmap->pmap_bits[PG_M_IDX]) {
5093                         if (pmap_track_modified(pte_pv->pv_pindex)) {
5094                                 if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
5095                                         if (m == NULL) {
5096                                                 m = PHYS_TO_VM_PAGE(pbits &
5097                                                                     PG_FRAME);
5098                                         }
5099                                         vm_page_dirty(m);
5100                                 }
5101                                 cbits &= ~pmap->pmap_bits[PG_M_IDX];
5102                         }
5103                 }
5104         } else if (sharept) {
5105                 /*
5106                  * Unmanaged page table, pt_pv is actually the pd_pv
5107                  * for our pmap (not the object's shared pmap).
5108                  *
5109                  * When asked to protect something in a shared page table
5110                  * page we just unmap the page table page.  We have to
5111                  * invalidate the tlb in this situation.
5112                  *
5113                  * XXX Warning, shared page tables will not be used for
5114                  * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings
5115                  * so PHYS_TO_VM_PAGE() should be safe here.
5116                  */
5117                 pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, ptep, 0);
5118                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
5119                         panic("pmap_protect: pgtable1 pg bad wirecount");
5120                 if (vm_page_unwire_quick(pt_pv->pv_m))
5121                         panic("pmap_protect: pgtable2 pg bad wirecount");
5122                 ptep = NULL;
5123         }
5124         /* else unmanaged page, adjust bits, no wire changes */
5125
5126         if (ptep) {
5127                 cbits &= ~pmap->pmap_bits[PG_RW_IDX];
5128 #ifdef PMAP_DEBUG2
5129                 if (pmap_enter_debug > 0) {
5130                         --pmap_enter_debug;
5131                         kprintf("pmap_protect va=%lx ptep=%p pte_pv=%p "
5132                                 "pt_pv=%p cbits=%08lx\n",
5133                                 va, ptep, pte_pv,
5134                                 pt_pv, cbits
5135                         );
5136                 }
5137 #endif
5138                 if (pbits != cbits) {
5139                         vm_offset_t xva;
5140
5141                         xva = (sharept) ? (vm_offset_t)-1 : va;
5142                         if (!pmap_inval_smp_cmpset(pmap, xva,
5143                                                    ptep, pbits, cbits)) {
5144                                 goto again;
5145                         }
5146                 }
5147         }
5148         if (pte_pv)
5149                 pv_put(pte_pv);
5150         else
5151                 pv_placemarker_wakeup(pmap, pte_placemark);
5152 }
5153
5154 /*
5155  * Insert the vm_page (m) at the virtual address (va), replacing any prior
5156  * mapping at that address.  Set protection and wiring as requested.
5157  *
5158  * If entry is non-NULL we check to see if the SEG_SIZE optimization is
5159  * possible.  If it is we enter the page into the appropriate shared pmap
5160  * hanging off the related VM object instead of the passed pmap, then we
5161  * share the page table page from the VM object's pmap into the current pmap.
5162  *
5163  * NOTE: This routine MUST insert the page into the pmap now, it cannot
5164  *       lazy-evaluate.
5165  *
5166  * NOTE: If (m) is PG_UNMANAGED it may also be a temporary fake vm_page_t;
5167  *       we must never record it.
5168  */
5169 void
5170 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
5171            boolean_t wired, vm_map_entry_t entry)
5172 {
5173         pv_entry_t pt_pv;       /* page table */
5174         pv_entry_t pte_pv;      /* page table entry */
5175         vm_pindex_t *pte_placemark;
5176         pt_entry_t *ptep;
5177         vm_paddr_t opa;
5178         pt_entry_t origpte, newpte;
5179         vm_paddr_t pa;
5180
5181         if (pmap == NULL)
5182                 return;
5183         va = trunc_page(va);
5184 #ifdef PMAP_DIAGNOSTIC
5185         if (va >= KvaEnd)
5186                 panic("pmap_enter: toobig");
5187         if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
5188                 panic("pmap_enter: invalid to pmap_enter page table "
5189                       "pages (va: 0x%lx)", va);
5190 #endif
5191         if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
5192                 kprintf("Warning: pmap_enter called on UVA with "
5193                         "kernel_pmap\n");
5194 #ifdef DDB
5195                 db_print_backtrace();
5196 #endif
5197         }
5198         if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
5199                 kprintf("Warning: pmap_enter called on KVA without "
5200                         "kernel_pmap\n");
5201 #ifdef DDB
5202                 db_print_backtrace();
5203 #endif
5204         }
5205
5206         /*
5207          * Get locked PV entries for our new page table entry (pte_pv or
5208          * pte_placemark) and for its parent page table (pt_pv).  We need
5209          * the parent so we can resolve the location of the ptep.
5210          *
5211          * Only hardware MMU actions can modify the ptep out from
5212          * under us.
5213          *
5214          * if (m) is fictitious or unmanaged we do not create a managing
5215          * pte_pv for it.  Any pre-existing page's management state must
5216          * match (avoiding code complexity).
5217          *
5218          * If the pmap is still being initialized we assume existing
5219          * page tables.
5220          *
5221          * Kernel mappings do not track page table pages (i.e. pt_pv).
5222          *
5223          * WARNING! If replacing a managed mapping with an unmanaged mapping
5224          *          pte_pv will wind up being non-NULL and must be handled
5225          *          below.
5226          */
5227         if (pmap_initialized == FALSE) {
5228                 pte_pv = NULL;
5229                 pt_pv = NULL;
5230                 pte_placemark = NULL;
5231                 ptep = vtopte(va);
5232                 origpte = *ptep;
5233         } else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */
5234                 pmap_softwait(pmap);
5235                 pte_pv = pv_get(pmap, pmap_pte_pindex(va), &pte_placemark);
5236                 KKASSERT(pte_pv == NULL);
5237                 if (va >= VM_MAX_USER_ADDRESS) {
5238                         pt_pv = NULL;
5239                         ptep = vtopte(va);
5240                 } else {
5241                         pt_pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va),
5242                                                   NULL, entry, va);
5243                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5244                 }
5245                 origpte = *ptep;
5246                 cpu_ccfence();
5247                 KASSERT(origpte == 0 ||
5248                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0,
5249                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
5250         } else {
5251                 pmap_softwait(pmap);
5252                 if (va >= VM_MAX_USER_ADDRESS) {
5253                         /*
5254                          * Kernel map, pv_entry-tracked.
5255                          */
5256                         pt_pv = NULL;
5257                         pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), NULL);
5258                         ptep = vtopte(va);
5259                 } else {
5260                         /*
5261                          * User map
5262                          */
5263                         pte_pv = pmap_allocpte_seg(pmap, pmap_pte_pindex(va),
5264                                                    &pt_pv, entry, va);
5265                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5266                 }
5267                 pte_placemark = NULL;   /* safety */
5268                 origpte = *ptep;
5269                 cpu_ccfence();
5270                 KASSERT(origpte == 0 ||
5271                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]),
5272                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
5273         }
5274
5275         pa = VM_PAGE_TO_PHYS(m);
5276         opa = origpte & PG_FRAME;
5277
5278         /*
5279          * Calculate the new PTE.  Note that pte_pv alone does not mean
5280          * the new pte_pv is managed, it could exist because the old pte
5281          * was managed even if the new one is not.
5282          */
5283         newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
5284                  pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
5285         if (wired)
5286                 newpte |= pmap->pmap_bits[PG_W_IDX];
5287         if (va < VM_MAX_USER_ADDRESS)
5288                 newpte |= pmap->pmap_bits[PG_U_IDX];
5289         if (pte_pv && (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) == 0)
5290                 newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
5291 //      if (pmap == &kernel_pmap)
5292 //              newpte |= pgeflag;
5293         newpte |= pmap->pmap_cache_bits[m->pat_mode];
5294         if (m->flags & PG_FICTITIOUS)
5295                 newpte |= pmap->pmap_bits[PG_DEVICE_IDX];
5296
5297         /*
5298          * It is possible for multiple faults to occur in threaded
5299          * environments, the existing pte might be correct.
5300          */
5301         if (((origpte ^ newpte) &
5302             ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
5303                           pmap->pmap_bits[PG_A_IDX])) == 0) {
5304                 goto done;
5305         }
5306
5307         /*
5308          * Ok, either the address changed or the protection or wiring
5309          * changed.
5310          *
5311          * Clear the current entry, interlocking the removal.  For managed
5312          * pte's this will also flush the modified state to the vm_page.
5313          * Atomic ops are mandatory in order to ensure that PG_M events are
5314          * not lost during any transition.
5315          *
5316          * WARNING: The caller has busied the new page but not the original
5317          *          vm_page which we are trying to replace.  Because we hold
5318          *          the pte_pv lock, but have not busied the page, PG bits
5319          *          can be cleared out from under us.
5320          */
5321         if (opa) {
5322                 if (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) {
5323                         /*
5324                          * Old page was managed.  Expect pte_pv to exist.
5325                          * (it might also exist if the old page was unmanaged).
5326                          *
5327                          * NOTE: pt_pv won't exist for a kernel page
5328                          *       (managed or otherwise).
5329                          *
5330                          * NOTE: We may be reusing the pte_pv so we do not
5331                          *       destroy it in pmap_remove_pv_pte().
5332                          */
5333                         KKASSERT(pte_pv && pte_pv->pv_m);
5334                         if (prot & VM_PROT_NOSYNC) {
5335                                 pmap_remove_pv_pte(pte_pv, pt_pv, NULL, 0);
5336                         } else {
5337                                 pmap_inval_bulk_t bulk;
5338
5339                                 pmap_inval_bulk_init(&bulk, pmap);
5340                                 pmap_remove_pv_pte(pte_pv, pt_pv, &bulk, 0);
5341                                 pmap_inval_bulk_flush(&bulk);
5342                         }
5343                         pmap_remove_pv_page(pte_pv);
5344                         /* will either set pte_pv->pv_m or pv_free() later */
5345                 } else {
5346                         /*
5347                          * Old page was not managed.  If we have a pte_pv
5348                          * it better not have a pv_m assigned to it.  If the
5349                          * new page is managed the pte_pv will be destroyed
5350                          * near the end (we need its interlock).
5351                          *
5352                          * NOTE: We leave the wire count on the PT page
5353                          *       intact for the followup enter, but adjust
5354                          *       the wired-pages count on the pmap.
5355                          */
5356                         KKASSERT(pte_pv == NULL);
5357                         if (prot & VM_PROT_NOSYNC) {
5358                                 /*
5359                                  * NOSYNC (no mmu sync) requested.
5360                                  */
5361                                 (void)pte_load_clear(ptep);
5362                                 cpu_invlpg((void *)va);
5363                         } else {
5364                                 /*
5365                                  * Nominal SYNC
5366                                  */
5367                                 pmap_inval_smp(pmap, va, 1, ptep, 0);
5368                         }
5369
5370                         /*
5371                          * We must adjust pm_stats manually for unmanaged
5372                          * pages.
5373                          */
5374                         if (pt_pv) {
5375                                 atomic_add_long(&pmap->pm_stats.
5376                                                 resident_count, -1);
5377                         }
5378                         if (origpte & pmap->pmap_bits[PG_W_IDX]) {
5379                                 atomic_add_long(&pmap->pm_stats.
5380                                                 wired_count, -1);
5381                         }
5382                 }
5383                 KKASSERT(*ptep == 0);
5384         }
5385
5386 #ifdef PMAP_DEBUG2
5387         if (pmap_enter_debug > 0) {
5388                 --pmap_enter_debug;
5389                 kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p"
5390                         " pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n",
5391                         va, m,
5392                         origpte, newpte, ptep,
5393                         pte_pv, pt_pv, opa, prot);
5394         }
5395 #endif
5396
5397         if ((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5398                 /*
5399                  * Entering an unmanaged page.  We must wire the pt_pv unless
5400                  * we retained the wiring from an unmanaged page we had
5401                  * removed (if we retained it via pte_pv that will go away
5402                  * soon).
5403                  */
5404                 if (pt_pv && (opa == 0 ||
5405                               (origpte & pmap->pmap_bits[PG_MANAGED_IDX]))) {
5406                         vm_page_wire_quick(pt_pv->pv_m);
5407                 }
5408                 if (wired)
5409                         atomic_add_long(&pmap->pm_stats.wired_count, 1);
5410
5411                 /*
5412                  * Unmanaged pages need manual resident_count tracking.
5413                  */
5414                 if (pt_pv) {
5415                         atomic_add_long(&pt_pv->pv_pmap->pm_stats.
5416                                         resident_count, 1);
5417                 }
5418                 if (newpte & pmap->pmap_bits[PG_RW_IDX])
5419                         vm_page_flag_set(m, PG_WRITEABLE);
5420         } else {
5421                 /*
5422                  * Entering a managed page.  Our pte_pv takes care of the
5423                  * PT wiring, so if we had removed an unmanaged page before
5424                  * we must adjust.
5425                  *
5426                  * We have to take care of the pmap wired count ourselves.
5427                  *
5428                  * Enter on the PV list if part of our managed memory.
5429                  */
5430                 KKASSERT(pte_pv && (pte_pv->pv_m == NULL || pte_pv->pv_m == m));
5431                 vm_page_spin_lock(m);
5432                 pte_pv->pv_m = m;
5433                 pmap_page_stats_adding(m);
5434                 TAILQ_INSERT_TAIL(&m->md.pv_list, pte_pv, pv_list);
5435                 vm_page_flag_set(m, PG_MAPPED);
5436                 if (newpte & pmap->pmap_bits[PG_RW_IDX])
5437                         vm_page_flag_set(m, PG_WRITEABLE);
5438                 vm_page_spin_unlock(m);
5439
5440                 if (pt_pv && opa &&
5441                     (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5442                         vm_page_unwire_quick(pt_pv->pv_m);
5443                 }
5444
5445                 /*
5446                  * Adjust pmap wired pages count for new entry.
5447                  */
5448                 if (wired) {
5449                         atomic_add_long(&pte_pv->pv_pmap->pm_stats.
5450                                         wired_count, 1);
5451                 }
5452         }
5453
5454         /*
5455          * Kernel VMAs (pt_pv == NULL) require pmap invalidation interlocks.
5456          *
5457          * User VMAs do not because those will be zero->non-zero, so no
5458          * stale entries to worry about at this point.
5459          *
5460          * For KVM there appear to still be issues.  Theoretically we
5461          * should be able to scrap the interlocks entirely but we
5462          * get crashes.
5463          */
5464         if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL) {
5465                 pmap_inval_smp(pmap, va, 1, ptep, newpte);
5466         } else {
5467                 origpte = atomic_swap_long(ptep, newpte);
5468                 if (origpte & pmap->pmap_bits[PG_M_IDX]) {
5469                         kprintf("pmap [M] race @ %016jx\n", va);
5470                         atomic_set_long(ptep, pmap->pmap_bits[PG_M_IDX]);
5471                 }
5472                 if (pt_pv == NULL)
5473                         cpu_invlpg((void *)va);
5474         }
5475
5476         /*
5477          * Cleanup
5478          */
5479 done:
5480         KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 ||
5481                  (m->flags & PG_MAPPED));
5482
5483         /*
5484          * Cleanup the pv entry, allowing other accessors.  If the new page
5485          * is not managed but we have a pte_pv (which was locking our
5486          * operation), we can free it now.  pte_pv->pv_m should be NULL.
5487          */
5488         if (pte_pv && (newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5489                 pv_free(pte_pv, pt_pv);
5490         } else if (pte_pv) {
5491                 pv_put(pte_pv);
5492         } else if (pte_placemark) {
5493                 pv_placemarker_wakeup(pmap, pte_placemark);
5494         }
5495         if (pt_pv)
5496                 pv_put(pt_pv);
5497 }
5498
5499 /*
5500  * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
5501  * This code also assumes that the pmap has no pre-existing entry for this
5502  * VA.
5503  *
5504  * This code currently may only be used on user pmaps, not kernel_pmap.
5505  */
5506 void
5507 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
5508 {
5509         pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, NULL);
5510 }
5511
5512 /*
5513  * Make a temporary mapping for a physical address.  This is only intended
5514  * to be used for panic dumps.
5515  *
5516  * The caller is responsible for calling smp_invltlb().
5517  */
5518 void *
5519 pmap_kenter_temporary(vm_paddr_t pa, long i)
5520 {
5521         pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
5522         return ((void *)crashdumpmap);
5523 }
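/*
 * Illustrative use only (a hedged sketch, not lifted from the dump code):
 * a panic dump loop maps one page at a time and flushes the TLB itself
 * before touching the mapping.
 *
 *	va = pmap_kenter_temporary(pa, 0);
 *	smp_invltlb();
 *	... copy PAGE_SIZE bytes from va to the dump device ...
 */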
5524
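/*
 * Upper bound (in pages) beyond which pmap_object_init_pt() declines to
 * pre-populate ptes for MAP_PREFAULT_PARTIAL requests; larger ranges are
 * left to normal demand faulting.
 */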
5525 #define MAX_INIT_PT (96)
5526
5527 /*
5528  * This routine preloads the ptes for a given object into the specified pmap.
5529  * This eliminates the blast of soft faults on process startup and
5530  * immediately after an mmap.
5531  */
5532 static int pmap_object_init_pt_callback(vm_page_t p, void *data);
5533
5534 void
5535 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
5536                     vm_object_t object, vm_pindex_t pindex,
5537                     vm_size_t size, int limit)
5538 {
5539         struct rb_vm_page_scan_info info;
5540         struct lwp *lp;
5541         vm_size_t psize;
5542
5543         /*
5544          * We can't preinit if read access isn't set or there is no pmap
5545          * or object.
5546          */
5547         if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
5548                 return;
5549
5550         /*
5551          * We can't preinit if the pmap is not the current pmap
5552          */
5553         lp = curthread->td_lwp;
5554         if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
5555                 return;
5556
5557         /*
5558          * Misc additional checks
5559          */
5560         psize = x86_64_btop(size);
5561
5562         if ((object->type != OBJT_VNODE) ||
5563                 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
5564                         (object->resident_page_count > MAX_INIT_PT))) {
5565                 return;
5566         }
5567
5568         if (pindex + psize > object->size) {
5569                 if (object->size < pindex)
5570                         return;           
5571                 psize = object->size - pindex;
5572         }
5573
5574         if (psize == 0)
5575                 return;
5576
5577         /*
5578          * If everything is segment-aligned do not pre-init here.  Instead
5579          * allow the normal vm_fault path to pass a segment hint to
5580          * pmap_enter() which will then use an object-referenced shared
5581          * page table page.
5582          */
5583         if ((addr & SEG_MASK) == 0 &&
5584             (ctob(psize) & SEG_MASK) == 0 &&
5585             (ctob(pindex) & SEG_MASK) == 0) {
5586                 return;
5587         }
5588
5589         /*
5590          * Use a red-black scan to traverse the requested range and load
5591          * any valid pages found into the pmap.
5592          *
5593          * We cannot safely scan the object's memq without holding the
5594          * object token.
5595          */
5596         info.start_pindex = pindex;
5597         info.end_pindex = pindex + psize - 1;
5598         info.limit = limit;
5599         info.mpte = NULL;
5600         info.addr = addr;
5601         info.pmap = pmap;
5602         info.object = object;
5603
5604         /*
5605          * Because we use the NOLK scan, the callback function must be sure
5606          * to return -1 if the VM page falls out of the object.
5607          */
5608         vm_object_hold_shared(object);
5609         vm_page_rb_tree_RB_SCAN_NOLK(&object->rb_memq, rb_vm_page_scancmp,
5610                                      pmap_object_init_pt_callback, &info);
5611         vm_object_drop(object);
5612 }
5613
5614 static
5615 int
5616 pmap_object_init_pt_callback(vm_page_t p, void *data)
5617 {
5618         struct rb_vm_page_scan_info *info = data;
5619         vm_pindex_t rel_index;
5620         int hard_busy;
5621
5622         /*
5623          * Don't allow an madvise to blow away our really
5624          * free pages by allocating pv entries.
5625          */
5626         if ((info->limit & MAP_PREFAULT_MADVISE) &&
5627                 vmstats.v_free_count < vmstats.v_free_reserved) {
5628                     return(-1);
5629         }
5630
5631         /*
5632          * Ignore list markers and ignore pages we cannot instantly
5633          * busy (while holding the object token).
5634          */
5635         if (p->flags & PG_MARKER)
5636                 return 0;
5637         hard_busy = 0;
5638 again:
5639         if (hard_busy) {
5640                 if (vm_page_busy_try(p, TRUE))
5641                         return 0;
5642         } else {
5643                 if (vm_page_sbusy_try(p))
5644                         return 0;
5645         }
5646         if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
5647             (p->flags & PG_FICTITIOUS) == 0) {
5648                 if ((p->queue - p->pc) == PQ_CACHE) {
5649                         if (hard_busy == 0) {
5650                                 vm_page_sbusy_drop(p);
5651                                 hard_busy = 1;
5652                                 goto again;
5653                         }
5654                         vm_page_deactivate(p);
5655                 }
5656                 rel_index = p->pindex - info->start_pindex;
5657                 pmap_enter_quick(info->pmap,
5658                                  info->addr + x86_64_ptob(rel_index), p);
5659         }
5660         if (hard_busy)
5661                 vm_page_wakeup(p);
5662         else
5663                 vm_page_sbusy_drop(p);
5664
5665         /*
5666          * We are using an unlocked scan (that is, the scan expects its
5667          * current element to remain in the tree on return).  So we have
5668          * to check here and abort the scan if it isn't.
5669          */
5670         if (p->object != info->object)
5671                 return -1;
5672         lwkt_yield();
5673         return(0);
5674 }
5675
5676 /*
5677  * Return TRUE if the pmap is in shape to trivially pre-fault the specified
5678  * address.
5679  *
5680  * Returns FALSE if it would be non-trivial or if a pte is already loaded
5681  * into the slot.
5682  *
5683  * XXX This is safe only because page table pages are not freed.
5684  */
5685 int
5686 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
5687 {
5688         pt_entry_t *pte;
5689
5690         /*spin_lock(&pmap->pm_spin);*/
5691         if ((pte = pmap_pte(pmap, addr)) != NULL) {
5692                 if (*pte & pmap->pmap_bits[PG_V_IDX]) {
5693                         /*spin_unlock(&pmap->pm_spin);*/
5694                         return FALSE;
5695                 }
5696         }
5697         /*spin_unlock(&pmap->pm_spin);*/
5698         return TRUE;
5699 }
5700
5701 /*
5702  * Unwire the mapping for a pmap/va pair.  The mapping must already
5703  * exist in the pmap.  The mapping may or may not be managed.  The wiring
5704  * in the page itself is not changed; the page is returned so the caller
5705  * can adjust its wiring (the page is not locked in any way).
5706  *
5707  * Wiring is not a hardware characteristic, so there is no need to
5708  * invalidate the TLB.  However, in an SMP environment we must use a locked
5709  * bus cycle to update the pte (if we are not using the pmap_inval_*() API,
5710  * that is)... it's ok to do this for simple wiring changes.
5711  */
5712 vm_page_t
5713 pmap_unwire(pmap_t pmap, vm_offset_t va)
5714 {
5715         pt_entry_t *ptep;
5716         pv_entry_t pt_pv;
5717         vm_paddr_t pa;
5718         vm_page_t m;
5719
5720         if (pmap == NULL)
5721                 return NULL;
5722
5723         /*
5724          * Assume elements in the kernel pmap are stable
5725          */
5726         if (pmap == &kernel_pmap) {
5727                 if (pmap_pt(pmap, va) == 0)
5728                         return NULL;
5729                 ptep = pmap_pte_quick(pmap, va);
5730                 if (pmap_pte_v(pmap, ptep)) {
5731                         if (pmap_pte_w(pmap, ptep))
5732                                 atomic_add_long(&pmap->pm_stats.wired_count,-1);
5733                         atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5734                         pa = *ptep & PG_FRAME;
5735                         m = PHYS_TO_VM_PAGE(pa);
5736                 } else {
5737                         m = NULL;
5738                 }
5739         } else {
5740                 /*
5741                  * We can only [un]wire pmap-local pages (we cannot wire
5742                  * shared pages)
5743                  */
5744                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
5745                 if (pt_pv == NULL)
5746                         return NULL;
5747
5748                 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5749                 if ((*ptep & pmap->pmap_bits[PG_V_IDX]) == 0) {
5750                         pv_put(pt_pv);
5751                         return NULL;
5752                 }
5753
5754                 if (pmap_pte_w(pmap, ptep)) {
5755                         atomic_add_long(&pt_pv->pv_pmap->pm_stats.wired_count,
5756                                         -1);
5757                 }
5758                 /* XXX else return NULL so caller doesn't unwire m ? */
5759
5760                 atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5761
5762                 pa = *ptep & PG_FRAME;
5763                 m = PHYS_TO_VM_PAGE(pa);        /* held by wired count */
5764                 pv_put(pt_pv);
5765         }
5766         return m;
5767 }
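/*
 * Hedged usage sketch (the exact caller is not reproduced here): the
 * unwiring path clears the pte wiring with pmap_unwire() and then adjusts
 * the returned page's own wire count separately.
 *
 *	m = pmap_unwire(pmap, va);
 *	if (m)
 *		vm_page_unwire(m, 1);
 */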
5768
5769 /*
5770  * Copy the range specified by src_addr/len from the source map to
5771  * the range dst_addr/len in the destination map.
5772  *
5773  * This routine is only advisory and need not do anything.
5774  */
5775 void
5776 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 
5777           vm_size_t len, vm_offset_t src_addr)
5778 {
5779 }       
5780
5781 /*
5782  * pmap_zero_page:
5783  *
5784  *      Zero the specified physical page.
5785  *
5786  *      This function may be called from an interrupt and no locking is
5787  *      required.
5788  */
5789 void
5790 pmap_zero_page(vm_paddr_t phys)
5791 {
5792         vm_offset_t va = PHYS_TO_DMAP(phys);
5793
5794         pagezero((void *)va);
5795 }
5796
5797 /*
5798  * pmap_zero_page_area:
5799  *
5800  *      Zero part of a physical page by mapping it into memory and clearing
5801  *      its contents with bzero.
5802  *
5803  *      off and size may not cover an area beyond a single hardware page.
5804  */
5805 void
5806 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
5807 {
5808         vm_offset_t virt = PHYS_TO_DMAP(phys);
5809
5810         bzero((char *)virt + off, size);
5811 }
5812
5813 /*
5814  * pmap_copy_page:
5815  *
5816  *      Copy the physical page from the source PA to the target PA.
5817  *      This function may be called from an interrupt.  No locking
5818  *      is required.
5819  */
5820 void
5821 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
5822 {
5823         vm_offset_t src_virt, dst_virt;
5824
5825         src_virt = PHYS_TO_DMAP(src);
5826         dst_virt = PHYS_TO_DMAP(dst);
5827         bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
5828 }
5829
5830 /*
5831  * pmap_copy_page_frag:
5832  *
5833  *      Copy part of a physical page from the source PA to the target PA,
5834  *      using the low bits of each PA as the in-page offset.  This function
5835  *      may be called from an interrupt.  No locking is required.
5836  */
5837 void
5838 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
5839 {
5840         vm_offset_t src_virt, dst_virt;
5841
5842         src_virt = PHYS_TO_DMAP(src);
5843         dst_virt = PHYS_TO_DMAP(dst);
5844
5845         bcopy((char *)src_virt + (src & PAGE_MASK),
5846               (char *)dst_virt + (dst & PAGE_MASK),
5847               bytes);
5848 }
5849
5850 /*
5851  * Returns true if the pmap's pv is one of the first 16 pvs linked to from
5852  * this page.  This count may be changed upwards or downwards in the future;
5853  * it is only necessary that true be returned for a small subset of pmaps
5854  * for proper page aging.
5855  */
5856 boolean_t
5857 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
5858 {
5859         pv_entry_t pv;
5860         int loops = 0;
5861
5862         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
5863                 return FALSE;
5864
5865         vm_page_spin_lock(m);
5866         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5867                 if (pv->pv_pmap == pmap) {
5868                         vm_page_spin_unlock(m);
5869                         return TRUE;
5870                 }
5871                 loops++;
5872                 if (loops >= 16)
5873                         break;
5874         }
5875         vm_page_spin_unlock(m);
5876         return (FALSE);
5877 }
5878
5879 /*
5880  * Remove all pages from the specified address space.  This aids process
5881  * exit speeds.  Also, this code may be special cased for the current
5882  * process only.
5883  */
5884 void
5885 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5886 {
5887         pmap_remove_noinval(pmap, sva, eva);
5888         cpu_invltlb();
5889 }
5890
5891 /*
5892  * pmap_testbit tests bits in ptes.  Note that the testbit/clearbit
5893  * routines are inline, and a lot of things compile-time evaluate.
5894  */
5895
5896 static
5897 boolean_t
5898 pmap_testbit(vm_page_t m, int bit)
5899 {
5900         pv_entry_t pv;
5901         pt_entry_t *pte;
5902         pmap_t pmap;
5903
5904         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
5905                 return FALSE;
5906
5907         if (TAILQ_FIRST(&m->md.pv_list) == NULL)
5908                 return FALSE;
5909         vm_page_spin_lock(m);
5910         if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
5911                 vm_page_spin_unlock(m);
5912                 return FALSE;
5913         }
5914
5915         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5916 #if defined(PMAP_DIAGNOSTIC)
5917                 if (pv->pv_pmap == NULL) {
5918                         kprintf("Null pmap (tb) at pindex: %"PRIu64"\n",
5919                             pv->pv_pindex);
5920                         continue;
5921                 }
5922 #endif
5923                 pmap = pv->pv_pmap;
5924
5925                 /*
5926                  * If the bit being tested is the modified or accessed
5927                  * bit, we only consider mappings whose modified state
5928                  * we actually track (see pmap_track_modified()).
5929                  *
5930                  * WARNING!  Because we do not lock the pv, *pte can be in a
5931                  *           state of flux.  Despite this the value of *pte
5932                  *           will still be related to the vm_page in some way
5933                  *           because the pv cannot be destroyed as long as we
5934                  *           hold the vm_page spin lock.
5935                  */
5936                 if (bit == PG_A_IDX || bit == PG_M_IDX) {
5937                                 //& (pmap->pmap_bits[PG_A_IDX] | pmap->pmap_bits[PG_M_IDX])) {
5938                         if (!pmap_track_modified(pv->pv_pindex))
5939                                 continue;
5940                 }
5941
5942                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
5943                 if (*pte & pmap->pmap_bits[bit]) {
5944                         vm_page_spin_unlock(m);
5945                         return TRUE;
5946                 }
5947         }
5948         vm_page_spin_unlock(m);
5949         return (FALSE);
5950 }
5951
5952 /*
5953  * This routine is used to modify bits in ptes.  Only one bit should be
5954  * specified.  PG_RW requires special handling.
5955  *
5956  * Caller must NOT hold any spin locks
5957  */
5958 static __inline
5959 void
5960 pmap_clearbit(vm_page_t m, int bit_index)
5961 {
5962         pv_entry_t pv;
5963         pt_entry_t *pte;
5964         pt_entry_t pbits;
5965         pmap_t pmap;
5966
5967         if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
5968                 if (bit_index == PG_RW_IDX)
5969                         vm_page_flag_clear(m, PG_WRITEABLE);
5970                 return;
5971         }
5972
5973         /*
5974          * PG_M or PG_A case
5975          *
5976          * Loop over all current mappings, setting/clearing as appropriate.
5977          * If setting RO, do we need to clear the VAC?
5978          *
5979          * NOTE: When clearing PG_M we could also (not implemented) drop
5980          *       through to the PG_RW code and clear PG_RW too, forcing
5981          *       a fault on write to redetect PG_M for virtual kernels, but
5982          *       it isn't necessary since virtual kernels invalidate the
5983          *       pte when they clear the VPTE_M bit in their virtual page
5984          *       tables.
5985          *
5986          * NOTE: Does not re-dirty the page when clearing only PG_M.
5987          *
5988          * NOTE: Because we do not lock the pv, *pte can be in a state of
5989          *       flux.  Despite this the value of *pte is still somewhat
5990          *       related while we hold the vm_page spin lock.
5991          *
5992          *       *pte can be zero due to this race.  Since we are clearing
5993          *       bits we basically do no harm when this race occurs.
5994          */
5995         if (bit_index != PG_RW_IDX) {
5996                 vm_page_spin_lock(m);
5997                 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5998 #if defined(PMAP_DIAGNOSTIC)
5999                         if (pv->pv_pmap == NULL) {
6000                                 kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
6001                                     pv->pv_pindex);
6002                                 continue;
6003                         }
6004 #endif
6005                         pmap = pv->pv_pmap;
6006                         pte = pmap_pte_quick(pv->pv_pmap,
6007                                              pv->pv_pindex << PAGE_SHIFT);
6008                         pbits = *pte;
6009                         if (pbits & pmap->pmap_bits[bit_index])
6010                                 atomic_clear_long(pte, pmap->pmap_bits[bit_index]);
6011                 }
6012                 vm_page_spin_unlock(m);
6013                 return;
6014         }
6015
6016         /*
6017          * Clear PG_RW.  Also clears PG_M and marks the page dirty if PG_M
6018          * was set.
6019          */
6020 restart:
6021         vm_page_spin_lock(m);
6022         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
6023                 /*
6024                  * don't write protect pager mappings
6025                  */
6026                 if (!pmap_track_modified(pv->pv_pindex))
6027                         continue;
6028
6029 #if defined(PMAP_DIAGNOSTIC)
6030                 if (pv->pv_pmap == NULL) {
6031                         kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
6032                                 pv->pv_pindex);
6033                         continue;
6034                 }
6035 #endif
6036                 pmap = pv->pv_pmap;
6037
6038                 /*
6039                  * Skip pages which do not have PG_RW set.
6040                  */
6041                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
6042                 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0)
6043                         continue;
6044
6045                 /*
6046                  * We must lock the PV to be able to safely test the pte.
6047                  */
6048                 if (pv_hold_try(pv)) {
6049                         vm_page_spin_unlock(m);
6050                 } else {
6051                         vm_page_spin_unlock(m);
6052                         pv_lock(pv);    /* held, now do a blocking lock */
6053                         pv_put(pv);
6054                         goto restart;
6055                 }
6056
6057                 /*
6058                  * Reload pte after acquiring pv.
6059                  */
6060                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
6061 #if 0
6062                 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0) {
6063                         pv_put(pv);
6064                         goto restart;
6065                 }
6066 #endif
6067
6068                 KKASSERT(pv->pv_pmap == pmap && pv->pv_m == m);
6069                 for (;;) {
6070                         pt_entry_t nbits;
6071
6072                         pbits = *pte;
6073                         cpu_ccfence();
6074                         nbits = pbits & ~(pmap->pmap_bits[PG_RW_IDX] |
6075                                           pmap->pmap_bits[PG_M_IDX]);
6076                         if (pmap_inval_smp_cmpset(pmap,
6077                                      ((vm_offset_t)pv->pv_pindex << PAGE_SHIFT),
6078                                      pte, pbits, nbits)) {
6079                                 break;
6080                         }
6081                         cpu_pause();
6082                 }
6083
6084                 /*
6085                  * If PG_M was found to be set while we were clearing PG_RW
6086                  * we also clear PG_M (done above) and mark the page dirty.
6087                  * Callers expect this behavior.
6088                  *
6089                  * We lost the pv so it cannot be used as an iterator.
6090                  * In fact, because we couldn't necessarily lock it
6091                  * atomically it may have moved within the list and ALSO
6092                  * cannot be used as an iterator.
6093                  */
6094                 vm_page_spin_lock(m);
6095                 if (pbits & pmap->pmap_bits[PG_M_IDX])
6096                         vm_page_dirty(m);
6097                 vm_page_spin_unlock(m);
6098                 pv_put(pv);
6099                 goto restart;
6100         }
6101         if (bit_index == PG_RW_IDX)
6102                 vm_page_flag_clear(m, PG_WRITEABLE);
6103         vm_page_spin_unlock(m);
6104 }
6105
6106 /*
6107  * Lower the permission for all mappings to a given page.
6108  *
6109  * The page must be busied by the caller.  Because the page is busied by
6110  * the caller, this should not be able to race a pmap_enter().
6111  */
6112 void
6113 pmap_page_protect(vm_page_t m, vm_prot_t prot)
6114 {
6115         /* JG NX support? */
6116         if ((prot & VM_PROT_WRITE) == 0) {
6117                 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
6118                         /*
6119                          * NOTE: pmap_clearbit(.. PG_RW) also clears
6120                          *       the PG_WRITEABLE flag in (m).
6121                          */
6122                         pmap_clearbit(m, PG_RW_IDX);
6123                 } else {
6124                         pmap_remove_all(m);
6125                 }
6126         }
6127 }
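/*
 * For example, write-protecting every existing mapping of a page reduces
 * to pmap_page_protect(m, VM_PROT_READ), while pmap_page_protect(m,
 * VM_PROT_NONE) removes all mappings entirely.
 */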
6128
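/*
 * Convert a physical page index to a physical byte address.
 */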
6129 vm_paddr_t
6130 pmap_phys_address(vm_pindex_t ppn)
6131 {
6132         return (x86_64_ptob(ppn));
6133 }
6134
6135 /*
6136  * Return a count of reference bits for a page, clearing those bits.
6137  * It is not necessary for every reference bit to be cleared, but it
6138  * is necessary that 0 only be returned when there are truly no
6139  * reference bits set.
6140  *
6141  * XXX: The exact number of bits to check and clear is a matter that
6142  * should be tested and standardized at some point in the future for
6143  * optimal aging of shared pages.
6144  *
6145  * This routine may not block.
6146  */
6147 int
6148 pmap_ts_referenced(vm_page_t m)
6149 {
6150         pv_entry_t pv;
6151         pt_entry_t *pte;
6152         pmap_t pmap;
6153         int rtval = 0;
6154
6155         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
6156                 return (rtval);
6157
6158         vm_page_spin_lock(m);
6159         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
6160                 if (!pmap_track_modified(pv->pv_pindex))
6161                         continue;
6162                 pmap = pv->pv_pmap;
6163                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
6164                 if (pte && (*pte & pmap->pmap_bits[PG_A_IDX])) {
6165                         atomic_clear_long(pte, pmap->pmap_bits[PG_A_IDX]);
6166                         rtval++;
6167                         if (rtval > 4)
6168                                 break;
6169                 }
6170         }
6171         vm_page_spin_unlock(m);
6172         return (rtval);
6173 }
6174
6175 /*
6176  *      pmap_is_modified:
6177  *
6178  *      Return whether or not the specified physical page was modified
6179  *      in any physical maps.
6180  */
6181 boolean_t
6182 pmap_is_modified(vm_page_t m)
6183 {
6184         boolean_t res;
6185
6186         res = pmap_testbit(m, PG_M_IDX);
6187         return (res);
6188 }
6189
6190 /*
6191  *      Clear the modify bits on the specified physical page.
6192  */
6193 void
6194 pmap_clear_modify(vm_page_t m)
6195 {
6196         pmap_clearbit(m, PG_M_IDX);
6197 }
6198
6199 /*
6200  *      pmap_clear_reference:
6201  *
6202  *      Clear the reference bit on the specified physical page.
6203  */
6204 void
6205 pmap_clear_reference(vm_page_t m)
6206 {
6207         pmap_clearbit(m, PG_A_IDX);
6208 }
6209
6210 /*
6211  * Miscellaneous support routines follow
6212  */
6213
6214 static
6215 void
6216 x86_64_protection_init(void)
6217 {
6218         uint64_t *kp;
6219         int prot;
6220
6221         /*
6222          * NX supported? (boot time loader.conf override only)
6223          */
6224         TUNABLE_INT_FETCH("machdep.pmap_nx_enable", &pmap_nx_enable);
6225         if (pmap_nx_enable == 0 || (amd_feature & AMDID_NX) == 0)
6226                 pmap_bits_default[PG_NX_IDX] = 0;
6227
6228         /*
6229          * 0 is basically read-only access, but also set the NX (no-execute)
6230          * bit when VM_PROT_EXECUTE is not specified.
6231          */
6232         kp = protection_codes;
6233         for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) {
6234                 switch (prot) {
6235                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
6236                         /*
6237                          * This case handled elsewhere
6238                          */
6239                         *kp++ = 0;
6240                         break;
6241                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
6242                         /*
6243                          * Read-only is 0|NX
6244                          */
6245                         *kp++ = pmap_bits_default[PG_NX_IDX];
6246                         break;
6247                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
6248                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
6249                         /*
6250                          * Execute requires read access
6251                          */
6252                         *kp++ = 0;
6253                         break;
6254                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
6255                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
6256                         /*
6257                          * Write without execute is RW|NX
6258                          */
6259                         *kp++ = pmap_bits_default[PG_RW_IDX] |
6260                                 pmap_bits_default[PG_NX_IDX];
6261                         break;
6262                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
6263                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
6264                         /*
6265                          * Write with execute is RW
6266                          */
6267                         *kp++ = pmap_bits_default[PG_RW_IDX];
6268                         break;
6269                 }
6270         }
6271 }
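/*
 * The resulting protection_codes[] table, summarized (left side is the
 * VM_PROT_* combination as r/w/x, right side the extra PTE bits applied):
 *
 *	---		-> 0 (handled elsewhere)
 *	r--		-> NX
 *	r-x, --x	-> 0
 *	rw-, -w-	-> RW | NX
 *	rwx, -wx	-> RW
 */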
6272
6273 /*
6274  * Map a set of physical memory pages into the kernel virtual
6275  * address space. Return a pointer to where it is mapped. This
6276  * routine is intended to be used for mapping device memory,
6277  * NOT real memory.
6278  *
6279  * NOTE: We can't use pgeflag unless we invalidate the pages one at
6280  *       a time.
6281  *
6282  * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE}
6283  *       work whether the cpu supports PAT or not.  The remaining PAT
6284  *       attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu
6285  *       supports PAT.
6286  */
6287 void *
6288 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
6289 {
6290         return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6291 }
6292
6293 void *
6294 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
6295 {
6296         return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
6297 }
6298
6299 void *
6300 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
6301 {
6302         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6303 }
6304
6305 /*
6306  * Map a set of physical memory pages into the kernel virtual
6307  * address space. Return a pointer to where it is mapped. This
6308  * routine is intended to be used for mapping device memory,
6309  * NOT real memory.
6310  */
6311 void *
6312 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
6313 {
6314         vm_offset_t va, tmpva, offset;
6315         pt_entry_t *pte;
6316         vm_size_t tmpsize;
6317
6318         offset = pa & PAGE_MASK;
6319         size = roundup(offset + size, PAGE_SIZE);
6320
6321         va = kmem_alloc_nofault(&kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE);
6322         if (va == 0)
6323                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
6324
6325         pa = pa & ~PAGE_MASK;
6326         for (tmpva = va, tmpsize = size; tmpsize > 0;) {
6327                 pte = vtopte(tmpva);
6328                 *pte = pa |
6329                     kernel_pmap.pmap_bits[PG_RW_IDX] |
6330                     kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */
6331                     kernel_pmap.pmap_cache_bits[mode];
6332                 tmpsize -= PAGE_SIZE;
6333                 tmpva += PAGE_SIZE;
6334                 pa += PAGE_SIZE;
6335         }
6336         pmap_invalidate_range(&kernel_pmap, va, va + size);
6337         pmap_invalidate_cache_range(va, va + size);
6338
6339         return ((void *)(va + offset));
6340 }
6341
6342 void
6343 pmap_unmapdev(vm_offset_t va, vm_size_t size)
6344 {
6345         vm_offset_t base, offset;
6346
6347         base = va & ~PAGE_MASK;
6348         offset = va & PAGE_MASK;
6349         size = roundup(offset + size, PAGE_SIZE);
6350         pmap_qremove(va, size >> PAGE_SHIFT);
6351         kmem_free(&kernel_map, base, size);
6352 }
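/*
 * Typical device-driver usage (an illustrative sketch; 'sc' and 'bar_*'
 * are hypothetical names):
 *
 *	sc->regs = pmap_mapdev(bar_pa, bar_size);
 *	...
 *	pmap_unmapdev((vm_offset_t)sc->regs, bar_size);
 */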
6353
6354 /*
6355  * Sets the memory attribute for the specified page.
6356  */
6357 void
6358 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
6359 {
6360
6361     m->pat_mode = ma;
6362
6363     /*
6364      * If "m" is a normal page, update its direct mapping.  This update
6365      * can be relied upon to perform any cache operations that are
6366      * required for data coherence.
6367      */
6368     if ((m->flags & PG_FICTITIOUS) == 0)
6369         pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode);
6370 }
6371
6372 /*
6373  * Change the PAT attribute on an existing kernel memory map.  Caller
6374  * must ensure that the virtual memory in question is not accessed
6375  * during the adjustment.
6376  */
6377 void
6378 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode)
6379 {
6380         pt_entry_t *pte;
6381         vm_offset_t base;
6382         int changed = 0;
6383
6384         if (va == 0)
6385                 panic("pmap_change_attr: va is NULL");
6386         base = trunc_page(va);
6387
6388         while (count) {
6389                 pte = vtopte(va);
6390                 *pte = (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask)) |
6391                        kernel_pmap.pmap_cache_bits[mode];
6392                 --count;
6393                 va += PAGE_SIZE;
6394         }
6395
6396         changed = 1;    /* XXX: not optimal */
6397
6398         /*
6399          * Flush CPU caches if required to make sure any data isn't cached that
6400          * shouldn't be, etc.
6401          */
6402         if (changed) {
6403                 pmap_invalidate_range(&kernel_pmap, base, va);
6404                 pmap_invalidate_cache_range(base, va);
6405         }
6406 }
6407
6408 /*
6409  * Perform the pmap work for mincore().
6410  */
6411 int
6412 pmap_mincore(pmap_t pmap, vm_offset_t addr)
6413 {
6414         pt_entry_t *ptep, pte;
6415         vm_page_t m;
6416         int val = 0;
6417         
6418         ptep = pmap_pte(pmap, addr);
6419
6420         if (ptep && (pte = *ptep) != 0) {
6421                 vm_offset_t pa;
6422
6423                 val = MINCORE_INCORE;
6424                 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0)
6425                         goto done;
6426
6427                 pa = pte & PG_FRAME;
6428
6429                 if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
6430                         m = NULL;
6431                 else
6432                         m = PHYS_TO_VM_PAGE(pa);
6433
6434                 /*
6435                  * Modified by us
6436                  */
6437                 if (pte & pmap->pmap_bits[PG_M_IDX])
6438                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
6439                 /*
6440                  * Modified by someone
6441                  */
6442                 else if (m && (m->dirty || pmap_is_modified(m)))
6443                         val |= MINCORE_MODIFIED_OTHER;
6444                 /*
6445                  * Referenced by us
6446                  */
6447                 if (pte & pmap->pmap_bits[PG_A_IDX])
6448                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
6449
6450                 /*
6451                  * Referenced by someone
6452                  */
6453                 else if (m && ((m->flags & PG_REFERENCED) ||
6454                                 pmap_ts_referenced(m))) {
6455                         val |= MINCORE_REFERENCED_OTHER;
6456                         vm_page_flag_set(m, PG_REFERENCED);
6457                 }
6458         } 
6459 done:
6460
6461         return val;
6462 }
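/*
 * Hedged note: the mincore() system call path is assumed to call
 * pmap_mincore() once per page of the queried range and copy the returned
 * MINCORE_* bits out to the user-supplied vector.
 */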
6463
6464 /*
6465  * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
6466  * vmspace will be ref'd and the old one will be deref'd.
6467  *
6468  * The vmspace for all lwps associated with the process will be adjusted
6469  * and cr3 will be reloaded if any lwp is the current lwp.
6470  *
6471  * The process must hold the vmspace->vm_map.token for oldvm and newvm.
6472  */
6473 void
6474 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
6475 {
6476         struct vmspace *oldvm;
6477         struct lwp *lp;
6478
6479         oldvm = p->p_vmspace;
6480         if (oldvm != newvm) {
6481                 if (adjrefs)
6482                         vmspace_ref(newvm);
6483                 p->p_vmspace = newvm;
6484                 KKASSERT(p->p_nthreads == 1);
6485                 lp = RB_ROOT(&p->p_lwp_tree);
6486                 pmap_setlwpvm(lp, newvm);
6487                 if (adjrefs)
6488                         vmspace_rel(oldvm);
6489         }
6490 }
6491
6492 /*
6493  * Set the vmspace for a LWP.  The vmspace is almost universally set the
6494  * same as the process vmspace, but virtual kernels need to swap out contexts
6495  * on a per-lwp basis.
6496  *
6497  * Caller does not necessarily hold any vmspace tokens.  Caller must control
6498  * the lwp (typically be in the context of the lwp).  We use a critical
6499  * section to protect against statclock and hardclock (statistics collection).
6500  */
6501 void
6502 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
6503 {
6504         struct vmspace *oldvm;
6505         struct pmap *pmap;
6506         thread_t td;
6507
6508         oldvm = lp->lwp_vmspace;
6509
6510         if (oldvm != newvm) {
6511                 crit_enter();
6512                 td = curthread;
6513                 KKASSERT((newvm->vm_refcnt & VM_REF_DELETED) == 0);
6514                 lp->lwp_vmspace = newvm;
6515                 if (td->td_lwp == lp) {
6516                         pmap = vmspace_pmap(newvm);
6517                         ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
6518                         if (pmap->pm_active_lock & CPULOCK_EXCL)
6519                                 pmap_interlock_wait(newvm);
6520 #if defined(SWTCH_OPTIM_STATS)
6521                         tlb_flush_count++;
6522 #endif
6523                         if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
6524                                 td->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
6525                                 if (meltdown_mitigation && pmap->pm_pmlpv_iso) {
6526                                         td->td_pcb->pcb_cr3_iso =
6527                                                 vtophys(pmap->pm_pml4_iso);
6528                                         td->td_pcb->pcb_flags |= PCB_ISOMMU;
6529                                 } else {
6530                                         td->td_pcb->pcb_cr3_iso = 0;
6531                                         td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
6532                                 }
6533                         } else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
6534                                 td->td_pcb->pcb_cr3 = KPML4phys;
6535                                 td->td_pcb->pcb_cr3_iso = 0;
6536                                 td->td_pcb->pcb_flags &= ~PCB_ISOMMU;
6537                         } else {
6538                                 panic("pmap_setlwpvm: unknown pmap type\n");
6539                         }
6540
6541                         /*
6542                          * The MMU separation fields need to be updated
6543                          * (the trampoline code can't access the pcb
6544                          * directly from the restricted user pmap).
6545                          */
6546                         {
6547                                 struct trampframe *tramp;
6548
6549                                 tramp = &pscpu->trampoline;
6550                                 tramp->tr_pcb_cr3 = td->td_pcb->pcb_cr3;
6551                                 tramp->tr_pcb_cr3_iso = td->td_pcb->pcb_cr3_iso;
6552                                 tramp->tr_pcb_flags = td->td_pcb->pcb_flags;
6553                                 tramp->tr_pcb_rsp = (register_t)td->td_pcb;
6554                                 /* tr_pcb_rsp doesn't change */
6555                         }
6556
6557                         /*
6558                          * In kernel-land we always use the normal PML4E
6559                          * so the kernel is fully mapped and can also access
6560                          * user memory.
6561                          */
6562                         load_cr3(td->td_pcb->pcb_cr3);
6563                         pmap = vmspace_pmap(oldvm);
6564                         ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
6565                                                mycpu->gd_cpuid);
6566                 }
6567                 crit_exit();
6568         }
6569 }
6570
6571 /*
6572  * Called when switching to a locked pmap, used to interlock against pmaps
6573  * undergoing modifications to prevent us from activating the MMU for the
6574  * target pmap until all such modifications have completed.  We have to do
6575  * this because the thread making the modifications has already set up its
6576  * SMP synchronization mask.
6577  *
6578  * This function cannot sleep!
6579  *
6580  * No requirements.
6581  */
6582 void
6583 pmap_interlock_wait(struct vmspace *vm)
6584 {
6585         struct pmap *pmap = &vm->vm_pmap;
6586
6587         if (pmap->pm_active_lock & CPULOCK_EXCL) {
6588                 crit_enter();
6589                 KKASSERT(curthread->td_critcount >= 2);
6590                 DEBUG_PUSH_INFO("pmap_interlock_wait");
6591                 while (pmap->pm_active_lock & CPULOCK_EXCL) {
6592                         cpu_ccfence();
6593                         lwkt_process_ipiq();
6594                 }
6595                 DEBUG_POP_INFO();
6596                 crit_exit();
6597         }
6598 }
6599
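/*
 * Provide an address hint: large OBJT_DEVICE/OBJT_MGTDEVICE mappings are
 * rounded up to a 2MB (NBPDR) boundary so they can take advantage of
 * aligned page table segments; other requests return the address
 * unchanged.
 */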
6600 vm_offset_t
6601 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
6602 {
6603
6604         if ((obj == NULL) || (size < NBPDR) ||
6605             ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
6606                 return addr;
6607         }
6608
6609         addr = roundup2(addr, NBPDR);
6610         return addr;
6611 }
6612
6613 /*
6614  * Used by kmalloc/kfree, page already exists at va
6615  */
6616 vm_page_t
6617 pmap_kvtom(vm_offset_t va)
6618 {
6619         pt_entry_t *ptep = vtopte(va);
6620
6621         KKASSERT((*ptep & kernel_pmap.pmap_bits[PG_DEVICE_IDX]) == 0);
6622         return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
6623 }
6624
6625 /*
6626  * Initialize machine-specific shared page directory support.  This
6627  * is executed when a VM object is created.
6628  */
6629 void
6630 pmap_object_init(vm_object_t object)
6631 {
6632         object->md.pmap_rw = NULL;
6633         object->md.pmap_ro = NULL;
6634 }
6635
6636 /*
6637  * Clean up machine-specific shared page directory support.  This
6638  * is executed when a VM object is destroyed.
6639  */
6640 void
6641 pmap_object_free(vm_object_t object)
6642 {
6643         pmap_t pmap;
6644
6645         if ((pmap = object->md.pmap_rw) != NULL) {
6646                 object->md.pmap_rw = NULL;
6647                 pmap_remove_noinval(pmap,
6648                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
6649                 CPUMASK_ASSZERO(pmap->pm_active);
6650                 pmap_release(pmap);
6651                 pmap_puninit(pmap);
6652                 kfree(pmap, M_OBJPMAP);
6653         }
6654         if ((pmap = object->md.pmap_ro) != NULL) {
6655                 object->md.pmap_ro = NULL;
6656                 pmap_remove_noinval(pmap,
6657                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
6658                 CPUMASK_ASSZERO(pmap->pm_active);
6659                 pmap_release(pmap);
6660                 pmap_puninit(pmap);
6661                 kfree(pmap, M_OBJPMAP);
6662         }
6663 }
6664
6665 /*
6666  * pmap_pgscan_callback - Used by pmap_pgscan to acquire the related
6667  * VM page and issue a pginfo->callback.
6668  *
6669  * We are expected to dispose of any non-NULL pte_pv.
6670  */
6671 static
6672 void
6673 pmap_pgscan_callback(pmap_t pmap, struct pmap_scan_info *info,
6674                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
6675                       pv_entry_t pt_pv, int sharept,
6676                       vm_offset_t va, pt_entry_t *ptep, void *arg)
6677 {
6678         struct pmap_pgscan_info *pginfo = arg;
6679         vm_page_t m;
6680
6681         if (pte_pv) {
6682                 /*
6683                  * Try to busy the page while we hold the pte_pv locked.
6684                  */
6685                 KKASSERT(pte_pv->pv_m);
6686                 m = PHYS_TO_VM_PAGE(*ptep & PG_FRAME);
6687                 if (vm_page_busy_try(m, TRUE) == 0) {
6688                         if (m == PHYS_TO_VM_PAGE(*ptep & PG_FRAME)) {
6689                                 /*
6690                                  * The callback is issued with the pte_pv
6691                                  * unlocked and put away, and the pt_pv
6692                                  * unlocked.
6693                                  */
6694                                 pv_put(pte_pv);
6695                                 if (pt_pv) {
6696                                         vm_page_wire_quick(pt_pv->pv_m);
6697                                         pv_unlock(pt_pv);
6698                                 }
6699                                 if (pginfo->callback(pginfo, va, m) < 0)
6700                                         info->stop = 1;
6701                                 if (pt_pv) {
6702                                         pv_lock(pt_pv);
6703                                         vm_page_unwire_quick(pt_pv->pv_m);
6704                                 }
6705                         } else {
6706                                 vm_page_wakeup(m);
6707                                 pv_put(pte_pv);
6708                         }
6709                 } else {
6710                         ++pginfo->busycount;
6711                         pv_put(pte_pv);
6712                 }
6713         } else {
6714                 /*
6715                  * Shared page table or unmanaged page (sharept or !sharept)
6716                  */
6717                 pv_placemarker_wakeup(pmap, pte_placemark);
6718         }
6719 }
6720
6721 void
6722 pmap_pgscan(struct pmap_pgscan_info *pginfo)
6723 {
6724         struct pmap_scan_info info;
6725
6726         pginfo->offset = pginfo->beg_addr;
6727         info.pmap = pginfo->pmap;
6728         info.sva = pginfo->beg_addr;
6729         info.eva = pginfo->end_addr;
6730         info.func = pmap_pgscan_callback;
6731         info.arg = pginfo;
6732         pmap_scan(&info, 0);
6733         if (info.stop == 0)
6734                 pginfo->offset = pginfo->end_addr;
6735 }
6736
6737 /*
6738  * Wait for a placemarker that we do not own to clear.  The placemarker
6739  * in question is not necessarily set to the pindex we want; we may have
6740  * to wait on the element because we want to reserve it ourselves.
6741  *
6742  * NOTE: PM_PLACEMARK_WAKEUP sets a bit which is already set in
6743  *       PM_NOPLACEMARK, so it does not interfere with placemarks
6744  *       which have already been woken up.
6745  */
6746 static
6747 void
6748 pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark)
6749 {
6750         if (*pmark != PM_NOPLACEMARK) {
6751                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
6752                 tsleep_interlock(pmark, 0);
6753                 if (*pmark != PM_NOPLACEMARK)
6754                         tsleep(pmark, PINTERLOCKED, "pvplw", 0);
6755         }
6756 }
6757
6758 /*
6759  * Wakeup a placemarker that we own.  Replace the entry with
6760  * PM_NOPLACEMARK and issue a wakeup() if necessary.
6761  */
6762 static
6763 void
6764 pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark)
6765 {
6766         vm_pindex_t pindex;
6767
6768         pindex = atomic_swap_long(pmark, PM_NOPLACEMARK);
6769         KKASSERT(pindex != PM_NOPLACEMARK);
6770         if (pindex & PM_PLACEMARK_WAKEUP)
6771                 wakeup(pmark);
6772 }