/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 2003 Peter Wemm
 * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
 * Copyright (c) 2008, 2009 The DragonFly Project.
 * Copyright (c) 2008, 2009 Jordan Gordeev.
 * Copyright (c) 2011-2017 Matthew Dillon
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Manage physical address maps for x86-64 systems.
 */

#if 0 /* JG */
#include "opt_pmap.h"
#endif
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>
#include <machine/inttypes.h>

#include <ddb/ddb.h>

#define PMAP_KEEP_PDIRS
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 2000
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#define MINPV 2048

/*
 * pmap debugging will report who owns a pv lock when blocking.
 */
#ifdef PMAP_DEBUG

#define PMAP_DEBUG_DECL		,const char *func, int lineno
#define PMAP_DEBUG_ARGS		, __func__, __LINE__
#define PMAP_DEBUG_COPY		, func, lineno

#define pv_get(pmap, pindex, pmarkp)	_pv_get(pmap, pindex, pmarkp	\
							PMAP_DEBUG_ARGS)
#define pv_lock(pv)			_pv_lock(pv			\
							PMAP_DEBUG_ARGS)
#define pv_hold_try(pv)			_pv_hold_try(pv			\
							PMAP_DEBUG_ARGS)
#define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp	\
							PMAP_DEBUG_ARGS)

#define pv_free(pv, pvp)		_pv_free(pv, pvp PMAP_DEBUG_ARGS)

#else

#define PMAP_DEBUG_DECL
#define PMAP_DEBUG_ARGS
#define PMAP_DEBUG_COPY

#define pv_get(pmap, pindex, pmarkp)	_pv_get(pmap, pindex, pmarkp)
#define pv_lock(pv)			_pv_lock(pv)
#define pv_hold_try(pv)			_pv_hold_try(pv)
#define pv_alloc(pmap, pindex, isnewp)	_pv_alloc(pmap, pindex, isnewp)
#define pv_free(pv, pvp)		_pv_free(pv, pvp)

#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pmap, pte)	((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
#define pmap_pte_w(pmap, pte)	((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0)
#define pmap_pte_m(pmap, pte)	((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0)
#define pmap_pte_u(pmap, pte)	((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0)
#define pmap_pte_v(pmap, pte)	((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)

/*
 * Given a map and a machine independent protection code,
 * convert to the machine dependent protection code.
 */
#define pte_prot(m, p)		\
	(m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
static uint64_t protection_codes[PROTECTION_CODES_SIZE];

struct pmap kernel_pmap;

MALLOC_DEFINE(M_OBJPMAP, "objpmap", "pmaps associated with VM objects");

vm_paddr_t avail_start;		/* PA of first available physical page */
vm_paddr_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual2_start;	/* cutout free area prior to kernel start */
vm_offset_t virtual2_end;
vm_offset_t virtual_start;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t KvaStart;		/* VA start of KVA space */
vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
vm_offset_t KvaSize;		/* max size of kernel virtual address space */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
//static int pgeflag;		/* PG_G or-in */
uint64_t PatMsr;

static int ndmpdp;
static vm_paddr_t dmaplimit;
vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static pt_entry_t pat_pte_index[PAT_INDEX_SIZE];	/* PAT -> PG_ bits */
/*static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];*/	/* PAT -> PG_ bits */

static uint64_t KPTbase;
static uint64_t KPTphys;
static uint64_t KPDphys;	/* phys addr of kernel level 2 */
static uint64_t KPDbase;	/* phys addr of kernel level 2 @ KERNBASE */
uint64_t KPDPphys;		/* phys addr of kernel level 3 */
uint64_t KPML4phys;		/* phys addr of kernel level 4 */

static uint64_t DMPDphys;	/* phys addr of direct mapped level 2 */
static uint64_t DMPDPphys;	/* phys addr of direct mapped level 3 */

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static vm_pindex_t pv_entry_max=0, pv_entry_high_water=0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = NULL, *ptmmap;
caddr_t CADDR1 = NULL, ptvmmap = NULL;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp=NULL;

/*
 * PMAP default PG_* bits. Needed to be able to add
 * EPT/NPT pagetable pmap_bits for the VMM module
 */
uint64_t pmap_bits_default[] = {
		REGULAR_PMAP,			/* TYPE_IDX		0 */
		X86_PG_V,			/* PG_V_IDX		1 */
		X86_PG_RW,			/* PG_RW_IDX		2 */
		X86_PG_U,			/* PG_U_IDX		3 */
		X86_PG_A,			/* PG_A_IDX		4 */
		X86_PG_M,			/* PG_M_IDX		5 */
		X86_PG_PS,			/* PG_PS_IDX		6 */
		X86_PG_G,			/* PG_G_IDX		7 */
		X86_PG_AVAIL1,			/* PG_AVAIL1_IDX	8 */
		X86_PG_AVAIL2,			/* PG_AVAIL2_IDX	9 */
		X86_PG_AVAIL3,			/* PG_AVAIL3_IDX	10 */
		X86_PG_NC_PWT | X86_PG_NC_PCD,	/* PG_N_IDX		11 */
		X86_PG_NX,			/* PG_NX_IDX		12 */
};
/*
 * Crashdump maps.
 */
static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;

static int pmap_debug = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_debug, CTLFLAG_RW,
    &pmap_debug, 0, "Debug pmap's");
#ifdef PMAP_DEBUG2
static int pmap_enter_debug = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW,
    &pmap_enter_debug, 0, "Debug pmap_enter's");
#endif
static int pmap_yield_count = 64;
SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
    &pmap_yield_count, 0, "Yield during init_pt/release");
static int pmap_mmu_optimize = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_mmu_optimize, CTLFLAG_RW,
    &pmap_mmu_optimize, 0, "Share page table pages when possible");
int pmap_fast_kernel_cpusync = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW,
    &pmap_fast_kernel_cpusync, 0, "Fast kernel pmap cpu synchronization");
int pmap_dynamic_delete = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_dynamic_delete, CTLFLAG_RW,
    &pmap_dynamic_delete, 0, "Dynamically delete PT/PD/PDPs");
int pmap_lock_delay = 100;
SYSCTL_INT(_machdep, OID_AUTO, pmap_lock_delay, CTLFLAG_RW,
    &pmap_lock_delay, 0, "Spin loops");

static int pmap_nx_enable = 0;
/* needs manual TUNABLE in early probe, see below */

/* Standard user access functions */
extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
    size_t *lencopied);
extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
extern int std_fubyte (const uint8_t *base);
extern int std_subyte (uint8_t *base, uint8_t byte);
extern int32_t std_fuword32 (const uint32_t *base);
extern int64_t std_fuword64 (const uint64_t *base);
extern int std_suword64 (uint64_t *base, uint64_t word);
extern int std_suword32 (uint32_t *base, int word);
extern uint32_t std_swapu32 (volatile uint32_t *base, uint32_t v);
extern uint64_t std_swapu64 (volatile uint64_t *base, uint64_t v);

static void pv_hold(pv_entry_t pv);
static int _pv_hold_try(pv_entry_t pv
				PMAP_DEBUG_DECL);
static void pv_drop(pv_entry_t pv);
static void _pv_lock(pv_entry_t pv
				PMAP_DEBUG_DECL);
static void pv_unlock(pv_entry_t pv);
static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
				PMAP_DEBUG_DECL);
static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp
				PMAP_DEBUG_DECL);
static void _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL);
static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex,
				vm_pindex_t **pmarkp, int *errorp);
static void pv_put(pv_entry_t pv);
static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
		      pv_entry_t *pvpp);
static pv_entry_t pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex,
		      pv_entry_t *pvpp, vm_map_entry_t entry, vm_offset_t va);
static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
			pmap_inval_bulk_t *bulk, int destroy);
static vm_page_t pmap_remove_pv_page(pv_entry_t pv);
static int pmap_release_pv(pv_entry_t pv, pv_entry_t pvp,
			pmap_inval_bulk_t *bulk);

struct pmap_scan_info;
static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
		      pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
		      pv_entry_t pt_pv, int sharept,
		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
		      pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
		      pv_entry_t pt_pv, int sharept,
		      vm_offset_t va, pt_entry_t *ptep, void *arg __unused);

static void x86_64_protection_init (void);
static void create_pagetables(vm_paddr_t *firstaddr);
static void pmap_remove_all (vm_page_t m);
static boolean_t pmap_testbit (vm_page_t m, int bit);

static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);

static void pmap_pinit_defaults(struct pmap *pmap);
static void pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark);
static void pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark);

static int
pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
{
	if (pv1->pv_pindex < pv2->pv_pindex)
		return(-1);
	if (pv1->pv_pindex > pv2->pv_pindex)
		return(1);
	return(0);
}

RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry,
	     pv_entry_compare, vm_pindex_t, pv_pindex);

static __inline
void
pmap_page_stats_adding(vm_page_t m)
{
	globaldata_t gd = mycpu;

	if (TAILQ_EMPTY(&m->md.pv_list)) {
		++gd->gd_vmtotal.t_arm;
	} else if (TAILQ_FIRST(&m->md.pv_list) ==
		   TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
		++gd->gd_vmtotal.t_armshr;
		++gd->gd_vmtotal.t_avmshr;
	} else {
		++gd->gd_vmtotal.t_avmshr;
	}
}

static __inline
void
pmap_page_stats_deleting(vm_page_t m)
{
	globaldata_t gd = mycpu;

	if (TAILQ_EMPTY(&m->md.pv_list)) {
		--gd->gd_vmtotal.t_arm;
	} else if (TAILQ_FIRST(&m->md.pv_list) ==
		   TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
		--gd->gd_vmtotal.t_armshr;
		--gd->gd_vmtotal.t_avmshr;
	} else {
		--gd->gd_vmtotal.t_avmshr;
	}
}

/*
 * This is an inelegant crowbar to prevent heavily threaded programs
 * from creating long live-locks in the pmap code when pmap_mmu_optimize
 * is enabled.  Without it a pmap-local page table page can wind up being
 * constantly created and destroyed (without injury, but also without
 * progress) as the optimization tries to switch to the object's shared page
 * table page.
 */
static __inline void
pmap_softwait(pmap_t pmap)
{
	while (pmap->pm_softhold) {
		tsleep_interlock(&pmap->pm_softhold, 0);
		if (pmap->pm_softhold)
			tsleep(&pmap->pm_softhold, PINTERLOCKED, "mmopt", 0);
	}
}

static __inline void
pmap_softhold(pmap_t pmap)
{
	while (atomic_swap_int(&pmap->pm_softhold, 1) == 1) {
		tsleep_interlock(&pmap->pm_softhold, 0);
		if (atomic_swap_int(&pmap->pm_softhold, 1) == 1)
			tsleep(&pmap->pm_softhold, PINTERLOCKED, "mmopt", 0);
	}
}

static __inline void
pmap_softdone(pmap_t pmap)
{
	atomic_swap_int(&pmap->pm_softhold, 0);
	wakeup(&pmap->pm_softhold);
}

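/*
 * Illustrative sketch (kept out of the build with #if 0) of how a
 * hypothetical consumer would bracket a shared-page-table switch with the
 * interlock above: switchers serialize against each other via
 * pmap_softhold()/pmap_softdone(), bystanders block in pmap_softwait().
 */
#if 0
static void
example_shared_pt_switch(pmap_t pmap)
{
	pmap_softhold(pmap);	/* exclusive: other switchers sleep */
	/* ... swap the pmap-local PT page for the object's page ... */
	pmap_softdone(pmap);	/* wakes pmap_softwait()/pmap_softhold() */
}
#endif
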
/*
 * Move the kernel virtual free pointer to the next
 * 2MB.  This is used to help improve performance
 * by using a large (2MB) page for much of the kernel
 * (.text, .data, .bss).
 */
static
vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;

	newaddr = roundup2(addr, NBPDR);
	return newaddr;
}
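
/*
 * Worked example (values illustrative): with NBPDR == 2MB,
 * pmap_kmem_choose(0xffffffff80201234) returns 0xffffffff80400000,
 * the next 2MB boundary, so the VA range that follows can be backed
 * by 2MB pages.
 */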

/*
 * Returns the pindex of a page table entry (representing a terminal page).
 * There are NUPTE_TOTAL page table entries possible (a huge number)
 *
 * x86-64 has a 48-bit address space, where bit 47 is sign-extended out.
 * We want to properly translate negative KVAs.
 */
static __inline
vm_pindex_t
pmap_pte_pindex(vm_offset_t va)
{
	return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1));
}

/*
 * Returns the pindex of a page table.
 */
static __inline
vm_pindex_t
pmap_pt_pindex(vm_offset_t va)
{
	return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1)));
}

/*
 * Returns the pindex of a page directory.
 */
static __inline
vm_pindex_t
pmap_pd_pindex(vm_offset_t va)
{
	return (NUPTE_TOTAL + NUPT_TOTAL +
		((va >> PDPSHIFT) & (NUPD_TOTAL - 1)));
}

static __inline
vm_pindex_t
pmap_pdp_pindex(vm_offset_t va)
{
	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
		((va >> PML4SHIFT) & (NUPDP_TOTAL - 1)));
}

static __inline
vm_pindex_t
pmap_pml4_pindex(void)
{
	return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL);
}
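
/*
 * Illustrative summary of the linear pv_entry pindex space built by the
 * pmap_*_pindex() functions above (exact sizes come from NUPTE_TOTAL et
 * al in the headers):
 *
 *	[0, NUPTE_TOTAL)	terminal PTE indices
 *	[+, NUPT_TOTAL)		page table (PT) pages
 *	[+, NUPD_TOTAL)		page directory (PD) pages
 *	[+, NUPDP_TOTAL)	PDP pages
 *	last			the single PML4 page
 *
 * e.g. the pv for the page table governing a va is found with:
 *
 *	pv = pv_entry_lookup(pmap, pmap_pt_pindex(va));
 */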

/*
 * Return various clipped indexes for a given VA
 *
 * Returns the index of a pt in a page directory, representing a page
 * table.
 */
static __inline
vm_pindex_t
pmap_pt_index(vm_offset_t va)
{
	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
}

/*
 * Returns the index of a pd in a page directory page, representing a page
 * directory.
 */
static __inline
vm_pindex_t
pmap_pd_index(vm_offset_t va)
{
	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
}

/*
 * Returns the index of a pdp in the pml4 table, representing a page
 * directory page.
 */
static __inline
vm_pindex_t
pmap_pdp_index(vm_offset_t va)
{
	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
}
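
/*
 * Worked example (illustrative): with 9 bits per level and 4KB pages,
 * the clipped indexes above are just successive 9-bit fields of the va:
 *
 *	pmap_pte_index(va) == (va >> 12) & 511
 *	pmap_pt_index(va)  == (va >> 21) & 511
 *	pmap_pd_index(va)  == (va >> 30) & 511
 *	pmap_pdp_index(va) == (va >> 39) & 511
 */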

/*
 * Locate the requested pt_entry
 */
static __inline
pv_entry_t
pv_entry_lookup(pmap_t pmap, vm_pindex_t pindex)
{
	pv_entry_t pv;

	if (pindex < pmap_pt_pindex(0))
		pv = pmap->pm_pvhint_pte;
	else if (pindex < pmap_pd_pindex(0))
		pv = pmap->pm_pvhint_pt;
	else
		pv = NULL;
	cpu_ccfence();
	if (pv == NULL || pv->pv_pmap != pmap) {
		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot,
						pindex);
	} else if (pv->pv_pindex != pindex) {
		pv = pv_entry_rb_tree_RB_LOOKUP_REL(&pmap->pm_pvroot,
						    pindex, pv);
	}
	return pv;
}

/*
 * pmap_pte_quick:
 *
 *	Super fast pmap_pte routine best used when scanning the pv lists.
 *	This eliminates many coarse-grained invltlb calls.  Note that many of
 *	the pv list scans are across different pmaps and it is very wasteful
 *	to do an entire invltlb when checking a single mapping.
 */
static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);

static
pt_entry_t *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	return pmap_pte(pmap, va);
}

/*
 * The placemarker hash must be broken up into four zones so lock
 * ordering semantics continue to work (e.g. pte, pt, pd, then pdp).
 *
 * Placemarkers are used to 'lock' page table indices that do not have
 * a pv_entry.  This allows the pmap to support managed and unmanaged
 * pages and shared page tables.
 */
#define PM_PLACE_BASE	(PM_PLACEMARKS >> 2)

static __inline
vm_pindex_t *
pmap_placemarker_hash(pmap_t pmap, vm_pindex_t pindex)
{
	int hi;

	if (pindex < pmap_pt_pindex(0))		/* zone 0 - PTE */
		hi = 0;
	else if (pindex < pmap_pd_pindex(0))	/* zone 1 - PT */
		hi = PM_PLACE_BASE;
	else if (pindex < pmap_pdp_pindex(0))	/* zone 2 - PD */
		hi = PM_PLACE_BASE << 1;
	else					/* zone 3 - PDP (and PML4E) */
		hi = PM_PLACE_BASE | (PM_PLACE_BASE << 1);
	hi += pindex & (PM_PLACE_BASE - 1);

	return (&pmap->pm_placemarks[hi]);
}
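
/*
 * Sketch of the resulting partitioning (illustrative, assuming
 * PM_PLACEMARKS == 64 so PM_PLACE_BASE == 16):
 *
 *	PTE pindexes hash into slots [ 0,15]	(zone 0)
 *	PT  pindexes hash into slots [16,31]	(zone 1)
 *	PD  pindexes hash into slots [32,47]	(zone 2)
 *	PDP pindexes hash into slots [48,63]	(zone 3)
 *
 * Keeping the zones disjoint preserves the pte->pt->pd->pdp lock order.
 */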

/*
 * Generic procedure to index a pte from a pt, pd, or pdp.
 *
 * NOTE: Normally passed pindex as pmap_xx_index().  pmap_xx_pindex() is NOT
 *	 a page table page index but is instead a PV lookup index.
 */
static
void *
pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex)
{
	pt_entry_t *pte;

	pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m));
	return(&pte[pindex]);
}
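
/*
 * Typical pairing with the pmap_*_index() helpers, as used by
 * pmap_extract() and pmap_fault_page_quick() later in this file:
 *
 *	ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
 */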

/*
 * Return pointer to PDP slot in the PML4
 */
static __inline
pml4_entry_t *
pmap_pdp(pmap_t pmap, vm_offset_t va)
{
	return (&pmap->pm_pml4[pmap_pdp_index(va)]);
}

/*
 * Return pointer to PD slot in the PDP given a pointer to the PDP
 */
static __inline
pdp_entry_t *
pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va)
{
	pdp_entry_t *pd;

	pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME);
	return (&pd[pmap_pd_index(va)]);
}

/*
 * Return pointer to PD slot in the PDP.
 */
static __inline
pdp_entry_t *
pmap_pd(pmap_t pmap, vm_offset_t va)
{
	pml4_entry_t *pdp;

	pdp = pmap_pdp(pmap, va);
	if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0)
		return NULL;
	return (pmap_pdp_to_pd(*pdp, va));
}

/*
 * Return pointer to PT slot in the PD given a pointer to the PD
 */
static __inline
pd_entry_t *
pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va)
{
	pd_entry_t *pt;

	pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME);
	return (&pt[pmap_pt_index(va)]);
}

/*
 * Return pointer to PT slot in the PD
 *
 * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs,
 *		     so we cannot lookup the PD via the PDP.  Instead we
 *		     must look it up via the pmap.
 */
static __inline
pd_entry_t *
pmap_pt(pmap_t pmap, vm_offset_t va)
{
	pdp_entry_t *pd;
	pv_entry_t pv;
	vm_pindex_t pd_pindex;
	vm_paddr_t phys;

	if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
		pd_pindex = pmap_pd_pindex(va);
		spin_lock_shared(&pmap->pm_spin);
		pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
		if (pv == NULL || pv->pv_m == NULL) {
			spin_unlock_shared(&pmap->pm_spin);
			return NULL;
		}
		phys = VM_PAGE_TO_PHYS(pv->pv_m);
		spin_unlock_shared(&pmap->pm_spin);
		return (pmap_pd_to_pt(phys, va));
	} else {
		pd = pmap_pd(pmap, va);
		if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
			return NULL;
		return (pmap_pd_to_pt(*pd, va));
	}
}

/*
 * Return pointer to PTE slot in the PT given a pointer to the PT
 */
static __inline
pt_entry_t *
pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
{
	pt_entry_t *pte;

	pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
	return (&pte[pmap_pte_index(va)]);
}

/*
 * Return pointer to PTE slot in the PT
 */
static __inline
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pt;

	pt = pmap_pt(pmap, va);
	if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
		return NULL;
	if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
		return ((pt_entry_t *)pt);
	return (pmap_pt_to_pte(*pt, va));
}
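
/*
 * Putting the walkers together - an illustrative sketch (kept out of the
 * build with #if 0) of translating a va by hand with the accessors above,
 * ignoring the 2MB-page short-cut that pmap_pte() can return.
 */
#if 0
static vm_paddr_t
example_va_to_pa(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *ptep;

	ptep = pmap_pte(pmap, va);	/* walks PML4 -> PDP -> PD -> PT */
	if (ptep == NULL || (*ptep & pmap->pmap_bits[PG_V_IDX]) == 0)
		return ((vm_paddr_t)-1);
	return ((*ptep & PG_FRAME) | (va & PAGE_MASK));
}
#endif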

/*
 * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
 * the PT layer.  This will speed up core pmap operations considerably.
 *
 * NOTE: The pmap spinlock does not need to be held but the passed-in pv
 *	 must be in a known associated state (typically by being locked when
 *	 the pmap spinlock isn't held).  We allow the race for that case.
 *
 * NOTE: pm_pvhint* is only accessed (read) with the spin-lock held, using
 *	 cpu_ccfence() to prevent compiler optimizations from reloading the
 *	 field.
 */
static __inline
void
pv_cache(pv_entry_t pv, vm_pindex_t pindex)
{
	if (pindex < pmap_pt_pindex(0)) {
		if (pv->pv_pmap)
			pv->pv_pmap->pm_pvhint_pte = pv;
	} else if (pindex < pmap_pd_pindex(0)) {
		if (pv->pv_pmap)
			pv->pv_pmap->pm_pvhint_pt = pv;
	}
}

/*
 * Return address of PT slot in PD (KVM only)
 *
 * Cannot be used for user page tables because it might interfere with
 * the shared page-table-page optimization (pmap_mmu_optimize).
 */
static __inline
pd_entry_t *
vtopt(vm_offset_t va)
{
	uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
				  NPML4EPGSHIFT)) - 1);

	return (PDmap + ((va >> PDRSHIFT) & mask));
}

/*
 * KVM - return address of PTE slot in PT
 */
static __inline
pt_entry_t *
vtopte(vm_offset_t va)
{
	uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
				  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);

	return (PTmap + ((va >> PAGE_SHIFT) & mask));
}
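
/*
 * Worked example (illustrative): vtopte() leans on the recursive PML4
 * slot installed by create_pagetables() below.  Because the PML4 maps
 * itself, the va of a pte is obtained by shifting the target va right
 * one level (PAGE_SHIFT) and keeping 4 x 9 = 36 index bits:
 *
 *	pte_va == PTmap + ((va >> 12) & ((1ul << 36) - 1))
 */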

/*
 * Returns the physical address translation from va for a user address.
 * (vm_paddr_t)-1 is returned on failure.
 */
vm_paddr_t
uservtophys(vm_offset_t va)
{
	uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
				  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
	vm_paddr_t pa;
	pt_entry_t pte;
	pmap_t pmap;

	pmap = vmspace_pmap(mycpu->gd_curthread->td_lwp->lwp_vmspace);
	pa = (vm_paddr_t)-1;
	if (va < VM_MAX_USER_ADDRESS) {
		pte = kreadmem64(PTmap + ((va >> PAGE_SHIFT) & mask));
		if (pte & pmap->pmap_bits[PG_V_IDX])
			pa = (pte & PG_FRAME) | (va & PAGE_MASK);
	}
	return pa;
}

static uint64_t
allocpages(vm_paddr_t *firstaddr, long n)
{
	uint64_t ret;

	ret = *firstaddr;
	bzero((void *)ret, n * PAGE_SIZE);
	*firstaddr += n * PAGE_SIZE;
	return (ret);
}
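
/*
 * Illustrative use of the bump allocator above, mirroring the calls in
 * create_pagetables().  The bzero() of the physical address works only
 * because we are still running (mostly) V=P during early boot:
 *
 *	KPML4phys = allocpages(firstaddr, 1);	(one zeroed PML4 page)
 */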

static
void
create_pagetables(vm_paddr_t *firstaddr)
{
	long i;		/* must be 64 bits */
	long nkpt_base;
	long nkpt_phys;
	long nkpd_phys;
	int j;

	/*
	 * We are running (mostly) V=P at this point
	 *
	 * Calculate how many 1GB PD entries in our PDP pages are needed
	 * for the DMAP.  This is only allocated if the system does not
	 * support 1GB pages.  Otherwise ndmpdp is simply a count of the
	 * number of 1G terminal entries needed in our PDP pages.
	 *
	 * NOTE: Maxmem is in pages
	 */
	ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
	if (ndmpdp < 4)		/* Minimum 4GB of dirmap */
		ndmpdp = 4;
	KKASSERT(ndmpdp <= NDMPML4E * NPML4EPG);

	/*
	 * Starting at KERNBASE - map all 2G worth of page table pages.
	 * KERNBASE is offset -2G from the end of kvm.  This will accommodate
	 * all KVM allocations above KERNBASE, including the SYSMAPs below.
	 *
	 * We do this by allocating 2*512 PT pages.  Each PT page can map
	 * 2MB, for 2GB total.
	 */
	nkpt_base = (NPDPEPG - KPDPI) * NPTEPG;	/* typically 2 x 512 */

	/*
	 * Starting at the beginning of kvm (VM_MIN_KERNEL_ADDRESS),
	 * calculate how many page table pages we need to preallocate
	 * for early vm_map allocations.
	 *
	 * A few extra won't hurt, they will get used up in the running
	 * system.
	 *
	 * vm_page array
	 * initial pventry's
	 */
	nkpt_phys = (Maxmem * sizeof(struct vm_page) + NBPDR - 1) / NBPDR;
	nkpt_phys += (Maxmem * sizeof(struct pv_entry) + NBPDR - 1) / NBPDR;
	nkpt_phys += 128;	/* a few extra */

	/*
	 * The highest value nkpd_phys can be set to is
	 * NKPDPE - (NPDPEPG - KPDPI) (i.e. NKPDPE - 2).
	 *
	 * Doing so would cause all PD pages to be pre-populated for
	 * a maximal KVM space (approximately 16*512 pages, or 32MB).
	 * We can save memory by not doing this.
	 */
	nkpd_phys = (nkpt_phys + NPDPEPG - 1) / NPDPEPG;

	/*
	 * Allocate pages
	 *
	 * Normally NKPML4E=1-16 (1-16 kernel PDP pages)
	 * Normally NKPDPE= NKPML4E*512-1 (511 min kernel PD pages)
	 *
	 * Only allocate enough PD pages
	 * NOTE: We allocate all kernel PD pages up-front, typically
	 *	 ~511G of KVM, requiring 511 PD pages.
	 */
	KPTbase = allocpages(firstaddr, nkpt_base);	/* KERNBASE to end */
	KPTphys = allocpages(firstaddr, nkpt_phys);	/* KVA start */
	KPML4phys = allocpages(firstaddr, 1);		/* recursive PML4 map */
	KPDPphys = allocpages(firstaddr, NKPML4E);	/* kernel PDP pages */
	KPDphys = allocpages(firstaddr, nkpd_phys);	/* kernel PD pages */

	/*
	 * Alloc PD pages for the area starting at KERNBASE.
	 */
	KPDbase = allocpages(firstaddr, NPDPEPG - KPDPI);

	/*
	 * Stuff for our DMAP
	 */
	DMPDPphys = allocpages(firstaddr, NDMPML4E);
	if ((amd_feature & AMDID_PAGE1GB) == 0)
		DMPDphys = allocpages(firstaddr, ndmpdp);
	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;

	/*
	 * Fill in the underlying page table pages for the area around
	 * KERNBASE.  This remaps low physical memory to KERNBASE.
	 *
	 * Read-only from zero to physfree
	 * XXX not fully used, underneath 2M pages
	 */
	for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
		((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT;
		((pt_entry_t *)KPTbase)[i] |=
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX] |
		    pmap_bits_default[PG_G_IDX];
	}

	/*
	 * Now map the initial kernel page tables.  One block of page
	 * tables is placed at the beginning of kernel virtual memory,
	 * and another block is placed at KERNBASE to map the kernel binary,
	 * data, bss, and initial pre-allocations.
	 */
	for (i = 0; i < nkpt_base; i++) {
		((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT);
		((pd_entry_t *)KPDbase)[i] |=
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX];
	}
	for (i = 0; i < nkpt_phys; i++) {
		((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
		((pd_entry_t *)KPDphys)[i] |=
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX];
	}

	/*
	 * Map from zero to end of allocations using 2M pages as an
	 * optimization.  This will bypass some of the KPTBase pages
	 * above in the KERNBASE area.
	 */
	for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
		((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT;
		((pd_entry_t *)KPDbase)[i] |=
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX] |
		    pmap_bits_default[PG_PS_IDX] |
		    pmap_bits_default[PG_G_IDX];
	}

	/*
	 * Load PD addresses into the PDP pages for primary KVA space to
	 * cover existing page tables.  PD's for KERNBASE are handled in
	 * the next loop.
	 *
	 * The kernel PDP is expected to pre-populate all of its PDs.
	 * See NKPDPE in vmparam.h.
	 */
	for (i = 0; i < nkpd_phys; i++) {
		((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] =
				KPDphys + (i << PAGE_SHIFT);
		((pdp_entry_t *)KPDPphys)[NKPML4E * NPDPEPG - NKPDPE + i] |=
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX] |
		    pmap_bits_default[PG_U_IDX];
	}

	/*
	 * Load PDs for KERNBASE to the end
	 */
	i = (NKPML4E - 1) * NPDPEPG + KPDPI;
	for (j = 0; j < NPDPEPG - KPDPI; ++j) {
		((pdp_entry_t *)KPDPphys)[i + j] =
				KPDbase + (j << PAGE_SHIFT);
		((pdp_entry_t *)KPDPphys)[i + j] |=
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX] |
		    pmap_bits_default[PG_U_IDX];
	}

	/*
	 * Now set up the direct map space using either 2MB or 1GB pages.
	 * Preset PG_M and PG_A because demotion expects it.
	 *
	 * When filling in entries in the PD pages make sure any excess
	 * entries are set to zero as we allocated enough PD pages.
	 */
	if ((amd_feature & AMDID_PAGE1GB) == 0) {
		for (i = 0; i < NPDEPG * ndmpdp; i++) {
			((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT;
			((pd_entry_t *)DMPDphys)[i] |=
			    pmap_bits_default[PG_RW_IDX] |
			    pmap_bits_default[PG_V_IDX] |
			    pmap_bits_default[PG_PS_IDX] |
			    pmap_bits_default[PG_G_IDX] |
			    pmap_bits_default[PG_M_IDX] |
			    pmap_bits_default[PG_A_IDX];
		}

		/*
		 * And the direct map space's PDP
		 */
		for (i = 0; i < ndmpdp; i++) {
			((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
							(i << PAGE_SHIFT);
			((pdp_entry_t *)DMPDPphys)[i] |=
			    pmap_bits_default[PG_RW_IDX] |
			    pmap_bits_default[PG_V_IDX] |
			    pmap_bits_default[PG_U_IDX];
		}
	} else {
		for (i = 0; i < ndmpdp; i++) {
			((pdp_entry_t *)DMPDPphys)[i] =
						(vm_paddr_t)i << PDPSHIFT;
			((pdp_entry_t *)DMPDPphys)[i] |=
			    pmap_bits_default[PG_RW_IDX] |
			    pmap_bits_default[PG_V_IDX] |
			    pmap_bits_default[PG_PS_IDX] |
			    pmap_bits_default[PG_G_IDX] |
			    pmap_bits_default[PG_M_IDX] |
			    pmap_bits_default[PG_A_IDX];
		}
	}

	/* And recursively map PML4 to itself in order to get PTmap */
	((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
	((pdp_entry_t *)KPML4phys)[PML4PML4I] |=
	    pmap_bits_default[PG_RW_IDX] |
	    pmap_bits_default[PG_V_IDX] |
	    pmap_bits_default[PG_U_IDX];

	/*
	 * Connect the Direct Map slots up to the PML4
	 */
	for (j = 0; j < NDMPML4E; ++j) {
		((pdp_entry_t *)KPML4phys)[DMPML4I + j] =
		    (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX] |
		    pmap_bits_default[PG_U_IDX];
	}

	/*
	 * Connect the KVA slot up to the PML4
	 */
	for (j = 0; j < NKPML4E; ++j) {
		((pdp_entry_t *)KPML4phys)[KPML4I + j] =
		    KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT);
		((pdp_entry_t *)KPML4phys)[KPML4I + j] |=
		    pmap_bits_default[PG_RW_IDX] |
		    pmap_bits_default[PG_V_IDX] |
		    pmap_bits_default[PG_U_IDX];
	}
	cpu_mfence();
	cpu_invltlb();
}
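
/*
 * Resulting PML4 layout (illustrative; actual slot numbers come from
 * PML4PML4I/DMPML4I/KPML4I in the headers):
 *
 *	PML4PML4I		recursive self-map, enables PTmap/PDmap
 *	DMPML4I..+NDMPML4E-1	direct map (DMAP) PDP pages
 *	KPML4I..+NKPML4E-1	kernel KVA PDP pages
 *
 * All remaining slots are left zero.
 */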

/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On x86_64 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t *firstaddr)
{
	vm_offset_t va;
	pt_entry_t *pte;
	int i;

	KvaStart = VM_MIN_KERNEL_ADDRESS;
	KvaEnd = VM_MAX_KERNEL_ADDRESS;
	KvaSize = KvaEnd - KvaStart;

	avail_start = *firstaddr;

	/*
	 * Create an initial set of page tables to run the kernel in.
	 */
	create_pagetables(firstaddr);

	virtual2_start = KvaStart;
	virtual2_end = PTOV_OFFSET;

	virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
	virtual_start = pmap_kmem_choose(virtual_start);

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* XXX do %cr0 as well */
	load_cr4(rcr4() | CR4_PGE | CR4_PSE);
	load_cr3(KPML4phys);

	/*
	 * Initialize protection array.
	 */
	x86_64_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
	kernel_pmap.pm_count = 1;
	CPUMASK_ASSALLONES(kernel_pmap.pm_active);
	RB_INIT(&kernel_pmap.pm_pvroot);
	spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
	for (i = 0; i < PM_PLACEMARKS; ++i)
		kernel_pmap.pm_placemarks[i] = PM_NOPLACEMARK;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
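
	/*
	 * Illustrative expansion of the macro above:
	 * SYSMAP(caddr_t, CMAP1, CADDR1, 1) becomes
	 *
	 *	CADDR1 = (caddr_t)va; va += ((1)*PAGE_SIZE);
	 *	CMAP1 = pte; pte += (1);
	 *
	 * i.e. it carves out VA space and records the pte slot to plug
	 * a physical page into later.
	 */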

	va = virtual_start;
	pte = vtopte(va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);

	/*
	 * ptvmmap is used for reading arbitrary physical pages via
	 * /dev/mem.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 * XXX msgbufmap is not used.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
	       atop(round_page(MSGBUF_SIZE)))

	virtual_start = va;
	virtual_start = pmap_kmem_choose(virtual_start);

	*CMAP1 = 0;

	/*
	 * PG_G is terribly broken on SMP because we IPI invltlb's in some
	 * cases rather than invlpg.  Actually, I don't even know why it
	 * works under UP with self-referential page table mappings.
	 */
//	pgeflag = 0;

	cpu_invltlb();

	/* Initialize the PAT MSR */
	pmap_init_pat();
	pmap_pinit_defaults(&kernel_pmap);

	TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync",
			  &pmap_fast_kernel_cpusync);
}

/*
 * Setup the PAT MSR.
 */
void
pmap_init_pat(void)
{
	uint64_t pat_msr;
	u_long cr0, cr4;

	/*
	 * Default values mapping PATi,PCD,PWT bits at system reset.
	 * The default values effectively ignore the PATi bit by
	 * repeating the encodings for 0-3 in 4-7, and map the PCD
	 * and PWT bit combinations to the expected PAT types.
	 */
	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |	/* 000 */
		  PAT_VALUE(1, PAT_WRITE_THROUGH) |	/* 001 */
		  PAT_VALUE(2, PAT_UNCACHED) |		/* 010 */
		  PAT_VALUE(3, PAT_UNCACHEABLE) |	/* 011 */
		  PAT_VALUE(4, PAT_WRITE_BACK) |	/* 100 */
		  PAT_VALUE(5, PAT_WRITE_THROUGH) |	/* 101 */
		  PAT_VALUE(6, PAT_UNCACHED) |		/* 110 */
		  PAT_VALUE(7, PAT_UNCACHEABLE);	/* 111 */
	pat_pte_index[PAT_WRITE_BACK]	= 0;
	pat_pte_index[PAT_WRITE_THROUGH]= 0	    | X86_PG_NC_PWT;
	pat_pte_index[PAT_UNCACHED]	= X86_PG_NC_PCD;
	pat_pte_index[PAT_UNCACHEABLE]	= X86_PG_NC_PCD | X86_PG_NC_PWT;
	pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE];
	pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE];

	if (cpu_feature & CPUID_PAT) {
		/*
		 * If we support the PAT then set-up entries for
		 * WRITE_PROTECTED and WRITE_COMBINING using bit patterns
		 * 5 and 6.
		 */
		pat_msr = (pat_msr & ~PAT_MASK(5)) |
			  PAT_VALUE(5, PAT_WRITE_PROTECTED);
		pat_msr = (pat_msr & ~PAT_MASK(6)) |
			  PAT_VALUE(6, PAT_WRITE_COMBINING);
		pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | X86_PG_NC_PWT;
		pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PCD;

		/*
		 * Then enable the PAT
		 */

		/* Disable PGE. */
		cr4 = rcr4();
		load_cr4(cr4 & ~CR4_PGE);

		/* Disable caches (CD = 1, NW = 0). */
		cr0 = rcr0();
		load_cr0((cr0 & ~CR0_NW) | CR0_CD);

		/* Flushes caches and TLBs. */
		wbinvd();
		cpu_invltlb();

		/* Update PAT and index table. */
		wrmsr(MSR_PAT, pat_msr);

		/* Flush caches and TLBs again. */
		wbinvd();
		cpu_invltlb();

		/* Restore caches and PGE. */
		load_cr0(cr0);
		load_cr4(cr4);
		PatMsr = pat_msr;
	}
}
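
/*
 * Worked example (illustrative): with the PAT programmed as above, a
 * write-combining pte on PAT-capable hardware is constructed as
 *
 *	npte = pa | pat_pte_index[PAT_WRITE_COMBINING] | ...;
 *
 * which sets X86_PG_PTE_PAT|X86_PG_NC_PCD, i.e. PAT/PCD/PWT = 110,
 * selecting PAT entry 6 (programmed to WRITE_COMBINING above).
 */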

/*
 * Set 4mb pdir for mp startup
 */
void
pmap_set_opt(void)
{
	if (cpu_feature & CPUID_PSE) {
		load_cr4(rcr4() | CR4_PSE);
		if (mycpu->gd_cpuid == 0)	/* only on BSP */
			cpu_invltlb();
	}
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support discontiguous physical
 *	memory in a fairly consistent way.
 */
void
pmap_init(void)
{
	vm_pindex_t initial_pvs;
	vm_pindex_t i;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */

	for (i = 0; i < vm_page_array_size; i++) {
		vm_page_t m;

		m = &vm_page_array[i];
		TAILQ_INIT(&m->md.pv_list);
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = &pvzone_store;
	pvinit = (void *)kmem_alloc(&kernel_map,
				    initial_pvs * sizeof (struct pv_entry),
				    VM_SUBSYS_PVENTRY);
	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
		  pvinit, initial_pvs);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2(void)
{
	vm_pindex_t shpgperproc = PMAP_SHPGPERPROC;
	vm_pindex_t entry_max;

	TUNABLE_LONG_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	TUNABLE_LONG_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * Subtract out pages already installed in the zone (hack)
	 */
	entry_max = pv_entry_max - vm_page_array_size;
	if (entry_max <= 0)
		entry_max = 1;

	zinitna(pvzone, NULL, 0, entry_max, ZONE_INTERRUPT);

	/*
	 * Enable dynamic deletion of empty higher-level page table pages
	 * by default only if system memory is < 8GB (use 7GB for slop).
	 * This can save a little memory, but imposes significant
	 * performance overhead for things like bulk builds, and for programs
	 * which do a lot of memory mapping and memory unmapping.
	 */
	if (pmap_dynamic_delete < 0) {
		if (vmstats.v_page_count < 7LL * 1024 * 1024 * 1024 / PAGE_SIZE)
			pmap_dynamic_delete = 1;
		else
			pmap_dynamic_delete = 0;
	}
}

/*
 * Typically used to initialize a fictitious page by vm/device_pager.c
 */
void
pmap_page_init(struct vm_page *m)
{
	vm_page_init(m);
	TAILQ_INIT(&m->md.pv_list);
}

/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static __inline
int
pmap_track_modified(vm_pindex_t pindex)
{
	vm_offset_t va = (vm_offset_t)pindex << PAGE_SHIFT;
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}

/*
 * Extract the physical page address associated with the map/VA pair.
 * The page must be wired for this to work reliably.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va, void **handlep)
{
	vm_paddr_t rtval;
	pv_entry_t pt_pv;
	pt_entry_t *ptep;

	rtval = 0;
	if (va >= VM_MAX_USER_ADDRESS) {
		/*
		 * Kernel page directories might be direct-mapped and
		 * there is typically no PV tracking of pte's
		 */
		pd_entry_t *pt;

		pt = pmap_pt(pmap, va);
		if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) {
			if (*pt & pmap->pmap_bits[PG_PS_IDX]) {
				rtval = *pt & PG_PS_FRAME;
				rtval |= va & PDRMASK;
			} else {
				ptep = pmap_pt_to_pte(*pt, va);
				if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
					rtval = *ptep & PG_FRAME;
					rtval |= va & PAGE_MASK;
				}
			}
		}
		if (handlep)
			*handlep = NULL;
	} else {
		/*
		 * User pages currently do not direct-map the page directory
		 * and some pages might not use managed PVs.  But all PT's
		 * will have a PV.
		 */
		pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
		if (pt_pv) {
			ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
			if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
				rtval = *ptep & PG_FRAME;
				rtval |= va & PAGE_MASK;
			}
			if (handlep)
				*handlep = pt_pv;	/* locked until done */
			else
				pv_put(pt_pv);
		} else if (handlep) {
			*handlep = NULL;
		}
	}
	return rtval;
}

void
pmap_extract_done(void *handle)
{
	if (handle)
		pv_put((pv_entry_t)handle);
}
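
/*
 * Illustrative usage of the handle variant (sketch, kept out of the
 * build with #if 0): the returned handle keeps the page table pv locked
 * so the translation cannot be ripped out until pmap_extract_done().
 */
#if 0
static void
example_extract(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;
	void *handle;

	pa = pmap_extract(pmap, va, &handle);
	if (pa) {
		/* safe to use pa here, the mapping is interlocked */
	}
	pmap_extract_done(handle);	/* NULL handle is a no-op */
}
#endif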

/*
 * Similar to extract but checks protections, SMP-friendly short-cut for
 * vm_fault_page[_quick]().  Can return NULL to cause the caller to
 * fall-through to the real fault code.  Does not work with HVM page
 * tables.
 *
 * If busyp is NULL the returned page, if not NULL, is held (and not busied).
 *
 * If busyp is not NULL and this function sets *busyp non-zero, the returned
 * page is busied (and not held).
 *
 * If busyp is not NULL and this function sets *busyp to zero, the returned
 * page is held (and not busied).
 *
 * If VM_PROT_WRITE is set in prot, and the pte is already writable, the
 * returned page will be dirtied.  If the pte is not already writable NULL
 * is returned.  In other words, if the bit is set and a vm_page_t is
 * returned, any COW will already have happened and that page can be
 * written by the caller.
 *
 * WARNING! THE RETURNED PAGE IS ONLY HELD AND NOT SUITABLE FOR READING
 *	    OR WRITING AS-IS.
 */
1441 vm_page_t
1442 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot, int *busyp)
1443 {
1444         if (pmap &&
1445             va < VM_MAX_USER_ADDRESS &&
1446             (pmap->pm_flags & PMAP_HVM) == 0) {
1447                 pv_entry_t pt_pv;
1448                 pv_entry_t pte_pv;
1449                 pt_entry_t *ptep;
1450                 pt_entry_t req;
1451                 vm_page_t m;
1452                 int error;
1453
1454                 req = pmap->pmap_bits[PG_V_IDX] |
1455                       pmap->pmap_bits[PG_U_IDX];
1456                 if (prot & VM_PROT_WRITE)
1457                         req |= pmap->pmap_bits[PG_RW_IDX];
1458
1459                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
1460                 if (pt_pv == NULL)
1461                         return (NULL);
1462                 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1463                 if ((*ptep & req) != req) {
1464                         pv_put(pt_pv);
1465                         return (NULL);
1466                 }
1467                 pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), NULL, &error);
1468                 if (pte_pv && error == 0) {
1469                         m = pte_pv->pv_m;
1470                         if (prot & VM_PROT_WRITE) {
1471                                 /* interlocked by presence of pv_entry */
1472                                 vm_page_dirty(m);
1473                         }
1474                         if (busyp) {
1475                                 if (prot & VM_PROT_WRITE) {
1476                                         if (vm_page_busy_try(m, TRUE))
1477                                                 m = NULL;
1478                                         *busyp = 1;
1479                                 } else {
1480                                         vm_page_hold(m);
1481                                         *busyp = 0;
1482                                 }
1483                         } else {
1484                                 vm_page_hold(m);
1485                         }
1486                         pv_put(pte_pv);
1487                 } else if (pte_pv) {
1488                         pv_drop(pte_pv);
1489                         m = NULL;
1490                 } else {
1491                         /* error, since we didn't request a placemarker */
1492                         m = NULL;
1493                 }
1494                 pv_put(pt_pv);
1495                 return(m);
1496         } else {
1497                 return(NULL);
1498         }
1499 }
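
/*
 * Example (illustrative sketch, not compiled): disposing of a page
 * returned by pmap_fault_page_quick() per the *busyp contract described
 * above.  A busied page is released with vm_page_wakeup(), a held page
 * with vm_page_unhold().
 */
#if 0
	vm_page_t m;
	int busy;

	m = pmap_fault_page_quick(pmap, va, VM_PROT_READ, &busy);
	if (m) {
		/* NOTE: only held/busied, not suitable for reading as-is */
		if (busy)
			vm_page_wakeup(m);
		else
			vm_page_unhold(m);
	} else {
		/* fall through to the real fault code */
	}
#endif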
1500
1501 /*
1502  * Extract the physical page address associated with the given kernel
1503  * virtual address.
1503  */
1504 vm_paddr_t
1505 pmap_kextract(vm_offset_t va)
1506 {
1507         pd_entry_t pt;          /* pt entry in pd */
1508         vm_paddr_t pa;
1509
1510         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1511                 pa = DMAP_TO_PHYS(va);
1512         } else {
1513                 pt = *vtopt(va);
1514                 if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) {
1515                         pa = (pt & PG_PS_FRAME) | (va & PDRMASK);
1516                 } else {
1517                         /*
1518                          * Beware of a concurrent promotion that changes the
1519                          * PDE at this point!  For example, vtopte() must not
1520                          * be used to access the PTE because it would use the
1521                          * new PDE.  It is, however, safe to use the old PDE
1522                          * because the page table page is preserved by the
1523                          * promotion.
1524                          */
1525                         pa = *pmap_pt_to_pte(pt, va);
1526                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1527                 }
1528         }
1529         return pa;
1530 }
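
/*
 * Example (sketch, not compiled): the DMAP branch above makes
 * pmap_kextract() the inverse of the direct map, so the round-trip below
 * holds for any physical address covered by the DMAP.
 */
#if 0
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)msgbufp);	/* any wired kva */
	KKASSERT(pmap_kextract(PHYS_TO_DMAP(pa)) == pa);
#endif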
1531
1532 /***************************************************
1533  * Low level mapping routines.....
1534  ***************************************************/
1535
1536 /*
1537  * Routine: pmap_kenter
1538  * Function:
1539  *      Add a wired page to the KVA
1540  *      NOTE! The mapping is invalidated on all cpus via pmap_inval_smp(),
1541  *      so no separate invltlb is required (contrast pmap_kenter_quick()).
1542  */
1543 void 
1544 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1545 {
1546         pt_entry_t *ptep;
1547         pt_entry_t npte;
1548
1549         npte = pa |
1550                kernel_pmap.pmap_bits[PG_RW_IDX] |
1551                kernel_pmap.pmap_bits[PG_V_IDX];
1552 //             pgeflag;
1553         ptep = vtopte(va);
1554 #if 1
1555         pmap_inval_smp(&kernel_pmap, va, 1, ptep, npte);
1556 #else
1557         /* FUTURE */
1558         if (*ptep)
1559                 pmap_inval_smp(&kernel_pmap, va, ptep, npte);
1560         else
1561                 *ptep = npte;
1562 #endif
1563 }
1564
1565 /*
1566  * Similar to pmap_kenter(), except we only invalidate the mapping on the
1567  * current CPU.  Returns 0 if the previous pte was 0, 1 if it wasn't,
1568  * so the caller can conditionalize calling smp_invltlb().  The current
1569  * implementation always returns 1 (see the disabled FUTURE path below).
1569  */
1570 int
1571 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1572 {
1573         pt_entry_t *ptep;
1574         pt_entry_t npte;
1575         int res;
1576
1577         npte = pa | kernel_pmap.pmap_bits[PG_RW_IDX] |
1578                     kernel_pmap.pmap_bits[PG_V_IDX];
1579         // npte |= pgeflag;
1580         ptep = vtopte(va);
1581 #if 1
1582         res = 1;
1583 #else
1584         /* FUTURE */
1585         res = (*ptep != 0);
1586 #endif
1587         atomic_swap_long(ptep, npte);
1588         cpu_invlpg((void *)va);
1589
1590         return res;
1591 }
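
/*
 * Example (sketch, not compiled): batching pmap_kenter_quick() calls and
 * deferring the system-wide shootdown to a single smp_invltlb() at the
 * end, as the comment above suggests.  (va, pa, npages) are hypothetical.
 */
#if 0
	int doinval = 0;
	int i;

	for (i = 0; i < npages; ++i) {
		doinval |= pmap_kenter_quick(va + i * PAGE_SIZE,
					     pa + i * PAGE_SIZE);
	}
	if (doinval)
		smp_invltlb();
#endif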
1592
1593 /*
1594  * Enter addresses into the kernel pmap but don't bother
1595  * doing any tlb invalidations.  Caller will do a rollup
1596  * invalidation via pmap_rollup_inval().
1597  */
1598 int
1599 pmap_kenter_noinval(vm_offset_t va, vm_paddr_t pa)
1600 {
1601         pt_entry_t *ptep;
1602         pt_entry_t npte;
1603         int res;
1604
1605         npte = pa |
1606             kernel_pmap.pmap_bits[PG_RW_IDX] |
1607             kernel_pmap.pmap_bits[PG_V_IDX];
1608 //          pgeflag;
1609         ptep = vtopte(va);
1610 #if 1
1611         res = 1;
1612 #else
1613         /* FUTURE */
1614         res = (*ptep != 0);
1615 #endif
1616         atomic_swap_long(ptep, npte);
1618
1619         return res;
1620 }
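
/*
 * Example (sketch, not compiled): entering a run of pages with no
 * per-page shootdown and rolling the invalidation up into one ranged call
 * afterwards.  pmap_invalidate_range() (defined below) stands in for the
 * rollup here; (beg_va, end_va, pa) are hypothetical.
 */
#if 0
	vm_offset_t scan;

	for (scan = beg_va; scan < end_va; scan += PAGE_SIZE) {
		pmap_kenter_noinval(scan, pa);
		pa += PAGE_SIZE;
	}
	pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
#endif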
1621
1622 /*
1623  * remove a page from the kernel pagetables
1624  */
1625 void
1626 pmap_kremove(vm_offset_t va)
1627 {
1628         pt_entry_t *ptep;
1629
1630         ptep = vtopte(va);
1631         pmap_inval_smp(&kernel_pmap, va, 1, ptep, 0);
1632 }
1633
1634 void
1635 pmap_kremove_quick(vm_offset_t va)
1636 {
1637         pt_entry_t *ptep;
1638
1639         ptep = vtopte(va);
1640         (void)pte_load_clear(ptep);
1641         cpu_invlpg((void *)va);
1642 }
1643
1644 /*
1645  * Remove addresses from the kernel pmap but don't bother
1646  * doing any tlb invalidations.  Caller will do a rollup
1647  * invalidation via pmap_rollup_inval().
1648  */
1649 void
1650 pmap_kremove_noinval(vm_offset_t va)
1651 {
1652         pt_entry_t *ptep;
1653
1654         ptep = vtopte(va);
1655         (void)pte_load_clear(ptep);
1656 }
1657
1658 /*
1659  * XXX these need to be recoded.  They are not used in any critical path.
1660  */
1661 void
1662 pmap_kmodify_rw(vm_offset_t va)
1663 {
1664         atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]);
1665         cpu_invlpg((void *)va);
1666 }
1667
1668 /* NOT USED
1669 void
1670 pmap_kmodify_nc(vm_offset_t va)
1671 {
1672         atomic_set_long(vtopte(va), PG_N);
1673         cpu_invlpg((void *)va);
1674 }
1675 */
1676
1677 /*
1678  * Used to map a range of physical addresses into kernel virtual
1679  * address space during the low level boot, typically to map the
1680  * dump bitmap, message buffer, and vm_page_array.
1681  *
1682  * These mappings are typically made at some point after the end of the
1683  * kernel text+data.
1684  *
1685  * We could return PHYS_TO_DMAP(start) here and not allocate any KVA
1686  * via (*virtp), but then kmem from userland and kernel dumps wouldn't
1687  * have access to the related pointers.
1688  */
1689 vm_offset_t
1690 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
1691 {
1692         vm_offset_t va;
1693         vm_offset_t va_start;
1694
1695         /*return PHYS_TO_DMAP(start);*/
1696
1697         va_start = *virtp;
1698         va = va_start;
1699
1700         while (start < end) {
1701                 pmap_kenter_quick(va, start);
1702                 va += PAGE_SIZE;
1703                 start += PAGE_SIZE;
1704         }
1705         *virtp = va;
1706         return va_start;
1707 }
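
/*
 * Example (sketch, not compiled): a typical boot-time consumer threading
 * the early KVA cursor through pmap_map().  'virtual_start' is the usual
 * cursor; 'msgbuf_paddr' is a hypothetical physical address.
 */
#if 0
	vm_offset_t kva;

	kva = pmap_map(&virtual_start, msgbuf_paddr,
		       msgbuf_paddr + MSGBUF_SIZE,
		       VM_PROT_READ | VM_PROT_WRITE);
	msgbufp = (struct msgbuf *)kva;
#endif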
1708
1709 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
1710
1711 /*
1712  * Remove the specified set of pages from the data and instruction caches.
1713  *
1714  * In contrast to pmap_invalidate_cache_range(), this function does not
1715  * rely on the CPU's self-snoop feature, because it is intended for use
1716  * when moving pages into a different cache domain.
1717  */
1718 void
1719 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1720 {
1721         vm_offset_t daddr, eva;
1722         int i;
1723
1724         if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1725             (cpu_feature & CPUID_CLFSH) == 0)
1726                 wbinvd();
1727         else {
1728                 cpu_mfence();
1729                 for (i = 0; i < count; i++) {
1730                         daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1731                         eva = daddr + PAGE_SIZE;
1732                         for (; daddr < eva; daddr += cpu_clflush_line_size)
1733                                 clflush(daddr);
1734                 }
1735                 cpu_mfence();
1736         }
1737 }
1738
1739 void
1740 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
1741 {
1742         KASSERT((sva & PAGE_MASK) == 0,
1743             ("pmap_invalidate_cache_range: sva not page-aligned"));
1744         KASSERT((eva & PAGE_MASK) == 0,
1745             ("pmap_invalidate_cache_range: eva not page-aligned"));
1746
1747         if (cpu_feature & CPUID_SS) {
1748                 ; /* If "Self Snoop" is supported, do nothing. */
1749         } else {
1750                 /* Globally invalidate caches */
1751                 cpu_wbinvd_on_all_cpus();
1752         }
1753 }
1754
1755 /*
1756  * Invalidate the specified range of virtual memory on all cpus associated
1757  * with the pmap.
1758  */
1759 void
1760 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1761 {
1762         pmap_inval_smp(pmap, sva, (eva - sva) >> PAGE_SHIFT, NULL, 0);
1763 }
1764
1765 /*
1766  * Add a list of wired pages to the kva.  This routine is used for temporary
1767  * kernel mappings such as those found in buffer cache buffers.  Page
1768  * modifications and accesses are not tracked or recorded.
1769  *
1770  * NOTE! Old mappings are simply overwritten, and we cannot assume relaxed
1771  *       semantics as previous mappings may have been zeroed without any
1772  *       invalidation.
1773  *
1774  * The page *must* be wired.
1775  */
1776 static __inline void
1777 _pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count, int doinval)
1778 {
1779         vm_offset_t end_va;
1780         vm_offset_t va;
1781
1782         end_va = beg_va + count * PAGE_SIZE;
1783
1784         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1785                 pt_entry_t pte;
1786                 pt_entry_t *ptep;
1787
1788                 ptep = vtopte(va);
1789                 pte = VM_PAGE_TO_PHYS(*m) |
1790                         kernel_pmap.pmap_bits[PG_RW_IDX] |
1791                         kernel_pmap.pmap_bits[PG_V_IDX] |
1792                         kernel_pmap.pmap_cache_bits[(*m)->pat_mode];
1793 //              pgeflag;
1794                 atomic_swap_long(ptep, pte);
1795                 m++;
1796         }
1797         if (doinval)
1798                 pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
1799 }
1800
1801 void
1802 pmap_qenter(vm_offset_t beg_va, vm_page_t *m, int count)
1803 {
1804         _pmap_qenter(beg_va, m, count, 1);
1805 }
1806
1807 void
1808 pmap_qenter_noinval(vm_offset_t beg_va, vm_page_t *m, int count)
1809 {
1810         _pmap_qenter(beg_va, m, count, 0);
1811 }
1812
1813 /*
1814  * This routine jerks page mappings from the kernel -- it is meant only
1815  * for temporary mappings such as those found in buffer cache buffers.
1816  * No recording of modified or accessed status occurs.
1817  *
1818  * MPSAFE, INTERRUPT SAFE (cluster callback)
1819  */
1820 void
1821 pmap_qremove(vm_offset_t beg_va, int count)
1822 {
1823         vm_offset_t end_va;
1824         vm_offset_t va;
1825
1826         end_va = beg_va + count * PAGE_SIZE;
1827
1828         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1829                 pt_entry_t *pte;
1830
1831                 pte = vtopte(va);
1832                 (void)pte_load_clear(pte);
1833                 cpu_invlpg((void *)va);
1834         }
1835         pmap_invalidate_range(&kernel_pmap, beg_va, end_va);
1836 }
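
/*
 * Example (sketch, not compiled): the buffer-cache style usage the
 * qenter/qremove family is designed for -- map a gathered page list into
 * a reserved KVA window, operate on it, then jerk the mappings back out.
 * 'buffer_kva' is a hypothetical pre-reserved window.
 */
#if 0
	vm_page_t mlist[16];
	int count = 16;

	/* ... gather wired pages into mlist[] ... */
	pmap_qenter(buffer_kva, mlist, count);
	/* ... access the pages through buffer_kva ... */
	pmap_qremove(buffer_kva, count);
#endif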
1837
1838 /*
1839  * This routine removes temporary kernel mappings, only invalidating them
1840  * on the current cpu.  It should only be used under carefully controlled
1841  * conditions.
1842  */
1843 void
1844 pmap_qremove_quick(vm_offset_t beg_va, int count)
1845 {
1846         vm_offset_t end_va;
1847         vm_offset_t va;
1848
1849         end_va = beg_va + count * PAGE_SIZE;
1850
1851         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1852                 pt_entry_t *pte;
1853
1854                 pte = vtopte(va);
1855                 (void)pte_load_clear(pte);
1856                 cpu_invlpg((void *)va);
1857         }
1858 }
1859
1860 /*
1861  * This routine removes temporary kernel mappings *without* invalidating
1862  * the TLB.  It can only be used on permanent kva reservations such as those
1863  * found in buffer cache buffers, under carefully controlled circumstances.
1864  *
1865  * NOTE: Repopulating these KVAs requires unconditional invalidation.
1866  *       (pmap_qenter() does unconditional invalidation).
1867  */
1868 void
1869 pmap_qremove_noinval(vm_offset_t beg_va, int count)
1870 {
1871         vm_offset_t end_va;
1872         vm_offset_t va;
1873
1874         end_va = beg_va + count * PAGE_SIZE;
1875
1876         for (va = beg_va; va < end_va; va += PAGE_SIZE) {
1877                 pt_entry_t *pte;
1878
1879                 pte = vtopte(va);
1880                 (void)pte_load_clear(pte);
1881         }
1882 }
1883
1884 /*
1885  * Create a new thread and optionally associate it with a (new) process.
1886  * NOTE! the new thread's cpu may not equal the current cpu.
1887  */
1888 void
1889 pmap_init_thread(thread_t td)
1890 {
1891         /* enforce pcb placement & alignment */
1892         td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
1893         td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
1894         td->td_savefpu = &td->td_pcb->pcb_save;
1895         td->td_sp = (char *)td->td_pcb; /* no -16 */
1896 }
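
/*
 * Layout sketch (not compiled): the computation above places the pcb at
 * the top of the kernel stack and aligns it down to 16 bytes, with the
 * initial stack pointer starting right at the pcb and growing down.
 */
#if 0
	char *top = td->td_kstack + td->td_kstack_size;
	struct pcb *pcb;

	pcb = (struct pcb *)(((intptr_t)top - sizeof(*pcb)) & ~(intptr_t)0xF);
	/* td->td_sp == (char *)pcb */
#endif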
1897
1898 /*
1899  * This routine directly affects the fork perf for a process.
1900  */
1901 void
1902 pmap_init_proc(struct proc *p)
1903 {
1904 }
1905
1906 static void
1907 pmap_pinit_defaults(struct pmap *pmap)
1908 {
1909         bcopy(pmap_bits_default, pmap->pmap_bits,
1910               sizeof(pmap_bits_default));
1911         bcopy(protection_codes, pmap->protection_codes,
1912               sizeof(protection_codes));
1913         bcopy(pat_pte_index, pmap->pmap_cache_bits,
1914               sizeof(pat_pte_index));
1915         pmap->pmap_cache_mask = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
1916         pmap->copyinstr = std_copyinstr;
1917         pmap->copyin = std_copyin;
1918         pmap->copyout = std_copyout;
1919         pmap->fubyte = std_fubyte;
1920         pmap->subyte = std_subyte;
1921         pmap->fuword32 = std_fuword32;
1922         pmap->fuword64 = std_fuword64;
1923         pmap->suword32 = std_suword32;
1924         pmap->suword64 = std_suword64;
1925         pmap->swapu32 = std_swapu32;
1926         pmap->swapu64 = std_swapu64;
1927 }
1928 /*
1929  * Initialize pmap0/vmspace0.
1930  *
1931  * On architectures where the kernel pmap is not integrated into the user
1932  * process pmap, this pmap represents the process pmap, not the kernel pmap.
1933  * The kernel_pmap variable should be used to access the kernel pmap directly.
1934  */
1935 void
1936 pmap_pinit0(struct pmap *pmap)
1937 {
1938         int i;
1939
1940         pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
1941         pmap->pm_count = 1;
1942         CPUMASK_ASSZERO(pmap->pm_active);
1943         pmap->pm_pvhint_pt = NULL;
1944         pmap->pm_pvhint_pte = NULL;
1945         RB_INIT(&pmap->pm_pvroot);
1946         spin_init(&pmap->pm_spin, "pmapinit0");
1947         for (i = 0; i < PM_PLACEMARKS; ++i)
1948                 pmap->pm_placemarks[i] = PM_NOPLACEMARK;
1949         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1950         pmap_pinit_defaults(pmap);
1951 }
1952
1953 /*
1954  * Initialize a preallocated and zeroed pmap structure,
1955  * such as one in a vmspace structure.
1956  */
1957 static void
1958 pmap_pinit_simple(struct pmap *pmap)
1959 {
1960         int i;
1961
1962         /*
1963          * Misc initialization
1964          */
1965         pmap->pm_count = 1;
1966         CPUMASK_ASSZERO(pmap->pm_active);
1967         pmap->pm_pvhint_pt = NULL;
1968         pmap->pm_pvhint_pte = NULL;
1969         pmap->pm_flags = PMAP_FLAG_SIMPLE;
1970
1971         pmap_pinit_defaults(pmap);
1972
1973         /*
1974          * Don't blow up locks/tokens on re-use (XXX fix/use drop code
1975          * for this).
1976          */
1977         if (pmap->pm_pmlpv == NULL) {
1978                 RB_INIT(&pmap->pm_pvroot);
1979                 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1980                 spin_init(&pmap->pm_spin, "pmapinitsimple");
1981                 for (i = 0; i < PM_PLACEMARKS; ++i)
1982                         pmap->pm_placemarks[i] = PM_NOPLACEMARK;
1983         }
1984 }
1985
1986 void
1987 pmap_pinit(struct pmap *pmap)
1988 {
1989         pv_entry_t pv;
1990         int j;
1991
1992         if (pmap->pm_pmlpv) {
1993                 if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
1994                         pmap_puninit(pmap);
1995                 }
1996         }
1997
1998         pmap_pinit_simple(pmap);
1999         pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
2000
2001         /*
2002          * No need to allocate page table space yet but we do need a valid
2003          * page directory table.
2004          */
2005         if (pmap->pm_pml4 == NULL) {
2006                 pmap->pm_pml4 =
2007                     (pml4_entry_t *)kmem_alloc_pageable(&kernel_map,
2008                                                         PAGE_SIZE,
2009                                                         VM_SUBSYS_PML4);
2010         }
2011
2012         /*
2013          * Allocate the page directory page, which wires it even though
2014          * it isn't being entered into some higher level page table (it
2015          * being the highest level).  If one is already cached we don't
2016          * have to do anything.
2017          */
2018         if ((pv = pmap->pm_pmlpv) == NULL) {
2019                 pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2020                 pmap->pm_pmlpv = pv;
2021                 pmap_kenter((vm_offset_t)pmap->pm_pml4,
2022                             VM_PAGE_TO_PHYS(pv->pv_m));
2023                 pv_put(pv);
2024
2025                 /*
2026                  * Install DMAP and KMAP.
2027                  */
2028                 for (j = 0; j < NDMPML4E; ++j) {
2029                         pmap->pm_pml4[DMPML4I + j] =
2030                             (DMPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2031                             pmap->pmap_bits[PG_RW_IDX] |
2032                             pmap->pmap_bits[PG_V_IDX] |
2033                             pmap->pmap_bits[PG_U_IDX];
2034                 }
2035                 for (j = 0; j < NKPML4E; ++j) {
2036                         pmap->pm_pml4[KPML4I + j] =
2037                             (KPDPphys + ((vm_paddr_t)j << PAGE_SHIFT)) |
2038                             pmap->pmap_bits[PG_RW_IDX] |
2039                             pmap->pmap_bits[PG_V_IDX] |
2040                             pmap->pmap_bits[PG_U_IDX];
2041                 }
2042
2043                 /*
2044                  * install self-referential address mapping entry
2045                  */
2046                 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) |
2047                     pmap->pmap_bits[PG_V_IDX] |
2048                     pmap->pmap_bits[PG_RW_IDX] |
2049                     pmap->pmap_bits[PG_A_IDX] |
2050                     pmap->pmap_bits[PG_M_IDX];
2051         } else {
2052                 KKASSERT(pv->pv_m->flags & PG_MAPPED);
2053                 KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
2054         }
2055         KKASSERT(pmap->pm_pml4[255] == 0);
2056         KKASSERT(RB_ROOT(&pmap->pm_pvroot) == pv);
2057         KKASSERT(pv->pv_entry.rbe_left == NULL);
2058         KKASSERT(pv->pv_entry.rbe_right == NULL);
2059 }
2060
2061 /*
2062  * Clean up a pmap structure so it can be physically freed.  This routine
2063  * is called by the vmspace dtor function.  A great deal of pmap data is
2064  * left passively mapped to improve vmspace management so we have a bit
2065  * of cleanup work to do here.
2066  */
2067 void
2068 pmap_puninit(pmap_t pmap)
2069 {
2070         pv_entry_t pv;
2071         vm_page_t p;
2072
2073         KKASSERT(CPUMASK_TESTZERO(pmap->pm_active));
2074         if ((pv = pmap->pm_pmlpv) != NULL) {
2075                 if (pv_hold_try(pv) == 0)
2076                         pv_lock(pv);
2077                 KKASSERT(pv == pmap->pm_pmlpv);
2078                 p = pmap_remove_pv_page(pv);
2079                 pv_free(pv, NULL);
2080                 pv = NULL;      /* safety */
2081                 pmap_kremove((vm_offset_t)pmap->pm_pml4);
2082                 vm_page_busy_wait(p, FALSE, "pgpun");
2083                 KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
2084                 vm_page_unwire(p, 0);
2085                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
2086
2087                 /*
2088                  * XXX eventually clean out PML4 static entries and
2089                  * use vm_page_free_zero()
2090                  */
2091                 vm_page_free(p);
2092                 pmap->pm_pmlpv = NULL;
2093         }
2094         if (pmap->pm_pml4) {
2095                 KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
2096                 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE);
2097                 pmap->pm_pml4 = NULL;
2098         }
2099         KKASSERT(pmap->pm_stats.resident_count == 0);
2100         KKASSERT(pmap->pm_stats.wired_count == 0);
2101 }
2102
2103 /*
2104  * This function is now unused (used to add the pmap to the pmap_list)
2105  */
2106 void
2107 pmap_pinit2(struct pmap *pmap)
2108 {
2109 }
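
/*
 * Life cycle sketch (not compiled): how the vmspace code drives a pmap
 * through init and teardown.  pmap_pinit() expects a preallocated and
 * zeroed structure; pmap_release() requires all mappings to be gone.
 */
#if 0
	struct pmap *pm = &vmspace->vm_pmap;	/* hypothetical embedding */

	pmap_pinit(pm);		/* allocates pml4, installs DMAP/KMAP */
	/* ... enter and later remove mappings ... */
	pmap_release(pm);	/* strips the pv tree, keeps pml4 cached */
	pmap_puninit(pm);	/* frees the pml4 page and its KVA */
#endif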
2110
2111 /*
2112  * This routine is called when various levels in the page table need to
2113  * be populated.  This routine cannot fail.
2114  *
2115  * This function returns two locked pv_entry's, one representing the
2116  * requested pv and one representing the requested pv's parent pv.  If
2117  * an intermediate page table does not exist it will be created, mapped,
2118  * wired, and the parent page table will be given an additional hold
2119  * count representing the presence of the child pv_entry.
2120  */
2121 static
2122 pv_entry_t
2123 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
2124 {
2125         pt_entry_t *ptep;
2126         pv_entry_t pv;
2127         pv_entry_t pvp;
2128         pt_entry_t v;
2129         vm_pindex_t pt_pindex;
2130         vm_page_t m;
2131         int isnew;
2132         int ispt;
2133
2134         /*
2135          * If the pv already exists and we aren't being asked for the
2136          * parent page table page we can just return it.  A locked+held pv
2137          * is returned.  The pv will also have a second hold related to the
2138          * pmap association that we don't have to worry about.
2139          */
2140         ispt = 0;
2141         pv = pv_alloc(pmap, ptepindex, &isnew);
2142         if (isnew == 0 && pvpp == NULL)
2143                 return(pv);
2144
2145         /*
2146          * Special case terminal PVs.  These are not page table pages so
2147          * no vm_page is allocated (the caller supplied the vm_page).  If
2148          * pvpp is non-NULL we are being asked to also return the pt_pv
2149          * for this pv.
2150          *
2151          * Note that pt_pv's are only returned for user VAs. We assert that
2152          * a pt_pv is not being requested for kernel VAs.  The kernel
2153          * pre-wires all higher-level page tables so don't overload managed
2154          * higher-level page tables on top of it!
2155          */
2156         if (ptepindex < pmap_pt_pindex(0)) {
2157                 if (ptepindex >= NUPTE_USER) {
2158                         /* kernel manages this manually for KVM */
2159                         KKASSERT(pvpp == NULL);
2160                 } else {
2161                         KKASSERT(pvpp != NULL);
2162                         pt_pindex = NUPTE_TOTAL + (ptepindex >> NPTEPGSHIFT);
2163                         pvp = pmap_allocpte(pmap, pt_pindex, NULL);
2164                         if (isnew)
2165                                 vm_page_wire_quick(pvp->pv_m);
2166                         *pvpp = pvp;
2167                 }
2168                 return(pv);
2169         }
2170
2171         /*
2172          * The kernel never uses managed PT/PD/PDP pages.
2173          */
2174         KKASSERT(pmap != &kernel_pmap);
2175
2176         /*
2177          * Non-terminal PVs allocate a VM page to represent the page table,
2178          * so we have to resolve pvp, calculate ptepindex for the pvp,
2179          * and then the page table entry index within the pvp, before
2180          * falling through.
2181          */
2182         if (ptepindex < pmap_pd_pindex(0)) {
2183                 /*
2184                  * pv is PT, pvp is PD
2185                  */
2186                 ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT;
2187                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL;
2188                 pvp = pmap_allocpte(pmap, ptepindex, NULL);
2189
2190                 /*
2191                  * PT index in PD
2192                  */
2193                 ptepindex = pv->pv_pindex - pmap_pt_pindex(0);
2194                 ptepindex &= ((1ul << NPDEPGSHIFT) - 1);
2195                 ispt = 1;
2196         } else if (ptepindex < pmap_pdp_pindex(0)) {
2197                 /*
2198                  * pv is PD, pvp is PDP
2199                  *
2200                  * SIMPLE PMAP NOTE: Simple pmaps do not allocate above
2201                  *                   the PD.
2202                  */
2203                 ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT;
2204                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
2205
2206                 if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
2207                         KKASSERT(pvpp == NULL);
2208                         pvp = NULL;
2209                 } else {
2210                         pvp = pmap_allocpte(pmap, ptepindex, NULL);
2211                 }
2212
2213                 /*
2214                  * PD index in PDP
2215                  */
2216                 ptepindex = pv->pv_pindex - pmap_pd_pindex(0);
2217                 ptepindex &= ((1ul << NPDPEPGSHIFT) - 1);
2218         } else if (ptepindex < pmap_pml4_pindex()) {
2219                 /*
2220                  * pv is PDP, pvp is the root pml4 table
2221                  */
2222                 pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
2223
2224                 /*
2225                  * PDP index in PML4
2226                  */
2227                 ptepindex = pv->pv_pindex - pmap_pdp_pindex(0);
2228                 ptepindex &= ((1ul << NPML4EPGSHIFT) - 1);
2229         } else {
2230                 /*
2231                  * pv represents the top-level PML4, there is no parent.
2232                  */
2233                 pvp = NULL;
2234         }
2235
2236         if (isnew == 0)
2237                 goto notnew;
2238
2239         /*
2240          * (isnew) is TRUE, pv is not terminal.
2241          *
2242          * (1) Add a wire count to the parent page table (pvp).
2243          * (2) Allocate a VM page for the page table.
2244          * (3) Enter the VM page into the parent page table.
2245          *
2246          * page table pages are marked PG_WRITEABLE and PG_MAPPED.
2247          */
2248         if (pvp)
2249                 vm_page_wire_quick(pvp->pv_m);
2250
2251         for (;;) {
2252                 m = vm_page_alloc(NULL, pv->pv_pindex,
2253                                   VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
2254                                   VM_ALLOC_INTERRUPT);
2255                 if (m)
2256                         break;
2257                 vm_wait(0);
2258         }
2259         vm_page_wire(m);        /* wire for mapping in parent */
2260         vm_page_unmanage(m);    /* m must be spinunlocked */
2261         pmap_zero_page(VM_PAGE_TO_PHYS(m));
2262         m->valid = VM_PAGE_BITS_ALL;
2263
2264         vm_page_spin_lock(m);
2265         pmap_page_stats_adding(m);
2266         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2267         pv->pv_m = m;
2268         vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
2269         vm_page_spin_unlock(m);
2270
2271         /*
2272          * (isnew) is TRUE, pv is not terminal.
2273          *
2274          * Wire the page into pvp.  Bump the resident_count for the pmap.
2275          * There is no pvp for the top level, address the pm_pml4[] array
2276          * directly.
2277          *
2278          * If the caller wants the parent we return it, otherwise
2279          * we just put it away.
2280          *
2281          * No interlock is needed for pte 0 -> non-zero.
2282          *
2283          * In the situation where *ptep is valid we might have an unmanaged
2284          * page table page shared from another page table which we need to
2285          * unshare before installing our private page table page.
2286          */
2287         if (pvp) {
2288                 v = VM_PAGE_TO_PHYS(m) |
2289                     (pmap->pmap_bits[PG_U_IDX] |
2290                      pmap->pmap_bits[PG_RW_IDX] |
2291                      pmap->pmap_bits[PG_V_IDX] |
2292                      pmap->pmap_bits[PG_A_IDX] |
2293                      pmap->pmap_bits[PG_M_IDX]);
2294                 ptep = pv_pte_lookup(pvp, ptepindex);
2295                 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
2296                         pt_entry_t pte;
2297
2298                         if (ispt == 0) {
2299                                 panic("pmap_allocpte: unexpected pte %p/%d",
2300                                       pvp, (int)ptepindex);
2301                         }
2302                         pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, ptep, v);
2303                         if (vm_page_unwire_quick(
2304                                         PHYS_TO_VM_PAGE(pte & PG_FRAME))) {
2305                                 panic("pmap_allocpte: shared pgtable "
2306                                       "pg bad wirecount");
2307                         }
2308                 } else {
2309                         pt_entry_t pte;
2310
2311                         pte = atomic_swap_long(ptep, v);
2312                         if (pte != 0) {
2313                                 kprintf("install pgtbl mixup 0x%016jx "
2314                                         "old/new 0x%016jx/0x%016jx\n",
2315                                         (intmax_t)ptepindex, pte, v);
2316                         }
2317                 }
2318         }
2319         vm_page_wakeup(m);
2320
2321         /*
2322          * (isnew) may be TRUE or FALSE, pv may or may not be terminal.
2323          */
2324 notnew:
2325         if (pvp) {
2326                 KKASSERT(pvp->pv_m != NULL);
2327                 ptep = pv_pte_lookup(pvp, ptepindex);
2328                 v = VM_PAGE_TO_PHYS(pv->pv_m) |
2329                     (pmap->pmap_bits[PG_U_IDX] |
2330                      pmap->pmap_bits[PG_RW_IDX] |
2331                      pmap->pmap_bits[PG_V_IDX] |
2332                      pmap->pmap_bits[PG_A_IDX] |
2333                      pmap->pmap_bits[PG_M_IDX]);
2334                 if (*ptep != v) {
2335                         kprintf("mismatched upper level pt %016jx/%016jx\n",
2336                                 *ptep, v);
2337                 }
2338         }
2339         if (pvpp)
2340                 *pvpp = pvp;
2341         else if (pvp)
2342                 pv_put(pvp);
2343         return (pv);
2344 }
2345
2346 /*
2347  * This version of pmap_allocpte() checks for possible segment optimizations
2348  * that would allow page-table sharing.  It can be called for terminal
2349  * page or page table page ptepindex's.
2350  *
2351  * The function is called with page table page ptepindex's for fictitious
2352  * and unmanaged terminal pages.  That is, we don't want to allocate a
2353  * terminal pv, we just want the pt_pv.  pvpp is usually passed as NULL
2354  * for this case.
2355  *
2356  * This function can return a pv and *pvpp associated with the passed in pmap
2357  * OR a pv and *pvpp associated with the shared pmap.  In the latter case
2358  * an unmanaged page table page will be entered into the passed-in pmap.
2359  */
2360 static
2361 pv_entry_t
2362 pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp,
2363                   vm_map_entry_t entry, vm_offset_t va)
2364 {
2365         vm_object_t object;
2366         pmap_t obpmap;
2367         pmap_t *obpmapp;
2368         vm_pindex_t *pt_placemark;
2369         vm_offset_t b;
2370         pv_entry_t pte_pv;      /* in original or shared pmap */
2371         pv_entry_t pt_pv;       /* in original or shared pmap */
2372         pv_entry_t proc_pd_pv;  /* in original pmap */
2373         pv_entry_t proc_pt_pv;  /* in original pmap */
2374         pv_entry_t xpv;         /* PT in shared pmap */
2375         pd_entry_t *pt;         /* PT entry in PD of original pmap */
2376         pd_entry_t opte;        /* contents of *pt */
2377         pd_entry_t npte;        /* contents of *pt */
2378         vm_page_t m;
2379         int softhold;
2380
2381         /*
2382          * Basic tests, require a non-NULL vm_map_entry, require proper
2383          * alignment and type for the vm_map_entry, require that the
2384          * underlying object already be allocated.
2385          *
2386          * We allow almost any type of object to use this optimization.
2387          * The object itself does NOT have to be sized to a multiple of the
2388          * segment size, but the memory mapping does.
2389          *
2390          * XXX don't handle devices currently, because VM_PAGE_TO_PHYS()
2391          *     won't work as expected.
2392          */
2393         if (entry == NULL ||
2394             pmap_mmu_optimize == 0 ||                   /* not enabled */
2395             (pmap->pm_flags & PMAP_HVM) ||              /* special pmap */
2396             ptepindex >= pmap_pd_pindex(0) ||           /* not terminal or pt */
2397             entry->inheritance != VM_INHERIT_SHARE ||   /* not shared */
2398             entry->maptype != VM_MAPTYPE_NORMAL ||      /* weird map type */
2399             entry->object.vm_object == NULL ||          /* needs VM object */
2400             entry->object.vm_object->type == OBJT_DEVICE ||     /* ick */
2401             entry->object.vm_object->type == OBJT_MGTDEVICE ||  /* ick */
2402             (entry->offset & SEG_MASK) ||               /* must be aligned */
2403             (entry->start & SEG_MASK)) {
2404                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2405         }
2406
2407         /*
2408          * Make sure the full segment can be represented.
2409          */
2410         b = va & ~(vm_offset_t)SEG_MASK;
2411         if (b < entry->start || b + SEG_SIZE > entry->end)
2412                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2413
2414         /*
2415          * If the full segment can be represented, dive into the VM
2416          * object's shared pmap, allocating as required.
2417          */
2418         object = entry->object.vm_object;
2419
2420         if (entry->protection & VM_PROT_WRITE)
2421                 obpmapp = &object->md.pmap_rw;
2422         else
2423                 obpmapp = &object->md.pmap_ro;
2424
2425 #ifdef PMAP_DEBUG2
2426         if (pmap_enter_debug > 0) {
2427                 --pmap_enter_debug;
2428                 kprintf("pmap_allocpte_seg: va=%jx prot %08x o=%p "
2429                         "obpmapp %p %p\n",
2430                         va, entry->protection, object,
2431                         obpmapp, *obpmapp);
2432                 kprintf("pmap_allocpte_seg: entry %p %jx-%jx\n",
2433                         entry, entry->start, entry->end);
2434         }
2435 #endif
2436
2437         /*
2438          * We allocate what appears to be a normal pmap but because portions
2439          * of this pmap are shared with other unrelated pmaps we have to
2440          * set pm_active to point to all cpus.
2441          *
2442          * XXX Currently using pmap_spin to interlock the update, can't use
2443          *     vm_object_hold/drop because the token might already be held
2444          *     shared OR exclusive and we don't know.
2445          */
2446         while ((obpmap = *obpmapp) == NULL) {
2447                 obpmap = kmalloc(sizeof(*obpmap), M_OBJPMAP, M_WAITOK|M_ZERO);
2448                 pmap_pinit_simple(obpmap);
2449                 pmap_pinit2(obpmap);
2450                 spin_lock(&pmap_spin);
2451                 if (*obpmapp != NULL) {
2452                         /*
2453                          * Handle race
2454                          */
2455                         spin_unlock(&pmap_spin);
2456                         pmap_release(obpmap);
2457                         pmap_puninit(obpmap);
2458                         kfree(obpmap, M_OBJPMAP);
2459                         obpmap = *obpmapp; /* safety */
2460                 } else {
2461                         obpmap->pm_active = smp_active_mask;
2462                         obpmap->pm_flags |= PMAP_SEGSHARED;
2463                         *obpmapp = obpmap;
2464                         spin_unlock(&pmap_spin);
2465                 }
2466         }
2467
2468         /*
2469          * Layering is: PTE, PT, PD, PDP, PML4.  We have to return the
2470          * pte/pt using the shared pmap from the object but also adjust
2471          * the process pmap's page table page as a side effect.
2472          */
2473
2474         /*
2475          * Resolve the terminal PTE and PT in the shared pmap.  This is what
2476          * we will return.  This is true if ptepindex represents a terminal
2477          * page, otherwise pte_pv is actually the PT and pt_pv is actually
2478          * the PD.
2479          */
2480         pt_pv = NULL;
2481         pte_pv = pmap_allocpte(obpmap, ptepindex, &pt_pv);
2482         softhold = 0;
2483 retry:
2484         if (ptepindex >= pmap_pt_pindex(0))
2485                 xpv = pte_pv;
2486         else
2487                 xpv = pt_pv;
2488
2489         /*
2490          * Resolve the PD in the process pmap so we can properly share the
2491          * page table page.  Lock order is bottom-up (leaf first)!
2492          *
2493          * NOTE: proc_pt_pv can be NULL.
2494          */
2495         proc_pt_pv = pv_get(pmap, pmap_pt_pindex(b), &pt_placemark);
2496         proc_pd_pv = pmap_allocpte(pmap, pmap_pd_pindex(b), NULL);
2497 #ifdef PMAP_DEBUG2
2498         if (pmap_enter_debug > 0) {
2499                 --pmap_enter_debug;
2500                 kprintf("proc_pt_pv %p (wc %d) pd_pv %p va=%jx\n",
2501                         proc_pt_pv,
2502                         (proc_pt_pv ? proc_pt_pv->pv_m->wire_count : -1),
2503                         proc_pd_pv,
2504                         va);
2505         }
2506 #endif
2507
2508         /*
2509          * xpv is the page table page pv from the shared object
2510          * (for convenience), from above.
2511          *
2512          * Calculate the pte value for the PT to load into the process PD.
2513          * If we have to change it we must properly dispose of the previous
2514          * entry.
2515          */
2516         pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2517         npte = VM_PAGE_TO_PHYS(xpv->pv_m) |
2518                (pmap->pmap_bits[PG_U_IDX] |
2519                 pmap->pmap_bits[PG_RW_IDX] |
2520                 pmap->pmap_bits[PG_V_IDX] |
2521                 pmap->pmap_bits[PG_A_IDX] |
2522                 pmap->pmap_bits[PG_M_IDX]);
2523
2524         /*
2525          * Dispose of previous page table page if it was local to the
2526          * process pmap.  If the old pt is not empty we cannot dispose of it
2527          * until we clean it out.  This case should not arise very often so
2528          * it is not optimized.
2529          *
2530          * Leave pt_pv and pte_pv (in our object pmap) locked and intact
2531          * for the retry.
2532          */
2533         if (proc_pt_pv) {
2534                 pmap_inval_bulk_t bulk;
2535
2536                 if (proc_pt_pv->pv_m->wire_count != 1) {
2537                         /*
2538                          * The page table has a bunch of stuff in it
2539                          * which we have to scrap.
2540                          */
2541                         if (softhold == 0) {
2542                                 softhold = 1;
2543                                 pmap_softhold(pmap);
2544                         }
2545                         pv_put(proc_pd_pv);
2546                         pv_put(proc_pt_pv);
2547                         pmap_remove(pmap,
2548                                     va & ~(vm_offset_t)SEG_MASK,
2549                                     (va + SEG_SIZE) & ~(vm_offset_t)SEG_MASK);
2550                 } else {
2551                         /*
2552                          * The page table is empty and can be destroyed.
2553                          * However, doing so leaves the pt slot unlocked,
2554                          * so we have to loop back up to handle any races until
2555                          * we get a NULL proc_pt_pv and a proper pt_placemark.
2556                          */
2557                         pmap_inval_bulk_init(&bulk, proc_pt_pv->pv_pmap);
2558                         pmap_release_pv(proc_pt_pv, proc_pd_pv, &bulk);
2559                         pmap_inval_bulk_flush(&bulk);
2560                         pv_put(proc_pd_pv);
2561                 }
2562                 goto retry;
2563         }
2564
2565         /*
2566          * Handle remaining cases.  We are holding pt_placemark to lock
2567          * the page table page in the primary pmap while we manipulate
2568          * it.
2569          */
2570         if (*pt == 0) {
2571                 atomic_swap_long(pt, npte);
2572                 vm_page_wire_quick(xpv->pv_m);          /* shared pt -> proc */
2573                 vm_page_wire_quick(proc_pd_pv->pv_m);   /* proc pd for sh pt */
2574                 atomic_add_long(&pmap->pm_stats.resident_count, 1);
2575         } else if (*pt != npte) {
2576                 opte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, pt, npte);
2577
2578 #if 0
2579                 opte = pte_load_clear(pt);
2580                 KKASSERT(opte && opte != npte);
2581
2582                 *pt = npte;
2583 #endif
2584                 vm_page_wire_quick(xpv->pv_m);          /* shared pt -> proc */
2585
2586                 /*
2587                  * Clean up opte, bump the wire_count for the process
2588                  * PD page representing the new entry if it was
2589                  * previously empty.
2590                  *
2591                  * If the entry was not previously empty and we have
2592                  * a PT in the proc pmap then opte must match that
2593                  * pt.  The proc pt must be retired (this is done
2594                  * later on in this procedure).
2595                  *
2596                  * NOTE: replacing valid pte, wire_count on proc_pd_pv
2597                  * stays the same.
2598                  */
2599                 KKASSERT(opte & pmap->pmap_bits[PG_V_IDX]);
2600                 m = PHYS_TO_VM_PAGE(opte & PG_FRAME);
2601                 if (vm_page_unwire_quick(m)) {
2602                         panic("pmap_allocpte_seg: "
2603                               "bad wire count %p",
2604                               m);
2605                 }
2606         }
2607
2608         if (softhold)
2609                 pmap_softdone(pmap);
2610
2611         /*
2612          * Remove our earmark on the page table page.
2613          */
2614         pv_placemarker_wakeup(pmap, pt_placemark);
2615
2616         /*
2617          * Release the process PD page.  Any prior process page table
2618          * was already replaced and destroyed in the loop above.
2619          */
2620         if (proc_pd_pv)
2621                 pv_put(proc_pd_pv);
2622         if (pvpp)
2623                 *pvpp = pt_pv;
2624         else
2625                 pv_put(pt_pv);
2626         return (pte_pv);
2627 }
2628
2629 /*
2630  * Release any resources held by the given physical map.
2631  *
2632  * Called when a pmap initialized by pmap_pinit is being released.  Should
2633  * only be called if the map contains no valid mappings.
2634  */
2635 struct pmap_release_info {
2636         pmap_t  pmap;
2637         int     retry;
2638         pv_entry_t pvp;
2639 };
2640
2641 static int pmap_release_callback(pv_entry_t pv, void *data);
2642
2643 void
2644 pmap_release(struct pmap *pmap)
2645 {
2646         struct pmap_release_info info;
2647
2648         KASSERT(CPUMASK_TESTZERO(pmap->pm_active),
2649                 ("pmap still active! %016jx",
2650                 (uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
2651
2652         /*
2653          * There is no longer a pmap_list, if there were we would remove the
2654          * pmap from it here.
2655          */
2656
2657         /*
2658          * Pull pv's off the RB tree in order from low to high and release
2659          * each page.
2660          */
2661         info.pmap = pmap;
2662         do {
2663                 info.retry = 0;
2664                 info.pvp = NULL;
2665
2666                 spin_lock(&pmap->pm_spin);
2667                 RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL,
2668                         pmap_release_callback, &info);
2669                 spin_unlock(&pmap->pm_spin);
2670
2671                 if (info.pvp)
2672                         pv_put(info.pvp);
2673         } while (info.retry);
2674
2676         /*
2677          * One resident page (the pml4 page) should remain.
2678          * No wired pages should remain.
2679          */
2680 #if 1
2681         if (pmap->pm_stats.resident_count !=
2682             ((pmap->pm_flags & PMAP_FLAG_SIMPLE) ? 0 : 1) ||
2683             pmap->pm_stats.wired_count != 0) {
2684                 kprintf("fatal pmap problem - pmap %p flags %08x "
2685                         "rescnt=%jd wirecnt=%jd\n",
2686                         pmap,
2687                         pmap->pm_flags,
2688                         pmap->pm_stats.resident_count,
2689                         pmap->pm_stats.wired_count);
2690                 tsleep(pmap, 0, "DEAD", 0);
2691         }
2692 #else
2693         KKASSERT(pmap->pm_stats.resident_count ==
2694                  ((pmap->pm_flags & PMAP_FLAG_SIMPLE) ? 0 : 1));
2695         KKASSERT(pmap->pm_stats.wired_count == 0);
2696 #endif
2697 }
2698
2699 /*
2700  * Called from low to high.  We must cache the proper parent pv so we
2701  * can adjust its wired count.
2702  */
2703 static int
2704 pmap_release_callback(pv_entry_t pv, void *data)
2705 {
2706         struct pmap_release_info *info = data;
2707         pmap_t pmap = info->pmap;
2708         vm_pindex_t pindex;
2709         int r;
2710
2711         /*
2712          * Acquire a held and locked pv, check for release race
2713          */
2714         pindex = pv->pv_pindex;
2715         if (info->pvp == pv) {
2716                 spin_unlock(&pmap->pm_spin);
2717                 info->pvp = NULL;
2718         } else if (pv_hold_try(pv)) {
2719                 spin_unlock(&pmap->pm_spin);
2720         } else {
2721                 spin_unlock(&pmap->pm_spin);
2722                 pv_lock(pv);
2723                 pv_put(pv);
2724                 info->retry = 1;
2725                 spin_lock(&pmap->pm_spin);
2726
2727                 return -1;
2728         }
2729         KKASSERT(pv->pv_pmap == pmap && pindex == pv->pv_pindex);
2730
2731         if (pv->pv_pindex < pmap_pt_pindex(0)) {
2732                 /*
2733                  * I am PTE, parent is PT
2734                  */
2735                 pindex = pv->pv_pindex >> NPTEPGSHIFT;
2736                 pindex += NUPTE_TOTAL;
2737         } else if (pv->pv_pindex < pmap_pd_pindex(0)) {
2738                 /*
2739                  * I am PT, parent is PD
2740                  */
2741                 pindex = (pv->pv_pindex - NUPTE_TOTAL) >> NPDEPGSHIFT;
2742                 pindex += NUPTE_TOTAL + NUPT_TOTAL;
2743         } else if (pv->pv_pindex < pmap_pdp_pindex(0)) {
2744                 /*
2745                  * I am PD, parent is PDP
2746                  */
2747                 pindex = (pv->pv_pindex - NUPTE_TOTAL - NUPT_TOTAL) >>
2748                          NPDPEPGSHIFT;
2749                 pindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
2750         } else if (pv->pv_pindex < pmap_pml4_pindex()) {
2751                 /*
2752                  * I am PDP, parent is PML4 (there's only one)
2753                  */
2754 #if 0
2755                 pindex = (pv->pv_pindex - NUPTE_TOTAL - NUPT_TOTAL -
2756                            NUPD_TOTAL) >> NPML4EPGSHIFT;
2757                 pindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL;
2758 #endif
2759                 pindex = pmap_pml4_pindex();
2760         } else {
2761                 /*
2762                  * parent is NULL
2763                  */
2764                 if (info->pvp) {
2765                         pv_put(info->pvp);
2766                         info->pvp = NULL;
2767                 }
2768                 pindex = 0;
2769         }
2770         if (pindex) {
2771                 if (info->pvp && info->pvp->pv_pindex != pindex) {
2772                         pv_put(info->pvp);
2773                         info->pvp = NULL;
2774                 }
2775                 if (info->pvp == NULL)
2776                         info->pvp = pv_get(pmap, pindex, NULL);
2777         } else {
2778                 if (info->pvp) {
2779                         pv_put(info->pvp);
2780                         info->pvp = NULL;
2781                 }
2782         }
2783         r = pmap_release_pv(pv, info->pvp, NULL);
2784         spin_lock(&pmap->pm_spin);
2785
2786         return(r);
2787 }
2788
2789 /*
2790  * Called with held (i.e. also locked) pv.  This function will dispose of
2791  * the lock along with the pv.
2792  *
2793  * If the caller already holds the locked parent page table for pv it
2794  * must pass it as pvp, allowing us to avoid a deadlock, else it can
2795  * pass NULL for pvp.
2796  */
2797 static int
2798 pmap_release_pv(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk)
2799 {
2800         vm_page_t p;
2801
2802         /*
2803          * The pmap is currently not spinlocked, pv is held+locked.
2804          * Remove the pv's page from its parent's page table.  The
2805          * parent's page table page's wire_count will be decremented.
2806          *
2807          * This will clean out the pte at any level of the page table.
2808          * All cpus specified by (bulk) are affected.
2809          *
2810          * Do not tear-down recursively, its faster to just let the
2811          * release run its course.
2812          */
2813         pmap_remove_pv_pte(pv, pvp, bulk, 0);
2814
2815         /*
2816          * Terminal pvs are unhooked from their vm_pages.  Because
2817          * terminal pages aren't page table pages they aren't wired
2818          * by us, so we have to be sure not to unwire them either.
2819          */
2820         if (pv->pv_pindex < pmap_pt_pindex(0)) {
2821                 pmap_remove_pv_page(pv);
2822                 goto skip;
2823         }
2824
2825         /*
2826          * We leave the top-level page table page cached, wired, and
2827          * mapped in the pmap until the dtor function (pmap_puninit())
2828          * gets called.
2829          *
2830          * Since we are leaving the top-level pv intact we need
2831          * to break out of what would otherwise be an infinite loop.
2832          */
2833         if (pv->pv_pindex == pmap_pml4_pindex()) {
2834                 pv_put(pv);
2835                 return(-1);
2836         }
2837
2838         /*
2839          * For page table pages (other than the top-level page),
2840          * remove and free the vm_page.  The representative mapping
2841          * removed above by pmap_remove_pv_pte() did not undo the
2842          * last wire_count so we have to do that as well.
2843          */
2844         p = pmap_remove_pv_page(pv);
2845         vm_page_busy_wait(p, FALSE, "pmaprl");
2846         if (p->wire_count != 1) {
2847                 kprintf("pmap_release_pv: pindex %016lx wire_count %d\n",
2848                         pv->pv_pindex, p->wire_count);
2849         }
2850         KKASSERT(p->wire_count == 1);
2851         KKASSERT(p->flags & PG_UNMANAGED);
2852
2853         vm_page_unwire(p, 0);
2854         KKASSERT(p->wire_count == 0);
2855
2856         vm_page_free(p);
2857 skip:
2858         pv_free(pv, pvp);
2859
2860         return 0;
2861 }
2862
2863 /*
2864  * This function will remove the pte associated with a pv from its parent.
2865  * Terminal pv's are supported.  All cpus specified by (bulk) are properly
2866  * invalidated.
2867  *
2868  * The wire count will be dropped on the parent page table.  The wire
2869  * count on the page being removed (pv->pv_m) from the parent page table
2870  * is NOT touched.  Note that terminal pages will not have any additional
2871  * wire counts while page table pages will have at least one representing
2872  * the mapping, plus others representing sub-mappings.
2873  *
2874  * NOTE: Cannot be called on kernel page table pages, only KVM terminal
2875  *       pages and user page table and terminal pages.
2876  *
2877  * NOTE: The pte being removed might be unmanaged, and the pv supplied might
2878  *       be freshly allocated and not imply that the pte is managed.  In this
2879  *       case pv->pv_m should be NULL.
2880  *
2881  * The pv must be locked.  The pvp, if supplied, must be locked.  All
2882  * supplied pv's will remain locked on return.
2883  *
2884  * XXX must lock parent pv's if they exist to remove pte XXX
2885  */
2886 static
2887 void
2888 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, pmap_inval_bulk_t *bulk,
2889                    int destroy)
2890 {
2891         vm_pindex_t ptepindex = pv->pv_pindex;
2892         pmap_t pmap = pv->pv_pmap;
2893         vm_page_t p;
2894         int gotpvp = 0;
2895
2896         KKASSERT(pmap);
2897
2898         if (ptepindex == pmap_pml4_pindex()) {
2899                 /*
2900                  * We are the top level PML4E table, there is no parent.
2901                  */
2902                 p = pmap->pm_pmlpv->pv_m;
2903                 KKASSERT(pv->pv_m == p);        /* debugging */
2904         } else if (ptepindex >= pmap_pdp_pindex(0)) {
2905                 /*
2906                  * Remove a PDP page from the PML4E.  This can only occur
2907                  * with user page tables.  We do not have to lock the
2908                  * pml4 PV so just ignore pvp.
2909                  */
2910                 vm_pindex_t pml4_pindex;
2911                 vm_pindex_t pdp_index;
2912                 pml4_entry_t *pdp;
2913
2914                 pdp_index = ptepindex - pmap_pdp_pindex(0);
2915                 if (pvp == NULL) {
2916                         pml4_pindex = pmap_pml4_pindex();
2917                         pvp = pv_get(pv->pv_pmap, pml4_pindex, NULL);
2918                         KKASSERT(pvp);
2919                         gotpvp = 1;
2920                 }
2921
2922                 pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
2923                 KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
2924                 p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
2925                 pmap_inval_bulk(bulk, (vm_offset_t)-1, pdp, 0);
2926                 KKASSERT(pv->pv_m == p);        /* debugging */
2927         } else if (ptepindex >= pmap_pd_pindex(0)) {
2928                 /*
2929                  * Remove a PD page from the PDP
2930                  *
2931                  * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
2932                  *                   of a simple pmap because it stops at
2933                  *                   the PD page.
2934                  */
2935                 vm_pindex_t pdp_pindex;
2936                 vm_pindex_t pd_index;
2937                 pdp_entry_t *pd;
2938
2939                 pd_index = ptepindex - pmap_pd_pindex(0);
2940
2941                 if (pvp == NULL) {
2942                         pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
2943                                      (pd_index >> NPML4EPGSHIFT);
2944                         pvp = pv_get(pv->pv_pmap, pdp_pindex, NULL);
2945                         gotpvp = 1;
2946                 }
2947
2948                 if (pvp) {
2949                         pd = pv_pte_lookup(pvp, pd_index &
2950                                                 ((1ul << NPDPEPGSHIFT) - 1));
2951                         KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
2952                         p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
2953                         pmap_inval_bulk(bulk, (vm_offset_t)-1, pd, 0);
2954                 } else {
2955                         KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
2956                         p = pv->pv_m;           /* degenerate test later */
2957                 }
2958                 KKASSERT(pv->pv_m == p);        /* debugging */
2959         } else if (ptepindex >= pmap_pt_pindex(0)) {
2960                 /*
2961                  * Remove a PT page from the PD
2962                  */
2963                 vm_pindex_t pd_pindex;
2964                 vm_pindex_t pt_index;
2965                 pd_entry_t *pt;
2966
2967                 pt_index = ptepindex - pmap_pt_pindex(0);
2968
2969                 if (pvp == NULL) {
2970                         pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
2971                                     (pt_index >> NPDPEPGSHIFT);
2972                         pvp = pv_get(pv->pv_pmap, pd_pindex, NULL);
2973                         KKASSERT(pvp);
2974                         gotpvp = 1;
2975                 }
2976
2977                 pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
2978 #if 0
2979                 KASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0,
2980                         ("*pt unexpectedly invalid %016jx "
2981                          "gotpvp=%d ptepindex=%ld ptindex=%ld pv=%p pvp=%p",
2982                         *pt, gotpvp, ptepindex, pt_index, pv, pvp));
2983                 p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
2984 #else
2985                 if ((*pt & pmap->pmap_bits[PG_V_IDX]) == 0) {
2986                         kprintf("*pt unexpectedly invalid %016jx "
2987                                 "gotpvp=%d ptepindex=%ld ptindex=%ld "
2988                                 "pv=%p pvp=%p\n",
2989                                 *pt, gotpvp, ptepindex, pt_index, pv, pvp);
2990                         tsleep(pt, 0, "DEAD", 0);
2991                         p = pv->pv_m;
2992                 } else {
2993                         p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
2994                 }
2995 #endif
2996                 pmap_inval_bulk(bulk, (vm_offset_t)-1, pt, 0);
2997                 KKASSERT(pv->pv_m == p);        /* debugging */
2998         } else {
2999                 /*
3000                  * Remove a PTE from the PT page.  The PV might exist even if
3001                  * the PTE is not managed, in which case pv->pv_m should be
3002                  * NULL.
3003                  *
3004                  * NOTE: Userland pmaps manage the parent PT/PD/PDP page
3005                  *       table pages but the kernel_pmap does not.
3006                  *
3007                  * NOTE: pv's must be locked bottom-up to avoid deadlocking.
3008                  *       pv is a pte_pv so we can safely lock pt_pv.
3009                  *
3010                  * NOTE: FICTITIOUS pages may have multiple physical mappings
3011                  *       so PHYS_TO_VM_PAGE() will not necessarily work for
3012                  *       terminal ptes.
3013                  */
3014                 vm_pindex_t pt_pindex;
3015                 pt_entry_t *ptep;
3016                 pt_entry_t pte;
3017                 vm_offset_t va;
3018
3019                 pt_pindex = ptepindex >> NPTEPGSHIFT;
3020                 va = (vm_offset_t)ptepindex << PAGE_SHIFT;
3021
3022                 if (ptepindex >= NUPTE_USER) {
3023                         ptep = vtopte(ptepindex << PAGE_SHIFT);
3024                         KKASSERT(pvp == NULL);
3025                         /* pvp remains NULL */
3026                 } else {
3027                         if (pvp == NULL) {
3028                                 pt_pindex = NUPTE_TOTAL +
3029                                             (ptepindex >> NPDPEPGSHIFT);
3030                                 pvp = pv_get(pv->pv_pmap, pt_pindex, NULL);
3031                                 KKASSERT(pvp);
3032                                 gotpvp = 1;
3033                         }
3034                         ptep = pv_pte_lookup(pvp, ptepindex &
3035                                                   ((1ul << NPDPEPGSHIFT) - 1));
3036                 }
3037                 pte = pmap_inval_bulk(bulk, va, ptep, 0);
3038                 if (bulk == NULL)               /* XXX */
3039                         cpu_invlpg((void *)va); /* XXX */
3040
3041                 /*
3042                  * Now update the vm_page_t
3043                  */
3044                 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3045                     (pte & pmap->pmap_bits[PG_V_IDX])) {
3046                         /*
3047                          * Valid managed page, adjust (p).
3048                          */
3049                         if (pte & pmap->pmap_bits[PG_DEVICE_IDX]) {
3050                                 p = pv->pv_m;
3051                         } else {
3052                                 p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3053                                 KKASSERT(pv->pv_m == p);
3054                         }
3055                         if (pte & pmap->pmap_bits[PG_M_IDX]) {
3056                                 if (pmap_track_modified(ptepindex))
3057                                         vm_page_dirty(p);
3058                         }
3059                         if (pte & pmap->pmap_bits[PG_A_IDX]) {
3060                                 vm_page_flag_set(p, PG_REFERENCED);
3061                         }
3062                 } else {
3063                         /*
3064                          * Unmanaged page, do not try to adjust the vm_page_t.
3065                          * pv could be freshly allocated for a pmap_enter(),
3066                          * replacing an unmanaged page with a managed one.
3067                          *
3068                          * pv->pv_m might reflect the new page and not the
3069                          * existing page.
3070                          *
3071                          * We could extract p from the physical address and
3072                          * adjust it but we explicitly do not for unmanaged
3073                          * pages.
3074                          */
3075                         p = NULL;
3076                 }
3077                 if (pte & pmap->pmap_bits[PG_W_IDX])
3078                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
3079                 if (pte & pmap->pmap_bits[PG_G_IDX])
3080                         cpu_invlpg((void *)va);
3081         }
3082
3083         /*
3084          * If requested, scrap the underlying pv->pv_m and the underlying
3085          * pv.  If this is a page-table-page we must also free the page.
3086          *
3087          * pvp must be returned locked.
3088          */
3089         if (destroy == 1) {
3090                 /*
3091                  * Page table page (PT, PD, PDP, or PML4); the caller was
3092                  * responsible for testing wired_count.
3093                  */
3094                 KKASSERT(pv->pv_m->wire_count == 1);
3095                 p = pmap_remove_pv_page(pv);
3096                 pv_free(pv, pvp);
3097                 pv = NULL;
3098
3099                 vm_page_busy_wait(p, FALSE, "pgpun");
3100                 vm_page_unwire(p, 0);
3101                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
3102                 vm_page_free(p);
3103         } else if (destroy == 2) {
3104                 /*
3105                  * Normal page, remove from pmap and leave the underlying
3106                  * page untouched.
3107                  */
3108                 pmap_remove_pv_page(pv);
3109                 pv_free(pv, pvp);
3110                 pv = NULL;              /* safety */
3111         }
3112
3113         /*
3114          * If we acquired pvp ourselves then we are responsible for
3115          * recursively deleting it.
3116          */
3117         if (pvp && gotpvp) {
3118                 /*
3119                  * Recursively destroy higher-level page tables.
3120                  *
3121                  * This is optional.  If we do not, they will still
3122                  * be destroyed when the process exits.
3123                  *
3124                  * NOTE: Do not destroy pv_entry's with extra hold refs,
3125                  *       a caller may have unlocked it and intends to
3126                  *       continue to use it.
3127                  */
3128                 if (pmap_dynamic_delete &&
3129                     pvp->pv_m &&
3130                     pvp->pv_m->wire_count == 1 &&
3131                     (pvp->pv_hold & PV_HOLD_MASK) == 2 &&
3132                     pvp->pv_pindex != pmap_pml4_pindex()) {
3133                         if (pmap_dynamic_delete == 2)
3134                                 kprintf("A %jd %08x\n", pvp->pv_pindex, pvp->pv_hold);
3135                         if (pmap != &kernel_pmap) {
3136                                 pmap_remove_pv_pte(pvp, NULL, bulk, 1);
3137                                 pvp = NULL;     /* safety */
3138                         } else {
3139                                 kprintf("Attempt to remove kernel_pmap pindex "
3140                                         "%jd\n", pvp->pv_pindex);
3141                                 pv_put(pvp);
3142                         }
3143                 } else {
3144                         pv_put(pvp);
3145                 }
3146         }
3147 }
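
/*
 * Illustrative summary (the exact constants live in pmap.h; this merely
 * restates the range tests made above rather than adding new information):
 * the pv_pindex space is carved up bottom-up, terminal PTEs first, then
 * each page table level:
 *
 *      [0, NUPTE_TOTAL)                terminal PTE pv's
 *      [pmap_pt_pindex(0), ...)        PT page pv's
 *      [pmap_pd_pindex(0), ...)        PD page pv's
 *      [pmap_pdp_pindex(0), ...)       PDP page pv's
 *      pmap_pml4_pindex()              the single PML4 pv
 *
 * This ordering is why pmap_remove_pv_pte() can recurse upward: with
 * pmap_dynamic_delete enabled, dropping the last user of a page table
 * page re-enters the function on the parent pv with destroy=1.
 */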
3148
3149 /*
3150  * Remove the vm_page association to a pv.  The pv must be locked.
3151  */
3152 static
3153 vm_page_t
3154 pmap_remove_pv_page(pv_entry_t pv)
3155 {
3156         vm_page_t m;
3157
3158         m = pv->pv_m;
3159         vm_page_spin_lock(m);
3160         KKASSERT(m && m == pv->pv_m);
3161         pv->pv_m = NULL;
3162         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3163         pmap_page_stats_deleting(m);
3164         if (TAILQ_EMPTY(&m->md.pv_list))
3165                 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3166         vm_page_spin_unlock(m);
3167
3168         return(m);
3169 }
3170
3171 /*
3172  * Grow the number of kernel page table entries, if needed.
3173  *
3174  * This routine is always called to validate any address space
3175  * beyond KERNBASE (for kldloads).  kernel_vm_end only governs the address
3176  * space below KERNBASE.
3177  *
3178  * kernel_map must be locked exclusively by the caller.
3179  */
3180 void
3181 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
3182 {
3183         vm_paddr_t paddr;
3184         vm_offset_t ptppaddr;
3185         vm_page_t nkpg;
3186         pd_entry_t *pt, newpt;
3187         pdp_entry_t *pd, newpd;
3188         int update_kernel_vm_end;
3189
3190         /*
3191          * bootstrap kernel_vm_end on first real VM use
3192          */
3193         if (kernel_vm_end == 0) {
3194                 kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
3195
3196                 for (;;) {
3197                         pt = pmap_pt(&kernel_pmap, kernel_vm_end);
3198                         if (pt == NULL)
3199                                 break;
3200                         if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) == 0)
3201                                 break;
3202                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
3203                                         ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3204                         if (kernel_vm_end - 1 >= kernel_map.max_offset) {
3205                                 kernel_vm_end = kernel_map.max_offset;
3206                                 break;
3207                         }
3208                 }
3209         }
3210
3211         /*
3212          * Fill in the gaps.  kernel_vm_end is only adjusted for ranges
3213          * below KERNBASE.  Ranges above KERNBASE are kldloaded and we
3214          * do not want to force-fill 128G worth of page tables.
3215          */
3216         if (kstart < KERNBASE) {
3217                 if (kstart > kernel_vm_end)
3218                         kstart = kernel_vm_end;
3219                 KKASSERT(kend <= KERNBASE);
3220                 update_kernel_vm_end = 1;
3221         } else {
3222                 update_kernel_vm_end = 0;
3223         }
3224
3225         kstart = rounddown2(kstart, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3226         kend = roundup2(kend, (vm_offset_t)(PAGE_SIZE * NPTEPG));
3227
3228         if (kend - 1 >= kernel_map.max_offset)
3229                 kend = kernel_map.max_offset;
3230
3231         while (kstart < kend) {
3232                 pt = pmap_pt(&kernel_pmap, kstart);
3233                 if (pt == NULL) {
3234                         /*
3235                          * We need a new PD entry
3236                          */
3237                         nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3238                                              VM_ALLOC_NORMAL |
3239                                              VM_ALLOC_SYSTEM |
3240                                              VM_ALLOC_INTERRUPT);
3241                         if (nkpg == NULL) {
3242                                 panic("pmap_growkernel: no memory to grow "
3243                                       "kernel");
3244                         }
3245                         paddr = VM_PAGE_TO_PHYS(nkpg);
3246                         pmap_zero_page(paddr);
3247                         pd = pmap_pd(&kernel_pmap, kstart);
3248
3249                         newpd = (pdp_entry_t)
3250                             (paddr |
3251                             kernel_pmap.pmap_bits[PG_V_IDX] |
3252                             kernel_pmap.pmap_bits[PG_RW_IDX] |
3253                             kernel_pmap.pmap_bits[PG_A_IDX] |
3254                             kernel_pmap.pmap_bits[PG_M_IDX]);
3255                         atomic_swap_long(pd, newpd);
3256
3257 #if 0
3258                         kprintf("NEWPD pd=%p pde=%016jx phys=%016jx\n",
3259                                 pd, newpd, paddr);
3260 #endif
3261
3262                         continue; /* try again */
3263                 }
3264
3265                 if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
3266                         kstart = (kstart + PAGE_SIZE * NPTEPG) &
3267                                  ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3268                         if (kstart - 1 >= kernel_map.max_offset) {
3269                                 kstart = kernel_map.max_offset;
3270                                 break;
3271                         }
3272                         continue;
3273                 }
3274
3275                 /*
3276                  * We need a new PT
3277                  *
3278                  * This index is bogus, but out of the way
3279                  */
3280                 nkpg = vm_page_alloc(NULL, mycpu->gd_rand_incr++,
3281                                      VM_ALLOC_NORMAL |
3282                                      VM_ALLOC_SYSTEM |
3283                                      VM_ALLOC_INTERRUPT);
3284                 if (nkpg == NULL)
3285                         panic("pmap_growkernel: no memory to grow kernel");
3286
3287                 vm_page_wire(nkpg);
3288                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
3289                 pmap_zero_page(ptppaddr);
3290                 newpt = (pd_entry_t)(ptppaddr |
3291                                      kernel_pmap.pmap_bits[PG_V_IDX] |
3292                                      kernel_pmap.pmap_bits[PG_RW_IDX] |
3293                                      kernel_pmap.pmap_bits[PG_A_IDX] |
3294                                      kernel_pmap.pmap_bits[PG_M_IDX]);
3295                 atomic_swap_long(pt, newpt);
3296
3297                 kstart = (kstart + PAGE_SIZE * NPTEPG) &
3298                           ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1);
3299
3300                 if (kstart - 1 >= kernel_map.max_offset) {
3301                         kstart = kernel_map.max_offset;
3302                         break;
3303                 }
3304         }
3305
3306         /*
3307          * Only update kernel_vm_end for areas below KERNBASE.
3308          */
3309         if (update_kernel_vm_end && kernel_vm_end < kstart)
3310                 kernel_vm_end = kstart;
3311 }
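
#if 0
/*
 * Worked example (sketch only, not compiled): with 4K pages and
 * NPTEPG = 512, one PT page maps 2MB, so pmap_growkernel() widens any
 * request to 2MB boundaries before filling in PD/PT pages.  The
 * addresses below are made up for illustration and assume those
 * standard x86-64 constants.
 */
static void
pmap_growkernel_example(void)
{
        vm_offset_t kstart = 0xffffffff80301234ULL;
        vm_offset_t kend   = 0xffffffff80405678ULL;

        kstart = rounddown2(kstart, (vm_offset_t)(PAGE_SIZE * NPTEPG));
        /* kstart == 0xffffffff80200000, rounded down to a 2MB boundary */
        kend = roundup2(kend, (vm_offset_t)(PAGE_SIZE * NPTEPG));
        /* kend   == 0xffffffff80600000, rounded up to a 2MB boundary */
}
#endif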
3312
3313 /*
3314  *      Add a reference to the specified pmap.
3315  */
3316 void
3317 pmap_reference(pmap_t pmap)
3318 {
3319         if (pmap != NULL)
3320                 atomic_add_int(&pmap->pm_count, 1);
3321 }
3322
3323 /***************************************************
3324  * page management routines.
3325  ***************************************************/
3326
3327 /*
3328  * Hold a pv without locking it
3329  */
3330 static void
3331 pv_hold(pv_entry_t pv)
3332 {
3333         atomic_add_int(&pv->pv_hold, 1);
3334 }
3335
3336 /*
3337  * Hold a pv_entry, preventing its destruction.  TRUE is returned if the pv
3338  * was successfully locked, FALSE if it wasn't.  The caller must dispose of
3339  * the pv properly.
3340  *
3341  * Either the pmap->pm_spin or the related vm_page_spin (if traversing a
3342  * pv list via its page) must be held by the caller in order to stabilize
3343  * the pv.
3344  */
3345 static int
3346 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL)
3347 {
3348         u_int count;
3349
3350         /*
3351          * Critical path shortcut expects pv to already have one ref
3352          * (for the pv->pv_pmap).
3353          */
3354         if (atomic_cmpset_int(&pv->pv_hold, 1, PV_HOLD_LOCKED | 2)) {
3355 #ifdef PMAP_DEBUG
3356                 pv->pv_func = func;
3357                 pv->pv_line = lineno;
3358 #endif
3359                 return TRUE;
3360         }
3361
3362         for (;;) {
3363                 count = pv->pv_hold;
3364                 cpu_ccfence();
3365                 if ((count & PV_HOLD_LOCKED) == 0) {
3366                         if (atomic_cmpset_int(&pv->pv_hold, count,
3367                                               (count + 1) | PV_HOLD_LOCKED)) {
3368 #ifdef PMAP_DEBUG
3369                                 pv->pv_func = func;
3370                                 pv->pv_line = lineno;
3371 #endif
3372                                 return TRUE;
3373                         }
3374                 } else {
3375                         if (atomic_cmpset_int(&pv->pv_hold, count, count + 1))
3376                                 return FALSE;
3377                 }
3378                 /* retry */
3379         }
3380 }
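
/*
 * Illustrative note (the flag values are defined in pmap.h; treat this
 * as a sketch of the encoding, not authoritative): pv->pv_hold packs a
 * lock bit (PV_HOLD_LOCKED), a waiter bit (PV_HOLD_WAITING), and a
 * reference count (PV_HOLD_MASK) into a single int so a hold+lock can
 * be taken with one atomic op.  The fast path above turns a bare pmap
 * ref (pv_hold == 1) directly into a locked, double-held pv
 * (PV_HOLD_LOCKED | 2), and pv_put()'s fast path undoes exactly that
 * transition.
 */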
3381
3382 /*
3383  * Drop a previously held pv_entry which could not be locked, allowing its
3384  * destruction.
3385  *
3386  * Must not be called with a spinlock held as we might zfree() the pv if it
3387  * is no longer associated with a pmap and this was the last hold count.
3388  */
3389 static void
3390 pv_drop(pv_entry_t pv)
3391 {
3392         u_int count;
3393
3394         for (;;) {
3395                 count = pv->pv_hold;
3396                 cpu_ccfence();
3397                 KKASSERT((count & PV_HOLD_MASK) > 0);
3398                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) !=
3399                          (PV_HOLD_LOCKED | 1));
3400                 if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) {
3401                         if ((count & PV_HOLD_MASK) == 1) {
3402 #ifdef PMAP_DEBUG2
3403                                 if (pmap_enter_debug > 0) {
3404                                         --pmap_enter_debug;
3405                                         kprintf("pv_drop: free pv %p\n", pv);
3406                                 }
3407 #endif
3408                                 KKASSERT(count == 1);
3409                                 KKASSERT(pv->pv_pmap == NULL);
3410                                 zfree(pvzone, pv);
3411                         }
3412                         return;
3413                 }
3414                 /* retry */
3415         }
3416 }
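
/*
 * Descriptive note: the second KKASSERT above rejects the combination
 * "locked with a hold count of exactly 1", i.e. dropping the last
 * reference while the pv is still locked.  Callers must pv_unlock()
 * first, or use pv_put()/pv_free() which fold the unlock and the drop
 * together.
 */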
3417
3418 /*
3419  * Find or allocate the requested PV entry, returning a locked, held pv.
3420  *
3421  * If (*isnew) is non-zero, the returned pv will have two hold counts, one
3422  * for the caller and one representing the pmap and vm_page association.
3423  *
3424  * If (*isnew) is zero, the returned pv will have only one hold count.
3425  *
3426  * Since both associations can only be adjusted while the pv is locked,
3427  * together they represent just one additional hold.
3428  */
3429 static
3430 pv_entry_t
3431 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
3432 {
3433         struct mdglobaldata *md = mdcpu;
3434         pv_entry_t pv;
3435         pv_entry_t pnew;
3436         int pmap_excl = 0;
3437
3438         pnew = NULL;
3439         if (md->gd_newpv) {
3440 #if 1
3441                 pnew = atomic_swap_ptr((void *)&md->gd_newpv, NULL);
3442 #else
3443                 crit_enter();
3444                 pnew = md->gd_newpv;    /* might race NULL */
3445                 md->gd_newpv = NULL;
3446                 crit_exit();
3447 #endif
3448         }
3449         if (pnew == NULL)
3450                 pnew = zalloc(pvzone);
3451
3452         spin_lock_shared(&pmap->pm_spin);
3453         for (;;) {
3454                 /*
3455                  * Shortcut cache
3456                  */
3457                 pv = pv_entry_lookup(pmap, pindex);
3458                 if (pv == NULL) {
3459                         vm_pindex_t *pmark;
3460
3461                         /*
3462                          * Requires exclusive pmap spinlock
3463                          */
3464                         if (pmap_excl == 0) {
3465                                 pmap_excl = 1;
3466                                 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3467                                         spin_unlock_shared(&pmap->pm_spin);
3468                                         spin_lock(&pmap->pm_spin);
3469                                         continue;
3470                                 }
3471                         }
3472
3473                         /*
3474                          * We need to block if someone is holding our
3475                          * placemarker.  As long as we determine the
3476                          * placemarker has not been acquired we do not
3477                          * need to get it, as acquisition also requires
3478                          * the pmap spin lock.
3479                          *
3480                          * However, we can race the wakeup.
3481                          */
3482                         pmark = pmap_placemarker_hash(pmap, pindex);
3483
3484                         if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3485                                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3486                                 tsleep_interlock(pmark, 0);
3487                                 if (((*pmark ^ pindex) &
3488                                      ~PM_PLACEMARK_WAKEUP) == 0) {
3489                                         spin_unlock(&pmap->pm_spin);
3490                                         tsleep(pmark, PINTERLOCKED, "pvplc", 0);
3491                                         spin_lock(&pmap->pm_spin);
3492                                 }
3493                                 continue;
3494                         }
3495
3496                         /*
3497                          * Setup the new entry
3498                          */
3499                         pnew->pv_pmap = pmap;
3500                         pnew->pv_pindex = pindex;
3501                         pnew->pv_hold = PV_HOLD_LOCKED | 2;
3502 #ifdef PMAP_DEBUG
3503                         pnew->pv_func = func;
3504                         pnew->pv_line = lineno;
3505                         if (pnew->pv_line_lastfree > 0) {
3506                                 pnew->pv_line_lastfree =
3507                                                 -pnew->pv_line_lastfree;
3508                         }
3509 #endif
3510                         pv = pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
3511                         atomic_add_long(&pmap->pm_stats.resident_count, 1);
3512                         spin_unlock(&pmap->pm_spin);
3513                         *isnew = 1;
3514
3515                         KKASSERT(pv == NULL);
3516                         return(pnew);
3517                 }
3518
3519                 /*
3520                  * We already have an entry, cleanup the staged pnew if
3521                  * we can get the lock, otherwise block and retry.
3522                  */
3523                 if (__predict_true(_pv_hold_try(pv PMAP_DEBUG_COPY))) {
3524                         if (pmap_excl)
3525                                 spin_unlock(&pmap->pm_spin);
3526                         else
3527                                 spin_unlock_shared(&pmap->pm_spin);
3528 #if 1
3529                         pnew = atomic_swap_ptr((void *)&md->gd_newpv, pnew);
3530                         if (pnew)
3531                                 zfree(pvzone, pnew);
3532 #else
3533                         crit_enter();
3534                         if (md->gd_newpv == NULL)
3535                                 md->gd_newpv = pnew;
3536                         else
3537                                 zfree(pvzone, pnew);
3538                         crit_exit();
3539 #endif
3540                         KKASSERT(pv->pv_pmap == pmap &&
3541                                  pv->pv_pindex == pindex);
3542                         *isnew = 0;
3543                         return(pv);
3544                 }
3545                 if (pmap_excl) {
3546                         spin_unlock(&pmap->pm_spin);
3547                         _pv_lock(pv PMAP_DEBUG_COPY);
3548                         pv_put(pv);
3549                         spin_lock(&pmap->pm_spin);
3550                 } else {
3551                         spin_unlock_shared(&pmap->pm_spin);
3552                         _pv_lock(pv PMAP_DEBUG_COPY);
3553                         pv_put(pv);
3554                         spin_lock_shared(&pmap->pm_spin);
3555                 }
3556         }
3557         /* NOT REACHED */
3558 }
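
/*
 * Descriptive note: md->gd_newpv is a one-entry per-cpu cache of a
 * preallocated pv_entry.  _pv_alloc() stages pnew before taking the
 * pmap spinlock so no zone allocation happens while the lock is held,
 * and when the entry turns out to already exist the staged pv is
 * parked back into gd_newpv with a single atomic_swap_ptr(), freeing
 * the previous occupant, instead of paying for a zfree()/zalloc()
 * round trip on every lookup.
 */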
3559
3560 /*
3561  * Find the requested PV entry, returning a locked+held pv or NULL
3562  */
3563 static
3564 pv_entry_t
3565 _pv_get(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp PMAP_DEBUG_DECL)
3566 {
3567         pv_entry_t pv;
3568         int pmap_excl = 0;
3569
3570         spin_lock_shared(&pmap->pm_spin);
3571         for (;;) {
3572                 /*
3573                  * Shortcut cache
3574                  */
3575                 pv = pv_entry_lookup(pmap, pindex);
3576                 if (pv == NULL) {
3577                         /*
3578                          * Block if there is ANY placemarker.  If we are to
3579                          * return it, we must also acquire the spot, so we
3580                          * have to block even if the placemarker is held on
3581                          * a different address.
3582                          *
3583                          * OPTIMIZATION: If pmarkp is passed as NULL the
3584                          * caller is just probing (or looking for a real
3585                          * pv_entry), and in this case we only need to check
3586                          * to see if the placemarker matches pindex.
3587                          */
3588                         vm_pindex_t *pmark;
3589
3590                         /*
3591                          * Requires exclusive pmap spinlock
3592                          */
3593                         if (pmap_excl == 0) {
3594                                 pmap_excl = 1;
3595                                 if (!spin_lock_upgrade_try(&pmap->pm_spin)) {
3596                                         spin_unlock_shared(&pmap->pm_spin);
3597                                         spin_lock(&pmap->pm_spin);
3598                                         continue;
3599                                 }
3600                         }
3601
3602                         pmark = pmap_placemarker_hash(pmap, pindex);
3603
3604                         if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3605                             ((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3606                                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
3607                                 tsleep_interlock(pmark, 0);
3608                                 if ((pmarkp && *pmark != PM_NOPLACEMARK) ||
3609                                     ((*pmark ^ pindex) &
3610                                      ~PM_PLACEMARK_WAKEUP) == 0) {
3611                                         spin_unlock(&pmap->pm_spin);
3612                                         tsleep(pmark, PINTERLOCKED, "pvpld", 0);
3613                                         spin_lock(&pmap->pm_spin);
3614                                 }
3615                                 continue;
3616                         }
3617                         if (pmarkp) {
3618                                 if (atomic_swap_long(pmark, pindex) !=
3619                                     PM_NOPLACEMARK) {
3620                                         panic("_pv_get: pmark race");
3621                                 }
3622                                 *pmarkp = pmark;
3623                         }
3624                         spin_unlock(&pmap->pm_spin);
3625                         return NULL;
3626                 }
3627                 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
3628                         pv_cache(pv, pindex);
3629                         if (pmap_excl)
3630                                 spin_unlock(&pmap->pm_spin);
3631                         else
3632                                 spin_unlock_shared(&pmap->pm_spin);
3633                         KKASSERT(pv->pv_pmap == pmap &&
3634                                  pv->pv_pindex == pindex);
3635                         return(pv);
3636                 }
3637                 if (pmap_excl) {
3638                         spin_unlock(&pmap->pm_spin);
3639                         _pv_lock(pv PMAP_DEBUG_COPY);
3640                         pv_put(pv);
3641                         spin_lock(&pmap->pm_spin);
3642                 } else {
3643                         spin_unlock_shared(&pmap->pm_spin);
3644                         _pv_lock(pv PMAP_DEBUG_COPY);
3645                         pv_put(pv);
3646                         spin_lock_shared(&pmap->pm_spin);
3647                 }
3648         }
3649 }
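
#if 0
/*
 * Placemarker sketch (illustrative only, not compiled).  A placemark
 * reserves a pindex that has no pv_entry yet so concurrent lookups
 * serialize on it.  The test used by _pv_get() and _pv_alloc() treats
 * a hash slot as "marked for pindex" when the slot equals the pindex
 * modulo the wakeup request bit:
 */
static __inline int
pv_placemark_matches(vm_pindex_t mark, vm_pindex_t pindex)
{
        return (((mark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0);
}
#endif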
3650
3651 /*
3652  * Lookup, hold, and attempt to lock (pmap,pindex).
3653  *
3654  * If the entry does not exist NULL is returned and *errorp is set to 0
3655  *
3656  * If the entry exists and could be successfully locked it is returned and
3657  * errorp is set to 0.
3658  *
3659  * If the entry exists but could NOT be successfully locked it is returned
3660  * held and *errorp is set to 1.
3661  *
3662  * If the entry is placemarked by someone else NULL is returned and *errorp
3663  * is set to 1.
3664  */
3665 static
3666 pv_entry_t
3667 pv_get_try(pmap_t pmap, vm_pindex_t pindex, vm_pindex_t **pmarkp, int *errorp)
3668 {
3669         pv_entry_t pv;
3670
3671         spin_lock_shared(&pmap->pm_spin);
3672
3673         pv = pv_entry_lookup(pmap, pindex);
3674         if (pv == NULL) {
3675                 vm_pindex_t *pmark;
3676
3677                 pmark = pmap_placemarker_hash(pmap, pindex);
3678
3679                 if (((*pmark ^ pindex) & ~PM_PLACEMARK_WAKEUP) == 0) {
3680                         *errorp = 1;
3681                 } else if (pmarkp &&
3682                            atomic_cmpset_long(pmark, PM_NOPLACEMARK, pindex)) {
3683                         *errorp = 0;
3684                 } else {
3685                         /*
3686                          * Can't set a placemark with a NULL pmarkp, or if
3687                          * pmarkp is non-NULL but we failed to set our
3688                          * placemark.
3689                          */
3690                         *errorp = 1;
3691                 }
3692                 if (pmarkp)
3693                         *pmarkp = pmark;
3694                 spin_unlock_shared(&pmap->pm_spin);
3695
3696                 return NULL;
3697         }
3698
3699         /*
3700          * XXX This has problems if the lock is shared, why?
3701          */
3702         if (pv_hold_try(pv)) {
3703                 pv_cache(pv, pindex);   /* overwrite ok (shared lock) */
3704                 spin_unlock_shared(&pmap->pm_spin);
3705                 *errorp = 0;
3706                 KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
3707                 return(pv);     /* lock succeeded */
3708         }
3709         spin_unlock_shared(&pmap->pm_spin);
3710         *errorp = 1;
3711
3712         return (pv);            /* lock failed */
3713 }
3714
3715 /*
3716  * Lock a held pv, keeping the hold count
3717  */
3718 static
3719 void
3720 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
3721 {
3722         u_int count;
3723
3724         for (;;) {
3725                 count = pv->pv_hold;
3726                 cpu_ccfence();
3727                 if ((count & PV_HOLD_LOCKED) == 0) {
3728                         if (atomic_cmpset_int(&pv->pv_hold, count,
3729                                               count | PV_HOLD_LOCKED)) {
3730 #ifdef PMAP_DEBUG
3731                                 pv->pv_func = func;
3732                                 pv->pv_line = lineno;
3733 #endif
3734                                 return;
3735                         }
3736                         continue;
3737                 }
3738                 tsleep_interlock(pv, 0);
3739                 if (atomic_cmpset_int(&pv->pv_hold, count,
3740                                       count | PV_HOLD_WAITING)) {
3741 #ifdef PMAP_DEBUG2
3742                         if (pmap_enter_debug > 0) {
3743                                 --pmap_enter_debug;
3744                                 kprintf("pv waiting on %s:%d\n",
3745                                         pv->pv_func, pv->pv_line);
3746                         }
3747 #endif
3748                         tsleep(pv, PINTERLOCKED, "pvwait", hz);
3749                 }
3750                 /* retry */
3751         }
3752 }
3753
3754 /*
3755  * Unlock a held and locked pv, keeping the hold count.
3756  */
3757 static
3758 void
3759 pv_unlock(pv_entry_t pv)
3760 {
3761         u_int count;
3762
3763         for (;;) {
3764                 count = pv->pv_hold;
3765                 cpu_ccfence();
3766                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
3767                          (PV_HOLD_LOCKED | 1));
3768                 if (atomic_cmpset_int(&pv->pv_hold, count,
3769                                       count &
3770                                       ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
3771                         if (count & PV_HOLD_WAITING)
3772                                 wakeup(pv);
3773                         break;
3774                 }
3775         }
3776 }
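
/*
 * Descriptive note: the sleep/wakeup handshake between _pv_lock() and
 * pv_unlock() cannot lose a waiter.  _pv_lock() calls tsleep_interlock()
 * before publishing PV_HOLD_WAITING with a cmpset, so either pv_unlock()
 * observes the bit and issues wakeup(), or the waiter's cmpset fails
 * against the now-changed word and the waiter retries.
 */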
3777
3778 /*
3779  * Unlock and drop a pv.  If the pv is no longer associated with a pmap
3780  * and the hold count drops to zero we will free it.
3781  *
3782  * Caller should not hold any spin locks.  We are protected from hold races
3783  * by virtue of holds occurring only with a pmap_spin or vm_page_spin
3784  * lock held.  A pv cannot be located otherwise.
3785  */
3786 static
3787 void
3788 pv_put(pv_entry_t pv)
3789 {
3790 #ifdef PMAP_DEBUG2
3791         if (pmap_enter_debug > 0) {
3792                 --pmap_enter_debug;
3793                 kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold);
3794         }
3795 #endif
3796
3797         /*
3798          * Normal put-aways must have a pv_m associated with the pv,
3799          * but allow the case where the pv has been destructed due
3800          * to pmap_dynamic_delete.
3801          */
3802         KKASSERT(pv->pv_pmap == NULL || pv->pv_m != NULL);
3803
3804         /*
3805          * Fast - shortcut most common condition
3806          */
3807         if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
3808                 return;
3809
3810         /*
3811          * Slow
3812          */
3813         pv_unlock(pv);
3814         pv_drop(pv);
3815 }
3816
3817 /*
3818  * Remove the pmap association from a pv, require that pv_m already be removed,
3819  * then unlock and drop the pv.  Any pte operations must have already been
3820  * completed.  This call may result in a last-drop which will physically free
3821  * the pv.
3822  *
3823  * Removing the pmap association entails an additional drop.
3824  *
3825  * pv must be exclusively locked on call and will be disposed of on return.
3826  */
3827 static
3828 void
3829 _pv_free(pv_entry_t pv, pv_entry_t pvp PMAP_DEBUG_DECL)
3830 {
3831         pmap_t pmap;
3832
3833 #ifdef PMAP_DEBUG
3834         pv->pv_func_lastfree = func;
3835         pv->pv_line_lastfree = lineno;
3836 #endif
3837         KKASSERT(pv->pv_m == NULL);
3838         KKASSERT((pv->pv_hold & (PV_HOLD_LOCKED|PV_HOLD_MASK)) >=
3839                   (PV_HOLD_LOCKED|1));
3840         if ((pmap = pv->pv_pmap) != NULL) {
3841                 spin_lock(&pmap->pm_spin);
3842                 KKASSERT(pv->pv_pmap == pmap);
3843                 if (pmap->pm_pvhint_pt == pv)
3844                         pmap->pm_pvhint_pt = NULL;
3845                 if (pmap->pm_pvhint_pte == pv)
3846                         pmap->pm_pvhint_pte = NULL;
3847                 pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
3848                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
3849                 pv->pv_pmap = NULL;
3850                 pv->pv_pindex = 0;
3851                 spin_unlock(&pmap->pm_spin);
3852
3853                 /*
3854                  * Try to shortcut three atomic ops, otherwise fall through
3855                  * and do it normally.  Drop two refs and the lock all in
3856                  * one go.
3857                  */
3858                 if (pvp)
3859                         vm_page_unwire_quick(pvp->pv_m);
3860                 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
3861 #ifdef PMAP_DEBUG2
3862                         if (pmap_enter_debug > 0) {
3863                                 --pmap_enter_debug;
3864                                 kprintf("pv_free: free pv %p\n", pv);
3865                         }
3866 #endif
3867                         zfree(pvzone, pv);
3868                         return;
3869                 }
3870                 pv_drop(pv);    /* ref for pv_pmap */
3871         }
3872         pv_unlock(pv);
3873         pv_drop(pv);
3874 }
3875
3876 /*
3877  * This routine is very drastic, but can save the system
3878  * in a pinch.
3879  */
3880 void
3881 pmap_collect(void)
3882 {
3883         int i;
3884         vm_page_t m;
3885         static int warningdone=0;
3886
3887         if (pmap_pagedaemon_waken == 0)
3888                 return;
3889         pmap_pagedaemon_waken = 0;
3890         if (warningdone < 5) {
3891                 kprintf("pmap_collect: collecting pv entries -- "
3892                         "suggest increasing PMAP_SHPGPERPROC\n");
3893                 warningdone++;
3894         }
3895
3896         for (i = 0; i < vm_page_array_size; i++) {
3897                 m = &vm_page_array[i];
3898                 if (m->wire_count || m->hold_count)
3899                         continue;
3900                 if (vm_page_busy_try(m, TRUE) == 0) {
3901                         if (m->wire_count == 0 && m->hold_count == 0) {
3902                                 pmap_remove_all(m);
3903                         }
3904                         vm_page_wakeup(m);
3905                 }
3906         }
3907 }
3908
3909 /*
3910  * Scan the pmap for active page table entries and issue a callback.
3911  * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
3912  * its parent page table.
3913  *
3914  * pte_pv will be NULL if the page or page table is unmanaged.
3915  * pt_pv will point to the page table page containing the pte for the page.
3916  *
3917  * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
3918  *       we pass a NULL pte_pv and a pt_pv pointing to the passed process
3919  *       pmap's PD page to the callback function.  This can be
3920  *       confusing because the pt_pv is really a pd_pv, and the target page
3921  *       table page is simply aliased by the pmap and not owned by it.
3922  *
3923  * It is assumed that the start and end are properly rounded to the page size.
3924  *
3925  * It is assumed that PD pages and above are managed and thus in the RB tree,
3926  * allowing us to use RB_SCAN from the PD pages down for ranged scans.
3927  */
3928 struct pmap_scan_info {
3929         struct pmap *pmap;
3930         vm_offset_t sva;
3931         vm_offset_t eva;
3932         vm_pindex_t sva_pd_pindex;
3933         vm_pindex_t eva_pd_pindex;
3934         void (*func)(pmap_t, struct pmap_scan_info *,
3935                      pv_entry_t, vm_pindex_t *, pv_entry_t,
3936                      int, vm_offset_t,
3937                      pt_entry_t *, void *);
3938         void *arg;
3939         pmap_inval_bulk_t bulk_core;
3940         pmap_inval_bulk_t *bulk;
3941         int count;
3942         int stop;
3943 };
3944
3945 static int pmap_scan_cmp(pv_entry_t pv, void *data);
3946 static int pmap_scan_callback(pv_entry_t pv, void *data);
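
#if 0
/*
 * Usage sketch (hypothetical, not compiled): callers fill in a
 * pmap_scan_info and let pmap_scan() walk the page tables, invoking
 * info->func on each pte.  The names example_callback/example_scan
 * below are made up for illustration; real callers supply their own
 * callback matching the signature in struct pmap_scan_info.
 */
static void example_callback(pmap_t, struct pmap_scan_info *,
                        pv_entry_t, vm_pindex_t *, pv_entry_t,
                        int, vm_offset_t, pt_entry_t *, void *);

static void
example_scan(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
        struct pmap_scan_info info;

        info.pmap = pmap;
        info.sva = sva;
        info.eva = eva;
        info.func = example_callback;
        info.arg = NULL;
        pmap_scan(&info, 1);    /* non-zero: use bulk SMP invalidation */
}
#endif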
3947
3948 static void
3949 pmap_scan(struct pmap_scan_info *info, int smp_inval)
3950 {
3951         struct pmap *pmap = info->pmap;
3952         pv_entry_t pd_pv;       /* A page directory PV */
3953         pv_entry_t pt_pv;       /* A page table PV */
3954         pv_entry_t pte_pv;      /* A page table entry PV */
3955         vm_pindex_t *pte_placemark;
3956         vm_pindex_t *pt_placemark;
3957         pt_entry_t *ptep;
3958         pt_entry_t oldpte;
3959         struct pv_entry dummy_pv;
3960
3961         info->stop = 0;
3962         if (pmap == NULL)
3963                 return;
3964         if (info->sva == info->eva)
3965                 return;
3966         if (smp_inval) {
3967                 info->bulk = &info->bulk_core;
3968                 pmap_inval_bulk_init(&info->bulk_core, pmap);
3969         } else {
3970                 info->bulk = NULL;
3971         }
3972
3973         /*
3974          * Hold the token for stability; if the pmap is empty we have nothing
3975          * to do.
3976          */
3977 #if 0
3978         if (pmap->pm_stats.resident_count == 0) {
3979                 return;
3980         }
3981 #endif
3982
3983         info->count = 0;
3984
3985         /*
3986          * Special handling for scanning one page, which is a very common
3987          * operation (it is?).
3988          *
3989          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
3990          */
3991         if (info->sva + PAGE_SIZE == info->eva) {
3992                 if (info->sva >= VM_MAX_USER_ADDRESS) {
3993                         /*
3994                          * Kernel mappings do not track wire counts on
3995                          * page table pages and only maintain pd_pv and
3996                          * pte_pv levels so pmap_scan() works.
3997                          */
3998                         pt_pv = NULL;
3999                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4000                                         &pte_placemark);
4001                         ptep = vtopte(info->sva);
4002                 } else {
4003                         /*
4004                          * User pages which are unmanaged will not have a
4005                          * pte_pv.  User page table pages which are unmanaged
4006                          * (shared from elsewhere) will also not have a pt_pv.
4007                          * The func() callback will pass both pte_pv and pt_pv
4008                          * as NULL in that case.
4009                          *
4010                          * We hold pte_placemark across the operation for
4011                          * unmanaged pages.
4012                          *
4013                          * WARNING!  We must hold pt_placemark across the
4014                          *           *ptep test to prevent misinterpreting
4015                          *           a non-zero *ptep as a shared page
4016                          *           table page.  Hold it across the function
4017                          *           callback as well for SMP safety.
4018                          */
4019                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva),
4020                                         &pte_placemark);
4021                         pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva),
4022                                         &pt_placemark);
4023                         if (pt_pv == NULL) {
4024                                 KKASSERT(pte_pv == NULL);
4025                                 pd_pv = pv_get(pmap,
4026                                                pmap_pd_pindex(info->sva),
4027                                                NULL);
4028                                 if (pd_pv) {
4029                                         ptep = pv_pte_lookup(pd_pv,
4030                                                     pmap_pt_index(info->sva));
4031                                         if (*ptep) {
4032                                                 info->func(pmap, info,
4033                                                      NULL, pt_placemark,
4034                                                      pd_pv, 1,
4035                                                      info->sva, ptep,
4036                                                      info->arg);
4037                                         } else {
4038                                                 pv_placemarker_wakeup(pmap,
4039                                                                   pt_placemark);
4040                                         }
4041                                         pv_put(pd_pv);
4042                                 } else {
4043                                         pv_placemarker_wakeup(pmap,
4044                                                               pt_placemark);
4045                                 }
4046                                 pv_placemarker_wakeup(pmap, pte_placemark);
4047                                 goto fast_skip;
4048                         }
4049                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva));
4050                 }
4051
4052                 /*
4053                  * NOTE: *ptep can't be ripped out from under us if we hold
4054                  *       pte_pv (or pte_placemark) locked, but bits can
4055                  *       change.
4056                  */
4057                 oldpte = *ptep;
4058                 cpu_ccfence();
4059                 if (oldpte == 0) {
4060                         KKASSERT(pte_pv == NULL);
4061                         pv_placemarker_wakeup(pmap, pte_placemark);
4062                 } else if (pte_pv) {
4063                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
4064                                            pmap->pmap_bits[PG_V_IDX])) ==
4065                                 (pmap->pmap_bits[PG_MANAGED_IDX] |
4066                                  pmap->pmap_bits[PG_V_IDX]),
4067                             ("badA *ptep %016lx/%016lx sva %016lx pte_pv %p",
4068                             *ptep, oldpte, info->sva, pte_pv));
4069                         info->func(pmap, info, pte_pv, NULL, pt_pv, 0,
4070                                    info->sva, ptep, info->arg);
4071                 } else {
4072                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
4073                                            pmap->pmap_bits[PG_V_IDX])) ==
4074                             pmap->pmap_bits[PG_V_IDX],
4075                             ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL",
4076                             *ptep, oldpte, info->sva));
4077                         info->func(pmap, info, NULL, pte_placemark, pt_pv, 0,
4078                                    info->sva, ptep, info->arg);
4079                 }
4080                 if (pt_pv)
4081                         pv_put(pt_pv);
4082 fast_skip:
4083                 pmap_inval_bulk_flush(info->bulk);
4084                 return;
4085         }
4086
4087         /*
4088          * Nominal scan case, RB_SCAN() for PD pages and iterate from
4089          * there.
4090          *
4091          * WARNING! eva can overflow our standard ((N + mask) >> bits)
4092          *          bounds, resulting in a pd_pindex of 0.  To solve the
4093          *          problem we use an inclusive range.
4094          */
4095         info->sva_pd_pindex = pmap_pd_pindex(info->sva);
4096         info->eva_pd_pindex = pmap_pd_pindex(info->eva - PAGE_SIZE);
4097
4098         if (info->sva >= VM_MAX_USER_ADDRESS) {
4099                 /*
4100                  * The kernel does not currently maintain any pv_entry's for
4101                  * higher-level page tables.
4102                  */
4103                 bzero(&dummy_pv, sizeof(dummy_pv));
4104                 dummy_pv.pv_pindex = info->sva_pd_pindex;
4105                 spin_lock(&pmap->pm_spin);
4106                 while (dummy_pv.pv_pindex <= info->eva_pd_pindex) {
4107                         pmap_scan_callback(&dummy_pv, info);
4108                         ++dummy_pv.pv_pindex;
4109                         if (dummy_pv.pv_pindex < info->sva_pd_pindex) /*wrap*/
4110                                 break;
4111                 }
4112                 spin_unlock(&pmap->pm_spin);
4113         } else {
4114                 /*
4115                  * User page tables maintain local PML4, PDP, and PD
4116                  * pv_entry's at the very least.  PT pv's might be
4117                  * unmanaged and thus not exist.  PTE pv's might be
4118                  * unmanaged and thus not exist.
4119                  */
4120                 spin_lock(&pmap->pm_spin);
4121                 pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot, pmap_scan_cmp,
4122                                          pmap_scan_callback, info);
4123                 spin_unlock(&pmap->pm_spin);
4124         }
4125         pmap_inval_bulk_flush(info->bulk);
4126 }
4127
4128 /*
4129  * WARNING! pmap->pm_spin held
4130  *
4131  * WARNING! eva can overflow our standard ((N + mask) >> bits)
4132  *          bounds, resulting in a pd_pindex of 0.  To solve the
4133  *          problem we use an inclusive range.
4134  */
4135 static int
4136 pmap_scan_cmp(pv_entry_t pv, void *data)
4137 {
4138         struct pmap_scan_info *info = data;
4139         if (pv->pv_pindex < info->sva_pd_pindex)
4140                 return(-1);
4141         if (pv->pv_pindex > info->eva_pd_pindex)
4142                 return(1);
4143         return(0);
4144 }
4145
4146 /*
4147  * pmap_scan() by PDs
4148  *
4149  * WARNING! pmap->pm_spin held
4150  */
4151 static int
4152 pmap_scan_callback(pv_entry_t pv, void *data)
4153 {
4154         struct pmap_scan_info *info = data;
4155         struct pmap *pmap = info->pmap;
4156         pv_entry_t pd_pv;       /* A page directory PV */
4157         pv_entry_t pt_pv;       /* A page table PV */
4158         vm_pindex_t *pt_placemark;
4159         pt_entry_t *ptep;
4160         pt_entry_t oldpte;
4161         vm_offset_t sva;
4162         vm_offset_t eva;
4163         vm_offset_t va_next;
4164         vm_pindex_t pd_pindex;
4165         int error;
4166
4167         /*
4168          * Stop if requested
4169          */
4170         if (info->stop)
4171                 return -1;
4172
4173         /*
4174          * Pull the PD pindex from the pv before releasing the spinlock.
4175          *
4176          * WARNING: pv is faked for kernel pmap scans.
4177          */
4178         pd_pindex = pv->pv_pindex;
4179         spin_unlock(&pmap->pm_spin);
4180         pv = NULL;      /* invalid after spinlock unlocked */
4181
4182         /*
4183          * Calculate the page range within the PD.  SIMPLE pmaps are
4184          * direct-mapped for the entire 2^64 address space.  Normal pmaps
4185          * reflect the user and kernel address space which requires
4186          * canonicalization with regard to converting pd_pindex's back
4187          * into addresses.
4188          */
4189         sva = (pd_pindex - pmap_pd_pindex(0)) << PDPSHIFT;
4190         if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
4191             (sva & PML4_SIGNMASK)) {
4192                 sva |= PML4_SIGNMASK;
4193         }
4194         eva = sva + NBPDP;      /* can overflow */
4195         if (sva < info->sva)
4196                 sva = info->sva;
4197         if (eva < info->sva || eva > info->eva)
4198                 eva = info->eva;
4199
4200         /*
4201          * NOTE: kernel mappings do not track page table pages, only
4202          *       terminal pages.
4203          *
4204          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
4205          *       However, for the scan to be efficient we try to
4206          *       cache items top-down.
4207          */
4208         pd_pv = NULL;
4209         pt_pv = NULL;
4210
4211         for (; sva < eva; sva = va_next) {
4212                 if (info->stop)
4213                         break;
4214                 if (sva >= VM_MAX_USER_ADDRESS) {
4215                         if (pt_pv) {
4216                                 pv_put(pt_pv);
4217                                 pt_pv = NULL;
4218                         }
4219                         goto kernel_skip;
4220                 }
4221
4222                 /*
4223                  * PD cache, scan shortcut if it doesn't exist.
4224                  */
4225                 if (pd_pv == NULL) {
4226                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4227                 } else if (pd_pv->pv_pmap != pmap ||
4228                            pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
4229                         pv_put(pd_pv);
4230                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva), NULL);
4231                 }
4232                 if (pd_pv == NULL) {
4233                         va_next = (sva + NBPDP) & ~PDPMASK;
4234                         if (va_next < sva)
4235                                 va_next = eva;
4236                         continue;
4237                 }
4238
4239                 /*
4240                  * PT cache
4241                  *
4242                  * NOTE: The cached pt_pv can be removed from the pmap when
4243                  *       pmap_dynamic_delete is enabled.
4244                  */
4245                 if (pt_pv && (pt_pv->pv_pmap != pmap ||
4246                               pt_pv->pv_pindex != pmap_pt_pindex(sva))) {
4247                         pv_put(pt_pv);
4248                         pt_pv = NULL;
4249                 }
4250                 if (pt_pv == NULL) {
4251                         pt_pv = pv_get_try(pmap, pmap_pt_pindex(sva),
4252                                            &pt_placemark, &error);
4253                         if (error) {
4254                                 pv_put(pd_pv);  /* lock order */
4255                                 pd_pv = NULL;
4256                                 if (pt_pv) {
4257                                         pv_lock(pt_pv);
4258                                         pv_put(pt_pv);
4259                                         pt_pv = NULL;
4260                                 } else {
4261                                         pv_placemarker_wait(pmap, pt_placemark);
4262                                 }
4263                                 va_next = sva;
4264                                 continue;
4265                         }
4266                         /* may have to re-check later if pt_pv is NULL here */
4267                 }
4268
4269                 /*
4270                  * If pt_pv is NULL we either have a shared page table
4271                  * page and must issue a callback specific to that case,
4272                  * or there is no page table page.
4273                  *
4274                  * Either way we can skip the page table page.
4275                  *
4276                  * WARNING! pt_pv can also be NULL due to a pv creation
4277                  *          race where we find it to be NULL and then
4278                  *          later see a pte_pv.  But it's possible the pt_pv
4279                  *          got created in between the two operations, so
4280                  *          we must check.
4281                  */
4282                 if (pt_pv == NULL) {
4283                         /*
4284                          * Possible unmanaged (shared from another pmap)
4285                          * page table page.
4286                          *
4287                          * WARNING!  We must hold pt_placemark across the
4288                  *           *ptep test to prevent misinterpreting
4289                          *           a non-zero *ptep as a shared page
4290                          *           table page.  Hold it across the function
4291                          *           callback as well for SMP safety.
4292                          */
4293                         ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
4294                         if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
4295                                 info->func(pmap, info, NULL, pt_placemark,
4296                                            pd_pv, 1,
4297                                            sva, ptep, info->arg);
4298                         } else {
4299                                 pv_placemarker_wakeup(pmap, pt_placemark);
4300                         }
4301
4302                         /*
4303                          * Done, move to next page table page.
4304                          */
4305                         va_next = (sva + NBPDR) & ~PDRMASK;
4306                         if (va_next < sva)
4307                                 va_next = eva;
4308                         continue;
4309                 }
4310
4311                 /*
4312                  * From this point in the loop, a non-NULL pt_pv means we are
4313                  * in UVM, while a NULL pt_pv means we are in KVM.
4314                  *
4315                  * Limit our scan to either the end of the va represented
4316                  * by the current page table page, or to the end of the
4317                  * range being removed.
4318                  */
4319 kernel_skip:
4320                 va_next = (sva + NBPDR) & ~PDRMASK;
4321                 if (va_next < sva)
4322                         va_next = eva;
4323                 if (va_next > eva)
4324                         va_next = eva;
4325
4326                 /*
4327                  * Scan the page table for pages.  Some pages may not be
4328                  * managed (might not have a pv_entry).
4329                  *
4330                  * There is no page table management for kernel pages so
4331                  * pt_pv will be NULL in that case, but otherwise pt_pv
4332                  * is non-NULL, locked, and referenced.
4333                  */
4334
4335                 /*
4336                  * At this point a non-NULL pt_pv means a UVA, and a NULL
4337                  * pt_pv means a KVA.
4338                  */
4339                 if (pt_pv)
4340                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
4341                 else
4342                         ptep = vtopte(sva);
4343
4344                 while (sva < va_next) {
4345                         pv_entry_t pte_pv;
4346                         vm_pindex_t *pte_placemark;
4347
4348                         /*
4349                          * Yield every 64 pages, stop if requested.
4350                          */
4351                         if ((++info->count & 63) == 0)
4352                                 lwkt_user_yield();
4353                         if (info->stop)
4354                                 break;
4355
4356                         /*
4357                          * We can shortcut our scan if *ptep == 0.  This is
4358                          * an unlocked check.
4359                          */
4360                         if (*ptep == 0) {
4361                                 sva += PAGE_SIZE;
4362                                 ++ptep;
4363                                 continue;
4364                         }
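                             /*
                              * Compiler fence: ensure *ptep is re-read from
                              * memory below instead of the value tested
                              * above being reused.
                              */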
4365                         cpu_ccfence();
4366
4367                         /*
4368                          * Acquire the related pte_pv, if any.  If *ptep == 0
4369                          * the related pte_pv should not exist, but if *ptep
4370                          * is not zero the pte_pv may or may not exist (e.g.
4371                          * will not exist for an unmanaged page).
4372                          *
4373                          * However a multitude of races are possible here
4374                  * so if we cannot lock a definite state we clean out
4375                          * our cache and break the inner while() loop to
4376                          * force a loop up to the top of the for().
4377                          *
4378                          * XXX unlock/relock pd_pv, pt_pv, and re-test their
4379                          *     validity instead of looping up?
4380                          */
4381                         pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
4382                                             &pte_placemark, &error);
4383                         if (error) {
4384                                 pv_put(pd_pv);          /* lock order */
4385                                 pd_pv = NULL;
4386                                 if (pt_pv) {
4387                                         pv_put(pt_pv);  /* lock order */
4388                                         pt_pv = NULL;
4389                                 }
4390                                 if (pte_pv) {           /* block */
4391                                         pv_lock(pte_pv);
4392                                         pv_put(pte_pv);
4393                                         pte_pv = NULL;
4394                                 } else {
4395                                         pv_placemarker_wait(pmap,
4396                                                         pte_placemark);
4397                                 }
4398                                 va_next = sva;          /* retry */
4399                                 break;
4400                         }
4401
4402                         /*
4403                          * Reload *ptep after successfully locking the
4404                          * pindex.  If *ptep == 0 we had better NOT have a
4405                          * pte_pv.
4406                          */
4407                         cpu_ccfence();
4408                         oldpte = *ptep;
4409                         if (oldpte == 0) {
4410                                 if (pte_pv) {
4411                                         kprintf("Unexpected non-NULL pte_pv "
4412                                                 "%p pt_pv %p "
4413                                                 "*ptep = %016lx/%016lx\n",
4414                                                 pte_pv, pt_pv, *ptep, oldpte);
4415                                         panic("Unexpected non-NULL pte_pv");
4416                                 } else {
4417                                         pv_placemarker_wakeup(pmap, pte_placemark);
4418                                 }
4419                                 sva += PAGE_SIZE;
4420                                 ++ptep;
4421                                 continue;
4422                         }
4423
4424                         /*
4425                          * We can't hold pd_pv across the callback (because
4426                          * we don't pass it to the callback and the callback
4427                  * might deadlock).
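                  * Wiring the underlying page below prevents it from being
                  * freed while pd_pv is unlocked.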
4428                          */
4429                         if (pd_pv) {
4430                                 vm_page_wire_quick(pd_pv->pv_m);
4431                                 pv_unlock(pd_pv);
4432                         }
4433
4434                         /*
4435                          * Ready for the callback.  The locked pte_pv (if any)
4436                          * is consumed by the callback.  pte_pv will exist if
4437                          * the page is managed, and will not exist if it
4438                          * isn't.
4439                          */
4440                         if (oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) {
4441                                 /*
4442                                  * Managed pte
4443                                  */
4444                                 KASSERT(pte_pv &&
4445                                          (oldpte & pmap->pmap_bits[PG_V_IDX]),
4446                                     ("badC *ptep %016lx/%016lx sva %016lx "
4447                                     "pte_pv %p",
4448                                     *ptep, oldpte, sva, pte_pv));
4449                                 /*
4450                                  * We must unlock pd_pv across the callback
4451                                  * to avoid deadlocks on any recursive
4452                                  * disposal.  Re-check that it still exists
4453                                  * after re-locking.
4454                                  *
4455                                  * Call target disposes of pte_pv and may
4456                                  * destroy but will not dispose of pt_pv.
4457                                  */
4458                                 info->func(pmap, info, pte_pv, NULL,
4459                                            pt_pv, 0,
4460                                            sva, ptep, info->arg);
4461                         } else {
4462                                 /*
4463                                  * Unmanaged pte
4464                                  *
4465                                  * We must unlock pd_pv across the callback
4466                                  * to avoid deadlocks on any recursive
4467                                  * disposal.  Re-check that it still exists
4468                                  * after re-locking.
4469                                  *
4470                                  * Call target disposes of pte_pv or
4471                                  * pte_placemark and may destroy but will
4472                                  * not dispose of pt_pv.
4473                                  */
4474                                 KASSERT(pte_pv == NULL &&
4475                                         (oldpte & pmap->pmap_bits[PG_V_IDX]),
4476                                     ("badD *ptep %016lx/%016lx sva %016lx "
4477                                     "pte_pv %p pte_pv->pv_m %p ",
4478                                      *ptep, oldpte, sva,
4479                                      pte_pv, (pte_pv ? pte_pv->pv_m : NULL)));
4480                                 if (pte_pv)
4481                                         kprintf("RaceD\n");
4482                                 if (pte_pv) {
4483                                         info->func(pmap, info,
4484                                                    pte_pv, NULL,
4485                                                    pt_pv, 0,
4486                                                    sva, ptep, info->arg);
4487                                 } else {
4488                                         info->func(pmap, info,
4489                                                    NULL, pte_placemark,
4490                                                    pt_pv, 0,
4491                                                    sva, ptep, info->arg);
4492                                 }
4493                         }
4494                         if (pd_pv) {
4495                                 pv_lock(pd_pv);
4496                                 vm_page_unwire_quick(pd_pv->pv_m);
4497                                 if (pd_pv->pv_pmap == NULL) {
4498                                         va_next = sva;          /* retry */
4499                                         break;
4500                                 }
4501                         }
4502
4503                         /*
4504                          * NOTE: The cached pt_pv can be removed from the
4505                          *       pmap when pmap_dynamic_delete is enabled,
4506                          *       which will cause ptep to become stale.
4507                          *
4508                          *       This also means that no pages remain under
4509                          *       the PT, so we can just break out of the inner
4510                          *       loop and let the outer loop clean everything
4511                          *       up.
4512                          */
4513                         if (pt_pv && pt_pv->pv_pmap != pmap)
4514                                 break;
4515                         pte_pv = NULL;
4516                         sva += PAGE_SIZE;
4517                         ++ptep;
4518                 }
4519         }
4520         if (pd_pv) {
4521                 pv_put(pd_pv);
4522                 pd_pv = NULL;
4523         }
4524         if (pt_pv) {
4525                 pv_put(pt_pv);
4526                 pt_pv = NULL;
4527         }
4528         if ((++info->count & 7) == 0)
4529                 lwkt_user_yield();
4530
4531         /*
4532          * Relock before returning.
4533          */
4534         spin_lock(&pmap->pm_spin);
4535         return (0);
4536 }
4537
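     /*
      * Remove the given range of addresses from the specified pmap.
      *
      * The scan is performed with pmap_scan(); passing 1 requests TLB
      * invalidation as entries are removed, in contrast to
      * pmap_remove_noinval() below.
      */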
4538 void
4539 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4540 {
4541         struct pmap_scan_info info;
4542
4543         info.pmap = pmap;
4544         info.sva = sva;
4545         info.eva = eva;
4546         info.func = pmap_remove_callback;
4547         info.arg = NULL;
4548         pmap_scan(&info, 1);
4549 #if 0
4550         cpu_invltlb();
4551         if (eva - sva < 1024*1024) {
4552                 while (sva < eva) {
4553                         cpu_invlpg((void *)sva);
4554                         sva += PAGE_SIZE;
4555                 }
4556         }
4557 #endif
4558 }
4559
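     /*
      * Same as pmap_remove() but without TLB invalidation; the caller
      * must invalidate the TLB itself (see pmap_remove_pages()).
      */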
4560 static void
4561 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
4562 {
4563         struct pmap_scan_info info;
4564
4565         info.pmap = pmap;
4566         info.sva = sva;
4567         info.eva = eva;
4568         info.func = pmap_remove_callback;
4569         info.arg = NULL;
4570         pmap_scan(&info, 0);
4571 }
4572
4573 static void
4574 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
4575                      pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
4576                      pv_entry_t pt_pv, int sharept,
4577                      vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
4578 {
4579         pt_entry_t pte;
4580
4581         if (pte_pv) {
4582                 /*
4583                  * Managed entry
4584                  *
4585                  * This will also drop pt_pv's wire_count. Note that
4586                  * terminal pages are not wired based on mmu presence.
4587                  *
4588                  * NOTE: If this is the kernel_pmap, pt_pv can be NULL.
4589                  */
4590                 KKASSERT(pte_pv->pv_m != NULL);
4591                 pmap_remove_pv_pte(pte_pv, pt_pv, info->bulk, 2);
4592                 pte_pv = NULL;  /* safety */
4593
4594                 /*
4595                  * Recursively destroy higher-level page tables.
4596                  *
4597                  * This is optional.  If we do not, they will still
4598                  * be destroyed when the process exits.
4599                  *
4600                  * NOTE: Do not destroy pv_entry's with extra hold refs,
4601                  *       a caller may have unlocked it and intends to
4602                  *       continue to use it.
4603                  */
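                     /*
                      * wire_count == 1 means no terminal pages remain under
                      * this page table page, the PV_HOLD_MASK test rejects
                      * pv_entry's with extra hold refs (see the NOTE above),
                      * and the pml4 itself is never destroyed here.
                      */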
4604                 if (pmap_dynamic_delete &&
4605                     pt_pv &&
4606                     pt_pv->pv_m &&
4607                     pt_pv->pv_m->wire_count == 1 &&
4608                     (pt_pv->pv_hold & PV_HOLD_MASK) == 2 &&
4609                     pt_pv->pv_pindex != pmap_pml4_pindex()) {
4610                         if (pmap_dynamic_delete == 2)
4611                                 kprintf("B %jd %08x\n", (intmax_t)pt_pv->pv_pindex, pt_pv->pv_hold);
4612                         pv_hold(pt_pv); /* extra hold */
4613                         pmap_remove_pv_pte(pt_pv, NULL, info->bulk, 1);
4614                         pv_lock(pt_pv); /* prior extra hold + relock */
4615                 }
4616         } else if (sharept == 0) {
4617                 /*
4618                  * Unmanaged pte (pte_placemark is non-NULL)
4619                  *
4620                  * pt_pv's wire_count is still bumped by unmanaged pages
4621                  * so we must decrement it manually.
4622                  *
4623                  * We have to unwire the target page table page.
4624                  */
4625                 pte = pmap_inval_bulk(info->bulk, va, ptep, 0);
4626                 if (pte & pmap->pmap_bits[PG_W_IDX])
4627                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
4628                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4629                 if (vm_page_unwire_quick(pt_pv->pv_m))
4630                         panic("pmap_remove: insufficient wirecount");
4631                 pv_placemarker_wakeup(pmap, pte_placemark);
4632         } else {
4633                 /*
4634                  * Unmanaged page table (pt, pd, or pdp; not pte) for
4635                  * a shared page table.
4636                  *
4637                  * pt_pv is actually the pd_pv for our pmap (not the shared
4638                  * object pmap).
4639                  *
4640                  * We have to unwire the target page table page and we
4641                  * have to unwire our page directory page.
4642                  *
4643                  * It is unclear how we can invalidate a segment, so we
4644                  * invalidate -1, which invalidates the entire TLB.
4645                  */
4646                 pte = pmap_inval_bulk(info->bulk, (vm_offset_t)-1, ptep, 0);
4647                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
4648                 KKASSERT((pte & pmap->pmap_bits[PG_DEVICE_IDX]) == 0);
4649                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
4650                         panic("pmap_remove: shared pgtable1 bad wirecount");
4651                 if (vm_page_unwire_quick(pt_pv->pv_m))
4652                         panic("pmap_remove: shared pgtable2 bad wirecount");
4653                 pv_placemarker_wakeup(pmap, pte_placemark);
4654         }
4655 }
4656
4657 /*
4658  * Removes this physical page from all physical maps in which it resides.
4659  * Reflects back modify bits to the pager.
4660  *
4661  * This routine may not be called from an interrupt.
4662  */
4663 static
4664 void
4665 pmap_remove_all(vm_page_t m)
4666 {
4667         pv_entry_t pv;
4668         pmap_inval_bulk_t bulk;
4669
4670         if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
4671                 return;
4672
4673         vm_page_spin_lock(m);
4674         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4675                 KKASSERT(pv->pv_m == m);
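                     /*
                      * Attempt to lock the pv without blocking while holding
                      * the page spinlock.  On failure, drop the spinlock,
                      * block until the pv is free, and retry from the list
                      * head.
                      */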
4676                 if (pv_hold_try(pv)) {
4677                         vm_page_spin_unlock(m);
4678                 } else {
4679                         vm_page_spin_unlock(m);
4680                         pv_lock(pv);
4681                         pv_put(pv);
4682                         vm_page_spin_lock(m);
4683                         continue;
4684                 }
4685                 KKASSERT(pv->pv_pmap && pv->pv_m == m);
4686
4687                 /*
4688                  * Holding no spinlocks, pv is locked.  Once we scrap
4689                  * pv we can no longer use it as a list iterator (but
4690                  * we are doing a TAILQ_FIRST() so we are ok).
4691                  */
4692                 pmap_inval_bulk_init(&bulk, pv->pv_pmap);
4693                 pmap_remove_pv_pte(pv, NULL, &bulk, 2);
4694                 pv = NULL;      /* safety */
4695                 pmap_inval_bulk_flush(&bulk);
4696                 vm_page_spin_lock(m);
4697         }
4698         KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
4699         vm_page_spin_unlock(m);
4700 }
4701
4702 /*
4703  * Removes the page from a particular pmap
4704  */
4705 void
4706 pmap_remove_specific(pmap_t pmap, vm_page_t m)
4707 {
4708         pv_entry_t pv;
4709         pmap_inval_bulk_t bulk;
4710
4711         if (!pmap_initialized)
4712                 return;
4713
4714 again:
4715         vm_page_spin_lock(m);
4716         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4717                 if (pv->pv_pmap != pmap)
4718                         continue;
4719                 KKASSERT(pv->pv_m == m);
4720                 if (pv_hold_try(pv)) {
4721                         vm_page_spin_unlock(m);
4722                 } else {
4723                         vm_page_spin_unlock(m);
4724                         pv_lock(pv);
4725                         pv_put(pv);
4726                         goto again;
4727                 }
4728                 KKASSERT(pv->pv_pmap == pmap && pv->pv_m == m);
4729
4730                 /*
4731                  * Holding no spinlocks, pv is locked.  Once gone it can't
4732                  * be used as an iterator.  In fact, because we couldn't
4733                  * necessarily lock it atomically it may have moved within
4734                  * the list and ALSO cannot be used as an iterator.
4735                  */
4736                 pmap_inval_bulk_init(&bulk, pv->pv_pmap);
4737                 pmap_remove_pv_pte(pv, NULL, &bulk, 2);
4738                 pv = NULL;      /* safety */
4739                 pmap_inval_bulk_flush(&bulk);
4740                 goto again;
4741         }
4742         vm_page_spin_unlock(m);
4743 }
4744
4745 /*
4746  * Set the physical protection on the specified range of this map
4747  * as requested.  This function is typically only used for debug watchpoints
4748  * and COW pages.
4749  *
4750  * This function may not be called from an interrupt if the map is
4751  * not the kernel_pmap.
4752  *
4753  * NOTE!  For shared page table pages we just unmap the page.
4754  */
4755 void
4756 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4757 {
4758         struct pmap_scan_info info;
4759         /* JG review for NX */
4760
4761         if (pmap == NULL)
4762                 return;
4763         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == VM_PROT_NONE) {
4764                 pmap_remove(pmap, sva, eva);
4765                 return;
4766         }
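             /* Retaining write access means there is nothing to protect */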
4767         if (prot & VM_PROT_WRITE)
4768                 return;
4769         info.pmap = pmap;
4770         info.sva = sva;
4771         info.eva = eva;
4772         info.func = pmap_protect_callback;
4773         info.arg = &prot;
4774         pmap_scan(&info, 1);
4775 }
4776
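     /*
      * pmap_scan() callback for pmap_protect().  Transfers the accessed
      * and modified bits to the vm_page, then clears PG_RW (and the
      * transferred bits) in the pte via a cmpset loop, retrying if the
      * pte changes underneath us.  Shared page table pages are unmapped
      * entirely instead of being write-protected.
      */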
4777 static
4778 void
4779 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
4780                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
4781                       pv_entry_t pt_pv, int sharept,
4782                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
4783 {
4784         pt_entry_t pbits;
4785         pt_entry_t cbits;
4786         pt_entry_t pte;
4787         vm_page_t m;
4788
4789 again:
4790         pbits = *ptep;
4791         cbits = pbits;
4792         if (pte_pv) {
4793                 KKASSERT(pte_pv->pv_m != NULL);
4794                 m = NULL;
4795                 if (pbits & pmap->pmap_bits[PG_A_IDX]) {
4796                         if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
4797                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4798                                 KKASSERT(m == pte_pv->pv_m);
4799                                 vm_page_flag_set(m, PG_REFERENCED);
4800                         }
4801                         cbits &= ~pmap->pmap_bits[PG_A_IDX];
4802                 }
4803                 if (pbits & pmap->pmap_bits[PG_M_IDX]) {
4804                         if (pmap_track_modified(pte_pv->pv_pindex)) {
4805                                 if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
4806                                         if (m == NULL) {
4807                                                 m = PHYS_TO_VM_PAGE(pbits &
4808                                                                     PG_FRAME);
4809                                         }
4810                                         vm_page_dirty(m);
4811                                 }
4812                                 cbits &= ~pmap->pmap_bits[PG_M_IDX];
4813                         }
4814                 }
4815         } else if (sharept) {
4816                 /*
4817                  * Unmanaged page table, pt_pv is actually the pd_pv
4818                  * for our pmap (not the object's shared pmap).
4819                  *
4820                  * When asked to protect something in a shared page table
4821                  * page we just unmap the page table page.  We have to
4822                  * invalidate the tlb in this situation.
4823                  *
4824                  * XXX Warning, shared page tables will not be used for
4825                  * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings
4826                  * so PHYS_TO_VM_PAGE() should be safe here.
4827                  */
4828                 pte = pmap_inval_smp(pmap, (vm_offset_t)-1, 1, ptep, 0);
4829                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
4830                         panic("pmap_protect: pgtable1 pg bad wirecount");
4831                 if (vm_page_unwire_quick(pt_pv->pv_m))
4832                         panic("pmap_protect: pgtable2 pg bad wirecount");
4833                 ptep = NULL;
4834         }
4835         /* else unmanaged page, adjust bits, no wire changes */
4836
4837         if (ptep) {
4838                 cbits &= ~pmap->pmap_bits[PG_RW_IDX];
4839 #ifdef PMAP_DEBUG2
4840                 if (pmap_enter_debug > 0) {
4841                         --pmap_enter_debug;
4842                         kprintf("pmap_protect va=%lx ptep=%p pte_pv=%p "
4843                                 "pt_pv=%p cbits=%08lx\n",
4844                                 va, ptep, pte_pv,
4845                                 pt_pv, cbits
4846                         );
4847                 }
4848 #endif
4849                 if (pbits != cbits) {
4850                         vm_offset_t xva;
4851
4852                         xva = (sharept) ? (vm_offset_t)-1 : va;
4853                         if (!pmap_inval_smp_cmpset(pmap, xva,
4854                                                    ptep, pbits, cbits)) {
4855                                 goto again;
4856                         }
4857                 }
4858         }
4859         if (pte_pv)
4860                 pv_put(pte_pv);
4861         else
4862                 pv_placemarker_wakeup(pmap, pte_placemark);
4863 }
4864
4865 /*
4866  * Insert the vm_page (m) at the virtual address (va), replacing any prior
4867  * mapping at that address.  Set protection and wiring as requested.
4868  *
4869  * If entry is non-NULL we check to see if the SEG_SIZE optimization is
4870  * possible.  If it is we enter the page into the appropriate shared pmap
4871  * hanging off the related VM object instead of the passed pmap, then we
4872  * share the page table page from the VM object's pmap into the current pmap.
4873  *
4874  * NOTE: This routine MUST insert the page into the pmap now; it cannot
4875  *       lazy-evaluate.
4876  *
4877  * NOTE: If (m) is PG_UNMANAGED it may also be a temporary fake vm_page_t;
4878  *       never record it.
4879  */
4880 void
4881 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4882            boolean_t wired, vm_map_entry_t entry)
4883 {
4884         pv_entry_t pt_pv;       /* page table */
4885         pv_entry_t pte_pv;      /* page table entry */
4886         vm_pindex_t *pte_placemark;
4887         pt_entry_t *ptep;
4888         vm_paddr_t opa;
4889         pt_entry_t origpte, newpte;
4890         vm_paddr_t pa;
4891
4892         if (pmap == NULL)
4893                 return;
4894         va = trunc_page(va);
4895 #ifdef PMAP_DIAGNOSTIC
4896         if (va >= KvaEnd)
4897                 panic("pmap_enter: toobig");
4898         if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
4899                 panic("pmap_enter: invalid to pmap_enter page table "
4900                       "pages (va: 0x%lx)", va);
4901 #endif
4902         if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
4903                 kprintf("Warning: pmap_enter called on UVA with "
4904                         "kernel_pmap\n");
4905 #ifdef DDB
4906                 db_print_backtrace();
4907 #endif
4908         }
4909         if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
4910         kprintf("Warning: pmap_enter called on KVA without "
4911                         "kernel_pmap\n");
4912 #ifdef DDB
4913                 db_print_backtrace();
4914 #endif
4915         }
4916
4917         /*
4918          * Get locked PV entries for our new page table entry (pte_pv or
4919          * pte_placemark) and for its parent page table (pt_pv).  We need
4920          * the parent so we can resolve the location of the ptep.
4921          *
4922          * Only hardware MMU actions can modify the ptep out from
4923          * under us.
4924          *
4925          * if (m) is fictitious or unmanaged we do not create a managing
4926          * pte_pv for it.  Any pre-existing page's management state must
4927          * match (avoiding code complexity).
4928          *
4929          * If the pmap is still being initialized we assume existing
4930          * page tables.
4931          *
4932          * Kernel mappings do not track page table pages (i.e. pt_pv).
4933          *
4934          * WARNING! If replacing a managed mapping with an unmanaged mapping
4935          *          pte_pv will wind up being non-NULL and must be handled
4936          *          below.
4937          */
4938         if (pmap_initialized == FALSE) {
4939                 pte_pv = NULL;
4940                 pt_pv = NULL;
4941                 pte_placemark = NULL;
4942                 ptep = vtopte(va);
4943                 origpte = *ptep;
4944         } else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */
4945                 pmap_softwait(pmap);
4946                 pte_pv = pv_get(pmap, pmap_pte_pindex(va), &pte_placemark);
4947                 KKASSERT(pte_pv == NULL);
4948                 if (va >= VM_MAX_USER_ADDRESS) {
4949                         pt_pv = NULL;
4950                         ptep = vtopte(va);
4951                 } else {
4952                         pt_pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va),
4953                                                   NULL, entry, va);
4954                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4955                 }
4956                 origpte = *ptep;
4957                 cpu_ccfence();
4958                 KASSERT(origpte == 0 ||
4959                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0,
4960                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
4961         } else {
4962                 pmap_softwait(pmap);
4963                 if (va >= VM_MAX_USER_ADDRESS) {
4964                         /*
4965                          * Kernel map, pv_entry-tracked.
4966                          */
4967                         pt_pv = NULL;
4968                         pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), NULL);
4969                         ptep = vtopte(va);
4970                 } else {
4971                         /*
4972                          * User map
4973                          */
4974                         pte_pv = pmap_allocpte_seg(pmap, pmap_pte_pindex(va),
4975                                                    &pt_pv, entry, va);
4976                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4977                 }
4978                 pte_placemark = NULL;   /* safety */
4979                 origpte = *ptep;
4980                 cpu_ccfence();
4981                 KASSERT(origpte == 0 ||
4982                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]),
4983                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
4984         }
4985
4986         pa = VM_PAGE_TO_PHYS(m);
4987         opa = origpte & PG_FRAME;
4988
4989         /*
4990          * Calculate the new PTE.  Note that pte_pv alone does not mean
4991          * the new pte_pv is managed, it could exist because the old pte
4992          * the new pte is managed; it could exist because the old pte
4993          */
4994         newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
4995                  pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
4996         if (wired)
4997                 newpte |= pmap->pmap_bits[PG_W_IDX];
4998         if (va < VM_MAX_USER_ADDRESS)
4999                 newpte |= pmap->pmap_bits[PG_U_IDX];
5000         if (pte_pv && (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) == 0)
5001                 newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
5002 //      if (pmap == &kernel_pmap)
5003 //              newpte |= pgeflag;
5004         newpte |= pmap->pmap_cache_bits[m->pat_mode];
5005         if (m->flags & PG_FICTITIOUS)
5006                 newpte |= pmap->pmap_bits[PG_DEVICE_IDX];
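             /*
              * newpte now contains the frame, protection bits, PG_V and
              * PG_A, plus PG_U for user addresses, PG_W for wired
              * mappings, PG_MANAGED when pv-tracked, the cache attribute
              * bits, and PG_DEVICE for fictitious pages.
              */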
5007
5008         /*
5009          * It is possible for multiple faults to occur in threaded
5010          * environments, the existing pte might be correct.
5011          */
5012         if (((origpte ^ newpte) &
5013             ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
5014                           pmap->pmap_bits[PG_A_IDX])) == 0) {
5015                 goto done;
5016         }
5017
5018         /*
5019          * Ok, either the address changed or the protection or wiring
5020          * changed.
5021          *
5022          * Clear the current entry, interlocking the removal.  For managed
5023          * pte's this will also flush the modified state to the vm_page.
5024          * Atomic ops are mandatory in order to ensure that PG_M events are
5025          * not lost during any transition.
5026          *
5027          * WARNING: The caller has busied the new page but not the original
5028          *          vm_page which we are trying to replace.  Because we hold
5029          *          the pte_pv lock, but have not busied the page, PG bits
5030          *          can be cleared out from under us.
5031          */
5032         if (opa) {
5033                 if (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) {
5034                         /*
5035                          * Old page was managed.  Expect pte_pv to exist.
5036                          * (it might also exist if the old page was unmanaged).
5037                          *
5038                          * NOTE: pt_pv won't exist for a kernel page
5039                          *       (managed or otherwise).
5040                          *
5041                          * NOTE: We may be reusing the pte_pv so we do not
5042                          *       destroy it in pmap_remove_pv_pte().
5043                          */
5044                         KKASSERT(pte_pv && pte_pv->pv_m);
5045                         if (prot & VM_PROT_NOSYNC) {
5046                                 pmap_remove_pv_pte(pte_pv, pt_pv, NULL, 0);
5047                         } else {
5048                                 pmap_inval_bulk_t bulk;
5049
5050                                 pmap_inval_bulk_init(&bulk, pmap);
5051                                 pmap_remove_pv_pte(pte_pv, pt_pv, &bulk, 0);
5052                                 pmap_inval_bulk_flush(&bulk);
5053                         }
5054                         pmap_remove_pv_page(pte_pv);
5055                         /* will either set pte_pv->pv_m or pv_free() later */
5056                 } else {
5057                         /*
5058                          * Old page was not managed.  If we have a pte_pv
5059                          * it better not have a pv_m assigned to it.  If the
5060                          * new page is managed the pte_pv will be destroyed
5061                          * near the end (we need its interlock).
5062                          *
5063                          * NOTE: We leave the wire count on the PT page
5064                          *       intact for the followup enter, but adjust
5065                          *       the wired-pages count on the pmap.
5066                          */
5067                         KKASSERT(pte_pv == NULL);
5068                         if (prot & VM_PROT_NOSYNC) {
5069                                 /*
5070                                  * NOSYNC (no mmu sync) requested.
5071                                  */
5072                                 (void)pte_load_clear(ptep);
5073                                 cpu_invlpg((void *)va);
5074                         } else {
5075                                 /*
5076                                  * Nominal SYNC
5077                                  */
5078                                 pmap_inval_smp(pmap, va, 1, ptep, 0);
5079                         }
5080
5081                         /*
5082                          * We must adjust pm_stats manually for unmanaged
5083                          * pages.
5084                          */
5085                         if (pt_pv) {
5086                                 atomic_add_long(&pmap->pm_stats.
5087                                                 resident_count, -1);
5088                         }
5089                         if (origpte & pmap->pmap_bits[PG_W_IDX]) {
5090                                 atomic_add_long(&pmap->pm_stats.
5091                                                 wired_count, -1);
5092                         }
5093                 }
5094                 KKASSERT(*ptep == 0);
5095         }
5096
5097 #ifdef PMAP_DEBUG2
5098         if (pmap_enter_debug > 0) {
5099                 --pmap_enter_debug;
5100                 kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p"
5101                         " pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n",
5102                         va, m,
5103                         origpte, newpte, ptep,
5104                         pte_pv, pt_pv, opa, prot);
5105         }
5106 #endif
5107
5108         if ((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5109                 /*
5110                  * Entering an unmanaged page.  We must wire the pt_pv unless
5111                  * we retained the wiring from an unmanaged page we had
5112                  * removed (if we retained it via pte_pv that will go away
5113                  * soon).
5114                  */
5115                 if (pt_pv && (opa == 0 ||
5116                               (origpte & pmap->pmap_bits[PG_MANAGED_IDX]))) {
5117                         vm_page_wire_quick(pt_pv->pv_m);
5118                 }
5119                 if (wired)
5120                         atomic_add_long(&pmap->pm_stats.wired_count, 1);
5121
5122                 /*
5123                  * Unmanaged pages need manual resident_count tracking.
5124                  */
5125                 if (pt_pv) {
5126                         atomic_add_long(&pt_pv->pv_pmap->pm_stats.
5127                                         resident_count, 1);
5128                 }
5129                 if (newpte & pmap->pmap_bits[PG_RW_IDX])
5130                         vm_page_flag_set(m, PG_WRITEABLE);
5131         } else {
5132                 /*
5133                  * Entering a managed page.  Our pte_pv takes care of the
5134                  * PT wiring, so if we had removed an unmanaged page before
5135                  * we must adjust.
5136                  *
5137                  * We have to take care of the pmap wired count ourselves.
5138                  *
5139                  * Enter on the PV list if part of our managed memory.
5140                  */
5141                 KKASSERT(pte_pv && (pte_pv->pv_m == NULL || pte_pv->pv_m == m));
5142                 vm_page_spin_lock(m);
5143                 pte_pv->pv_m = m;
5144                 pmap_page_stats_adding(m);
5145                 TAILQ_INSERT_TAIL(&m->md.pv_list, pte_pv, pv_list);
5146                 vm_page_flag_set(m, PG_MAPPED);
5147                 if (newpte & pmap->pmap_bits[PG_RW_IDX])
5148                         vm_page_flag_set(m, PG_WRITEABLE);
5149                 vm_page_spin_unlock(m);
5150
5151                 if (pt_pv && opa &&
5152                     (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5153                         vm_page_unwire_quick(pt_pv->pv_m);
5154                 }
5155
5156                 /*
5157                  * Adjust pmap wired pages count for new entry.
5158                  */
5159                 if (wired) {
5160                         atomic_add_long(&pte_pv->pv_pmap->pm_stats.
5161                                         wired_count, 1);
5162                 }
5163         }
5164
5165         /*
5166          * Kernel VMAs (pt_pv == NULL) require pmap invalidation interlocks.
5167          *
5168          * User VMAs do not because those will be zero->non-zero, so no
5169          * stale entries to worry about at this point.
5170          *
5171          * For KVM there appear to still be issues.  Theoretically we
5172          * should be able to scrap the interlocks entirely but we
5173          * get crashes.
5174          */
5175         if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL) {
5176                 pmap_inval_smp(pmap, va, 1, ptep, newpte);
5177         } else {
5178                 origpte = atomic_swap_long(ptep, newpte);
5179                 if (origpte & pmap->pmap_bits[PG_M_IDX]) {
5180                         kprintf("pmap [M] race @ %016jx\n", va);
5181                         atomic_set_long(ptep, pmap->pmap_bits[PG_M_IDX]);
5182                 }
5183                 if (pt_pv == NULL)
5184                         cpu_invlpg((void *)va);
5185         }
5186
5187         /*
5188          * Cleanup
5189          */
5190 done:
5191         KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 ||
5192                  (m->flags & PG_MAPPED));
5193
5194         /*
5195          * Cleanup the pv entry, allowing other accessors.  If the new page
5196          * is not managed but we have a pte_pv (which was locking our
5197          * operation), we can free it now.  pte_pv->pv_m should be NULL.
5198          */
5199         if (pte_pv && (newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0) {
5200                 pv_free(pte_pv, pt_pv);
5201         } else if (pte_pv) {
5202                 pv_put(pte_pv);
5203         } else if (pte_placemark) {
5204                 pv_placemarker_wakeup(pmap, pte_placemark);
5205         }
5206         if (pt_pv)
5207                 pv_put(pt_pv);
5208 }
5209
5210 /*
5211  * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
5212  * This code also assumes that the pmap has no pre-existing entry for this
5213  * VA.
5214  *
5215  * This code currently may only be used on user pmaps, not kernel_pmap.
5216  */
5217 void
5218 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
5219 {
5220         pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, NULL);
5221 }
5222
5223 /*
5224  * Make a temporary mapping for a physical address.  This is only intended
5225  * to be used for panic dumps.
5226  *
5227  * The caller is responsible for calling smp_invltlb().
5228  */
5229 void *
5230 pmap_kenter_temporary(vm_paddr_t pa, long i)
5231 {
5232         pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
5233         return ((void *)crashdumpmap);
5234 }
5235
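     /* Threshold used by pmap_object_init_pt() to limit preloading */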
5236 #define MAX_INIT_PT (96)
5237
5238 /*
5239  * This routine preloads the ptes for a given object into the specified pmap.
5240  * This eliminates the blast of soft faults on process startup and
5241  * immediately after an mmap.
5242  */
5243 static int pmap_object_init_pt_callback(vm_page_t p, void *data);
5244
5245 void
5246 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
5247                     vm_object_t object, vm_pindex_t pindex,
5248                     vm_size_t size, int limit)
5249 {
5250         struct rb_vm_page_scan_info info;
5251         struct lwp *lp;
5252         vm_size_t psize;
5253
5254         /*
5255          * We can't preinit if read access isn't set or there is no pmap
5256          * or object.
5257          */
5258         if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
5259                 return;
5260
5261         /*
5262          * We can't preinit if the pmap is not the current pmap
5263          */
5264         lp = curthread->td_lwp;
5265         if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
5266                 return;
5267
5268         /*
5269          * Misc additional checks
5270          */
5271         psize = x86_64_btop(size);
5272
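             /*
              * Only vnode-backed objects are preloaded, and a
              * MAP_PREFAULT_PARTIAL request is skipped when both the
              * request and the object exceed MAX_INIT_PT pages.
              */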
5273         if ((object->type != OBJT_VNODE) ||
5274                 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
5275                         (object->resident_page_count > MAX_INIT_PT))) {
5276                 return;
5277         }
5278
5279         if (pindex + psize > object->size) {
5280                 if (object->size < pindex)
5281                         return;           
5282                 psize = object->size - pindex;
5283         }
5284
5285         if (psize == 0)
5286                 return;
5287
5288         /*
5289          * If everything is segment-aligned do not pre-init here.  Instead
5290          * allow the normal vm_fault path to pass a segment hint to
5291          * pmap_enter() which will then use an object-referenced shared
5292          * page table page.
5293          */
5294         if ((addr & SEG_MASK) == 0 &&
5295             (ctob(psize) & SEG_MASK) == 0 &&
5296             (ctob(pindex) & SEG_MASK) == 0) {
5297                 return;
5298         }
5299
5300         /*
5301          * Use a red-black scan to traverse the requested range and load
5302          * any valid pages found into the pmap.
5303          *
5304          * We cannot safely scan the object's memq without holding the
5305          * object token.
5306          */
5307         info.start_pindex = pindex;
5308         info.end_pindex = pindex + psize - 1;
5309         info.limit = limit;
5310         info.mpte = NULL;
5311         info.addr = addr;
5312         info.pmap = pmap;
5313         info.object = object;
5314
5315         /*
5316          * By using the NOLK scan, the callback function must be sure
5317          * to return -1 if the VM page falls out of the object.
5318          */
5319         vm_object_hold_shared(object);
5320         vm_page_rb_tree_RB_SCAN_NOLK(&object->rb_memq, rb_vm_page_scancmp,
5321                                      pmap_object_init_pt_callback, &info);
5322         vm_object_drop(object);
5323 }
5324
5325 static
5326 int
5327 pmap_object_init_pt_callback(vm_page_t p, void *data)
5328 {
5329         struct rb_vm_page_scan_info *info = data;
5330         vm_pindex_t rel_index;
5331         int hard_busy;
5332
5333         /*
5334          * Don't allow an madvise to blow away our really
5335          * free pages by allocating pv entries.
5336          */
5337         if ((info->limit & MAP_PREFAULT_MADVISE) &&
5338                 vmstats.v_free_count < vmstats.v_free_reserved) {
5339                     return(-1);
5340         }
5341
5342         /*
5343          * Ignore list markers and ignore pages we cannot instantly
5344          * busy (while holding the object token).
5345          */
5346         if (p->flags & PG_MARKER)
5347                 return 0;
5348         hard_busy = 0;
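             /*
              * Take a shared busy first; upgrade to a hard busy and retry
              * only if the page must be pulled out of PQ_CACHE.
              */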
5349 again:
5350         if (hard_busy) {
5351                 if (vm_page_busy_try(p, TRUE))
5352                         return 0;
5353         } else {
5354                 if (vm_page_sbusy_try(p))
5355                         return 0;
5356         }
5357         if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
5358             (p->flags & PG_FICTITIOUS) == 0) {
5359                 if ((p->queue - p->pc) == PQ_CACHE) {
5360                         if (hard_busy == 0) {
5361                                 vm_page_sbusy_drop(p);
5362                                 hard_busy = 1;
5363                                 goto again;
5364                         }
5365                         vm_page_deactivate(p);
5366                 }
5367                 rel_index = p->pindex - info->start_pindex;
5368                 pmap_enter_quick(info->pmap,
5369                                  info->addr + x86_64_ptob(rel_index), p);
5370         }
5371         if (hard_busy)
5372                 vm_page_wakeup(p);
5373         else
5374                 vm_page_sbusy_drop(p);
5375
5376         /*
5377          * We are using an unlocked scan (that is, the scan expects its
5378          * current element to remain in the tree on return).  So we have
5379          * to check here and abort the scan if it isn't.
5380          */
5381         if (p->object != info->object)
5382                 return -1;
5383         lwkt_yield();
5384         return(0);
5385 }
5386
5387 /*
5388  * Return TRUE if the pmap is in shape to trivially pre-fault the specified
5389  * address.
5390  *
5391  * Returns FALSE if it would be non-trivial or if a pte is already loaded
5392  * into the slot.
5393  *
5394  * XXX This is safe only because page table pages are not freed.
5395  */
5396 int
5397 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
5398 {
5399         pt_entry_t *pte;
5400
5401         /*spin_lock(&pmap->pm_spin);*/
5402         if ((pte = pmap_pte(pmap, addr)) != NULL) {
5403                 if (*pte & pmap->pmap_bits[PG_V_IDX]) {
5404                         /*spin_unlock(&pmap->pm_spin);*/
5405                         return FALSE;
5406                 }
5407         }
5408         /*spin_unlock(&pmap->pm_spin);*/
5409         return TRUE;
5410 }
5411
5412 /*
5413  * Change the wiring attribute for a pmap/va pair.  The mapping must already
5414  * exist in the pmap.  The mapping may or may not be managed.  The wiring
5415  * count of the page is not changed; the page is returned so the caller can
5416  * adjust its wiring (the page is not locked in any way).
5417  *
5418  * Wiring is not a hardware characteristic so there is no need to invalidate
5419  * TLB.  However, in an SMP environment we must use a locked bus cycle to
5420  * update the pte (if we are not using the pmap_inval_*() API that is)...
5421  * it's ok to do this for simple wiring changes.
5422  */
5423 vm_page_t
5424 pmap_unwire(pmap_t pmap, vm_offset_t va)
5425 {
5426         pt_entry_t *ptep;
5427         pv_entry_t pt_pv;
5428         vm_paddr_t pa;
5429         vm_page_t m;
5430
5431         if (pmap == NULL)
5432                 return NULL;
5433
5434         /*
5435          * Assume elements in the kernel pmap are stable
5436          */
5437         if (pmap == &kernel_pmap) {
5438                 if (pmap_pt(pmap, va) == 0)
5439                         return NULL;
5440                 ptep = pmap_pte_quick(pmap, va);
5441                 if (pmap_pte_v(pmap, ptep)) {
5442                         if (pmap_pte_w(pmap, ptep))
5443                                 atomic_add_long(&pmap->pm_stats.wired_count,-1);
5444                         atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5445                         pa = *ptep & PG_FRAME;
5446                         m = PHYS_TO_VM_PAGE(pa);
5447                 } else {
5448                         m = NULL;
5449                 }
5450         } else {
5451                 /*
5452                  * We can only [un]wire pmap-local pages (we cannot wire
5453                  * shared pages)
5454                  */
5455                 pt_pv = pv_get(pmap, pmap_pt_pindex(va), NULL);
5456                 if (pt_pv == NULL)
5457                         return NULL;
5458
5459                 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
5460                 if ((*ptep & pmap->pmap_bits[PG_V_IDX]) == 0) {
5461                         pv_put(pt_pv);
5462                         return NULL;
5463                 }
5464
5465                 if (pmap_pte_w(pmap, ptep)) {
5466                         atomic_add_long(&pt_pv->pv_pmap->pm_stats.wired_count,
5467                                         -1);
5468                 }
5469                 /* XXX else return NULL so caller doesn't unwire m ? */
5470
5471                 atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
5472
5473                 pa = *ptep & PG_FRAME;
5474                 m = PHYS_TO_VM_PAGE(pa);        /* held by wired count */
5475                 pv_put(pt_pv);
5476         }
5477         return m;
5478 }
5479
5480 /*
5481  * Copy the range specified by src_addr/len from the source map to
5482  * the range dst_addr/len in the destination map.
5483  *
5484  * This routine is only advisory and need not do anything.
5485  */
5486 void
5487 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 
5488           vm_size_t len, vm_offset_t src_addr)
5489 {
5490 }       
5491
5492 /*
5493  * pmap_zero_page:
5494  *
5495  *      Zero the specified physical page.
5496  *
5497  *      This function may be called from an interrupt and no locking is
5498  *      required.
5499  */
5500 void
5501 pmap_zero_page(vm_paddr_t phys)
5502 {
5503         vm_offset_t va = PHYS_TO_DMAP(phys);
5504
5505         pagezero((void *)va);
5506 }
5507
5508 /*
5509  * pmap_zero_page_area:
5510  *
5511  *      Zero part of a physical page by mapping it into memory and clearing
5512  *      its contents with bzero.
5513  *
5514  *      off and size may not cover an area beyond a single hardware page.
5515  */
5516 void
5517 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
5518 {
5519         vm_offset_t virt = PHYS_TO_DMAP(phys);
5520
5521         bzero((char *)virt + off, size);
5522 }
5523
5524 /*
5525  * pmap_copy_page:
5526  *
5527  *      Copy the physical page from the source PA to the target PA.
5528  *      This function may be called from an interrupt.  No locking
5529  *      is required.
5530  */
5531 void
5532 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
5533 {
5534         vm_offset_t src_virt, dst_virt;
5535
5536         src_virt = PHYS_TO_DMAP(src);
5537         dst_virt = PHYS_TO_DMAP(dst);
5538         bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
5539 }
5540
5541 /*
5542  * pmap_copy_page_frag:
5543  *
5544  *      Copy a fragment of a physical page from the source PA to the
5545  *      target PA.  This function may be called from an interrupt.  No
5546  *      locking is required.
5547  */
5548 void
5549 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
5550 {
5551         vm_offset_t src_virt, dst_virt;
5552
5553         src_virt = PHYS_TO_DMAP(src);
5554         dst_virt = PHYS_TO_DMAP(dst);
5555
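             /*
              * The low-order bits of src and dst select the byte offset
              * within each mapped page; the copy presumably must not
              * cross a page boundary.
              */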
5556         bcopy((char *)src_virt + (src & PAGE_MASK),
5557               (char *)dst_virt + (dst & PAGE_MASK),
5558               bytes);
5559 }
5560
5561 /*
5562  * Returns true if the pmap's pv is one of the first 16 pvs linked to from
5563  * this page.  This count may be changed upwards or downwards in the future;
5564  * it is only necessary that true be returned for a small subset of pmaps
5565  * for proper page aging.
5566  */
5567 boolean_t
5568 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
5569 {
5570         pv_entry_t pv;
5571         int loops = 0;
5572
5573         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
5574                 return FALSE;
5575
5576         vm_page_spin_lock(m);
5577         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5578                 if (pv->pv_pmap == pmap) {
5579                         vm_page_spin_unlock(m);
5580                         return TRUE;
5581                 }
5582                 loops++;
5583                 if (loops >= 16)
5584                         break;
5585         }
5586         vm_page_spin_unlock(m);
5587         return (FALSE);
5588 }
5589
5590 /*
5591  * Remove all pages from the specified address space; this aids process
5592  * exit speeds.  Also, this code may be special cased for the current
5593  * process only.
5594  */
5595 void
5596 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5597 {
5598         pmap_remove_noinval(pmap, sva, eva);
5599         cpu_invltlb();
5600 }
5601
5602 /*
5603  * pmap_testbit tests bits in pte's.  Note that the testbit/clearbit
5604  * routines are inline, and a lot of things compile-time evaluate.
5605  */
5606
5607 static
5608 boolean_t
5609 pmap_testbit(vm_page_t m, int bit)
5610 {
5611         pv_entry_t pv;
5612         pt_entry_t *pte;
5613         pmap_t pmap;
5614
5615         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
5616                 return FALSE;
5617
5618         if (TAILQ_FIRST(&m->md.pv_list) == NULL)
5619                 return FALSE;
5620         vm_page_spin_lock(m);
5621         if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
5622                 vm_page_spin_unlock(m);
5623                 return FALSE;
5624         }
5625
5626         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5627 #if defined(PMAP_DIAGNOSTIC)
5628                 if (pv->pv_pmap == NULL) {
5629                         kprintf("Null pmap (tb) at pindex: %"PRIu64"\n",
5630                             pv->pv_pindex);
5631                         continue;
5632                 }
5633 #endif
5634                 pmap = pv->pv_pmap;
5635
5636                 /*
5637                  * If the bit being tested is the accessed or modified
5638                  * bit, restrict the test to pages whose modified state
5639                  * is tracked (skip pager mappings).
5640                  *
5641                  * WARNING!  Because we do not lock the pv, *pte can be in a
5642                  *           state of flux.  Despite this the value of *pte
5643                  *           will still be related to the vm_page in some way
5644                  *           because the pv cannot be destroyed as long as we
5645                  *           hold the vm_page spin lock.
5646                  */
5647                 if (bit == PG_A_IDX || bit == PG_M_IDX) {
5648                                 //& (pmap->pmap_bits[PG_A_IDX] | pmap->pmap_bits[PG_M_IDX])) {
5649                         if (!pmap_track_modified(pv->pv_pindex))
5650                                 continue;
5651                 }
5652
5653                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
5654                 if (*pte & pmap->pmap_bits[bit]) {
5655                         vm_page_spin_unlock(m);
5656                         return TRUE;
5657                 }
5658         }
5659         vm_page_spin_unlock(m);
5660         return (FALSE);
5661 }
5662
5663 /*
5664  * This routine is used to modify bits in ptes.  Only one bit should be
5665  * specified.  PG_RW requires special handling.
5666  *
5667  * Caller must NOT hold any spin locks
5668  */
5669 static __inline
5670 void
5671 pmap_clearbit(vm_page_t m, int bit_index)
5672 {
5673         pv_entry_t pv;
5674         pt_entry_t *pte;
5675         pt_entry_t pbits;
5676         pmap_t pmap;
5677
5678         if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
5679                 if (bit_index == PG_RW_IDX)
5680                         vm_page_flag_clear(m, PG_WRITEABLE);
5681                 return;
5682         }
5683
5684         /*
5685          * PG_M or PG_A case
5686          *
5687  * Loop over all current mappings, setting/clearing as appropriate.
5688  * If setting RO, do we need to clear the VAC?
5689          *
5690          * NOTE: When clearing PG_M we could also (not implemented) drop
5691          *       through to the PG_RW code and clear PG_RW too, forcing
5692          *       a fault on write to redetect PG_M for virtual kernels, but
5693          *       it isn't necessary since virtual kernels invalidate the
5694          *       pte when they clear the VPTE_M bit in their virtual page
5695          *       tables.
5696          *
5697          * NOTE: Does not re-dirty the page when clearing only PG_M.
5698          *
5699          * NOTE: Because we do not lock the pv, *pte can be in a state of
5700          *       flux.  Despite this the value of *pte is still somewhat
5701          *       related while we hold the vm_page spin lock.
5702          *
5703          *       *pte can be zero due to this race.  Since we are clearing
5704          *       bits we basically do no harm when this race occurs.
5705          */
5706         if (bit_index != PG_RW_IDX) {
5707                 vm_page_spin_lock(m);
5708                 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5709 #if defined(PMAP_DIAGNOSTIC)
5710                         if (pv->pv_pmap == NULL) {
5711                                 kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
5712                                     pv->pv_pindex);
5713                                 continue;
5714                         }
5715 #endif
5716                         pmap = pv->pv_pmap;
5717                         pte = pmap_pte_quick(pv->pv_pmap,
5718                                              pv->pv_pindex << PAGE_SHIFT);
5719                         pbits = *pte;
5720                         if (pbits & pmap->pmap_bits[bit_index])
5721                                 atomic_clear_long(pte, pmap->pmap_bits[bit_index]);
5722                 }
5723                 vm_page_spin_unlock(m);
5724                 return;
5725         }
5726
5727         /*
5728          * Clear PG_RW.  Also clears PG_M and marks the page dirty if PG_M
5729          * was set.
5730          */
5731 restart:
5732         vm_page_spin_lock(m);
5733         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5734                 /*
5735                  * don't write protect pager mappings
5736                  */
5737                 if (!pmap_track_modified(pv->pv_pindex))
5738                         continue;
5739
5740 #if defined(PMAP_DIAGNOSTIC)
5741                 if (pv->pv_pmap == NULL) {
5742                         kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
5743                                 pv->pv_pindex);
5744                         continue;
5745                 }
5746 #endif
5747                 pmap = pv->pv_pmap;
5748
5749                 /*
5750                  * Skip pages which do not have PG_RW set.
5751                  */
5752                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
5753                 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0)
5754                         continue;
5755
5756                 /*
5757                  * We must lock the PV to be able to safely test the pte.
5758                  */
5759                 if (pv_hold_try(pv)) {
5760                         vm_page_spin_unlock(m);
5761                 } else {
5762                         vm_page_spin_unlock(m);
5763                         pv_lock(pv);    /* held, now do a blocking lock */
5764                         pv_put(pv);
5765                         goto restart;
5766                 }
5767
5768                 /*
5769                  * Reload pte after acquiring pv.
5770                  */
5771                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
5772 #if 0
5773                 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0) {
5774                         pv_put(pv);
5775                         goto restart;
5776                 }
5777 #endif
5778
5779                 KKASSERT(pv->pv_pmap == pmap && pv->pv_m == m);
5780                 for (;;) {
5781                         pt_entry_t nbits;
5782
5783                         pbits = *pte;
5784                         cpu_ccfence();
5785                         nbits = pbits & ~(pmap->pmap_bits[PG_RW_IDX] |
5786                                           pmap->pmap_bits[PG_M_IDX]);
5787                         if (pmap_inval_smp_cmpset(pmap,
5788                                      ((vm_offset_t)pv->pv_pindex << PAGE_SHIFT),
5789                                      pte, pbits, nbits)) {
5790                                 break;
5791                         }
5792                         cpu_pause();
5793                 }
5794
5795                 /*
5796                  * If PG_M was found to be set while we were clearing PG_RW
5797                  * we also clear PG_M (done above) and mark the page dirty.
5798                  * Callers expect this behavior.
5799                  *
5800                  * we lost pv so it cannot be used as an iterator.  In fact,
5801                  * because we couldn't necessarily lock it atomically it may
5802                  * have moved within the list and ALSO cannot be used as an
5803                  * iterator.
5804                  */
5805                 vm_page_spin_lock(m);
5806                 if (pbits & pmap->pmap_bits[PG_M_IDX])
5807                         vm_page_dirty(m);
5808                 vm_page_spin_unlock(m);
5809                 pv_put(pv);
5810                 goto restart;
5811         }
5812         if (bit_index == PG_RW_IDX)
5813                 vm_page_flag_clear(m, PG_WRITEABLE);
5814         vm_page_spin_unlock(m);
5815 }
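
/*
 * NOTE: The pv_hold_try()/restart sequence above is this file's
 *       standard pattern for upgrading from the vm_page spin lock to
 *       a locked pv.  A distilled sketch of the pattern (schematic
 *       only; the pv/m declarations and the work step are elided):
 */
#if 0
restart:
        vm_page_spin_lock(m);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                if (pv_hold_try(pv)) {
                        vm_page_spin_unlock(m); /* pv is now locked */
                } else {
                        vm_page_spin_unlock(m); /* cannot block w/spinlock */
                        pv_lock(pv);            /* blocking lock, pv held */
                        pv_put(pv);             /* list may have changed */
                        goto restart;
                }
                /* ... operate on the locked pv ... */
                pv_put(pv);                     /* pv no longer an iterator */
                goto restart;
        }
        vm_page_spin_unlock(m);
#endif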
5816
5817 /*
5818  * Lower the permission for all mappings to a given page.
5819  *
5820  * Page must be busied by caller.  Because page is busied by caller this
5821  * should not be able to race a pmap_enter().
5822  */
5823 void
5824 pmap_page_protect(vm_page_t m, vm_prot_t prot)
5825 {
5826         /* JG NX support? */
5827         if ((prot & VM_PROT_WRITE) == 0) {
5828                 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
5829                         /*
5830                          * NOTE: pmap_clearbit(.. PG_RW) also clears
5831                          *       the PG_WRITEABLE flag in (m).
5832                          */
5833                         pmap_clearbit(m, PG_RW_IDX);
5834                 } else {
5835                         pmap_remove_all(m);
5836                 }
5837         }
5838 }
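
/*
 * Example caller (hedged sketch): write-protect a busied page before
 * laundering it so that late modifications fault and re-set PG_M.
 * The busy/wakeup bracketing mirrors how this file busies pages, but
 * the laundering step itself is hypothetical.
 */
#if 0
        vm_page_busy_wait(m, TRUE, "ppro");
        pmap_page_protect(m, VM_PROT_READ);     /* also clears PG_WRITEABLE */
        /* ... flush the page to backing store; new writes will fault ... */
        vm_page_wakeup(m);
#endif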
5839
5840 vm_paddr_t
5841 pmap_phys_address(vm_pindex_t ppn)
5842 {
5843         return (x86_64_ptob(ppn));
5844 }
5845
5846 /*
5847  * Return a count of reference bits for a page, clearing those bits.
5848  * It is not necessary for every reference bit to be cleared, but it
5849  * is necessary that 0 only be returned when there are truly no
5850  * reference bits set.
5851  *
5852  * XXX: The exact number of bits to check and clear is a matter that
5853  * should be tested and standardized at some point in the future for
5854  * optimal aging of shared pages.
5855  *
5856  * This routine may not block.
5857  */
5858 int
5859 pmap_ts_referenced(vm_page_t m)
5860 {
5861         pv_entry_t pv;
5862         pt_entry_t *pte;
5863         pmap_t pmap;
5864         int rtval = 0;
5865
5866         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
5867                 return (rtval);
5868
5869         vm_page_spin_lock(m);
5870         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
5871                 if (!pmap_track_modified(pv->pv_pindex))
5872                         continue;
5873                 pmap = pv->pv_pmap;
5874                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
5875                 if (pte && (*pte & pmap->pmap_bits[PG_A_IDX])) {
5876                         atomic_clear_long(pte, pmap->pmap_bits[PG_A_IDX]);
5877                         rtval++;
5878                         if (rtval > 4)
5879                                 break;
5880                 }
5881         }
5882         vm_page_spin_unlock(m);
5883         return (rtval);
5884 }
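
/*
 * Example caller (hypothetical aging sketch): only the
 * pmap_ts_referenced() call is taken from this file; the act_count
 * arithmetic and ACT_* constants are illustrative of a pageout-style
 * consumer.
 */
#if 0
        actcount = pmap_ts_referenced(m);
        if (actcount) {
                vm_page_flag_set(m, PG_REFERENCED);
                if (m->act_count < ACT_MAX - ACT_ADVANCE)
                        m->act_count += ACT_ADVANCE;
        }
#endif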
5885
5886 /*
5887  *      pmap_is_modified:
5888  *
5889  *      Return whether or not the specified physical page was modified
5890  *      in any physical maps.
5891  */
5892 boolean_t
5893 pmap_is_modified(vm_page_t m)
5894 {
5895         boolean_t res;
5896
5897         res = pmap_testbit(m, PG_M_IDX);
5898         return (res);
5899 }
5900
5901 /*
5902  *      Clear the modify bits on the specified physical page.
5903  */
5904 void
5905 pmap_clear_modify(vm_page_t m)
5906 {
5907         pmap_clearbit(m, PG_M_IDX);
5908 }
5909
5910 /*
5911  *      pmap_clear_reference:
5912  *
5913  *      Clear the reference bit on the specified physical page.
5914  */
5915 void
5916 pmap_clear_reference(vm_page_t m)
5917 {
5918         pmap_clearbit(m, PG_A_IDX);
5919 }
5920
5921 /*
5922  * Miscellaneous support routines follow
5923  */
5924
5925 static
5926 void
5927 x86_64_protection_init(void)
5928 {
5929         uint64_t *kp;
5930         int prot;
5931
5932         /*
5933          * NX supported? (boot time loader.conf override only)
5934          */
5935         TUNABLE_INT_FETCH("machdep.pmap_nx_enable", &pmap_nx_enable);
5936         if (pmap_nx_enable == 0 || (amd_feature & AMDID_NX) == 0)
5937                 pmap_bits_default[PG_NX_IDX] = 0;
5938
5939         /*
5940          * 0 is basically read-only access, but also set the NX (no-execute)
5941          * bit when VM_PROT_EXECUTE is not specified.
5942          */
5943         kp = protection_codes;
5944         for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) {
5945                 switch (prot) {
5946                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
5947                         /*
5948                          * This case is handled elsewhere
5949                          */
5950                         *kp++ = 0;
5951                         break;
5952                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
5953                         /*
5954                          * Read-only is 0|NX
5955                          */
5956                         *kp++ = pmap_bits_default[PG_NX_IDX];
5957                         break;
5958                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
5959                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
5960                         /*
5961                          * Execute requires read access
5962                          */
5963                         *kp++ = 0;
5964                         break;
5965                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
5966                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
5967                         /*
5968                          * Write without execute is RW|NX
5969                          */
5970                         *kp++ = pmap_bits_default[PG_RW_IDX] |
5971                                 pmap_bits_default[PG_NX_IDX];
5972                         break;
5973                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
5974                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
5975                         /*
5976                          * Write with execute is RW
5977                          */
5978                         *kp++ = pmap_bits_default[PG_RW_IDX];
5979                         break;
5980                 }
5981         }
5982 }
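
/*
 * Example consumer (hedged sketch): a pte-construction path indexes
 * protection_codes[] by the low VM_PROT_* bits.  The variable names
 * here are illustrative only; see the pte construction in
 * pmap_enter() for the real consumer.
 */
#if 0
        pt_entry_t newpte;

        newpte = pa |
                 protection_codes[prot & (VM_PROT_READ | VM_PROT_WRITE |
                                          VM_PROT_EXECUTE)] |
                 pmap->pmap_bits[PG_V_IDX];
#endif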
5983
5984 /*
5985  * Map a set of physical memory pages into the kernel virtual
5986  * address space. Return a pointer to where it is mapped. This
5987  * routine is intended to be used for mapping device memory,
5988  * NOT real memory.
5989  *
5990  * NOTE: We can't use pgeflag unless we invalidate the pages one at
5991  *       a time.
5992  *
5993  * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE}
5994  *       work whether the cpu supports PAT or not.  The remaining PAT
5995  *       attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu
5996  *       supports PAT.
5997  */
5998 void *
5999 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
6000 {
6001         return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6002 }
6003
6004 void *
6005 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
6006 {
6007         return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
6008 }
6009
6010 void *
6011 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
6012 {
6013         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6014 }
6015
6016 /*
6017  * Map a set of physical memory pages into the kernel virtual
6018  * address space. Return a pointer to where it is mapped. This
6019  * routine is intended to be used for mapping device memory,
6020  * NOT real memory.
6021  */
6022 void *
6023 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
6024 {
6025         vm_offset_t va, tmpva, offset;
6026         pt_entry_t *pte;
6027         vm_size_t tmpsize;
6028
6029         offset = pa & PAGE_MASK;
6030         size = roundup(offset + size, PAGE_SIZE);
6031
6032         va = kmem_alloc_nofault(&kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE);
6033         if (va == 0)
6034                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
6035
6036         pa = pa & ~PAGE_MASK;
6037         for (tmpva = va, tmpsize = size; tmpsize > 0;) {
6038                 pte = vtopte(tmpva);
6039                 *pte = pa |
6040                     kernel_pmap.pmap_bits[PG_RW_IDX] |
6041                     kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */
6042                     kernel_pmap.pmap_cache_bits[mode];
6043                 tmpsize -= PAGE_SIZE;
6044                 tmpva += PAGE_SIZE;
6045                 pa += PAGE_SIZE;
6046         }
6047         pmap_invalidate_range(&kernel_pmap, va, va + size);
6048         pmap_invalidate_cache_range(va, va + size);
6049
6050         return ((void *)(va + offset));
6051 }
6052
6053 void
6054 pmap_unmapdev(vm_offset_t va, vm_size_t size)
6055 {
6056         vm_offset_t base, offset;
6057
6058         base = va & ~PAGE_MASK;
6059         offset = va & PAGE_MASK;
6060         size = roundup(offset + size, PAGE_SIZE);
6061         pmap_qremove(base, size >> PAGE_SHIFT);
6062         kmem_free(&kernel_map, base, size);
6063 }
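
/*
 * Example usage (hedged sketch): a driver maps a device BAR
 * uncacheable, pokes a register, and unmaps.  The physical address
 * is hypothetical; the mapdev/unmapdev calls are from this file.
 */
#if 0
        volatile uint32_t *regs;

        regs = pmap_mapdev_uncacheable(0xfebc0000UL, PAGE_SIZE);
        regs[0] = 1;                            /* device register write */
        pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
#endif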
6064
6065 /*
6066  * Sets the memory attribute for the specified page.
6067  */
6068 void
6069 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
6070 {
6071
6072         m->pat_mode = ma;
6073
6074         /*
6075          * If "m" is a normal page, update its direct mapping.  This update
6076          * can be relied upon to perform any cache operations that are
6077          * required for data coherence.
6078          */
6079         if ((m->flags & PG_FICTITIOUS) == 0)
6080                 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode);
6081 }
6082
6083 /*
6084  * Change the PAT attribute on an existing kernel memory map.  Caller
6085  * must ensure that the virtual memory in question is not accessed
6086  * during the adjustment.
6087  */
6088 void
6089 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode)
6090 {
6091         pt_entry_t *pte;
6092         vm_offset_t base;
6093         int changed = 0;
6094
6095         if (va == 0)
6096                 panic("pmap_change_attr: va is NULL");
6097         base = trunc_page(va);
6098
6099         while (count) {
6100                 pte = vtopte(va);
6101                 *pte = (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask)) |
6102                        kernel_pmap.pmap_cache_bits[mode];
6103                 --count;
6104                 va += PAGE_SIZE;
6105         }
6106
6107         changed = 1;    /* XXX: not optimal */
6108
6109         /*
6110          * Flush CPU caches if required, to make sure no data remains
6111          * cached that should not be.
6112          */
6113         if (changed) {
6114                 pmap_invalidate_range(&kernel_pmap, base, va);
6115                 pmap_invalidate_cache_range(base, va);
6116         }
6117 }
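
/*
 * Example usage (hedged sketch): switch an already-mapped framebuffer
 * to write-combining.  fb_va and fb_pages are hypothetical, and per
 * the pmap_mapdev() notes above WRITE_COMBINING requires PAT support.
 */
#if 0
        pmap_change_attr(fb_va, fb_pages, PAT_WRITE_COMBINING);
#endif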
6118
6119 /*
6120  * perform the pmap work for mincore
6121  */
6122 int
6123 pmap_mincore(pmap_t pmap, vm_offset_t addr)
6124 {
6125         pt_entry_t *ptep, pte;
6126         vm_page_t m;
6127         int val = 0;
6128         
6129         ptep = pmap_pte(pmap, addr);
6130
6131         if (ptep && (pte = *ptep) != 0) {
6132                 vm_offset_t pa;
6133
6134                 val = MINCORE_INCORE;
6135                 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0)
6136                         goto done;
6137
6138                 pa = pte & PG_FRAME;
6139
6140                 if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
6141                         m = NULL;
6142                 else
6143                         m = PHYS_TO_VM_PAGE(pa);
6144
6145                 /*
6146                  * Modified by us
6147                  */
6148                 if (pte & pmap->pmap_bits[PG_M_IDX])
6149                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
6150                 /*
6151                  * Modified by someone
6152                  */
6153                 else if (m && (m->dirty || pmap_is_modified(m)))
6154                         val |= MINCORE_MODIFIED_OTHER;
6155                 /*
6156                  * Referenced by us
6157                  */
6158                 if (pte & pmap->pmap_bits[PG_A_IDX])
6159                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
6160
6161                 /*
6162                  * Referenced by someone
6163                  */
6164                 else if (m && ((m->flags & PG_REFERENCED) ||
6165                                 pmap_ts_referenced(m))) {
6166                         val |= MINCORE_REFERENCED_OTHER;
6167                         vm_page_flag_set(m, PG_REFERENCED);
6168                 }
6169         } 
6170 done:
6171
6172         return val;
6173 }
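
/*
 * Example caller (hedged sketch): the mincore(2) syscall layer would
 * invoke this once per page, storing the status byte in the user
 * vector.  The vec/addr bookkeeping here is illustrative only.
 */
#if 0
        for (addr = sva; addr < eva; addr += PAGE_SIZE)
                *vec++ = (char)pmap_mincore(pmap, addr);
#endif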
6174
6175 /*
6176  * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
6177  * vmspace will be ref'd and the old one will be deref'd.
6178  *
6179  * The vmspace for all lwps associated with the process will be adjusted
6180  * and cr3 will be reloaded if any lwp is the current lwp.
6181  *
6182  * The process must hold the vmspace->vm_map.token for oldvm and newvm.
6183  */
6184 void
6185 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
6186 {
6187         struct vmspace *oldvm;
6188         struct lwp *lp;
6189
6190         oldvm = p->p_vmspace;
6191         if (oldvm != newvm) {
6192                 if (adjrefs)
6193                         vmspace_ref(newvm);
6194                 p->p_vmspace = newvm;
6195                 KKASSERT(p->p_nthreads == 1);
6196                 lp = RB_ROOT(&p->p_lwp_tree);
6197                 pmap_setlwpvm(lp, newvm);
6198                 if (adjrefs)
6199                         vmspace_rel(oldvm);
6200         }
6201 }
6202
6203 /*
6204  * Set the vmspace for a LWP.  The vmspace is almost universally set the
6205  * same as the process vmspace, but virtual kernels need to swap out contexts
6206  * on a per-lwp basis.
6207  *
6208  * Caller does not necessarily hold any vmspace tokens.  Caller must control
6209  * the lwp (typically by being in the context of the lwp).  We use a critical
6210  * section to protect against statclock and hardclock (statistics collection).
6211  */
6212 void
6213 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
6214 {
6215         struct vmspace *oldvm;
6216         struct pmap *pmap;
6217
6218         oldvm = lp->lwp_vmspace;
6219
6220         if (oldvm != newvm) {
6221                 crit_enter();
6222                 KKASSERT((newvm->vm_refcnt & VM_REF_DELETED) == 0);
6223                 lp->lwp_vmspace = newvm;
6224                 if (curthread->td_lwp == lp) {
6225                         pmap = vmspace_pmap(newvm);
6226                         ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
6227                         if (pmap->pm_active_lock & CPULOCK_EXCL)
6228                                 pmap_interlock_wait(newvm);
6229 #if defined(SWTCH_OPTIM_STATS)
6230                         tlb_flush_count++;
6231 #endif
6232                         if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
6233                                 curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
6234                         } else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
6235                                 curthread->td_pcb->pcb_cr3 = KPML4phys;
6236                         } else {
6237                                 panic("pmap_setlwpvm: unknown pmap type");
6238                         }
6239                         load_cr3(curthread->td_pcb->pcb_cr3);
6240                         pmap = vmspace_pmap(oldvm);
6241                         ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
6242                                                mycpu->gd_cpuid);
6243                 }
6244                 crit_exit();
6245         }
6246 }
6247
6248 /*
6249  * Called when switching to a locked pmap, used to interlock against pmaps
6250  * undergoing modifications to prevent us from activating the MMU for the
6251  * target pmap until all such modifications have completed.  We have to do
6252  * this because the thread making the modifications has already set up its
6253  * SMP synchronization mask.
6254  *
6255  * This function cannot sleep!
6256  *
6257  * No requirements.
6258  */
6259 void
6260 pmap_interlock_wait(struct vmspace *vm)
6261 {
6262         struct pmap *pmap = &vm->vm_pmap;
6263
6264         if (pmap->pm_active_lock & CPULOCK_EXCL) {
6265                 crit_enter();
6266                 KKASSERT(curthread->td_critcount >= 2);
6267                 DEBUG_PUSH_INFO("pmap_interlock_wait");
6268                 while (pmap->pm_active_lock & CPULOCK_EXCL) {
6269                         cpu_ccfence();
6270                         lwkt_process_ipiq();
6271                 }
6272                 DEBUG_POP_INFO();
6273                 crit_exit();
6274         }
6275 }
6276
6277 vm_offset_t
6278 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
6279 {
6280
6281         if ((obj == NULL) || (size < NBPDR) ||
6282             ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
6283                 return addr;
6284         }
6285
6286         addr = roundup2(addr, NBPDR);
6287         return addr;
6288 }
6289
6290 /*
6291  * Used by kmalloc/kfree, page already exists at va
6292  */
6293 vm_page_t
6294 pmap_kvtom(vm_offset_t va)
6295 {
6296         pt_entry_t *ptep = vtopte(va);
6297
6298         KKASSERT((*ptep & kernel_pmap.pmap_bits[PG_DEVICE_IDX]) == 0);
6299         return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
6300 }
6301
6302 /*
6303  * Initialize machine-specific shared page directory support.  This
6304  * is executed when a VM object is created.
6305  */
6306 void
6307 pmap_object_init(vm_object_t object)
6308 {
6309         object->md.pmap_rw = NULL;
6310         object->md.pmap_ro = NULL;
6311 }
6312
6313 /*
6314  * Clean up machine-specific shared page directory support.  This
6315  * is executed when a VM object is destroyed.
6316  */
6317 void
6318 pmap_object_free(vm_object_t object)
6319 {
6320         pmap_t pmap;
6321
6322         if ((pmap = object->md.pmap_rw) != NULL) {
6323                 object->md.pmap_rw = NULL;
6324                 pmap_remove_noinval(pmap,
6325                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
6326                 CPUMASK_ASSZERO(pmap->pm_active);
6327                 pmap_release(pmap);
6328                 pmap_puninit(pmap);
6329                 kfree(pmap, M_OBJPMAP);
6330         }
6331         if ((pmap = object->md.pmap_ro) != NULL) {
6332                 object->md.pmap_ro = NULL;
6333                 pmap_remove_noinval(pmap,
6334                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
6335                 CPUMASK_ASSZERO(pmap->pm_active);
6336                 pmap_release(pmap);
6337                 pmap_puninit(pmap);
6338                 kfree(pmap, M_OBJPMAP);
6339         }
6340 }
6341
6342 /*
6343  * pmap_pgscan_callback - Used by pmap_pgscan to acquire the related
6344  * VM page and issue a pginfo->callback.
6345  *
6346  * We are expected to dispose of any non-NULL pte_pv.
6347  */
6348 static
6349 void
6350 pmap_pgscan_callback(pmap_t pmap, struct pmap_scan_info *info,
6351                       pv_entry_t pte_pv, vm_pindex_t *pte_placemark,
6352                       pv_entry_t pt_pv, int sharept,
6353                       vm_offset_t va, pt_entry_t *ptep, void *arg)
6354 {
6355         struct pmap_pgscan_info *pginfo = arg;
6356         vm_page_t m;
6357
6358         if (pte_pv) {
6359                 /*
6360                  * Try to busy the page while we hold the pte_pv locked.
6361                  */
6362                 KKASSERT(pte_pv->pv_m);
6363                 m = PHYS_TO_VM_PAGE(*ptep & PG_FRAME);
6364                 if (vm_page_busy_try(m, TRUE) == 0) {
6365                         if (m == PHYS_TO_VM_PAGE(*ptep & PG_FRAME)) {
6366                                 /*
6367                                  * The callback is issued with the pte_pv
6368                                  * unlocked and put away, and the pt_pv
6369                                  * unlocked.
6370                                  */
6371                                 pv_put(pte_pv);
6372                                 if (pt_pv) {
6373                                         vm_page_wire_quick(pt_pv->pv_m);
6374                                         pv_unlock(pt_pv);
6375                                 }
6376                                 if (pginfo->callback(pginfo, va, m) < 0)
6377                                         info->stop = 1;
6378                                 if (pt_pv) {
6379                                         pv_lock(pt_pv);
6380                                         vm_page_unwire_quick(pt_pv->pv_m);
6381                                 }
6382                         } else {
6383                                 vm_page_wakeup(m);
6384                                 pv_put(pte_pv);
6385                         }
6386                 } else {
6387                         ++pginfo->busycount;
6388                         pv_put(pte_pv);
6389                 }
6390         } else {
6391                 /*
6392                  * Shared page table or unmanaged page (sharept or !sharept)
6393                  */
6394                 pv_placemarker_wakeup(pmap, pte_placemark);
6395         }
6396 }
6397
6398 void
6399 pmap_pgscan(struct pmap_pgscan_info *pginfo)
6400 {
6401         struct pmap_scan_info info;
6402
6403         pginfo->offset = pginfo->beg_addr;
6404         info.pmap = pginfo->pmap;
6405         info.sva = pginfo->beg_addr;
6406         info.eva = pginfo->end_addr;
6407         info.func = pmap_pgscan_callback;
6408         info.arg = pginfo;
6409         pmap_scan(&info, 0);
6410         if (info.stop == 0)
6411                 pginfo->offset = pginfo->end_addr;
6412 }
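
/*
 * Example usage (hedged sketch): callers fill in a pmap_pgscan_info
 * and supply a callback; the callback receives each page busied, must
 * dispose of the busy state, and returns < 0 to stop the scan.  The
 * callback name and process reference here are hypothetical.
 */
#if 0
        struct pmap_pgscan_info info;

        info.pmap = vmspace_pmap(p->p_vmspace);
        info.beg_addr = VM_MIN_USER_ADDRESS;
        info.end_addr = VM_MAX_USER_ADDRESS;
        info.busycount = 0;
        info.callback = my_pgscan_callback;     /* hypothetical */
        pmap_pgscan(&info);
        /* info.offset reaches info.end_addr if the scan was not stopped */
#endif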
6413
6414 /*
6415  * Wait for a placemarker that we do not own to clear.  The placemarker
6416  * in question is not necessarily set to the pindex we want; we may have
6417  * to wait on the element because we want to reserve it ourselves.
6418  *
6419  * NOTE: PM_PLACEMARK_WAKEUP sets a bit which is already set in
6420  *       PM_NOPLACEMARK, so it does not interfere with placemarks
6421  *       which have already been woken up.
6422  */
6423 static
6424 void
6425 pv_placemarker_wait(pmap_t pmap, vm_pindex_t *pmark)
6426 {
6427         if (*pmark != PM_NOPLACEMARK) {
6428                 atomic_set_long(pmark, PM_PLACEMARK_WAKEUP);
6429                 tsleep_interlock(pmark, 0);
6430                 if (*pmark != PM_NOPLACEMARK)
6431                         tsleep(pmark, PINTERLOCKED, "pvplw", 0);
6432         }
6433 }
6434
6435 /*
6436  * Wakeup a placemarker that we own.  Replace the entry with
6437  * PM_NOPLACEMARK and issue a wakeup() if necessary.
6438  */
6439 static
6440 void
6441 pv_placemarker_wakeup(pmap_t pmap, vm_pindex_t *pmark)
6442 {
6443         vm_pindex_t pindex;
6444
6445         pindex = atomic_swap_long(pmark, PM_NOPLACEMARK);
6446         KKASSERT(pindex != PM_NOPLACEMARK);
6447         if (pindex & PM_PLACEMARK_WAKEUP)
6448                 wakeup(pmark);
6449 }
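
/*
 * Example acquire loop (hedged sketch): together the two helpers
 * above implement a pindex placemarker lock.  The hash helper and
 * cmpset acquisition shown here are illustrative of the pattern used
 * by the pv lookup code, not a verbatim copy of it.
 */
#if 0
        vm_pindex_t *pmark = pmap_placemarker_hash(pmap, pindex);

        while (atomic_cmpset_long(pmark, PM_NOPLACEMARK, pindex) == 0)
                pv_placemarker_wait(pmap, pmark);       /* blocks via tsleep */
        /* ... pindex is now reserved against concurrent lookups ... */
        pv_placemarker_wakeup(pmap, pmark);             /* release + wakeup */
#endif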