kernel - Yield during VM teardown, fix zfree() contention
[dragonfly.git] / sys / platform / pc64 / x86_64 / pmap.c
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1994 David Greenman
5  * Copyright (c) 2003 Peter Wemm
6  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7  * Copyright (c) 2008, 2009 The DragonFly Project.
8  * Copyright (c) 2008, 2009 Jordan Gordeev.
9  * Copyright (c) 2011-2012 Matthew Dillon
10  * All rights reserved.
11  *
12  * This code is derived from software contributed to Berkeley by
13  * the Systems Programming Group of the University of Utah Computer
14  * Science Department and William Jolitz of UUNET Technologies Inc.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  */
44 /*
45  * Manage physical address maps for x86-64 systems.
46  */
47
48 #if 0 /* JG */
49 #include "opt_disable_pse.h"
50 #include "opt_pmap.h"
51 #endif
52 #include "opt_msgbuf.h"
53
54 #include <sys/param.h>
55 #include <sys/kernel.h>
56 #include <sys/proc.h>
57 #include <sys/msgbuf.h>
58 #include <sys/vmmeter.h>
59 #include <sys/mman.h>
60 #include <sys/systm.h>
61
62 #include <vm/vm.h>
63 #include <vm/vm_param.h>
64 #include <sys/sysctl.h>
65 #include <sys/lock.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_pager.h>
73 #include <vm/vm_zone.h>
74
75 #include <sys/user.h>
76 #include <sys/thread2.h>
77 #include <sys/sysref2.h>
78 #include <sys/spinlock2.h>
79 #include <vm/vm_page2.h>
80
81 #include <machine/cputypes.h>
82 #include <machine/md_var.h>
83 #include <machine/specialreg.h>
84 #include <machine/smp.h>
85 #include <machine_base/apic/apicreg.h>
86 #include <machine/globaldata.h>
87 #include <machine/pmap.h>
88 #include <machine/pmap_inval.h>
89 #include <machine/inttypes.h>
90
91 #include <ddb/ddb.h>
92
93 #define PMAP_KEEP_PDIRS
94 #ifndef PMAP_SHPGPERPROC
95 #define PMAP_SHPGPERPROC 2000
96 #endif
97
98 #if defined(DIAGNOSTIC)
99 #define PMAP_DIAGNOSTIC
100 #endif
101
102 #define MINPV 2048
103
104 /*
105  * pmap debugging will report who owns a pv lock when blocking.
106  */
107 #ifdef PMAP_DEBUG
108
109 #define PMAP_DEBUG_DECL         ,const char *func, int lineno
110 #define PMAP_DEBUG_ARGS         , __func__, __LINE__
111 #define PMAP_DEBUG_COPY         , func, lineno
112
113 #define pv_get(pmap, pindex)            _pv_get(pmap, pindex            \
114                                                         PMAP_DEBUG_ARGS)
115 #define pv_lock(pv)                     _pv_lock(pv                     \
116                                                         PMAP_DEBUG_ARGS)
117 #define pv_hold_try(pv)                 _pv_hold_try(pv                 \
118                                                         PMAP_DEBUG_ARGS)
119 #define pv_alloc(pmap, pindex, isnewp)  _pv_alloc(pmap, pindex, isnewp  \
120                                                         PMAP_DEBUG_ARGS)
121
122 #else
123
124 #define PMAP_DEBUG_DECL
125 #define PMAP_DEBUG_ARGS
126 #define PMAP_DEBUG_COPY
127
128 #define pv_get(pmap, pindex)            _pv_get(pmap, pindex)
129 #define pv_lock(pv)                     _pv_lock(pv)
130 #define pv_hold_try(pv)                 _pv_hold_try(pv)
131 #define pv_alloc(pmap, pindex, isnewp)  _pv_alloc(pmap, pindex, isnewp)
132
133 #endif
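/*
 * For illustration: with PMAP_DEBUG defined, a call such as pv_lock(pv)
 * expands to _pv_lock(pv, __func__, __LINE__), so the caller's function
 * name and line number travel with the request and can be reported when
 * the pv lock blocks.  Without PMAP_DEBUG the extra arguments compile
 * away entirely.
 */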
134
135 /*
136  * Get PDEs and PTEs for user/kernel address space
137  */
138 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
139
140 #define pmap_pde_v(pmap, pte)           ((*(pd_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
141 #define pmap_pte_w(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_W_IDX]) != 0)
142 #define pmap_pte_m(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_M_IDX]) != 0)
143 #define pmap_pte_u(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_U_IDX]) != 0)
144 #define pmap_pte_v(pmap, pte)           ((*(pt_entry_t *)pte & pmap->pmap_bits[PG_V_IDX]) != 0)
145
146 /*
147  * Given a map and a machine independent protection code,
148  * convert to a vax protection code.
149  */
150 #define pte_prot(m, p)          \
151         (m->protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
152 static int protection_codes[PROTECTION_CODES_SIZE];
153
154 struct pmap kernel_pmap;
155 static TAILQ_HEAD(,pmap)        pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
156
157 MALLOC_DEFINE(M_OBJPMAP, "objpmap", "pmaps associated with VM objects");
158
159 vm_paddr_t avail_start;         /* PA of first available physical page */
160 vm_paddr_t avail_end;           /* PA of last available physical page */
161 vm_offset_t virtual2_start;     /* cutout free area prior to kernel start */
162 vm_offset_t virtual2_end;
163 vm_offset_t virtual_start;      /* VA of first avail page (after kernel bss) */
164 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
165 vm_offset_t KvaStart;           /* VA start of KVA space */
166 vm_offset_t KvaEnd;             /* VA end of KVA space (non-inclusive) */
167 vm_offset_t KvaSize;            /* max size of kernel virtual address space */
168 static boolean_t pmap_initialized = FALSE;      /* Has pmap_init completed? */
169 //static int pgeflag;           /* PG_G or-in */
170 //static int pseflag;           /* PG_PS or-in */
171 uint64_t PatMsr;
172
173 static int ndmpdp;
174 static vm_paddr_t dmaplimit;
175 static int nkpt;
176 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
177
178 static pt_entry_t pat_pte_index[PAT_INDEX_SIZE];        /* PAT -> PG_ bits */
179 /*static pt_entry_t pat_pde_index[PAT_INDEX_SIZE];*/    /* PAT -> PG_ bits */
180
181 static uint64_t KPTbase;
182 static uint64_t KPTphys;
183 static uint64_t KPDphys;        /* phys addr of kernel level 2 */
184 static uint64_t KPDbase;        /* phys addr of kernel level 2 @ KERNBASE */
185 uint64_t KPDPphys;      /* phys addr of kernel level 3 */
186 uint64_t KPML4phys;     /* phys addr of kernel level 4 */
187
188 static uint64_t DMPDphys;       /* phys addr of direct mapped level 2 */
189 static uint64_t DMPDPphys;      /* phys addr of direct mapped level 3 */
190
191 /*
192  * Data for the pv entry allocation mechanism
193  */
194 static vm_zone_t pvzone;
195 static struct vm_zone pvzone_store;
196 static struct vm_object pvzone_obj;
197 static int pv_entry_max=0, pv_entry_high_water=0;
198 static int pmap_pagedaemon_waken = 0;
199 static struct pv_entry *pvinit;
200
201 /*
202  * All those kernel PT submaps that BSD is so fond of
203  */
204 pt_entry_t *CMAP1 = NULL, *ptmmap;
205 caddr_t CADDR1 = NULL, ptvmmap = NULL;
206 static pt_entry_t *msgbufmap;
207 struct msgbuf *msgbufp=NULL;
208
209 /*
210  * PMAP default PG_* bits. Needed to be able to add
211  * EPT/NPT pagetable pmap_bits for the VMM module
212  */
213 uint64_t pmap_bits_default[] = {
214                 REGULAR_PMAP,                                   /* TYPE_IDX             0 */
215                 X86_PG_V,                                       /* PG_V_IDX             1 */
216                 X86_PG_RW,                                      /* PG_RW_IDX            2 */
217                 X86_PG_U,                                       /* PG_U_IDX             3 */
218                 X86_PG_A,                                       /* PG_A_IDX             4 */
219                 X86_PG_M,                                       /* PG_M_IDX             5 */
220                 X86_PG_PS,                                      /* PG_PS_IDX            6 */
221                 X86_PG_G,                                       /* PG_G_IDX             7 */
222                 X86_PG_AVAIL1,                                  /* PG_AVAIL1_IDX        8 */
223                 X86_PG_AVAIL2,                                  /* PG_AVAIL2_IDX        9 */
224                 X86_PG_AVAIL3,                                  /* PG_AVAIL3_IDX        10 */
225                 X86_PG_NC_PWT | X86_PG_NC_PCD,                  /* PG_N_IDX     11 */
226 };
227 /*
228  * Crashdump maps.
229  */
230 static pt_entry_t *pt_crashdumpmap;
231 static caddr_t crashdumpmap;
232
233 #ifdef PMAP_DEBUG2
234 static int pmap_enter_debug = 0;
235 SYSCTL_INT(_machdep, OID_AUTO, pmap_enter_debug, CTLFLAG_RW,
236     &pmap_enter_debug, 0, "Debug pmap_enter's");
237 #endif
238 static int pmap_yield_count = 64;
239 SYSCTL_INT(_machdep, OID_AUTO, pmap_yield_count, CTLFLAG_RW,
240     &pmap_yield_count, 0, "Yield during init_pt/release");
241 static int pmap_mmu_optimize = 0;
242 SYSCTL_INT(_machdep, OID_AUTO, pmap_mmu_optimize, CTLFLAG_RW,
243     &pmap_mmu_optimize, 0, "Share page table pages when possible");
244 int pmap_fast_kernel_cpusync = 0;
245 SYSCTL_INT(_machdep, OID_AUTO, pmap_fast_kernel_cpusync, CTLFLAG_RW,
246     &pmap_fast_kernel_cpusync, 0, "Fast kernel pmap cpu synchronization");
247
248 #define DISABLE_PSE
249
250 /* Standard user access functions */
251 extern int std_copyinstr (const void *udaddr, void *kaddr, size_t len,
252     size_t *lencopied);
253 extern int std_copyin (const void *udaddr, void *kaddr, size_t len);
254 extern int std_copyout (const void *kaddr, void *udaddr, size_t len);
255 extern int std_fubyte (const void *base);
256 extern int std_subyte (void *base, int byte);
257 extern long std_fuword (const void *base);
258 extern int std_suword (void *base, long word);
259 extern int std_suword32 (void *base, int word);
260
261 static void pv_hold(pv_entry_t pv);
262 static int _pv_hold_try(pv_entry_t pv
263                                 PMAP_DEBUG_DECL);
264 static void pv_drop(pv_entry_t pv);
265 static void _pv_lock(pv_entry_t pv
266                                 PMAP_DEBUG_DECL);
267 static void pv_unlock(pv_entry_t pv);
268 static pv_entry_t _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew
269                                 PMAP_DEBUG_DECL);
270 static pv_entry_t _pv_get(pmap_t pmap, vm_pindex_t pindex
271                                 PMAP_DEBUG_DECL);
272 static pv_entry_t pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp);
273 static pv_entry_t pv_find(pmap_t pmap, vm_pindex_t pindex);
274 static void pv_put(pv_entry_t pv);
275 static void pv_free(pv_entry_t pv);
276 static void *pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex);
277 static pv_entry_t pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
278                       pv_entry_t *pvpp);
279 static pv_entry_t pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex,
280                       pv_entry_t *pvpp, vm_map_entry_t entry, vm_offset_t va);
281 static void pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp,
282                       struct pmap_inval_info *info);
283 static vm_page_t pmap_remove_pv_page(pv_entry_t pv);
284 static int pmap_release_pv( struct pmap_inval_info *info,
285                       pv_entry_t pv, pv_entry_t pvp);
286
287 struct pmap_scan_info;
288 static void pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
289                       pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
290                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
291 static void pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
292                       pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
293                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused);
294
295 static void i386_protection_init (void);
296 static void create_pagetables(vm_paddr_t *firstaddr);
297 static void pmap_remove_all (vm_page_t m);
298 static boolean_t pmap_testbit (vm_page_t m, int bit);
299
300 static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
301 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
302
303 static void pmap_pinit_defaults(struct pmap *pmap);
304
305 static unsigned pdir4mb;
306
307 static int
308 pv_entry_compare(pv_entry_t pv1, pv_entry_t pv2)
309 {
310         if (pv1->pv_pindex < pv2->pv_pindex)
311                 return(-1);
312         if (pv1->pv_pindex > pv2->pv_pindex)
313                 return(1);
314         return(0);
315 }
316
317 RB_GENERATE2(pv_entry_rb_tree, pv_entry, pv_entry,
318              pv_entry_compare, vm_pindex_t, pv_pindex);
319
320 static __inline
321 void
322 pmap_page_stats_adding(vm_page_t m)
323 {
324         globaldata_t gd = mycpu;
325
326         if (TAILQ_EMPTY(&m->md.pv_list)) {
327                 ++gd->gd_vmtotal.t_arm;
328         } else if (TAILQ_FIRST(&m->md.pv_list) ==
329                    TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
330                 ++gd->gd_vmtotal.t_armshr;
331                 ++gd->gd_vmtotal.t_avmshr;
332         } else {
333                 ++gd->gd_vmtotal.t_avmshr;
334         }
335 }
336
337 static __inline
338 void
339 pmap_page_stats_deleting(vm_page_t m)
340 {
341         globaldata_t gd = mycpu;
342
343         if (TAILQ_EMPTY(&m->md.pv_list)) {
344                 --gd->gd_vmtotal.t_arm;
345         } else if (TAILQ_FIRST(&m->md.pv_list) ==
346                    TAILQ_LAST(&m->md.pv_list, md_page_pv_list)) {
347                 --gd->gd_vmtotal.t_armshr;
348                 --gd->gd_vmtotal.t_avmshr;
349         } else {
350                 --gd->gd_vmtotal.t_avmshr;
351         }
352 }
353
354 /*
355  * Move the kernel virtual free pointer to the next
356  * 2MB.  This is used to help improve performance
357  * by using a large (2MB) page for much of the kernel
358  * (.text, .data, .bss)
359  */
360 static
361 vm_offset_t
362 pmap_kmem_choose(vm_offset_t addr)
363 {
364         vm_offset_t newaddr = addr;
365
366         newaddr = roundup2(addr, NBPDR);
367         return newaddr;
368 }
369
370 /*
371  * pmap_pte_quick:
372  *
373  *      Super fast pmap_pte routine best used when scanning the pv lists.
374  *      This eliminates many coarse-grained invltlb calls.  Note that many of
375  *      the pv list scans are across different pmaps and it is very wasteful
376  *      to do an entire invltlb when checking a single mapping.
377  */
378 static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
379
380 static
381 pt_entry_t *
382 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
383 {
384         return pmap_pte(pmap, va);
385 }
386
387 /*
388  * Returns the pindex of a page table entry (representing a terminal page).
389  * There are NUPTE_TOTAL page table entries possible (a huge number)
390  *
391  * x86-64 has a 48-bit address space, where bit 47 is sign-extended out.
392  * We want to properly translate negative KVAs.
393  */
394 static __inline
395 vm_pindex_t
396 pmap_pte_pindex(vm_offset_t va)
397 {
398         return ((va >> PAGE_SHIFT) & (NUPTE_TOTAL - 1));
399 }
400
401 /*
402  * Returns the pindex of a page table.
403  */
404 static __inline
405 vm_pindex_t
406 pmap_pt_pindex(vm_offset_t va)
407 {
408         return (NUPTE_TOTAL + ((va >> PDRSHIFT) & (NUPT_TOTAL - 1)));
409 }
410
411 /*
412  * Returns the pindex of a page directory.
413  */
414 static __inline
415 vm_pindex_t
416 pmap_pd_pindex(vm_offset_t va)
417 {
418         return (NUPTE_TOTAL + NUPT_TOTAL +
419                 ((va >> PDPSHIFT) & (NUPD_TOTAL - 1)));
420 }
421
422 static __inline
423 vm_pindex_t
424 pmap_pdp_pindex(vm_offset_t va)
425 {
426         return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
427                 ((va >> PML4SHIFT) & (NUPDP_TOTAL - 1)));
428 }
429
430 static __inline
431 vm_pindex_t
432 pmap_pml4_pindex(void)
433 {
434         return (NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL);
435 }
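/*
 * For illustration, the pindex functions above carve a single linear
 * pv pindex space out of the page table hierarchy:
 *
 *      pte pages:  [0, NUPTE_TOTAL)
 *      pt  pages:  NUPTE_TOTAL + [0, NUPT_TOTAL)
 *      pd  pages:  NUPTE_TOTAL + NUPT_TOTAL + [0, NUPD_TOTAL)
 *      pdp pages:  NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + [0, NUPDP_TOTAL)
 *      pml4 page:  NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL + NUPDP_TOTAL
 *
 * so one vm_pindex_t can name a page at any level of a pmap.
 */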
436
437 /*
438  * Return various clipped indexes for a given VA
439  *
440  * Returns the index of a pte in a page table, representing a terminal
441  * page.
442  */
443 static __inline
444 vm_pindex_t
445 pmap_pte_index(vm_offset_t va)
446 {
447         return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
448 }
449
450 /*
451  * Returns the index of a pt in a page directory, representing a page
452  * table.
453  */
454 static __inline
455 vm_pindex_t
456 pmap_pt_index(vm_offset_t va)
457 {
458         return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
459 }
460
461 /*
462  * Returns the index of a pd in a page directory page, representing a page
463  * directory.
464  */
465 static __inline
466 vm_pindex_t
467 pmap_pd_index(vm_offset_t va)
468 {
469         return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
470 }
471
472 /*
473  * Returns the index of a pdp in the pml4 table, representing a page
474  * directory page.
475  */
476 static __inline
477 vm_pindex_t
478 pmap_pdp_index(vm_offset_t va)
479 {
480         return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
481 }
482
483 /*
484  * Generic procedure to index a pte from a pt, pd, or pdp.
485  *
486  * NOTE: Normally passed pindex as pmap_xx_index().  pmap_xx_pindex() is NOT
487  *       a page table page index but is instead a PV lookup index.
488  */
489 static
490 void *
491 pv_pte_lookup(pv_entry_t pv, vm_pindex_t pindex)
492 {
493         pt_entry_t *pte;
494
495         pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pv->pv_m));
496         return(&pte[pindex]);
497 }
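/*
 * Typical usage (see pmap_extract() below): given the held pv of a page
 * table page, the pte slot for a va is
 *
 *      ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
 *
 * i.e. the pv supplies the page and pmap_xx_index() supplies the slot
 * within it.
 */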
498
499 /*
500  * Return pointer to PDP slot in the PML4
501  */
502 static __inline
503 pml4_entry_t *
504 pmap_pdp(pmap_t pmap, vm_offset_t va)
505 {
506         return (&pmap->pm_pml4[pmap_pdp_index(va)]);
507 }
508
509 /*
510  * Return pointer to PD slot in the PDP given a pointer to the PDP
511  */
512 static __inline
513 pdp_entry_t *
514 pmap_pdp_to_pd(pml4_entry_t pdp_pte, vm_offset_t va)
515 {
516         pdp_entry_t *pd;
517
518         pd = (pdp_entry_t *)PHYS_TO_DMAP(pdp_pte & PG_FRAME);
519         return (&pd[pmap_pd_index(va)]);
520 }
521
522 /*
523  * Return pointer to PD slot in the PDP.
524  */
525 static __inline
526 pdp_entry_t *
527 pmap_pd(pmap_t pmap, vm_offset_t va)
528 {
529         pml4_entry_t *pdp;
530
531         pdp = pmap_pdp(pmap, va);
532         if ((*pdp & pmap->pmap_bits[PG_V_IDX]) == 0)
533                 return NULL;
534         return (pmap_pdp_to_pd(*pdp, va));
535 }
536
537 /*
538  * Return pointer to PT slot in the PD given a pointer to the PD
539  */
540 static __inline
541 pd_entry_t *
542 pmap_pd_to_pt(pdp_entry_t pd_pte, vm_offset_t va)
543 {
544         pd_entry_t *pt;
545
546         pt = (pd_entry_t *)PHYS_TO_DMAP(pd_pte & PG_FRAME);
547         return (&pt[pmap_pt_index(va)]);
548 }
549
550 /*
551  * Return pointer to PT slot in the PD
552  *
553  * SIMPLE PMAP NOTE: Simple pmaps (embedded in objects) do not have PDPs,
554  *                   so we cannot lookup the PD via the PDP.  Instead we
555  *                   must look it up via the pmap.
556  */
557 static __inline
558 pd_entry_t *
559 pmap_pt(pmap_t pmap, vm_offset_t va)
560 {
561         pdp_entry_t *pd;
562         pv_entry_t pv;
563         vm_pindex_t pd_pindex;
564
565         if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
566                 pd_pindex = pmap_pd_pindex(va);
567                 spin_lock(&pmap->pm_spin);
568                 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pd_pindex);
569                 spin_unlock(&pmap->pm_spin);
570                 if (pv == NULL || pv->pv_m == NULL)
571                         return NULL;
572                 return (pmap_pd_to_pt(VM_PAGE_TO_PHYS(pv->pv_m), va));
573         } else {
574                 pd = pmap_pd(pmap, va);
575                 if (pd == NULL || (*pd & pmap->pmap_bits[PG_V_IDX]) == 0)
576                          return NULL;
577                 return (pmap_pd_to_pt(*pd, va));
578         }
579 }
580
581 /*
582  * Return pointer to PTE slot in the PT given a pointer to the PT
583  */
584 static __inline
585 pt_entry_t *
586 pmap_pt_to_pte(pd_entry_t pt_pte, vm_offset_t va)
587 {
588         pt_entry_t *pte;
589
590         pte = (pt_entry_t *)PHYS_TO_DMAP(pt_pte & PG_FRAME);
591         return (&pte[pmap_pte_index(va)]);
592 }
593
594 /*
595  * Return pointer to PTE slot in the PT
596  */
597 static __inline
598 pt_entry_t *
599 pmap_pte(pmap_t pmap, vm_offset_t va)
600 {
601         pd_entry_t *pt;
602
603         pt = pmap_pt(pmap, va);
604         if (pt == NULL || (*pt & pmap->pmap_bits[PG_V_IDX]) == 0)
605                  return NULL;
606         if ((*pt & pmap->pmap_bits[PG_PS_IDX]) != 0)
607                 return ((pt_entry_t *)pt);
608         return (pmap_pt_to_pte(*pt, va));
609 }
610
611 /*
612  * Of all the layers (PTE, PT, PD, PDP, PML4) the best one to cache is
613  * the PT layer.  This will speed up core pmap operations considerably.
614  *
615  * NOTE: The pmap spinlock does not need to be held but the passed-in pv
616  *       must be in a known associated state (typically by being locked when
617  *       the pmap spinlock isn't held).  We allow the race for that case.
618  */
619 static __inline
620 void
621 pv_cache(pv_entry_t pv, vm_pindex_t pindex)
622 {
623         if (pindex >= pmap_pt_pindex(0) && pindex <= pmap_pd_pindex(0))
624                 pv->pv_pmap->pm_pvhint = pv;
625 }
626
627
628 /*
629  * Return address of PT slot in PD (KVM only)
630  *
631  * Cannot be used for user page tables because it might interfere with
632  * the shared page-table-page optimization (pmap_mmu_optimize).
633  */
634 static __inline
635 pd_entry_t *
636 vtopt(vm_offset_t va)
637 {
638         uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
639                                   NPML4EPGSHIFT)) - 1);
640
641         return (PDmap + ((va >> PDRSHIFT) & mask));
642 }
643
644 /*
645  * KVM - return address of PTE slot in PT
646  */
647 static __inline
648 pt_entry_t *
649 vtopte(vm_offset_t va)
650 {
651         uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
652                                   NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
653
654         return (PTmap + ((va >> PAGE_SHIFT) & mask));
655 }
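/*
 * Rough sketch of why PTmap/PDmap work: create_pagetables() below installs
 * the kernel PML4 page into one of its own slots (PML4PML4I).  That
 * recursive entry turns PTmap/PDmap into virtual windows onto the page
 * table pages themselves, so shifting a KVA down and masking leaves just
 * the page-table indexes, which become an offset into the window.
 */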
656
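/*
 * Boot-time bump allocator: returns n zeroed, physically contiguous pages
 * starting at *firstaddr and advances *firstaddr past them.  Only usable
 * while we are still running (mostly) V=P, since the physical address is
 * bzero()'d through directly.
 */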
657 static uint64_t
658 allocpages(vm_paddr_t *firstaddr, long n)
659 {
660         uint64_t ret;
661
662         ret = *firstaddr;
663         bzero((void *)ret, n * PAGE_SIZE);
664         *firstaddr += n * PAGE_SIZE;
665         return (ret);
666 }
667
668 static
669 void
670 create_pagetables(vm_paddr_t *firstaddr)
671 {
672         long i;         /* must be 64 bits */
673         long nkpt_base;
674         long nkpt_phys;
675         int j;
676
677         /*
678          * We are running (mostly) V=P at this point
679          *
680          * Calculate NKPT - number of kernel page tables.  We have to
681          * accommodate preallocation of the vm_page_array, dump bitmap,
682          * MSGBUF_SIZE, and other stuff.  Be generous.
683          *
684          * Maxmem is in pages.
685          *
686          * ndmpdp is the number of 1GB pages we wish to map.
687          */
688         ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
689         if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
690                 ndmpdp = 4;
691         KKASSERT(ndmpdp <= NKPDPE * NPDEPG);
692
693         /*
694          * Starting at the beginning of kvm (not KERNBASE).
695          */
696         nkpt_phys = (Maxmem * sizeof(struct vm_page) + NBPDR - 1) / NBPDR;
697         nkpt_phys += (Maxmem * sizeof(struct pv_entry) + NBPDR - 1) / NBPDR;
698         nkpt_phys += ((nkpt + nkpt + 1 + NKPML4E + NKPDPE + NDMPML4E +
699                        ndmpdp) + 511) / 512;
700         nkpt_phys += 128;
701
702         /*
703          * Starting at KERNBASE - map 2G worth of page table pages.
704          * KERNBASE is offset -2G from the end of kvm.
705          */
706         nkpt_base = (NPDPEPG - KPDPI) * NPTEPG; /* typically 2 x 512 */
707
708         /*
709          * Allocate pages
710          */
711         KPTbase = allocpages(firstaddr, nkpt_base);
712         KPTphys = allocpages(firstaddr, nkpt_phys);
713         KPML4phys = allocpages(firstaddr, 1);
714         KPDPphys = allocpages(firstaddr, NKPML4E);
715         KPDphys = allocpages(firstaddr, NKPDPE);
716
717         /*
718          * Calculate the page directory base for KERNBASE,
719          * that is where we start populating the page table pages.
720          * Basically this is the end - 2.
721          */
722         KPDbase = KPDphys + ((NKPDPE - (NPDPEPG - KPDPI)) << PAGE_SHIFT);
723
724         DMPDPphys = allocpages(firstaddr, NDMPML4E);
725         if ((amd_feature & AMDID_PAGE1GB) == 0)
726                 DMPDphys = allocpages(firstaddr, ndmpdp);
727         dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
728
729         /*
730          * Fill in the underlying page table pages for the area around
731          * KERNBASE.  This remaps low physical memory to KERNBASE.
732          *
733          * Read-only from zero to physfree
734          * XXX not fully used, underneath 2M pages
735          */
736         for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
737                 ((pt_entry_t *)KPTbase)[i] = i << PAGE_SHIFT;
738                 ((pt_entry_t *)KPTbase)[i] |=
739                     pmap_bits_default[PG_RW_IDX] |
740                     pmap_bits_default[PG_V_IDX] |
741                     pmap_bits_default[PG_G_IDX];
742         }
743
744         /*
745          * Now map the initial kernel page tables.  One block of page
746          * tables is placed at the beginning of kernel virtual memory,
747          * and another block is placed at KERNBASE to map the kernel binary,
748          * data, bss, and initial pre-allocations.
749          */
750         for (i = 0; i < nkpt_base; i++) {
751                 ((pd_entry_t *)KPDbase)[i] = KPTbase + (i << PAGE_SHIFT);
752                 ((pd_entry_t *)KPDbase)[i] |=
753                     pmap_bits_default[PG_RW_IDX] |
754                     pmap_bits_default[PG_V_IDX];
755         }
756         for (i = 0; i < nkpt_phys; i++) {
757                 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
758                 ((pd_entry_t *)KPDphys)[i] |=
759                     pmap_bits_default[PG_RW_IDX] |
760                     pmap_bits_default[PG_V_IDX];
761         }
762
763         /*
764          * Map from zero to end of allocations using 2M pages as an
765          * optimization.  This will bypass some of the KPTBase pages
766          * above in the KERNBASE area.
767          */
768         for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
769                 ((pd_entry_t *)KPDbase)[i] = i << PDRSHIFT;
770                 ((pd_entry_t *)KPDbase)[i] |=
771                     pmap_bits_default[PG_RW_IDX] |
772                     pmap_bits_default[PG_V_IDX] |
773                     pmap_bits_default[PG_PS_IDX] |
774                     pmap_bits_default[PG_G_IDX];
775         }
776
777         /*
778          * And connect up the PD to the PDP.  The kernel pmap is expected
779          * to pre-populate all of its PDs.  See NKPDPE in vmparam.h.
780          */
781         for (i = 0; i < NKPDPE; i++) {
782                 ((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] =
783                                 KPDphys + (i << PAGE_SHIFT);
784                 ((pdp_entry_t *)KPDPphys)[NPDPEPG - NKPDPE + i] |=
785                     pmap_bits_default[PG_RW_IDX] |
786                     pmap_bits_default[PG_V_IDX] |
787                     pmap_bits_default[PG_U_IDX];
788         }
789
790         /*
791          * Now set up the direct map space using either 2MB or 1GB pages
792          * Preset PG_M and PG_A because demotion expects it.
793          *
794          * When filling in entries in the PD pages make sure any excess
795          * entries are set to zero as we allocated enough PD pages
796          */
797         if ((amd_feature & AMDID_PAGE1GB) == 0) {
798                 for (i = 0; i < NPDEPG * ndmpdp; i++) {
799                         ((pd_entry_t *)DMPDphys)[i] = i << PDRSHIFT;
800                         ((pd_entry_t *)DMPDphys)[i] |=
801                             pmap_bits_default[PG_RW_IDX] |
802                             pmap_bits_default[PG_V_IDX] |
803                             pmap_bits_default[PG_PS_IDX] |
804                             pmap_bits_default[PG_G_IDX] |
805                             pmap_bits_default[PG_M_IDX] |
806                             pmap_bits_default[PG_A_IDX];
807                 }
808
809                 /*
810                  * And the direct map space's PDP
811                  */
812                 for (i = 0; i < ndmpdp; i++) {
813                         ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
814                                                         (i << PAGE_SHIFT);
815                         ((pdp_entry_t *)DMPDPphys)[i] |=
816                             pmap_bits_default[PG_RW_IDX] |
817                             pmap_bits_default[PG_V_IDX] |
818                             pmap_bits_default[PG_U_IDX];
819                 }
820         } else {
821                 for (i = 0; i < ndmpdp; i++) {
822                         ((pdp_entry_t *)DMPDPphys)[i] =
823                                                 (vm_paddr_t)i << PDPSHIFT;
824                         ((pdp_entry_t *)DMPDPphys)[i] |=
825                             pmap_bits_default[PG_RW_IDX] |
826                             pmap_bits_default[PG_V_IDX] |
827                             pmap_bits_default[PG_PS_IDX] |
828                             pmap_bits_default[PG_G_IDX] |
829                             pmap_bits_default[PG_M_IDX] |
830                             pmap_bits_default[PG_A_IDX];
831                 }
832         }
833
834         /* And recursively map PML4 to itself in order to get PTmap */
835         ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
836         ((pdp_entry_t *)KPML4phys)[PML4PML4I] |=
837             pmap_bits_default[PG_RW_IDX] |
838             pmap_bits_default[PG_V_IDX] |
839             pmap_bits_default[PG_U_IDX];
840
841         /*
842          * Connect the Direct Map slots up to the PML4
843          */
844         for (j = 0; j < NDMPML4E; ++j) {
845                 ((pdp_entry_t *)KPML4phys)[DMPML4I + j] =
846                     (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) |
847                     pmap_bits_default[PG_RW_IDX] |
848                     pmap_bits_default[PG_V_IDX] |
849                     pmap_bits_default[PG_U_IDX];
850         }
851
852         /*
853          * Connect the KVA slot up to the PML4
854          */
855         ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
856         ((pdp_entry_t *)KPML4phys)[KPML4I] |=
857             pmap_bits_default[PG_RW_IDX] |
858             pmap_bits_default[PG_V_IDX] |
859             pmap_bits_default[PG_U_IDX];
860 }
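/*
 * Illustrative sizing (example numbers only): with 16GB of physical
 * memory, ndmpdp = 16, so the direct map covers 16GB and dmaplimit ends
 * up at ndmpdp << PDPSHIFT.  CPUs advertising AMDID_PAGE1GB get the DMAP
 * built from 1GB PDP entries directly; older CPUs get ndmpdp PD pages
 * filled with 2MB mappings instead.
 */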
861
862 /*
863  *      Bootstrap the system enough to run with virtual memory.
864  *
865  *      On the i386 this is called after mapping has already been enabled
866  *      and just syncs the pmap module with what has already been done.
867  *      [We can't call it easily with mapping off since the kernel is not
868  *      mapped with PA == VA, hence we would have to relocate every address
869  *      from the linked base (virtual) address "KERNBASE" to the actual
870  *      (physical) address starting relative to 0]
871  */
872 void
873 pmap_bootstrap(vm_paddr_t *firstaddr)
874 {
875         vm_offset_t va;
876         pt_entry_t *pte;
877
878         KvaStart = VM_MIN_KERNEL_ADDRESS;
879         KvaEnd = VM_MAX_KERNEL_ADDRESS;
880         KvaSize = KvaEnd - KvaStart;
881
882         avail_start = *firstaddr;
883
884         /*
885          * Create an initial set of page tables to run the kernel in.
886          */
887         create_pagetables(firstaddr);
888
889         virtual2_start = KvaStart;
890         virtual2_end = PTOV_OFFSET;
891
892         virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
893         virtual_start = pmap_kmem_choose(virtual_start);
894
895         virtual_end = VM_MAX_KERNEL_ADDRESS;
896
897         /* XXX do %cr0 as well */
898         load_cr4(rcr4() | CR4_PGE | CR4_PSE);
899         load_cr3(KPML4phys);
900
901         /*
902          * Initialize protection array.
903          */
904         i386_protection_init();
905
906         /*
907          * The kernel's pmap is statically allocated so we don't have to use
908          * pmap_create, which is unlikely to work correctly at this part of
909          * the boot sequence (XXX and which no longer exists).
910          */
911         kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
912         kernel_pmap.pm_count = 1;
913         CPUMASK_ASSALLONES(kernel_pmap.pm_active);
914         RB_INIT(&kernel_pmap.pm_pvroot);
915         spin_init(&kernel_pmap.pm_spin, "pmapbootstrap");
916         lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
917
918         /*
919          * Reserve some special page table entries/VA space for temporary
920          * mapping of pages.
921          */
922 #define SYSMAP(c, p, v, n)      \
923         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
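        /*
         * For example, SYSMAP(caddr_t, CMAP1, CADDR1, 1) below leaves
         * CADDR1 pointing at the reserved va and CMAP1 at its pte, then
         * advances va/pte past the single reserved page.
         */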
924
925         va = virtual_start;
926         pte = vtopte(va);
927
928         /*
929          * CMAP1/CMAP2 are used for zeroing and copying pages.
930          */
931         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
932
933         /*
934          * Crashdump maps.
935          */
936         SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
937
938         /*
939          * ptvmmap is used for reading arbitrary physical pages via
940          * /dev/mem.
941          */
942         SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
943
944         /*
945          * msgbufp is used to map the system message buffer.
946          * XXX msgbufmap is not used.
947          */
948         SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
949                atop(round_page(MSGBUF_SIZE)))
950
951         virtual_start = va;
952         virtual_start = pmap_kmem_choose(virtual_start);
953
954         *CMAP1 = 0;
955
956         /*
957          * PG_G is terribly broken on SMP because we IPI invltlb's in some
958          * cases rather than invl1pg.  Actually, I don't even know why it
959          * works under UP because self-referential page table mappings
960          */
961 //      pgeflag = 0;
962
963 /*
964  * Initialize the 4MB page size flag
965  */
966 //      pseflag = 0;
967 /*
968  * The 4MB page version of the initial
969  * kernel page mapping.
970  */
971         pdir4mb = 0;
972
973 #if !defined(DISABLE_PSE)
974         if (cpu_feature & CPUID_PSE) {
975                 pt_entry_t ptditmp;
976                 /*
977                  * Note that we have enabled PSE mode
978                  */
979 //              pseflag = kernel_pmap.pmap_bits[PG_PS_IDX];
980                 ptditmp = *(PTmap + x86_64_btop(KERNBASE));
981                 ptditmp &= ~(NBPDR - 1);
982                 ptditmp |= pmap_bits_default[PG_V_IDX] |
983                     pmap_bits_default[PG_RW_IDX] |
984                     pmap_bits_default[PG_PS_IDX] |
985                     pmap_bits_default[PG_U_IDX];
986 //                  pgeflag;
987                 pdir4mb = ptditmp;
988         }
989 #endif
990         cpu_invltlb();
991
992         /* Initialize the PAT MSR */
993         pmap_init_pat();
994         pmap_pinit_defaults(&kernel_pmap);
995
996         TUNABLE_INT_FETCH("machdep.pmap_fast_kernel_cpusync",
997                           &pmap_fast_kernel_cpusync);
998
999 }
1000
1001 /*
1002  * Setup the PAT MSR.
1003  */
1004 void
1005 pmap_init_pat(void)
1006 {
1007         uint64_t pat_msr;
1008         u_long cr0, cr4;
1009
1010         /*
1011          * Default values mapping PATi,PCD,PWT bits at system reset.
1012          * The default values effectively ignore the PATi bit by
1013          * repeating the encodings for 0-3 in 4-7, and map the PCD
1014          * and PWT bit combinations to the expected PAT types.
1015          */
1016         pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |        /* 000 */
1017                   PAT_VALUE(1, PAT_WRITE_THROUGH) |     /* 001 */
1018                   PAT_VALUE(2, PAT_UNCACHED) |          /* 010 */
1019                   PAT_VALUE(3, PAT_UNCACHEABLE) |       /* 011 */
1020                   PAT_VALUE(4, PAT_WRITE_BACK) |        /* 100 */
1021                   PAT_VALUE(5, PAT_WRITE_THROUGH) |     /* 101 */
1022                   PAT_VALUE(6, PAT_UNCACHED) |          /* 110 */
1023                   PAT_VALUE(7, PAT_UNCACHEABLE);        /* 111 */
1024         pat_pte_index[PAT_WRITE_BACK]   = 0;
1025         pat_pte_index[PAT_WRITE_THROUGH]= 0         | X86_PG_NC_PWT;
1026         pat_pte_index[PAT_UNCACHED]     = X86_PG_NC_PCD;
1027         pat_pte_index[PAT_UNCACHEABLE]  = X86_PG_NC_PCD | X86_PG_NC_PWT;
1028         pat_pte_index[PAT_WRITE_PROTECTED] = pat_pte_index[PAT_UNCACHEABLE];
1029         pat_pte_index[PAT_WRITE_COMBINING] = pat_pte_index[PAT_UNCACHEABLE];
1030
1031         if (cpu_feature & CPUID_PAT) {
1032                 /*
1033                  * If we support the PAT then set-up entries for
1034                  * WRITE_PROTECTED and WRITE_COMBINING using bit patterns
1035                  * 4 and 5.
1036                  */
1037                 pat_msr = (pat_msr & ~PAT_MASK(4)) |
1038                           PAT_VALUE(4, PAT_WRITE_PROTECTED);
1039                 pat_msr = (pat_msr & ~PAT_MASK(5)) |
1040                           PAT_VALUE(5, PAT_WRITE_COMBINING);
1041                 pat_pte_index[PAT_WRITE_PROTECTED] = X86_PG_PTE_PAT | 0;
1042                 pat_pte_index[PAT_WRITE_COMBINING] = X86_PG_PTE_PAT | X86_PG_NC_PWT;
1043
1044                 /*
1045                  * Then enable the PAT
1046                  */
1047
1048                 /* Disable PGE. */
1049                 cr4 = rcr4();
1050                 load_cr4(cr4 & ~CR4_PGE);
1051
1052                 /* Disable caches (CD = 1, NW = 0). */
1053                 cr0 = rcr0();
1054                 load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1055
1056                 /* Flushes caches and TLBs. */
1057                 wbinvd();
1058                 cpu_invltlb();
1059
1060                 /* Update PAT and index table. */
1061                 wrmsr(MSR_PAT, pat_msr);
1062
1063                 /* Flush caches and TLBs again. */
1064                 wbinvd();
1065                 cpu_invltlb();
1066
1067                 /* Restore caches and PGE. */
1068                 load_cr0(cr0);
1069                 load_cr4(cr4);
1070                 PatMsr = pat_msr;
1071         }
1072 }
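/*
 * The pat_pte_index[] table built above is what pmap_pinit_defaults()
 * copies into each pmap's pmap_cache_bits[].  For example, on a
 * PAT-capable CPU a PAT_WRITE_COMBINING mapping gets
 * X86_PG_PTE_PAT | X86_PG_NC_PWT in its pte, selecting PAT entry 5 which
 * was programmed to write-combining via PAT_VALUE(5, ...) above.
 */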
1073
1074 /*
1075  * Set 4mb pdir for mp startup
1076  */
1077 void
1078 pmap_set_opt(void)
1079 {
1080         if (cpu_feature & CPUID_PSE) {
1081                 load_cr4(rcr4() | CR4_PSE);
1082                 if (pdir4mb && mycpu->gd_cpuid == 0) {  /* only on BSP */
1083                         cpu_invltlb();
1084                 }
1085         }
1086 }
1087
1088 /*
1089  *      Initialize the pmap module.
1090  *      Called by vm_init, to initialize any structures that the pmap
1091  *      system needs to map virtual memory.
1092  *      pmap_init has been enhanced to support discontiguous physical
1093  *      memory in a fairly consistent way.
1094  */
1095 void
1096 pmap_init(void)
1097 {
1098         int i;
1099         int initial_pvs;
1100
1101         /*
1102          * Allocate memory for random pmap data structures.  Includes the
1103          * pv_head_table.
1104          */
1105
1106         for (i = 0; i < vm_page_array_size; i++) {
1107                 vm_page_t m;
1108
1109                 m = &vm_page_array[i];
1110                 TAILQ_INIT(&m->md.pv_list);
1111         }
1112
1113         /*
1114          * init the pv free list
1115          */
1116         initial_pvs = vm_page_array_size;
1117         if (initial_pvs < MINPV)
1118                 initial_pvs = MINPV;
1119         pvzone = &pvzone_store;
1120         pvinit = (void *)kmem_alloc(&kernel_map,
1121                                     initial_pvs * sizeof (struct pv_entry));
1122         zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
1123                   pvinit, initial_pvs);
1124
1125         /*
1126          * Now it is safe to enable pv_table recording.
1127          */
1128         pmap_initialized = TRUE;
1129 }
1130
1131 /*
1132  * Initialize the address space (zone) for the pv_entries.  Set a
1133  * high water mark so that the system can recover from excessive
1134  * numbers of pv entries.
1135  */
1136 void
1137 pmap_init2(void)
1138 {
1139         int shpgperproc = PMAP_SHPGPERPROC;
1140         int entry_max;
1141
1142         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1143         pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
1144         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1145         pv_entry_high_water = 9 * (pv_entry_max / 10);
1146
1147         /*
1148          * Subtract out pages already installed in the zone (hack)
1149          */
1150         entry_max = pv_entry_max - vm_page_array_size;
1151         if (entry_max <= 0)
1152                 entry_max = 1;
1153
1154         zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
1155 }
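/*
 * Illustrative arithmetic: with the default PMAP_SHPGPERPROC of 2000 and,
 * say, maxproc = 4000, pv_entry_max comes to 8,000,000 plus one entry per
 * physical page, with pv_entry_high_water at 90% of that.  Both can be
 * overridden via the vm.pmap.shpgperproc and vm.pmap.pv_entries tunables.
 */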
1156
1157 /*
1158  * Typically used to initialize a fictitious page by vm/device_pager.c
1159  */
1160 void
1161 pmap_page_init(struct vm_page *m)
1162 {
1163         vm_page_init(m);
1164         TAILQ_INIT(&m->md.pv_list);
1165 }
1166
1167 /***************************************************
1168  * Low level helper routines.....
1169  ***************************************************/
1170
1171 /*
1172  * This routine defines the region(s) of memory that should not be tested
1173  * for the modified bit; returns non-zero if the page should be tracked.
1174  */
1175 static __inline
1176 int
1177 pmap_track_modified(vm_pindex_t pindex)
1178 {
1179         vm_offset_t va = (vm_offset_t)pindex << PAGE_SHIFT;
1180         if ((va < clean_sva) || (va >= clean_eva)) 
1181                 return 1;
1182         else
1183                 return 0;
1184 }
1185
1186 /*
1187  * Extract the physical page address associated with the map/VA pair.
1188  * The page must be wired for this to work reliably.
1189  *
1190  * XXX for the moment we're using pv_find() instead of pv_get(), as
1191  *     callers might be expecting non-blocking operation.
1192  */
1193 vm_paddr_t 
1194 pmap_extract(pmap_t pmap, vm_offset_t va)
1195 {
1196         vm_paddr_t rtval;
1197         pv_entry_t pt_pv;
1198         pt_entry_t *ptep;
1199
1200         rtval = 0;
1201         if (va >= VM_MAX_USER_ADDRESS) {
1202                 /*
1203                  * Kernel page directories might be direct-mapped and
1204                  * there is typically no PV tracking of pte's
1205                  */
1206                 pd_entry_t *pt;
1207
1208                 pt = pmap_pt(pmap, va);
1209                 if (pt && (*pt & pmap->pmap_bits[PG_V_IDX])) {
1210                         if (*pt & pmap->pmap_bits[PG_PS_IDX]) {
1211                                 rtval = *pt & PG_PS_FRAME;
1212                                 rtval |= va & PDRMASK;
1213                         } else {
1214                                 ptep = pmap_pt_to_pte(*pt, va);
1215                                 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1216                                         rtval = *ptep & PG_FRAME;
1217                                         rtval |= va & PAGE_MASK;
1218                                 }
1219                         }
1220                 }
1221         } else {
1222                 /*
1223                  * User pages currently do not direct-map the page directory
1224                  * and some pages might not use managed PVs.  But all PT's
1225                  * will have a PV.
1226                  */
1227                 pt_pv = pv_find(pmap, pmap_pt_pindex(va));
1228                 if (pt_pv) {
1229                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1230                         if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1231                                 rtval = *ptep & PG_FRAME;
1232                                 rtval |= va & PAGE_MASK;
1233                         }
1234                         pv_drop(pt_pv);
1235                 }
1236         }
1237         return rtval;
1238 }
1239
1240 /*
1241  * Similar to extract but checks protections, SMP-friendly short-cut for
1242  * vm_fault_page[_quick]().  Can return NULL to cause the caller to
1243  * fall-through to the real fault code.
1244  *
1245  * The returned page, if not NULL, is held (and not busied).
1246  */
1247 vm_page_t
1248 pmap_fault_page_quick(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1249 {
1250         if (pmap && va < VM_MAX_USER_ADDRESS) {
1251                 pv_entry_t pt_pv;
1252                 pv_entry_t pte_pv;
1253                 pt_entry_t *ptep;
1254                 pt_entry_t req;
1255                 vm_page_t m;
1256                 int error;
1257
1258                 req = pmap->pmap_bits[PG_V_IDX] |
1259                       pmap->pmap_bits[PG_U_IDX];
1260                 if (prot & VM_PROT_WRITE)
1261                         req |= pmap->pmap_bits[PG_RW_IDX];
1262
1263                 pt_pv = pv_find(pmap, pmap_pt_pindex(va));
1264                 if (pt_pv == NULL)
1265                         return (NULL);
1266                 ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
1267                 if ((*ptep & req) != req) {
1268                         pv_drop(pt_pv);
1269                         return (NULL);
1270                 }
1271                 pte_pv = pv_get_try(pmap, pmap_pte_pindex(va), &error);
1272                 if (pte_pv && error == 0) {
1273                         m = pte_pv->pv_m;
1274                         vm_page_hold(m);
1275                         if (prot & VM_PROT_WRITE)
1276                                 vm_page_dirty(m);
1277                         pv_put(pte_pv);
1278                 } else if (pte_pv) {
1279                         pv_drop(pte_pv);
1280                         m = NULL;
1281                 } else {
1282                         m = NULL;
1283                 }
1284                 pv_drop(pt_pv);
1285                 return(m);
1286         } else {
1287                 return(NULL);
1288         }
1289 }
1290
1291 /*
1292  * Extract the physical page address associated with a kernel virtual address.
1293  */
1294 vm_paddr_t
1295 pmap_kextract(vm_offset_t va)
1296 {
1297         pd_entry_t pt;          /* pt entry in pd */
1298         vm_paddr_t pa;
1299
1300         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1301                 pa = DMAP_TO_PHYS(va);
1302         } else {
1303                 pt = *vtopt(va);
1304                 if (pt & kernel_pmap.pmap_bits[PG_PS_IDX]) {
1305                         pa = (pt & PG_PS_FRAME) | (va & PDRMASK);
1306                 } else {
1307                         /*
1308                          * Beware of a concurrent promotion that changes the
1309                          * PDE at this point!  For example, vtopte() must not
1310                          * be used to access the PTE because it would use the
1311                          * new PDE.  It is, however, safe to use the old PDE
1312                          * because the page table page is preserved by the
1313                          * promotion.
1314                          */
1315                         pa = *pmap_pt_to_pte(pt, va);
1316                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1317                 }
1318         }
1319         return pa;
1320 }
1321
1322 /***************************************************
1323  * Low level mapping routines.....
1324  ***************************************************/
1325
1326 /*
1327  * Routine: pmap_kenter
1328  * Function:
1329  *      Add a wired page to the KVA
1330  *      NOTE!  In order for the mapping to take effect you should do
1331  *      an invltlb after calling pmap_kenter().
1332  */
1333 void 
1334 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1335 {
1336         pt_entry_t *pte;
1337         pt_entry_t npte;
1338         pmap_inval_info info;
1339
1340         pmap_inval_init(&info);                         /* XXX remove */
1341         npte = pa |
1342             kernel_pmap.pmap_bits[PG_RW_IDX] |
1343             kernel_pmap.pmap_bits[PG_V_IDX];
1344 //          pgeflag;
1345         pte = vtopte(va);
1346         pmap_inval_interlock(&info, &kernel_pmap, va);  /* XXX remove */
1347         *pte = npte;
1348         pmap_inval_deinterlock(&info, &kernel_pmap);    /* XXX remove */
1349         pmap_inval_done(&info);                         /* XXX remove */
1350 }
1351
1352 /*
1353  * Routine: pmap_kenter_quick
1354  * Function:
1355  *      Similar to pmap_kenter(), except we only invalidate the
1356  *      mapping on the current CPU.
1357  */
1358 void
1359 pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1360 {
1361         pt_entry_t *pte;
1362         pt_entry_t npte;
1363
1364         npte = pa |
1365             kernel_pmap.pmap_bits[PG_RW_IDX] |
1366             kernel_pmap.pmap_bits[PG_V_IDX];
1367 //          pgeflag;
1368         pte = vtopte(va);
1369         *pte = npte;
1370         cpu_invlpg((void *)va);
1371 }
1372
1373 void
1374 pmap_kenter_sync(vm_offset_t va)
1375 {
1376         pmap_inval_info info;
1377
1378         pmap_inval_init(&info);
1379         pmap_inval_interlock(&info, &kernel_pmap, va);
1380         pmap_inval_deinterlock(&info, &kernel_pmap);
1381         pmap_inval_done(&info);
1382 }
1383
1384 void
1385 pmap_kenter_sync_quick(vm_offset_t va)
1386 {
1387         cpu_invlpg((void *)va);
1388 }
1389
1390 /*
1391  * remove a page from the kernel pagetables
1392  */
1393 void
1394 pmap_kremove(vm_offset_t va)
1395 {
1396         pt_entry_t *pte;
1397         pmap_inval_info info;
1398
1399         pmap_inval_init(&info);
1400         pte = vtopte(va);
1401         pmap_inval_interlock(&info, &kernel_pmap, va);
1402         (void)pte_load_clear(pte);
1403         pmap_inval_deinterlock(&info, &kernel_pmap);
1404         pmap_inval_done(&info);
1405 }
1406
1407 void
1408 pmap_kremove_quick(vm_offset_t va)
1409 {
1410         pt_entry_t *pte;
1411         pte = vtopte(va);
1412         (void)pte_load_clear(pte);
1413         cpu_invlpg((void *)va);
1414 }
1415
1416 /*
1417  * XXX these need to be recoded.  They are not used in any critical path.
1418  */
1419 void
1420 pmap_kmodify_rw(vm_offset_t va)
1421 {
1422         atomic_set_long(vtopte(va), kernel_pmap.pmap_bits[PG_RW_IDX]);
1423         cpu_invlpg((void *)va);
1424 }
1425
1426 /* NOT USED
1427 void
1428 pmap_kmodify_nc(vm_offset_t va)
1429 {
1430         atomic_set_long(vtopte(va), PG_N);
1431         cpu_invlpg((void *)va);
1432 }
1433 */
1434
1435 /*
1436  * Used to map a range of physical addresses into kernel virtual
1437  * address space during the low level boot, typically to map the
1438  * dump bitmap, message buffer, and vm_page_array.
1439  *
1440  * These mappings are typically made at some point after the end of the
1441  * kernel text+data.
1442  *
1443  * We could return PHYS_TO_DMAP(start) here and not allocate any KVA
1444  * via (*virtp), but then kmem from userland and kernel dumps won't
1445  * have access to the related pointers.
1446  */
1447 vm_offset_t
1448 pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
1449 {
1450         vm_offset_t va;
1451         vm_offset_t va_start;
1452
1453         /*return PHYS_TO_DMAP(start);*/
1454
1455         va_start = *virtp;
1456         va = va_start;
1457
1458         while (start < end) {
1459                 pmap_kenter_quick(va, start);
1460                 va += PAGE_SIZE;
1461                 start += PAGE_SIZE;
1462         }
1463         *virtp = va;
1464         return va_start;
1465 }
1466
1467 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)
1468
1469 /*
1470  * Remove the specified set of pages from the data and instruction caches.
1471  *
1472  * In contrast to pmap_invalidate_cache_range(), this function does not
1473  * rely on the CPU's self-snoop feature, because it is intended for use
1474  * when moving pages into a different cache domain.
1475  */
1476 void
1477 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1478 {
1479         vm_offset_t daddr, eva;
1480         int i;
1481
1482         if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1483             (cpu_feature & CPUID_CLFSH) == 0)
1484                 wbinvd();
1485         else {
1486                 cpu_mfence();
1487                 for (i = 0; i < count; i++) {
1488                         daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1489                         eva = daddr + PAGE_SIZE;
1490                         for (; daddr < eva; daddr += cpu_clflush_line_size)
1491                                 clflush(daddr);
1492                 }
1493                 cpu_mfence();
1494         }
1495 }
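/*
 * Design note: for runs of PMAP_CLFLUSH_THRESHOLD (2MB) or more, or on
 * CPUs without CLFLUSH, a full wbinvd() is used instead of flushing line
 * by line.  Smaller runs are clflush'd per cache line, bracketed by
 * cpu_mfence() because clflush is only weakly ordered with respect to
 * other stores.
 */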
1496
1497 void
1498 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
1499 {
1500         KASSERT((sva & PAGE_MASK) == 0,
1501             ("pmap_invalidate_cache_range: sva not page-aligned"));
1502         KASSERT((eva & PAGE_MASK) == 0,
1503             ("pmap_invalidate_cache_range: eva not page-aligned"));
1504
1505         if (cpu_feature & CPUID_SS) {
1506                 ; /* If "Self Snoop" is supported, do nothing. */
1507         } else {
1508                 /* Globally invalidate caches */
1509                 cpu_wbinvd_on_all_cpus();
1510         }
1511 }
1512 void
1513 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1514 {
1515         smp_invlpg_range(pmap->pm_active, sva, eva);
1516 }
1517
1518 /*
1519  * Add a list of wired pages to the kva.  This routine is only used
1520  * for temporary
1521  * kernel mappings that do not need to have
1522  * page modification or references recorded.
1523  * Note that old mappings are simply written
1524  * over.  The page *must* be wired.
1525  */
1526 void
1527 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
1528 {
1529         vm_offset_t end_va;
1530
1531         end_va = va + count * PAGE_SIZE;
1532                 
1533         while (va < end_va) {
1534                 pt_entry_t *pte;
1535
1536                 pte = vtopte(va);
1537                 *pte = VM_PAGE_TO_PHYS(*m) |
1538                     kernel_pmap.pmap_bits[PG_RW_IDX] |
1539                     kernel_pmap.pmap_bits[PG_V_IDX] |
1540                     kernel_pmap.pmap_cache_bits[(*m)->pat_mode];
1541 //              pgeflag;
1542                 cpu_invlpg((void *)va);
1543                 va += PAGE_SIZE;
1544                 m++;
1545         }
1546         smp_invltlb();
1547 }
1548
1549 /*
1550  * This routine jerks page mappings from the
1551  * kernel -- it is meant only for temporary mappings.
1552  *
1553  * MPSAFE, INTERRUPT SAFE (cluster callback)
1554  */
1555 void
1556 pmap_qremove(vm_offset_t va, int count)
1557 {
1558         vm_offset_t end_va;
1559
1560         end_va = va + count * PAGE_SIZE;
1561
1562         while (va < end_va) {
1563                 pt_entry_t *pte;
1564
1565                 pte = vtopte(va);
1566                 (void)pte_load_clear(pte);
1567                 cpu_invlpg((void *)va);
1568                 va += PAGE_SIZE;
1569         }
1570         smp_invltlb();
1571 }
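
#if 0
/*
 * Illustrative sketch only (not compiled): the usual pairing of
 * pmap_qenter()/pmap_qremove() for a temporary kernel mapping of a list
 * of wired pages at a preallocated KVA range.  The helper name and
 * arguments here are hypothetical.
 */
static void
example_temporary_mapping(vm_offset_t va, vm_page_t *mlist, int count)
{
        pmap_qenter(va, mlist, count);
        /* ... access the pages through the kernel mapping at va ... */
        pmap_qremove(va, count);
}
#endif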
1572
1573 /*
1574  * Create a new thread and optionally associate it with a (new) process.
1575  * NOTE! the new thread's cpu may not equal the current cpu.
1576  */
1577 void
1578 pmap_init_thread(thread_t td)
1579 {
1580         /* enforce pcb placement & alignment */
1581         td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
1582         td->td_pcb = (struct pcb *)((intptr_t)td->td_pcb & ~(intptr_t)0xF);
1583         td->td_savefpu = &td->td_pcb->pcb_save;
1584         td->td_sp = (char *)td->td_pcb; /* no -16 */
1585 }
1586
1587 /*
1588  * This routine directly affects the fork perf for a process.
1589  */
1590 void
1591 pmap_init_proc(struct proc *p)
1592 {
1593 }
1594
1595 static void
1596 pmap_pinit_defaults(struct pmap *pmap)
1597 {
1598         bcopy(pmap_bits_default, pmap->pmap_bits,
1599               sizeof(pmap_bits_default));
1600         bcopy(protection_codes, pmap->protection_codes,
1601               sizeof(protection_codes));
1602         bcopy(pat_pte_index, pmap->pmap_cache_bits,
1603               sizeof(pat_pte_index));
1604         pmap->pmap_cache_mask = X86_PG_NC_PWT | X86_PG_NC_PCD | X86_PG_PTE_PAT;
1605         pmap->copyinstr = std_copyinstr;
1606         pmap->copyin = std_copyin;
1607         pmap->copyout = std_copyout;
1608         pmap->fubyte = std_fubyte;
1609         pmap->subyte = std_subyte;
1610         pmap->fuword = std_fuword;
1611         pmap->suword = std_suword;
1612         pmap->suword32 = std_suword32;
1613 }
1614 /*
1615  * Initialize pmap0/vmspace0.  This pmap is not added to pmap_list because
1616  * it, and IdlePTD, represent the template used to update all other pmaps.
1617  *
1618  * On architectures where the kernel pmap is not integrated into the user
1619  * process pmap, this pmap represents the process pmap, not the kernel pmap.
1620  * Use the kernel_pmap variable to access the kernel pmap directly.
1621  */
1622 void
1623 pmap_pinit0(struct pmap *pmap)
1624 {
1625         pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
1626         pmap->pm_count = 1;
1627         CPUMASK_ASSZERO(pmap->pm_active);
1628         pmap->pm_pvhint = NULL;
1629         RB_INIT(&pmap->pm_pvroot);
1630         spin_init(&pmap->pm_spin, "pmapinit0");
1631         lwkt_token_init(&pmap->pm_token, "pmap_tok");
1632         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1633         pmap_pinit_defaults(pmap);
1634 }
1635
1636 /*
1637  * Initialize a preallocated and zeroed pmap structure,
1638  * such as one in a vmspace structure.
1639  */
1640 static void
1641 pmap_pinit_simple(struct pmap *pmap)
1642 {
1643         /*
1644          * Misc initialization
1645          */
1646         pmap->pm_count = 1;
1647         CPUMASK_ASSZERO(pmap->pm_active);
1648         pmap->pm_pvhint = NULL;
1649         pmap->pm_flags = PMAP_FLAG_SIMPLE;
1650
1651         pmap_pinit_defaults(pmap);
1652
1653         /*
1654          * Don't blow up locks/tokens on re-use (XXX fix/use drop code
1655          * for this).
1656          */
1657         if (pmap->pm_pmlpv == NULL) {
1658                 RB_INIT(&pmap->pm_pvroot);
1659                 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1660                 spin_init(&pmap->pm_spin, "pmapinitsimple");
1661                 lwkt_token_init(&pmap->pm_token, "pmap_tok");
1662         }
1663 }
1664
1665 void
1666 pmap_pinit(struct pmap *pmap)
1667 {
1668         pv_entry_t pv;
1669         int j;
1670
1671         if (pmap->pm_pmlpv) {
1672                 if (pmap->pmap_bits[TYPE_IDX] != REGULAR_PMAP) {
1673                         pmap_puninit(pmap);
1674                 }
1675         }
1676
1677         pmap_pinit_simple(pmap);
1678         pmap->pm_flags &= ~PMAP_FLAG_SIMPLE;
1679
1680         /*
1681          * No need to allocate page table space yet but we do need a valid
1682          * page directory table.
1683          */
1684         if (pmap->pm_pml4 == NULL) {
1685                 pmap->pm_pml4 =
1686                     (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
1687         }
1688
1689         /*
1690          * Allocate the page directory page, which wires it even though
1691          * it isn't being entered into some higher level page table (it
1692          * being the highest level).  If one is already cached we don't
1693          * have to do anything.
1694          */
1695         if ((pv = pmap->pm_pmlpv) == NULL) {
1696                 pv = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
1697                 pmap->pm_pmlpv = pv;
1698                 pmap_kenter((vm_offset_t)pmap->pm_pml4,
1699                             VM_PAGE_TO_PHYS(pv->pv_m));
1700                 pv_put(pv);
1701
1702                 /*
1703                  * Install DMAP and KMAP.
1704                  */
1705                 for (j = 0; j < NDMPML4E; ++j) {
1706                         pmap->pm_pml4[DMPML4I + j] =
1707                             (DMPDPphys + ((vm_paddr_t)j << PML4SHIFT)) |
1708                             pmap->pmap_bits[PG_RW_IDX] |
1709                             pmap->pmap_bits[PG_V_IDX] |
1710                             pmap->pmap_bits[PG_U_IDX];
1711                 }
1712                 pmap->pm_pml4[KPML4I] = KPDPphys |
1713                     pmap->pmap_bits[PG_RW_IDX] |
1714                     pmap->pmap_bits[PG_V_IDX] |
1715                     pmap->pmap_bits[PG_U_IDX];
1716
1717                 /*
1718                  * install self-referential address mapping entry
1719                  */
1720                 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pv->pv_m) |
1721                     pmap->pmap_bits[PG_V_IDX] |
1722                     pmap->pmap_bits[PG_RW_IDX] |
1723                     pmap->pmap_bits[PG_A_IDX] |
1724                     pmap->pmap_bits[PG_M_IDX];
1725         } else {
1726                 KKASSERT(pv->pv_m->flags & PG_MAPPED);
1727                 KKASSERT(pv->pv_m->flags & PG_WRITEABLE);
1728         }
1729         KKASSERT(pmap->pm_pml4[255] == 0);
1730         KKASSERT(RB_ROOT(&pmap->pm_pvroot) == pv);
1731         KKASSERT(pv->pv_entry.rbe_left == NULL);
1732         KKASSERT(pv->pv_entry.rbe_right == NULL);
1733 }
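
#if 0
/*
 * Illustrative sketch only (not compiled): the pmap lifecycle as driven
 * by the vmspace code.  pmap_pinit() builds the pml4 and its pv,
 * pmap_pinit2() adds the pmap to the master list scanned by
 * pmap_growkernel(), and teardown runs pmap_release() followed by
 * pmap_puninit() from the vmspace dtor.
 */
static void
example_pmap_lifecycle(struct pmap *pmap)
{
        pmap_pinit(pmap);
        pmap_pinit2(pmap);
        /* ... the pmap is used by a vmspace ... */
        pmap_release(pmap);
        pmap_puninit(pmap);
}
#endif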
1734
1735 /*
1736  * Clean up a pmap structure so it can be physically freed.  This routine
1737  * is called by the vmspace dtor function.  A great deal of pmap data is
1738  * left passively mapped to improve vmspace management so we have a bit
1739  * of cleanup work to do here.
1740  */
1741 void
1742 pmap_puninit(pmap_t pmap)
1743 {
1744         pv_entry_t pv;
1745         vm_page_t p;
1746
1747         KKASSERT(CPUMASK_TESTZERO(pmap->pm_active));
1748         if ((pv = pmap->pm_pmlpv) != NULL) {
1749                 if (pv_hold_try(pv) == 0)
1750                         pv_lock(pv);
1751                 KKASSERT(pv == pmap->pm_pmlpv);
1752                 p = pmap_remove_pv_page(pv);
1753                 pv_free(pv);
1754                 pmap_kremove((vm_offset_t)pmap->pm_pml4);
1755                 vm_page_busy_wait(p, FALSE, "pgpun");
1756                 KKASSERT(p->flags & (PG_FICTITIOUS|PG_UNMANAGED));
1757                 vm_page_unwire(p, 0);
1758                 vm_page_flag_clear(p, PG_MAPPED | PG_WRITEABLE);
1759
1760                 /*
1761                  * XXX eventually clean out PML4 static entries and
1762                  * use vm_page_free_zero()
1763                  */
1764                 vm_page_free(p);
1765                 pmap->pm_pmlpv = NULL;
1766         }
1767         if (pmap->pm_pml4) {
1768                 KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys));
1769                 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE);
1770                 pmap->pm_pml4 = NULL;
1771         }
1772         KKASSERT(pmap->pm_stats.resident_count == 0);
1773         KKASSERT(pmap->pm_stats.wired_count == 0);
1774 }
1775
1776 /*
1777  * Wire in kernel global address entries.  To avoid a race condition
1778  * between pmap initialization and pmap_growkernel, this procedure
1779  * adds the pmap to the master list (which growkernel scans to update),
1780  * then copies the template.
1781  */
1782 void
1783 pmap_pinit2(struct pmap *pmap)
1784 {
1785         spin_lock(&pmap_spin);
1786         TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
1787         spin_unlock(&pmap_spin);
1788 }
1789
1790 /*
1791  * This routine is called when various levels in the page table need to
1792  * be populated.  This routine cannot fail.
1793  *
1794  * This function returns two locked pv_entry's, one representing the
1795  * requested pv and one representing the requested pv's parent pv.  If
1796  * the pv did not previously exist it will be mapped into its parent
1797  * and wired, otherwise no additional wire count will be added.
1798  */
1799 static
1800 pv_entry_t
1801 pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp)
1802 {
1803         pt_entry_t *ptep;
1804         pv_entry_t pv;
1805         pv_entry_t pvp;
1806         vm_pindex_t pt_pindex;
1807         vm_page_t m;
1808         int isnew;
1809         int ispt;
1810
1811         /*
1812          * If the pv already exists and we aren't being asked for the
1813          * parent page table page we can just return it.  A locked+held pv
1814          * is returned.  The pv will also have a second hold related to the
1815          * pmap association that we don't have to worry about.
1816          */
1817         ispt = 0;
1818         pv = pv_alloc(pmap, ptepindex, &isnew);
1819         if (isnew == 0 && pvpp == NULL)
1820                 return(pv);
1821
1822         /*
1823          * Special case terminal PVs.  These are not page table pages so
1824          * no vm_page is allocated (the caller supplied the vm_page).  If
1825          * pvpp is non-NULL we are being asked to also return the pt_pv
1826          * for this pv.
1827          *
1828          * Note that pt_pv's are only returned for user VAs. We assert that
1829          * a pt_pv is not being requested for kernel VAs.
1830          */
1831         if (ptepindex < pmap_pt_pindex(0)) {
1832                 if (ptepindex >= NUPTE_USER)
1833                         KKASSERT(pvpp == NULL);
1834                 else
1835                         KKASSERT(pvpp != NULL);
1836                 if (pvpp) {
1837                         pt_pindex = NUPTE_TOTAL + (ptepindex >> NPTEPGSHIFT);
1838                         pvp = pmap_allocpte(pmap, pt_pindex, NULL);
1839                         if (isnew)
1840                                 vm_page_wire_quick(pvp->pv_m);
1841                         *pvpp = pvp;
1842                 } else {
1843                         pvp = NULL;
1844                 }
1845                 return(pv);
1846         }
1847
1848         /*
1849          * Non-terminal PVs allocate a VM page to represent the page table,
1850          * so we have to resolve pvp and calculate ptepindex for the pvp
1851          * and then for the page table entry index in the pvp for
1852          * fall-through.
1853          */
1854         if (ptepindex < pmap_pd_pindex(0)) {
1855                 /*
1856                  * pv is PT, pvp is PD
1857                  */
1858                 ptepindex = (ptepindex - pmap_pt_pindex(0)) >> NPDEPGSHIFT;
1859                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL;
1860                 pvp = pmap_allocpte(pmap, ptepindex, NULL);
1861                 if (!isnew)
1862                         goto notnew;
1863
1864                 /*
1865                  * PT index in PD
1866                  */
1867                 ptepindex = pv->pv_pindex - pmap_pt_pindex(0);
1868                 ptepindex &= ((1ul << NPDEPGSHIFT) - 1);
1869                 ispt = 1;
1870         } else if (ptepindex < pmap_pdp_pindex(0)) {
1871                 /*
1872                  * pv is PD, pvp is PDP
1873                  *
1874                  * SIMPLE PMAP NOTE: Simple pmaps do not allocate above
1875                  *                   the PD.
1876                  */
1877                 ptepindex = (ptepindex - pmap_pd_pindex(0)) >> NPDPEPGSHIFT;
1878                 ptepindex += NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL;
1879
1880                 if (pmap->pm_flags & PMAP_FLAG_SIMPLE) {
1881                         KKASSERT(pvpp == NULL);
1882                         pvp = NULL;
1883                 } else {
1884                         pvp = pmap_allocpte(pmap, ptepindex, NULL);
1885                 }
1886                 if (!isnew)
1887                         goto notnew;
1888
1889                 /*
1890                  * PD index in PDP
1891                  */
1892                 ptepindex = pv->pv_pindex - pmap_pd_pindex(0);
1893                 ptepindex &= ((1ul << NPDPEPGSHIFT) - 1);
1894         } else if (ptepindex < pmap_pml4_pindex()) {
1895                 /*
1896                  * pv is PDP, pvp is the root pml4 table
1897                  */
1898                 pvp = pmap_allocpte(pmap, pmap_pml4_pindex(), NULL);
1899                 if (!isnew)
1900                         goto notnew;
1901
1902                 /*
1903                  * PDP index in PML4
1904                  */
1905                 ptepindex = pv->pv_pindex - pmap_pdp_pindex(0);
1906                 ptepindex &= ((1ul << NPML4EPGSHIFT) - 1);
1907         } else {
1908                 /*
1909                  * pv represents the top-level PML4, there is no parent.
1910                  */
1911                 pvp = NULL;
1912                 if (!isnew)
1913                         goto notnew;
1914         }
1915
1916         /*
1917          * This code is only reached if isnew is TRUE and this is not a
1918          * terminal PV.  We need to allocate a vm_page for the page table
1919          * at this level and enter it into the parent page table.
1920          *
1921          * page table pages are marked PG_WRITEABLE and PG_MAPPED.
1922          */
1923         for (;;) {
1924                 m = vm_page_alloc(NULL, pv->pv_pindex,
1925                                   VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
1926                                   VM_ALLOC_INTERRUPT);
1927                 if (m)
1928                         break;
1929                 vm_wait(0);
1930         }
1931         vm_page_spin_lock(m);
1932         pmap_page_stats_adding(m);
1933         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1934         pv->pv_m = m;
1935         vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1936         vm_page_spin_unlock(m);
1937         vm_page_unmanage(m);    /* m must be spinunlocked */
1938
1939         if ((m->flags & PG_ZERO) == 0) {
1940                 pmap_zero_page(VM_PAGE_TO_PHYS(m));
1941         }
1942 #ifdef PMAP_DEBUG
1943         else {
1944                 pmap_page_assertzero(VM_PAGE_TO_PHYS(m));
1945         }
1946 #endif
1947         m->valid = VM_PAGE_BITS_ALL;
1948         vm_page_flag_clear(m, PG_ZERO);
1949         vm_page_wire(m);        /* wire for mapping in parent */
1950
1951         /*
1952          * Wire the page into pvp, bump the wire-count for pvp's page table
1953          * page.  Bump the resident_count for the pmap.  There is no pvp
1954          * for the top level, address the pm_pml4[] array directly.
1955          *
1956          * If the caller wants the parent we return it, otherwise
1957          * we just put it away.
1958          *
1959          * No interlock is needed for pte 0 -> non-zero.
1960          *
1961          * In the situation where *ptep is valid we might have an unmanaged
1962          * page table page shared from another page table which we need to
1963          * unshare before installing our private page table page.
1964          */
1965         if (pvp) {
1966                 ptep = pv_pte_lookup(pvp, ptepindex);
1967                 if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
1968                         pt_entry_t pte;
1969                         pmap_inval_info info;
1970
1971                         if (ispt == 0) {
1972                                 panic("pmap_allocpte: unexpected pte %p/%d",
1973                                       pvp, (int)ptepindex);
1974                         }
1975                         pmap_inval_init(&info);
1976                         pmap_inval_interlock(&info, pmap, (vm_offset_t)-1);
1977                         pte = pte_load_clear(ptep);
1978                         pmap_inval_deinterlock(&info, pmap);
1979                         pmap_inval_done(&info);
1980                         if (vm_page_unwire_quick(
1981                                         PHYS_TO_VM_PAGE(pte & PG_FRAME))) {
1982                                 panic("pmap_allocpte: shared pgtable "
1983                                       "pg bad wirecount");
1984                         }
1985                         atomic_add_long(&pmap->pm_stats.resident_count, -1);
1986                 } else {
1987                         vm_page_wire_quick(pvp->pv_m);
1988                 }
1989                 *ptep = VM_PAGE_TO_PHYS(m) |
1990                     (pmap->pmap_bits[PG_U_IDX] |
1991                     pmap->pmap_bits[PG_RW_IDX] |
1992                     pmap->pmap_bits[PG_V_IDX] |
1993                     pmap->pmap_bits[PG_A_IDX] |
1994                     pmap->pmap_bits[PG_M_IDX]);
1995         }
1996         vm_page_wakeup(m);
1997 notnew:
1998         if (pvpp)
1999                 *pvpp = pvp;
2000         else if (pvp)
2001                 pv_put(pvp);
2002         return (pv);
2003 }
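
#if 0
/*
 * Illustrative sketch only (not compiled): allocating (or finding) the
 * page table page covering a user virtual address.  The PD, PDP and PML4
 * levels above it are populated recursively by pmap_allocpte() as needed,
 * and the pv is returned locked+held.  The helper name is hypothetical.
 */
static void
example_allocate_pt(pmap_t pmap, vm_offset_t va)
{
        pv_entry_t pt_pv;

        pt_pv = pmap_allocpte(pmap, pmap_pt_pindex(va), NULL);
        /* ... use the locked+held pt_pv ... */
        pv_put(pt_pv);
}
#endif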
2004
2005 /*
2006  * This version of pmap_allocpte() checks for possible segment optimizations
2007  * that would allow page-table sharing.  It can be called for terminal
2008  * page or page table page ptepindex's.
2009  *
2010  * The function is called with page table page ptepindex's for fictitious
2011  * and unmanaged terminal pages.  That is, we don't want to allocate a
2012  * terminal pv, we just want the pt_pv.  pvpp is usually passed as NULL
2013  * for this case.
2014  *
2015  * This function can return a pv and *pvpp associated with the passed in pmap
2016  * OR a pv and *pvpp associated with the shared pmap.  In the latter case
2017  * an unmanaged page table page will be entered into the passed-in pmap.
2018  */
2019 static
2020 pv_entry_t
2021 pmap_allocpte_seg(pmap_t pmap, vm_pindex_t ptepindex, pv_entry_t *pvpp,
2022                   vm_map_entry_t entry, vm_offset_t va)
2023 {
2024         struct pmap_inval_info info;
2025         vm_object_t object;
2026         pmap_t obpmap;
2027         pmap_t *obpmapp;
2028         vm_offset_t b;          /* segment-aligned base of va */
2029         pv_entry_t pte_pv;      /* in original or shared pmap */
2030         pv_entry_t pt_pv;       /* in original or shared pmap */
2031         pv_entry_t proc_pd_pv;  /* in original pmap */
2032         pv_entry_t proc_pt_pv;  /* in original pmap */
2033         pv_entry_t xpv;         /* PT in shared pmap */
2034         pd_entry_t *pt;         /* PT entry in PD of original pmap */
2035         pd_entry_t opte;        /* old contents of *pt */
2036         pd_entry_t npte;        /* new contents for *pt */
2037         vm_page_t m;
2038
2039 retry:
2040         /*
2041          * Basic tests, require a non-NULL vm_map_entry, require proper
2042          * alignment and type for the vm_map_entry, require that the
2043          * underlying object already be allocated.
2044          *
2045          * We allow almost any type of object to use this optimization.
2046          * The object itself does NOT have to be sized to a multiple of the
2047          * segment size, but the memory mapping does.
2048          *
2049          * XXX don't handle devices currently, because VM_PAGE_TO_PHYS()
2050          *     won't work as expected.
2051          */
2052         if (entry == NULL ||
2053             pmap_mmu_optimize == 0 ||                   /* not enabled */
2054             ptepindex >= pmap_pd_pindex(0) ||           /* not terminal or pt */
2055             entry->inheritance != VM_INHERIT_SHARE ||   /* not shared */
2056             entry->maptype != VM_MAPTYPE_NORMAL ||      /* weird map type */
2057             entry->object.vm_object == NULL ||          /* needs VM object */
2058             entry->object.vm_object->type == OBJT_DEVICE ||     /* ick */
2059             entry->object.vm_object->type == OBJT_MGTDEVICE ||  /* ick */
2060             (entry->offset & SEG_MASK) ||               /* must be aligned */
2061             (entry->start & SEG_MASK)) {
2062                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2063         }
2064
2065         /*
2066          * Make sure the full segment can be represented.
2067          */
2068         b = va & ~(vm_offset_t)SEG_MASK;
2069         if (b < entry->start || b + SEG_SIZE > entry->end)
2070                 return(pmap_allocpte(pmap, ptepindex, pvpp));
2071
2072         /*
2073          * If the full segment can be represented dive the VM object's
2074          * shared pmap, allocating as required.
2075          */
2076         object = entry->object.vm_object;
2077
2078         if (entry->protection & VM_PROT_WRITE)
2079                 obpmapp = &object->md.pmap_rw;
2080         else
2081                 obpmapp = &object->md.pmap_ro;
2082
2083 #ifdef PMAP_DEBUG2
2084         if (pmap_enter_debug > 0) {
2085                 --pmap_enter_debug;
2086                 kprintf("pmap_allocpte_seg: va=%jx prot %08x o=%p "
2087                         "obpmapp %p %p\n",
2088                         va, entry->protection, object,
2089                         obpmapp, *obpmapp);
2090                 kprintf("pmap_allocpte_seg: entry %p %jx-%jx\n",
2091                         entry, entry->start, entry->end);
2092         }
2093 #endif
2094
2095         /*
2096          * We allocate what appears to be a normal pmap but because portions
2097          * of this pmap are shared with other unrelated pmaps we have to
2098          * set pm_active to point to all cpus.
2099          *
2100          * XXX Currently using pmap_spin to interlock the update, can't use
2101          *     vm_object_hold/drop because the token might already be held
2102          *     shared OR exclusive and we don't know.
2103          */
2104         while ((obpmap = *obpmapp) == NULL) {
2105                 obpmap = kmalloc(sizeof(*obpmap), M_OBJPMAP, M_WAITOK|M_ZERO);
2106                 pmap_pinit_simple(obpmap);
2107                 pmap_pinit2(obpmap);
2108                 spin_lock(&pmap_spin);
2109                 if (*obpmapp != NULL) {
2110                         /*
2111                          * Handle race
2112                          */
2113                         spin_unlock(&pmap_spin);
2114                         pmap_release(obpmap);
2115                         pmap_puninit(obpmap);
2116                         kfree(obpmap, M_OBJPMAP);
2117                         obpmap = *obpmapp; /* safety */
2118                 } else {
2119                         obpmap->pm_active = smp_active_mask;
2120                         *obpmapp = obpmap;
2121                         spin_unlock(&pmap_spin);
2122                 }
2123         }
2124
2125         /*
2126          * Layering is: PTE, PT, PD, PDP, PML4.  We have to return the
2127          * pte/pt using the shared pmap from the object but also adjust
2128          * the process pmap's page table page as a side effect.
2129          */
2130
2131         /*
2132          * Resolve the terminal PTE and PT in the shared pmap.  This is what
2133          * we will return.  This is true if ptepindex represents a terminal
2134          * page, otherwise pte_pv is actually the PT and pt_pv is actually
2135          * the PD.
2136          */
2137         pt_pv = NULL;
2138         pte_pv = pmap_allocpte(obpmap, ptepindex, &pt_pv);
2139         if (ptepindex >= pmap_pt_pindex(0))
2140                 xpv = pte_pv;
2141         else
2142                 xpv = pt_pv;
2143
2144         /*
2145          * Resolve the PD in the process pmap so we can properly share the
2146          * page table page.  Lock order is bottom-up (leaf first)!
2147          *
2148          * NOTE: proc_pt_pv can be NULL.
2149          */
2150         proc_pt_pv = pv_get(pmap, pmap_pt_pindex(b));
2151         proc_pd_pv = pmap_allocpte(pmap, pmap_pd_pindex(b), NULL);
2152 #ifdef PMAP_DEBUG2
2153         if (pmap_enter_debug > 0) {
2154                 --pmap_enter_debug;
2155                 kprintf("proc_pt_pv %p (wc %d) pd_pv %p va=%jx\n",
2156                         proc_pt_pv,
2157                         (proc_pt_pv ? proc_pt_pv->pv_m->wire_count : -1),
2158                         proc_pd_pv,
2159                         va);
2160         }
2161 #endif
2162
2163         /*
2164          * xpv is the page table page pv from the shared object
2165          * (for convenience), from above.
2166          *
2167          * Calculate the pte value for the PT to load into the process PD.
2168          * If we have to change it we must properly dispose of the previous
2169          * entry.
2170          */
2171         pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2172         npte = VM_PAGE_TO_PHYS(xpv->pv_m) |
2173             (pmap->pmap_bits[PG_U_IDX] |
2174             pmap->pmap_bits[PG_RW_IDX] |
2175             pmap->pmap_bits[PG_V_IDX] |
2176             pmap->pmap_bits[PG_A_IDX] |
2177             pmap->pmap_bits[PG_M_IDX]);
2178
2179         /*
2180          * Dispose of previous page table page if it was local to the
2181          * process pmap.  If the old pt is not empty we cannot dispose of it
2182          * until we clean it out.  This case should not arise very often so
2183          * it is not optimized.
2184          */
2185         if (proc_pt_pv) {
2186                 if (proc_pt_pv->pv_m->wire_count != 1) {
2187                         pv_put(proc_pd_pv);
2188                         pv_put(proc_pt_pv);
2189                         pv_put(pt_pv);
2190                         pv_put(pte_pv);
2191                         pmap_remove(pmap,
2192                                     va & ~(vm_offset_t)SEG_MASK,
2193                                     (va + SEG_SIZE) & ~(vm_offset_t)SEG_MASK);
2194                         goto retry;
2195                 }
2196
2197                 /*
2198                  * The release call will indirectly clean out *pt
2199                  */
2200                 pmap_inval_init(&info);
2201                 pmap_release_pv(&info, proc_pt_pv, proc_pd_pv);
2202                 pmap_inval_done(&info);
2203                 proc_pt_pv = NULL;
2204                 /* relookup */
2205                 pt = pv_pte_lookup(proc_pd_pv, pmap_pt_index(b));
2206         }
2207
2208         /*
2209          * Handle remaining cases.
2210          */
2211         if (*pt == 0) {
2212                 *pt = npte;
2213                 vm_page_wire_quick(xpv->pv_m);
2214                 vm_page_wire_quick(proc_pd_pv->pv_m);
2215                 atomic_add_long(&pmap->pm_stats.resident_count, 1);
2216         } else if (*pt != npte) {
2217                 pmap_inval_init(&info);
2218                 pmap_inval_interlock(&info, pmap, (vm_offset_t)-1);
2219
2220                 opte = pte_load_clear(pt);
2221                 KKASSERT(opte && opte != npte);
2222
2223                 *pt = npte;
2224                 vm_page_wire_quick(xpv->pv_m);  /* pgtable pg that is npte */
2225
2226                 /*
2227                  * Clean up opte, bump the wire_count for the process
2228                  * PD page representing the new entry if it was
2229                  * previously empty.
2230                  *
2231                  * If the entry was not previously empty and we have
2232                  * a PT in the proc pmap then opte must match that
2233                  * pt.  The proc pt must be retired (this is done
2234                  * later on in this procedure).
2235                  *
2236                  * NOTE: replacing valid pte, wire_count on proc_pd_pv
2237                  * stays the same.
2238                  */
2239                 KKASSERT(opte & pmap->pmap_bits[PG_V_IDX]);
2240                 m = PHYS_TO_VM_PAGE(opte & PG_FRAME);
2241                 if (vm_page_unwire_quick(m)) {
2242                         panic("pmap_allocpte_seg: "
2243                               "bad wire count %p",
2244                               m);
2245                 }
2246
2247                 pmap_inval_deinterlock(&info, pmap);
2248                 pmap_inval_done(&info);
2249         }
2250
2251         /*
2252          * The existing process page table was replaced and must be destroyed
2253          * here.
2254          */
2255         if (proc_pd_pv)
2256                 pv_put(proc_pd_pv);
2257         if (pvpp)
2258                 *pvpp = pt_pv;
2259         else
2260                 pv_put(pt_pv);
2261
2262         return (pte_pv);
2263 }
2264
2265 /*
2266  * Release any resources held by the given physical map.
2267  *
2268  * Called when a pmap initialized by pmap_pinit is being released.  Should
2269  * only be called if the map contains no valid mappings.
2270  *
2271  * Caller must hold pmap->pm_token
2272  */
2273 struct pmap_release_info {
2274         pmap_t  pmap;
2275         int     retry;
2276 };
2277
2278 static int pmap_release_callback(pv_entry_t pv, void *data);
2279
2280 void
2281 pmap_release(struct pmap *pmap)
2282 {
2283         struct pmap_release_info info;
2284
2285         KASSERT(CPUMASK_TESTZERO(pmap->pm_active),
2286                 ("pmap still active! %016jx",
2287                 (uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
2288
2289         spin_lock(&pmap_spin);
2290         TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
2291         spin_unlock(&pmap_spin);
2292
2293         /*
2294          * Pull pv's off the RB tree in order from low to high and release
2295          * each page.
2296          */
2297         info.pmap = pmap;
2298         do {
2299                 info.retry = 0;
2300                 spin_lock(&pmap->pm_spin);
2301                 RB_SCAN(pv_entry_rb_tree, &pmap->pm_pvroot, NULL,
2302                         pmap_release_callback, &info);
2303                 spin_unlock(&pmap->pm_spin);
2304         } while (info.retry);
2305
2306
2307         /*
2308          * One resident page (the pml4 page) should remain.
2309          * No wired pages should remain.
2310          */
2311         KKASSERT(pmap->pm_stats.resident_count ==
2312                  ((pmap->pm_flags & PMAP_FLAG_SIMPLE) ? 0 : 1));
2313
2314         KKASSERT(pmap->pm_stats.wired_count == 0);
2315 }
2316
2317 static int
2318 pmap_release_callback(pv_entry_t pv, void *data)
2319 {
2320         struct pmap_release_info *info = data;
2321         pmap_t pmap = info->pmap;
2322         int r;
2323
2324         if (pv_hold_try(pv)) {
2325                 spin_unlock(&pmap->pm_spin);
2326         } else {
2327                 spin_unlock(&pmap->pm_spin);
2328                 pv_lock(pv);
2329         }
2330         if (pv->pv_pmap != pmap) {
2331                 pv_put(pv);
2332                 spin_lock(&pmap->pm_spin);
2333                 info->retry = 1;
2334                 return(-1);
2335         }
2336         r = pmap_release_pv(NULL, pv, NULL);
2337         spin_lock(&pmap->pm_spin);
2338         return(r);
2339 }
2340
2341 /*
2342  * Called with held (i.e. also locked) pv.  This function will dispose of
2343  * the lock along with the pv.
2344  *
2345  * If the caller already holds the locked parent page table for pv it
2346  * must pass it as pvp, allowing us to avoid a deadlock, else it can
2347  * pass NULL for pvp.
2348  */
2349 static int
2350 pmap_release_pv(struct pmap_inval_info *info, pv_entry_t pv, pv_entry_t pvp)
2351 {
2352         vm_page_t p;
2353
2354         /*
2355          * The pmap is currently not spinlocked, pv is held+locked.
2356          * Remove the pv's page from its parent's page table.  The
2357          * parent's page table page's wire_count will be decremented.
2358          *
2359          * This will clean out the pte at any level of the page table.
2360          * If info is not NULL the appropriate invlpg/invltlb/smp
2361          * invalidation will be made.
2362          */
2363         pmap_remove_pv_pte(pv, pvp, info);
2364
2365         /*
2366          * Terminal pvs are unhooked from their vm_pages.  Because
2367          * terminal pages aren't page table pages they aren't wired
2368          * by us, so we have to be sure not to unwire them either.
2369          */
2370         if (pv->pv_pindex < pmap_pt_pindex(0)) {
2371                 pmap_remove_pv_page(pv);
2372                 goto skip;
2373         }
2374
2375         /*
2376          * We leave the top-level page table page cached, wired, and
2377          * mapped in the pmap until the dtor function (pmap_puninit())
2378          * gets called.
2379          *
2380          * Since we are leaving the top-level pv intact we need
2381          * to break out of what would otherwise be an infinite loop.
2382          */
2383         if (pv->pv_pindex == pmap_pml4_pindex()) {
2384                 pv_put(pv);
2385                 return(-1);
2386         }
2387
2388         /*
2389          * For page table pages (other than the top-level page),
2390          * remove and free the vm_page.  The representative mapping
2391          * removed above by pmap_remove_pv_pte() did not undo the
2392          * last wire_count so we have to do that as well.
2393          */
2394         p = pmap_remove_pv_page(pv);
2395         vm_page_busy_wait(p, FALSE, "pmaprl");
2396         if (p->wire_count != 1) {
2397                 kprintf("pv_pindex %016lx: p->wire_count was %d\n",
2398                         pv->pv_pindex, p->wire_count);
2399         }
2400         KKASSERT(p->wire_count == 1);
2401         KKASSERT(p->flags & PG_UNMANAGED);
2402
2403         vm_page_unwire(p, 0);
2404         KKASSERT(p->wire_count == 0);
2405
2406         /*
2407          * Theoretically this page, if not the pml4 page, should contain
2408          * all-zeros.  But it's just too dangerous to mark it PG_ZERO.  Free
2409          * normally.
2410          */
2411         vm_page_free(p);
2412 skip:
2413         pv_free(pv);
2414         return 0;
2415 }
2416
2417 /*
2418  * This function will remove the pte associated with a pv from its parent.
2419  * Terminal pv's are supported.  The removal will be interlocked if info
2420  * is non-NULL.  The caller must dispose of pv instead of just unlocking
2421  * it.
2422  *
2423  * The wire count will be dropped on the parent page table.  The wire
2424  * count on the page being removed (pv->pv_m) from the parent page table
2425  * is NOT touched.  Note that terminal pages will not have any additional
2426  * wire counts while page table pages will have at least one representing
2427  * the mapping, plus others representing sub-mappings.
2428  *
2429  * NOTE: Cannot be called on kernel page table pages, only KVM terminal
2430  *       pages and user page table and terminal pages.
2431  *
2432  * The pv must be locked.
2433  *
2434  * XXX must lock parent pv's if they exist to remove pte XXX
2435  */
2436 static
2437 void
2438 pmap_remove_pv_pte(pv_entry_t pv, pv_entry_t pvp, struct pmap_inval_info *info)
2439 {
2440         vm_pindex_t ptepindex = pv->pv_pindex;
2441         pmap_t pmap = pv->pv_pmap;
2442         vm_page_t p;
2443         int gotpvp = 0;
2444
2445         KKASSERT(pmap);
2446
2447         if (ptepindex == pmap_pml4_pindex()) {
2448                 /*
2449                  * We are the top level pml4 table, there is no parent.
2450                  */
2451                 p = pmap->pm_pmlpv->pv_m;
2452         } else if (ptepindex >= pmap_pdp_pindex(0)) {
2453                 /*
2454                  * Remove a PDP page from the pml4e.  This can only occur
2455                  * with user page tables.  We do not have to lock the
2456                  * pml4 PV so just ignore pvp.
2457                  */
2458                 vm_pindex_t pml4_pindex;
2459                 vm_pindex_t pdp_index;
2460                 pml4_entry_t *pdp;
2461
2462                 pdp_index = ptepindex - pmap_pdp_pindex(0);
2463                 if (pvp == NULL) {
2464                         pml4_pindex = pmap_pml4_pindex();
2465                         pvp = pv_get(pv->pv_pmap, pml4_pindex);
2466                         KKASSERT(pvp);
2467                         gotpvp = 1;
2468                 }
2469                 pdp = &pmap->pm_pml4[pdp_index & ((1ul << NPML4EPGSHIFT) - 1)];
2470                 KKASSERT((*pdp & pmap->pmap_bits[PG_V_IDX]) != 0);
2471                 p = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
2472                 if (info) {
2473                         pmap_inval_interlock(info, pmap, (vm_offset_t)-1);
2474                         pte_load_clear(pdp);
2475                         pmap_inval_deinterlock(info, pmap);
2476                 } else {
2477                         *pdp = 0;
2478                 }
2479         } else if (ptepindex >= pmap_pd_pindex(0)) {
2480                 /*
2481                  * Remove a PD page from the pdp
2482                  *
2483          * SIMPLE PMAP NOTE: Non-existent pvp's are ok in the case
2484                  *                   of a simple pmap because it stops at
2485                  *                   the PD page.
2486                  */
2487                 vm_pindex_t pdp_pindex;
2488                 vm_pindex_t pd_index;
2489                 pdp_entry_t *pd;
2490
2491                 pd_index = ptepindex - pmap_pd_pindex(0);
2492
2493                 if (pvp == NULL) {
2494                         pdp_pindex = NUPTE_TOTAL + NUPT_TOTAL + NUPD_TOTAL +
2495                                      (pd_index >> NPML4EPGSHIFT);
2496                         pvp = pv_get(pv->pv_pmap, pdp_pindex);
2497                         if (pvp)
2498                                 gotpvp = 1;
2499                 }
2500                 if (pvp) {
2501                         pd = pv_pte_lookup(pvp, pd_index &
2502                                                 ((1ul << NPDPEPGSHIFT) - 1));
2503                         KKASSERT((*pd & pmap->pmap_bits[PG_V_IDX]) != 0);
2504                         p = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
2505                         if (info) {
2506                                 pmap_inval_interlock(info, pmap,
2507                                                      (vm_offset_t)-1);
2508                                 pte_load_clear(pd);
2509                                 pmap_inval_deinterlock(info, pmap);
2510                         } else {
2511                                 *pd = 0;
2512                         }
2513                 } else {
2514                         KKASSERT(pmap->pm_flags & PMAP_FLAG_SIMPLE);
2515                         p = pv->pv_m;           /* degenerate test later */
2516                 }
2517         } else if (ptepindex >= pmap_pt_pindex(0)) {
2518                 /*
2519          * Remove a PT page from the pd
2520                  */
2521                 vm_pindex_t pd_pindex;
2522                 vm_pindex_t pt_index;
2523                 pd_entry_t *pt;
2524
2525                 pt_index = ptepindex - pmap_pt_pindex(0);
2526
2527                 if (pvp == NULL) {
2528                         pd_pindex = NUPTE_TOTAL + NUPT_TOTAL +
2529                                     (pt_index >> NPDPEPGSHIFT);
2530                         pvp = pv_get(pv->pv_pmap, pd_pindex);
2531                         KKASSERT(pvp);
2532                         gotpvp = 1;
2533                 }
2534                 pt = pv_pte_lookup(pvp, pt_index & ((1ul << NPDPEPGSHIFT) - 1));
2535                 KKASSERT((*pt & pmap->pmap_bits[PG_V_IDX]) != 0);
2536                 p = PHYS_TO_VM_PAGE(*pt & PG_FRAME);
2537                 if (info) {
2538                         pmap_inval_interlock(info, pmap, (vm_offset_t)-1);
2539                         pte_load_clear(pt);
2540                         pmap_inval_deinterlock(info, pmap);
2541                 } else {
2542                         *pt = 0;
2543                 }
2544         } else {
2545                 /*
2546                  * Remove a PTE from the PT page
2547                  *
2548                  * NOTE: pv's must be locked bottom-up to avoid deadlocking.
2549                  *       pv is a pte_pv so we can safely lock pt_pv.
2550                  *
2551                  * NOTE: FICTITIOUS pages may have multiple physical mappings
2552                  *       so PHYS_TO_VM_PAGE() will not necessarily work for
2553                  *       terminal ptes.
2554                  */
2555                 vm_pindex_t pt_pindex;
2556                 pt_entry_t *ptep;
2557                 pt_entry_t pte;
2558                 vm_offset_t va;
2559
2560                 pt_pindex = ptepindex >> NPTEPGSHIFT;
2561                 va = (vm_offset_t)ptepindex << PAGE_SHIFT;
2562
2563                 if (ptepindex >= NUPTE_USER) {
2564                         ptep = vtopte(ptepindex << PAGE_SHIFT);
2565                         KKASSERT(pvp == NULL);
2566                 } else {
2567                         if (pvp == NULL) {
2568                                 pt_pindex = NUPTE_TOTAL +
2569                                             (ptepindex >> NPDPEPGSHIFT);
2570                                 pvp = pv_get(pv->pv_pmap, pt_pindex);
2571                                 KKASSERT(pvp);
2572                                 gotpvp = 1;
2573                         }
2574                         ptep = pv_pte_lookup(pvp, ptepindex &
2575                                                   ((1ul << NPDPEPGSHIFT) - 1));
2576                 }
2577
2578                 if (info)
2579                         pmap_inval_interlock(info, pmap, va);
2580                 pte = pte_load_clear(ptep);
2581                 if (info)
2582                         pmap_inval_deinterlock(info, pmap);
2583                 else
2584                         cpu_invlpg((void *)va);
2585
2586                 /*
2587                  * Now update the vm_page_t
2588                  */
2589                 if ((pte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) !=
2590                     (pmap->pmap_bits[PG_MANAGED_IDX]|pmap->pmap_bits[PG_V_IDX])) {
2591                         kprintf("remove_pte badpte %016lx %016lx %d\n",
2592                                 pte, pv->pv_pindex,
2593                                 pv->pv_pindex < pmap_pt_pindex(0));
2594                 }
2595                 /* PHYS_TO_VM_PAGE() will not work for FICTITIOUS pages */
2596                 /*KKASSERT((pte & (PG_MANAGED|PG_V)) == (PG_MANAGED|PG_V));*/
2597                 if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
2598                         p = pv->pv_m;
2599                 else
2600                         p = PHYS_TO_VM_PAGE(pte & PG_FRAME);
2601                 /* p = pv->pv_m; */
2602
2603                 if (pte & pmap->pmap_bits[PG_M_IDX]) {
2604                         if (pmap_track_modified(ptepindex))
2605                                 vm_page_dirty(p);
2606                 }
2607                 if (pte & pmap->pmap_bits[PG_A_IDX]) {
2608                         vm_page_flag_set(p, PG_REFERENCED);
2609                 }
2610                 if (pte & pmap->pmap_bits[PG_W_IDX])
2611                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
2612                 if (pte & pmap->pmap_bits[PG_G_IDX])
2613                         cpu_invlpg((void *)va);
2614         }
2615
2616         /*
2617          * Unwire the parent page table page.  The wire_count cannot go below
2618          * 1 here because the parent page table page is itself still mapped.
2619          *
2620          * XXX remove the assertions later.
2621          */
2622         KKASSERT(pv->pv_m == p);
2623         if (pvp && vm_page_unwire_quick(pvp->pv_m))
2624                 panic("pmap_remove_pv_pte: Insufficient wire_count");
2625
2626         if (gotpvp)
2627                 pv_put(pvp);
2628 }
2629
2630 /*
2631  * Remove the vm_page association to a pv.  The pv must be locked.
2632  */
2633 static
2634 vm_page_t
2635 pmap_remove_pv_page(pv_entry_t pv)
2636 {
2637         vm_page_t m;
2638
2639         m = pv->pv_m;
2640         KKASSERT(m);
2641         vm_page_spin_lock(m);
2642         pv->pv_m = NULL;
2643         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2644         pmap_page_stats_deleting(m);
2645         /*
2646         if (m->object)
2647                 atomic_add_int(&m->object->agg_pv_list_count, -1);
2648         */
2649         if (TAILQ_EMPTY(&m->md.pv_list))
2650                 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
2651         vm_page_spin_unlock(m);
2652         return(m);
2653 }
2654
2655 /*
2656  * Grow the number of kernel page table entries, if needed.
2657  *
2658  * This routine is always called to validate any address space
2659  * beyond KERNBASE (for kldloads).  kernel_vm_end only governs the address
2660  * space below KERNBASE.
2661  */
2662 void
2663 pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
2664 {
2665         vm_paddr_t paddr;
2666         vm_offset_t ptppaddr;
2667         vm_page_t nkpg;
2668         pd_entry_t *pt, newpt;
2669         pdp_entry_t newpd;
2670         int update_kernel_vm_end;
2671
2672         /*
2673          * bootstrap kernel_vm_end on first real VM use
2674          */
2675         if (kernel_vm_end == 0) {
2676                 kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
2677                 nkpt = 0;
2678                 while ((*pmap_pt(&kernel_pmap, kernel_vm_end) & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
2679                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
2680                                         ~(PAGE_SIZE * NPTEPG - 1);
2681                         nkpt++;
2682                         if (kernel_vm_end - 1 >= kernel_map.max_offset) {
2683                                 kernel_vm_end = kernel_map.max_offset;
2684                                 break;                       
2685                         }
2686                 }
2687         }
2688
2689         /*
2690          * Fill in the gaps.  kernel_vm_end is only adjusted for ranges
2691          * below KERNBASE.  Ranges above KERNBASE are kldloaded and we
2692          * do not want to force-fill 128G worth of page tables.
2693          */
2694         if (kstart < KERNBASE) {
2695                 if (kstart > kernel_vm_end)
2696                         kstart = kernel_vm_end;
2697                 KKASSERT(kend <= KERNBASE);
2698                 update_kernel_vm_end = 1;
2699         } else {
2700                 update_kernel_vm_end = 0;
2701         }
2702
2703         kstart = rounddown2(kstart, PAGE_SIZE * NPTEPG);
2704         kend = roundup2(kend, PAGE_SIZE * NPTEPG);
2705
2706         if (kend - 1 >= kernel_map.max_offset)
2707                 kend = kernel_map.max_offset;
2708
2709         while (kstart < kend) {
2710                 pt = pmap_pt(&kernel_pmap, kstart);
2711                 if (pt == NULL) {
2712                         /* We need a new PDP entry */
2713                         nkpg = vm_page_alloc(NULL, nkpt,
2714                                              VM_ALLOC_NORMAL |
2715                                              VM_ALLOC_SYSTEM |
2716                                              VM_ALLOC_INTERRUPT);
2717                         if (nkpg == NULL) {
2718                                 panic("pmap_growkernel: no memory to grow "
2719                                       "kernel");
2720                         }
2721                         paddr = VM_PAGE_TO_PHYS(nkpg);
2722                         if ((nkpg->flags & PG_ZERO) == 0)
2723                                 pmap_zero_page(paddr);
2724                         vm_page_flag_clear(nkpg, PG_ZERO);
2725                         newpd = (pdp_entry_t)
2726                             (paddr |
2727                             kernel_pmap.pmap_bits[PG_V_IDX] |
2728                             kernel_pmap.pmap_bits[PG_RW_IDX] |
2729                             kernel_pmap.pmap_bits[PG_A_IDX] |
2730                             kernel_pmap.pmap_bits[PG_M_IDX]);
2731                         *pmap_pd(&kernel_pmap, kstart) = newpd;
2732                         nkpt++;
2733                         continue; /* try again */
2734                 }
2735                 if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) {
2736                         kstart = (kstart + PAGE_SIZE * NPTEPG) &
2737                                  ~(PAGE_SIZE * NPTEPG - 1);
2738                         if (kstart - 1 >= kernel_map.max_offset) {
2739                                 kstart = kernel_map.max_offset;
2740                                 break;                       
2741                         }
2742                         continue;
2743                 }
2744
2745                 /*
2746                  * This index is bogus, but out of the way
2747                  */
2748                 nkpg = vm_page_alloc(NULL, nkpt,
2749                                      VM_ALLOC_NORMAL |
2750                                      VM_ALLOC_SYSTEM |
2751                                      VM_ALLOC_INTERRUPT);
2752                 if (nkpg == NULL)
2753                         panic("pmap_growkernel: no memory to grow kernel");
2754
2755                 vm_page_wire(nkpg);
2756                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
2757                 pmap_zero_page(ptppaddr);
2758                 vm_page_flag_clear(nkpg, PG_ZERO);
2759                 newpt = (pd_entry_t) (ptppaddr |
2760                     kernel_pmap.pmap_bits[PG_V_IDX] |
2761                     kernel_pmap.pmap_bits[PG_RW_IDX] |
2762                     kernel_pmap.pmap_bits[PG_A_IDX] |
2763                     kernel_pmap.pmap_bits[PG_M_IDX]);
2764                 *pmap_pt(&kernel_pmap, kstart) = newpt;
2765                 nkpt++;
2766
2767                 kstart = (kstart + PAGE_SIZE * NPTEPG) &
2768                           ~(PAGE_SIZE * NPTEPG - 1);
2769
2770                 if (kstart - 1 >= kernel_map.max_offset) {
2771                         kstart = kernel_map.max_offset;
2772                         break;                       
2773                 }
2774         }
2775
2776         /*
2777          * Only update kernel_vm_end for areas below KERNBASE.
2778          */
2779         if (update_kernel_vm_end && kernel_vm_end < kstart)
2780                 kernel_vm_end = kstart;
2781 }
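
#if 0
/*
 * Illustrative sketch only (not compiled): pmap_growkernel() operates on
 * whole page-table-page granules of PAGE_SIZE * NPTEPG bytes (2MB with
 * 4K pages).  This is the same rounding the function applies internally
 * to kstart/kend.
 */
static void
example_growkernel_rounding(vm_offset_t kstart, vm_offset_t kend)
{
        kstart = rounddown2(kstart, PAGE_SIZE * NPTEPG);
        kend = roundup2(kend, PAGE_SIZE * NPTEPG);
        pmap_growkernel(kstart, kend);
}
#endif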
2782
2783 /*
2784  *      Add a reference to the specified pmap.
2785  */
2786 void
2787 pmap_reference(pmap_t pmap)
2788 {
2789         if (pmap != NULL) {
2790                 lwkt_gettoken(&pmap->pm_token);
2791                 ++pmap->pm_count;
2792                 lwkt_reltoken(&pmap->pm_token);
2793         }
2794 }
2795
2796 /***************************************************
2797  * page management routines.
2798  ***************************************************/
2799
2800 /*
2801  * Hold a pv without locking it
2802  */
2803 static void
2804 pv_hold(pv_entry_t pv)
2805 {
2806         atomic_add_int(&pv->pv_hold, 1);
2807 }
2808
2809 /*
2810  * Hold and attempt to lock a pv_entry, preventing its destruction.  TRUE
2811  * is returned if the pv was successfully locked, FALSE if it wasn't.  The
2812  * caller must dispose of the pv properly in either case.
2813  *
2814  * Either the pmap->pm_spin or the related vm_page_spin (if traversing a
2815  * pv list via its page) must be held by the caller.
2816  */
2817 static int
2818 _pv_hold_try(pv_entry_t pv PMAP_DEBUG_DECL)
2819 {
2820         u_int count;
2821
2822         /*
2823          * Critical path shortcut expects pv to already have one ref
2824          * (for the pv->pv_pmap).
2825          */
2826         if (atomic_cmpset_int(&pv->pv_hold, 1, PV_HOLD_LOCKED | 2)) {
2827 #ifdef PMAP_DEBUG
2828                 pv->pv_func = func;
2829                 pv->pv_line = lineno;
2830 #endif
2831                 return TRUE;
2832         }
2833
2834         for (;;) {
2835                 count = pv->pv_hold;
2836                 cpu_ccfence();
2837                 if ((count & PV_HOLD_LOCKED) == 0) {
2838                         if (atomic_cmpset_int(&pv->pv_hold, count,
2839                                               (count + 1) | PV_HOLD_LOCKED)) {
2840 #ifdef PMAP_DEBUG
2841                                 pv->pv_func = func;
2842                                 pv->pv_line = lineno;
2843 #endif
2844                                 return TRUE;
2845                         }
2846                 } else {
2847                         if (atomic_cmpset_int(&pv->pv_hold, count, count + 1))
2848                                 return FALSE;
2849                 }
2850                 /* retry */
2851         }
2852 }
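
#if 0
/*
 * Illustrative sketch only (not compiled): pv->pv_hold packs the hold
 * count (PV_HOLD_MASK bits) together with the PV_HOLD_LOCKED flag, so a
 * single atomic_cmpset_int() can bump the hold and acquire the lock at
 * the same time.  The fast path takes a pv holding only its pmap
 * association (pv_hold == 1) straight to (PV_HOLD_LOCKED | 2).
 */
static int
example_fast_path_lock(pv_entry_t pv)
{
        return (atomic_cmpset_int(&pv->pv_hold, 1, PV_HOLD_LOCKED | 2));
}
#endif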
2853
2854 /*
2855  * Drop a previously held pv_entry which could not be locked, allowing its
2856  * destruction.
2857  *
2858  * Must not be called with a spinlock held as we might zfree() the pv if it
2859  * is no longer associated with a pmap and this was the last hold count.
2860  */
2861 static void
2862 pv_drop(pv_entry_t pv)
2863 {
2864         u_int count;
2865
2866         for (;;) {
2867                 count = pv->pv_hold;
2868                 cpu_ccfence();
2869                 KKASSERT((count & PV_HOLD_MASK) > 0);
2870                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) !=
2871                          (PV_HOLD_LOCKED | 1));
2872                 if (atomic_cmpset_int(&pv->pv_hold, count, count - 1)) {
2873                         if ((count & PV_HOLD_MASK) == 1) {
2874 #ifdef PMAP_DEBUG2
2875                                 if (pmap_enter_debug > 0) {
2876                                         --pmap_enter_debug;
2877                                         kprintf("pv_drop: free pv %p\n", pv);
2878                                 }
2879 #endif
2880                                 KKASSERT(count == 1);
2881                                 KKASSERT(pv->pv_pmap == NULL);
2882                                 zfree(pvzone, pv);
2883                         }
2884                         return;
2885                 }
2886                 /* retry */
2887         }
2888 }
2889
2890 /*
2891  * Find or allocate the requested PV entry, returning a locked, held pv.
2892  *
2893  * If (*isnew) is non-zero, the returned pv will have two hold counts, one
2894  * for the caller and one representing the pmap and vm_page association.
2895  *
2896  * If (*isnew) is zero, the returned pv will have only one hold count.
2897  *
2898  * Since both associations can only be adjusted while the pv is locked,
2899  * together they represent just one additional hold.
2900  */
2901 static
2902 pv_entry_t
2903 _pv_alloc(pmap_t pmap, vm_pindex_t pindex, int *isnew PMAP_DEBUG_DECL)
2904 {
2905         pv_entry_t pv;
2906         pv_entry_t pnew = NULL;
2907
2908         spin_lock(&pmap->pm_spin);
2909         for (;;) {
2910                 if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) {
2911                         pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot,
2912                                                         pindex);
2913                 }
2914                 if (pv == NULL) {
2915                         if (pnew == NULL) {
2916                                 spin_unlock(&pmap->pm_spin);
2917                                 pnew = zalloc(pvzone);
2918                                 spin_lock(&pmap->pm_spin);
2919                                 continue;
2920                         }
2921                         pnew->pv_pmap = pmap;
2922                         pnew->pv_pindex = pindex;
2923                         pnew->pv_hold = PV_HOLD_LOCKED | 2;
2924 #ifdef PMAP_DEBUG
2925                         pnew->pv_func = func;
2926                         pnew->pv_line = lineno;
2927 #endif
2928                         pv_entry_rb_tree_RB_INSERT(&pmap->pm_pvroot, pnew);
2929                         ++pmap->pm_generation;
2930                         atomic_add_long(&pmap->pm_stats.resident_count, 1);
2931                         spin_unlock(&pmap->pm_spin);
2932                         *isnew = 1;
2933                         return(pnew);
2934                 }
2935                 if (pnew) {
2936                         spin_unlock(&pmap->pm_spin);
2937                         zfree(pvzone, pnew);
2938                         pnew = NULL;
2939                         spin_lock(&pmap->pm_spin);
2940                         continue;
2941                 }
2942                 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
2943                         spin_unlock(&pmap->pm_spin);
2944                 } else {
2945                         spin_unlock(&pmap->pm_spin);
2946                         _pv_lock(pv PMAP_DEBUG_COPY);
2947                 }
2948                 if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) {
2949                         *isnew = 0;
2950                         return(pv);
2951                 }
2952                 pv_put(pv);
2953                 spin_lock(&pmap->pm_spin);
2954         }
2955 }
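
/*
 * Illustrative usage sketch only (not part of this file's control flow),
 * assuming the pv_alloc() convenience wrapper used elsewhere in this file
 * and a caller-supplied 'pmap' and 'pindex'.  The returned pv is locked
 * and held; pv_put() drops the lock and the caller's hold while the pmap
 * association retains its own hold:
 *
 *	int isnew;
 *	pv_entry_t pv;
 *
 *	pv = pv_alloc(pmap, pindex, &isnew);
 *	if (isnew) {
 *		(set up pv->pv_m and related state, as pmap_allocpte() does)
 *	}
 *	pv_put(pv);
 */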
2956
2957 /*
2958  * Find the requested PV entry, returning a locked+held pv or NULL
2959  */
2960 static
2961 pv_entry_t
2962 _pv_get(pmap_t pmap, vm_pindex_t pindex PMAP_DEBUG_DECL)
2963 {
2964         pv_entry_t pv;
2965
2966         spin_lock(&pmap->pm_spin);
2967         for (;;) {
2968                 /*
2969                  * Shortcut cache
2970                  */
2971                 if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex) {
2972                         pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot,
2973                                                         pindex);
2974                 }
2975                 if (pv == NULL) {
2976                         spin_unlock(&pmap->pm_spin);
2977                         return NULL;
2978                 }
2979                 if (_pv_hold_try(pv PMAP_DEBUG_COPY)) {
2980                         spin_unlock(&pmap->pm_spin);
2981                 } else {
2982                         spin_unlock(&pmap->pm_spin);
2983                         _pv_lock(pv PMAP_DEBUG_COPY);
2984                 }
2985                 if (pv->pv_pmap == pmap && pv->pv_pindex == pindex) {
2986                         pv_cache(pv, pindex);
2987                         return(pv);
2988                 }
2989                 pv_put(pv);
2990                 spin_lock(&pmap->pm_spin);
2991         }
2992 }
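
/*
 * Illustrative pairing (a sketch, not invoked anywhere): _pv_get(), via
 * the pv_get() wrapper used throughout this file, returns a locked+held
 * pv or NULL, and each successful lookup is balanced by a pv_put():
 *
 *	pv = pv_get(pmap, pmap_pt_pindex(va));
 *	if (pv) {
 *		(pv->pv_m and the pmap association are stable here)
 *		pv_put(pv);
 *	}
 */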
2993
2994 /*
2995  * Lookup, hold, and attempt to lock (pmap,pindex).
2996  *
2997  * If the entry does not exist NULL is returned and *errorp is set to 0
2998  *
2999  * If the entry exists and could be successfully locked it is returned and
3000  * *errorp is set to 0.
3001  *
3002  * If the entry exists but could NOT be successfully locked it is returned
3003  * held and *errorp is set to 1.
3004  */
3005 static
3006 pv_entry_t
3007 pv_get_try(pmap_t pmap, vm_pindex_t pindex, int *errorp)
3008 {
3009         pv_entry_t pv;
3010
3011         spin_lock_shared(&pmap->pm_spin);
3012         if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex)
3013                 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
3014         if (pv == NULL) {
3015                 spin_unlock_shared(&pmap->pm_spin);
3016                 *errorp = 0;
3017                 return NULL;
3018         }
3019         if (pv_hold_try(pv)) {
3020                 pv_cache(pv, pindex);
3021                 spin_unlock_shared(&pmap->pm_spin);
3022                 *errorp = 0;
3023                 KKASSERT(pv->pv_pmap == pmap && pv->pv_pindex == pindex);
3024                 return(pv);     /* lock succeeded */
3025         }
3026         spin_unlock_shared(&pmap->pm_spin);
3027         *errorp = 1;
3028         return (pv);            /* lock failed */
3029 }
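
/*
 * Illustrative caller pattern (sketch only; the real consumer is
 * pmap_scan_callback() further below).  When *errorp is non-zero the pv
 * comes back held but not locked, so the caller releases its other pv
 * locks, blocks on the contested pv with pv_lock(), and retries:
 *
 *	pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva), &error);
 *	if (error) {
 *		pv_put(pt_pv);		(release conflicting locks first)
 *		pv_lock(pte_pv);	(safe to block now)
 *		pv_put(pte_pv);
 *		(re-acquire pt_pv and retry the lookup)
 *	}
 */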
3030
3031 /*
3032  * Find the requested PV entry, returning a held pv or NULL
3033  */
3034 static
3035 pv_entry_t
3036 pv_find(pmap_t pmap, vm_pindex_t pindex)
3037 {
3038         pv_entry_t pv;
3039
3040         spin_lock_shared(&pmap->pm_spin);
3041
3042         if ((pv = pmap->pm_pvhint) == NULL || pv->pv_pindex != pindex)
3043                 pv = pv_entry_rb_tree_RB_LOOKUP(&pmap->pm_pvroot, pindex);
3044         if (pv == NULL) {
3045                 spin_unlock_shared(&pmap->pm_spin);
3046                 return NULL;
3047         }
3048         pv_hold(pv);
3049         pv_cache(pv, pindex);
3050         spin_unlock_shared(&pmap->pm_spin);
3051         return(pv);
3052 }
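
/*
 * Illustrative sketch: pv_find() returns a held but unlocked pv, so a
 * successful lookup must be balanced with pv_drop().  This is the
 * pattern used by the insertion-race checks in pmap_scan() below:
 *
 *	pv = pv_find(pmap, pmap_pte_pindex(sva));
 *	if (pv) {
 *		(a race was detected; note it and back out)
 *		pv_drop(pv);
 *	}
 */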
3053
3054 /*
3055  * Lock a held pv, keeping the hold count
3056  */
3057 static
3058 void
3059 _pv_lock(pv_entry_t pv PMAP_DEBUG_DECL)
3060 {
3061         u_int count;
3062
3063         for (;;) {
3064                 count = pv->pv_hold;
3065                 cpu_ccfence();
3066                 if ((count & PV_HOLD_LOCKED) == 0) {
3067                         if (atomic_cmpset_int(&pv->pv_hold, count,
3068                                               count | PV_HOLD_LOCKED)) {
3069 #ifdef PMAP_DEBUG
3070                                 pv->pv_func = func;
3071                                 pv->pv_line = lineno;
3072 #endif
3073                                 return;
3074                         }
3075                         continue;
3076                 }
3077                 tsleep_interlock(pv, 0);
3078                 if (atomic_cmpset_int(&pv->pv_hold, count,
3079                                       count | PV_HOLD_WAITING)) {
3080 #ifdef PMAP_DEBUG
3081                         kprintf("pv waiting on %s:%d\n",
3082                                         pv->pv_func, pv->pv_line);
3083 #endif
3084                         tsleep(pv, PINTERLOCKED, "pvwait", hz);
3085                 }
3086                 /* retry */
3087         }
3088 }
3089
3090 /*
3091  * Unlock a held and locked pv, keeping the hold count.
3092  */
3093 static
3094 void
3095 pv_unlock(pv_entry_t pv)
3096 {
3097         u_int count;
3098
3099         for (;;) {
3100                 count = pv->pv_hold;
3101                 cpu_ccfence();
3102                 KKASSERT((count & (PV_HOLD_LOCKED | PV_HOLD_MASK)) >=
3103                          (PV_HOLD_LOCKED | 1));
3104                 if (atomic_cmpset_int(&pv->pv_hold, count,
3105                                       count &
3106                                       ~(PV_HOLD_LOCKED | PV_HOLD_WAITING))) {
3107                         if (count & PV_HOLD_WAITING)
3108                                 wakeup(pv);
3109                         break;
3110                 }
3111         }
3112 }
3113
3114 /*
3115  * Unlock and drop a pv.  If the pv is no longer associated with a pmap
3116  * and the hold count drops to zero we will free it.
3117  *
3118  * Caller should not hold any spin locks.  We are protected from hold races
3119  * by virtue of holds occurring only with a pmap_spin or vm_page_spin
3120  * lock held.  A pv cannot be located otherwise.
3121  */
3122 static
3123 void
3124 pv_put(pv_entry_t pv)
3125 {
3126 #ifdef PMAP_DEBUG2
3127         if (pmap_enter_debug > 0) {
3128                 --pmap_enter_debug;
3129                 kprintf("pv_put pv=%p hold=%08x\n", pv, pv->pv_hold);
3130         }
3131 #endif
3132
3133         /*
3134          * Fast - shortcut most common condition
3135          */
3136         if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 1))
3137                 return;
3138
3139         /*
3140          * Slow
3141          */
3142         pv_unlock(pv);
3143         pv_drop(pv);
3144 }
3145
3146 /*
3147  * Remove the pmap association from a pv, requiring that pv_m already be removed,
3148  * then unlock and drop the pv.  Any pte operations must have already been
3149  * completed.  This call may result in a last-drop which will physically free
3150  * the pv.
3151  *
3152  * Removing the pmap association entails an additional drop.
3153  *
3154  * pv must be exclusively locked on call and will be disposed of on return.
3155  */
3156 static
3157 void
3158 pv_free(pv_entry_t pv)
3159 {
3160         pmap_t pmap;
3161
3162         KKASSERT(pv->pv_m == NULL);
3163         KKASSERT((pv->pv_hold & PV_HOLD_MASK) >= 2);
3164         if ((pmap = pv->pv_pmap) != NULL) {
3165                 spin_lock(&pmap->pm_spin);
3166                 pv_entry_rb_tree_RB_REMOVE(&pmap->pm_pvroot, pv);
3167                 ++pmap->pm_generation;
3168                 if (pmap->pm_pvhint == pv)
3169                         pmap->pm_pvhint = NULL;
3170                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
3171                 pv->pv_pmap = NULL;
3172                 pv->pv_pindex = 0;
3173                 spin_unlock(&pmap->pm_spin);
3174
3175                 /*
3176                  * Try to shortcut three atomic ops, otherwise fall through
3177                  * and do it normally.  Drop two refs and the lock all in
3178                  * one go.
3179                  */
3180                 if (atomic_cmpset_int(&pv->pv_hold, PV_HOLD_LOCKED | 2, 0)) {
3181 #ifdef PMAP_DEBUG2
3182                         if (pmap_enter_debug > 0) {
3183                                 --pmap_enter_debug;
3184                                 kprintf("pv_free: free pv %p\n", pv);
3185                         }
3186 #endif
3187                         zfree(pvzone, pv);
3188                         return;
3189                 }
3190                 pv_drop(pv);    /* ref for pv_pmap */
3191         }
3192         pv_put(pv);
3193 }
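
/*
 * Illustrative removal sequence (sketch; this is the order used by
 * pmap_remove_callback() and pmap_remove_all() below).  The pte is
 * cleared and accounted for first, the vm_page linkage is removed
 * second, and only then is the locked pv disposed of:
 *
 *	pmap_remove_pv_pte(pv, pt_pv, &info);	(clear/flush the pte)
 *	pmap_remove_pv_page(pv);		(unlink from the vm_page)
 *	pv_free(pv);				(dispose of the locked pv)
 */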
3194
3195 /*
3196  * This routine is very drastic, but can save the system
3197  * in a pinch.
3198  */
3199 void
3200 pmap_collect(void)
3201 {
3202         int i;
3203         vm_page_t m;
3204         static int warningdone=0;
3205
3206         if (pmap_pagedaemon_waken == 0)
3207                 return;
3208         pmap_pagedaemon_waken = 0;
3209         if (warningdone < 5) {
3210                 kprintf("pmap_collect: collecting pv entries -- "
3211                         "suggest increasing PMAP_SHPGPERPROC\n");
3212                 warningdone++;
3213         }
3214
3215         for (i = 0; i < vm_page_array_size; i++) {
3216                 m = &vm_page_array[i];
3217                 if (m->wire_count || m->hold_count)
3218                         continue;
3219                 if (vm_page_busy_try(m, TRUE) == 0) {
3220                         if (m->wire_count == 0 && m->hold_count == 0) {
3221                                 pmap_remove_all(m);
3222                         }
3223                         vm_page_wakeup(m);
3224                 }
3225         }
3226 }
3227
3228 /*
3229  * Scan the pmap for active page table entries and issue a callback.
3230  * The callback must dispose of pte_pv, whose PTE entry is at *ptep in
3231  * its parent page table.
3232  *
3233  * pte_pv will be NULL if the page or page table is unmanaged.
3234  * pt_pv will point to the page table page containing the pte for the page.
3235  *
3236  * NOTE! If we come across an unmanaged page TABLE (versus an unmanaged page),
3237  *       we pass a NULL pte_pv and we pass a pt_pv pointing to the passed
3238  *       process pmap's PD and page to the callback function.  This can be
3239  *       confusing because the pt_pv is really a pd_pv, and the target page
3240  *       table page is simply aliased by the pmap and not owned by it.
3241  *
3242  * It is assumed that the start and end are properly rounded to the page size.
3243  *
3244  * It is assumed that PD pages and above are managed and thus in the RB tree,
3245  * allowing us to use RB_SCAN from the PD pages down for ranged scans.
3246  */
3247 struct pmap_scan_info {
3248         struct pmap *pmap;
3249         vm_offset_t sva;
3250         vm_offset_t eva;
3251         vm_pindex_t sva_pd_pindex;
3252         vm_pindex_t eva_pd_pindex;
3253         void (*func)(pmap_t, struct pmap_scan_info *,
3254                      pv_entry_t, pv_entry_t, int, vm_offset_t,
3255                      pt_entry_t *, void *);
3256         void *arg;
3257         int doinval;
3258         int count;
3259         struct pmap_inval_info inval;
3260 };
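
/*
 * Minimal callback sketch (illustrative only; the real callbacks are
 * pmap_remove_callback() and pmap_protect_callback() below).  The
 * callback owns the locked pte_pv, when one exists, and must dispose
 * of it before returning:
 *
 *	static void
 *	example_callback(pmap_t pmap, struct pmap_scan_info *info,
 *			 pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
 *			 vm_offset_t va, pt_entry_t *ptep, void *arg)
 *	{
 *		(operate on *ptep)
 *		if (pte_pv)
 *			pv_put(pte_pv);		(or pv_free() on removal)
 *	}
 */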
3261
3262 static int pmap_scan_cmp(pv_entry_t pv, void *data);
3263 static int pmap_scan_callback(pv_entry_t pv, void *data);
3264
3265 static void
3266 pmap_scan(struct pmap_scan_info *info)
3267 {
3268         struct pmap *pmap = info->pmap;
3269         pv_entry_t pd_pv;       /* A page directory PV */
3270         pv_entry_t pt_pv;       /* A page table PV */
3271         pv_entry_t pte_pv;      /* A page table entry PV */
3272         pt_entry_t *ptep;
3273         pt_entry_t oldpte;
3274         struct pv_entry dummy_pv;
3275         int generation;
3276
3277         if (pmap == NULL)
3278                 return;
3279
3280         /*
3281          * Hold the token for stability; if the pmap is empty we have nothing
3282          * to do.
3283          */
3284         lwkt_gettoken(&pmap->pm_token);
3285 #if 0
3286         if (pmap->pm_stats.resident_count == 0) {
3287                 lwkt_reltoken(&pmap->pm_token);
3288                 return;
3289         }
3290 #endif
3291
3292         pmap_inval_init(&info->inval);
3293         info->count = 0;
3294
3295 again:
3296         /*
3297          * Special handling for scanning one page, which is a very common
3298          * operation (it is?).
3299          *
3300          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4
3301          */
3302         if (info->sva + PAGE_SIZE == info->eva) {
3303                 generation = pmap->pm_generation;
3304                 if (info->sva >= VM_MAX_USER_ADDRESS) {
3305                         /*
3306                          * Kernel mappings do not track wire counts on
3307                          * page table pages and only maintain pd_pv and
3308                          * pte_pv levels so pmap_scan() works.
3309                          */
3310                         pt_pv = NULL;
3311                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva));
3312                         ptep = vtopte(info->sva);
3313                 } else {
3314                         /*
3315                          * User pages which are unmanaged will not have a
3316                          * pte_pv.  User page table pages which are unmanaged
3317                          * (shared from elsewhere) will also not have a pt_pv.
3318                          * The func() callback will pass both pte_pv and pt_pv
3319                          * as NULL in that case.
3320                          */
3321                         pte_pv = pv_get(pmap, pmap_pte_pindex(info->sva));
3322                         pt_pv = pv_get(pmap, pmap_pt_pindex(info->sva));
3323                         if (pt_pv == NULL) {
3324                                 KKASSERT(pte_pv == NULL);
3325                                 pd_pv = pv_get(pmap, pmap_pd_pindex(info->sva));
3326                                 if (pd_pv) {
3327                                         ptep = pv_pte_lookup(pd_pv,
3328                                                     pmap_pt_index(info->sva));
3329                                         if (*ptep) {
3330                                                 info->func(pmap, info,
3331                                                      NULL, pd_pv, 1,
3332                                                      info->sva, ptep,
3333                                                      info->arg);
3334                                         }
3335                                         pv_put(pd_pv);
3336                                 }
3337                                 goto fast_skip;
3338                         }
3339                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(info->sva));
3340                 }
3341
3342                 /*
3343                  * NOTE: *ptep can't be ripped out from under us if we hold
3344                  *       pte_pv locked, but bits can change.  However, there is
3345                  *       a race where another thread may be inserting pte_pv
3346                  *       and setting *ptep just after our pte_pv lookup fails.
3347                  *
3348                  *       In this situation we can end up with a NULL pte_pv
3349                  *       but find that we have a managed *ptep.  We explicitly
3350                  *       check for this race.
3351                  */
3352                 oldpte = *ptep;
3353                 cpu_ccfence();
3354                 if (oldpte == 0) {
3355                         /*
3356                          * Unlike the pv_find() case below we actually
3357                          * acquired a locked pv in this case so any
3358                          * race should have been resolved.  It is expected
3359                          * to not exist.
3360                          */
3361                         KKASSERT(pte_pv == NULL);
3362                 } else if (pte_pv) {
3363                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
3364                                            pmap->pmap_bits[PG_V_IDX])) ==
3365                                 (pmap->pmap_bits[PG_MANAGED_IDX] |
3366                                  pmap->pmap_bits[PG_V_IDX]),
3367                             ("badA *ptep %016lx/%016lx sva %016lx pte_pv %p "
3368                              "generation %d/%d",
3369                             *ptep, oldpte, info->sva, pte_pv,
3370                             generation, pmap->pm_generation));
3371                         info->func(pmap, info, pte_pv, pt_pv, 0,
3372                                    info->sva, ptep, info->arg);
3373                 } else {
3374                         /*
3375                          * Check for insertion race
3376                          */
3377                         if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3378                             pt_pv) {
3379                                 pte_pv = pv_find(pmap,
3380                                                  pmap_pte_pindex(info->sva));
3381                                 if (pte_pv) {
3382                                         pv_drop(pte_pv);
3383                                         pv_put(pt_pv);
3384                                         kprintf("pmap_scan: RACE1 "
3385                                                 "%016jx, %016lx\n",
3386                                                 info->sva, oldpte);
3387                                         goto again;
3388                                 }
3389                         }
3390
3391                         /*
3392                          * Didn't race
3393                          */
3394                         KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] |
3395                                            pmap->pmap_bits[PG_V_IDX])) ==
3396                             pmap->pmap_bits[PG_V_IDX],
3397                             ("badB *ptep %016lx/%016lx sva %016lx pte_pv NULL "
3398                              "generation %d/%d",
3399                             *ptep, oldpte, info->sva,
3400                             generation, pmap->pm_generation));
3401                         info->func(pmap, info, NULL, pt_pv, 0,
3402                             info->sva, ptep, info->arg);
3403                 }
3404                 if (pt_pv)
3405                         pv_put(pt_pv);
3406 fast_skip:
3407                 pmap_inval_done(&info->inval);
3408                 lwkt_reltoken(&pmap->pm_token);
3409                 return;
3410         }
3411
3412         /*
3413          * Nominal scan case, RB_SCAN() for PD pages and iterate from
3414          * there.
3415          */
3416         info->sva_pd_pindex = pmap_pd_pindex(info->sva);
3417         info->eva_pd_pindex = pmap_pd_pindex(info->eva + NBPDP - 1);
3418
3419         if (info->sva >= VM_MAX_USER_ADDRESS) {
3420                 /*
3421                  * The kernel does not currently maintain any pv_entry's for
3422                  * higher-level page tables.
3423                  */
3424                 bzero(&dummy_pv, sizeof(dummy_pv));
3425                 dummy_pv.pv_pindex = info->sva_pd_pindex;
3426                 spin_lock(&pmap->pm_spin);
3427                 while (dummy_pv.pv_pindex < info->eva_pd_pindex) {
3428                         pmap_scan_callback(&dummy_pv, info);
3429                         ++dummy_pv.pv_pindex;
3430                 }
3431                 spin_unlock(&pmap->pm_spin);
3432         } else {
3433                 /*
3434                  * User page tables maintain local PML4, PDP, and PD
3435                  * pv_entry's at the very least.  PT pv's might be
3436                  * unmanaged and thus not exist.  PTE pv's might be
3437                  * unmanaged and thus not exist.
3438                  */
3439                 spin_lock(&pmap->pm_spin);
3440                 pv_entry_rb_tree_RB_SCAN(&pmap->pm_pvroot,
3441                         pmap_scan_cmp, pmap_scan_callback, info);
3442                 spin_unlock(&pmap->pm_spin);
3443         }
3444         pmap_inval_done(&info->inval);
3445         lwkt_reltoken(&pmap->pm_token);
3446 }
3447
3448 /*
3449  * WARNING! pmap->pm_spin held
3450  */
3451 static int
3452 pmap_scan_cmp(pv_entry_t pv, void *data)
3453 {
3454         struct pmap_scan_info *info = data;
3455         if (pv->pv_pindex < info->sva_pd_pindex)
3456                 return(-1);
3457         if (pv->pv_pindex >= info->eva_pd_pindex)
3458                 return(1);
3459         return(0);
3460 }
3461
3462 /*
3463  * WARNING! pmap->pm_spin held
3464  */
3465 static int
3466 pmap_scan_callback(pv_entry_t pv, void *data)
3467 {
3468         struct pmap_scan_info *info = data;
3469         struct pmap *pmap = info->pmap;
3470         pv_entry_t pd_pv;       /* A page directory PV */
3471         pv_entry_t pt_pv;       /* A page table PV */
3472         pv_entry_t pte_pv;      /* A page table entry PV */
3473         pt_entry_t *ptep;
3474         pt_entry_t oldpte;
3475         vm_offset_t sva;
3476         vm_offset_t eva;
3477         vm_offset_t va_next;
3478         vm_pindex_t pd_pindex;
3479         int error;
3480         int generation;
3481
3482         /*
3483          * Pull the PD pindex from the pv before releasing the spinlock.
3484          *
3485          * WARNING: pv is faked for kernel pmap scans.
3486          */
3487         pd_pindex = pv->pv_pindex;
3488         spin_unlock(&pmap->pm_spin);
3489         pv = NULL;      /* invalid after spinlock unlocked */
3490
3491         /*
3492          * Calculate the page range within the PD.  SIMPLE pmaps are
3493          * direct-mapped for the entire 2^64 address space.  Normal pmaps
3494          * reflect the user and kernel address space which requires
3495  * canonicalization with regard to converting pd_pindex's back
3496          * into addresses.
3497          */
3498         sva = (pd_pindex - NUPTE_TOTAL - NUPT_TOTAL) << PDPSHIFT;
3499         if ((pmap->pm_flags & PMAP_FLAG_SIMPLE) == 0 &&
3500             (sva & PML4_SIGNMASK)) {
3501                 sva |= PML4_SIGNMASK;
3502         }
3503         eva = sva + NBPDP;      /* can overflow */
3504         if (sva < info->sva)
3505                 sva = info->sva;
3506         if (eva < info->sva || eva > info->eva)
3507                 eva = info->eva;
3508
3509         /*
3510          * NOTE: kernel mappings do not track page table pages, only
3511          *       terminal pages.
3512          *
3513          * NOTE: Locks must be ordered bottom-up. pte,pt,pd,pdp,pml4.
3514          *       However, for the scan to be efficient we try to
3515          *       cache items top-down.
3516          */
3517         pd_pv = NULL;
3518         pt_pv = NULL;
3519
3520         for (; sva < eva; sva = va_next) {
3521                 if (sva >= VM_MAX_USER_ADDRESS) {
3522                         if (pt_pv) {
3523                                 pv_put(pt_pv);
3524                                 pt_pv = NULL;
3525                         }
3526                         goto kernel_skip;
3527                 }
3528
3529                 /*
3530                  * PD cache (degenerate case if we skip).  It is possible
3531                  * for the PD to not exist due to races.  This is ok.
3532                  */
3533                 if (pd_pv == NULL) {
3534                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3535                 } else if (pd_pv->pv_pindex != pmap_pd_pindex(sva)) {
3536                         pv_put(pd_pv);
3537                         pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3538                 }
3539                 if (pd_pv == NULL) {
3540                         va_next = (sva + NBPDP) & ~PDPMASK;
3541                         if (va_next < sva)
3542                                 va_next = eva;
3543                         continue;
3544                 }
3545
3546                 /*
3547                  * PT cache
3548                  */
3549                 if (pt_pv == NULL) {
3550                         if (pd_pv) {
3551                                 pv_put(pd_pv);
3552                                 pd_pv = NULL;
3553                         }
3554                         pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3555                 } else if (pt_pv->pv_pindex != pmap_pt_pindex(sva)) {
3556                         if (pd_pv) {
3557                                 pv_put(pd_pv);
3558                                 pd_pv = NULL;
3559                         }
3560                         pv_put(pt_pv);
3561                         pt_pv = pv_get(pmap, pmap_pt_pindex(sva));
3562                 }
3563
3564                 /*
3565  * If pt_pv is NULL we either have a shared page table
3566                  * page and must issue a callback specific to that case,
3567                  * or there is no page table page.
3568                  *
3569                  * Either way we can skip the page table page.
3570                  */
3571                 if (pt_pv == NULL) {
3572                         /*
3573                          * Possible unmanaged (shared from another pmap)
3574                          * page table page.
3575                          */
3576                         if (pd_pv == NULL)
3577                                 pd_pv = pv_get(pmap, pmap_pd_pindex(sva));
3578                         KKASSERT(pd_pv != NULL);
3579                         ptep = pv_pte_lookup(pd_pv, pmap_pt_index(sva));
3580                         if (*ptep & pmap->pmap_bits[PG_V_IDX]) {
3581                                 info->func(pmap, info, NULL, pd_pv, 1,
3582                                            sva, ptep, info->arg);
3583                         }
3584
3585                         /*
3586                          * Done, move to next page table page.
3587                          */
3588                         va_next = (sva + NBPDR) & ~PDRMASK;
3589                         if (va_next < sva)
3590                                 va_next = eva;
3591                         continue;
3592                 }
3593
3594                 /*
3595                  * From this point in the loop testing pt_pv for non-NULL
3596                  * means we are in UVM, else if it is NULL we are in KVM.
3597                  *
3598                  * Limit our scan to either the end of the va represented
3599                  * by the current page table page, or to the end of the
3600                  * range being removed.
3601                  */
3602 kernel_skip:
3603                 va_next = (sva + NBPDR) & ~PDRMASK;
3604                 if (va_next < sva)
3605                         va_next = eva;
3606                 if (va_next > eva)
3607                         va_next = eva;
3608
3609                 /*
3610                  * Scan the page table for pages.  Some pages may not be
3611                  * managed (might not have a pv_entry).
3612                  *
3613                  * There is no page table management for kernel pages so
3614                  * pt_pv will be NULL in that case, but otherwise pt_pv
3615                  * is non-NULL, locked, and referenced.
3616                  */
3617
3618                 /*
3619                  * At this point a non-NULL pt_pv means a UVA, and a NULL
3620                  * pt_pv means a KVA.
3621                  */
3622                 if (pt_pv)
3623                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(sva));
3624                 else
3625                         ptep = vtopte(sva);
3626
3627                 while (sva < va_next) {
3628                         /*
3629                          * Acquire the related pte_pv, if any.  If *ptep == 0
3630                          * the related pte_pv should not exist, but if *ptep
3631                          * is not zero the pte_pv may or may not exist (e.g.
3632                          * will not exist for an unmanaged page).
3633                          *
3634                          * However a multitude of races are possible here.
3635                          *
3636                          * In addition, the (pt_pv, pte_pv) lock order is
3637  * backwards, so we have to be careful in acquiring
3638                          * a properly locked pte_pv.
3639                          */
3640                         generation = pmap->pm_generation;
3641                         if (pt_pv) {
3642                                 pte_pv = pv_get_try(pmap, pmap_pte_pindex(sva),
3643                                                     &error);
3644                                 if (error) {
3645                                         if (pd_pv) {
3646                                                 pv_put(pd_pv);
3647                                                 pd_pv = NULL;
3648                                         }
3649                                         pv_put(pt_pv);   /* must be non-NULL */
3650                                         pt_pv = NULL;
3651                                         pv_lock(pte_pv); /* safe to block now */
3652                                         pv_put(pte_pv);
3653                                         pte_pv = NULL;
3654                                         pt_pv = pv_get(pmap,
3655                                                        pmap_pt_pindex(sva));
3656                                         /*
3657                                          * pt_pv reloaded, need new ptep
3658                                          */
3659                                         KKASSERT(pt_pv != NULL);
3660                                         ptep = pv_pte_lookup(pt_pv,
3661                                                         pmap_pte_index(sva));
3662                                         continue;
3663                                 }
3664                         } else {
3665                                 pte_pv = pv_get(pmap, pmap_pte_pindex(sva));
3666                         }
3667
3668                         /*
3669                          * Ok, if *ptep == 0 we had better NOT have a pte_pv.
3670                          */
3671                         oldpte = *ptep;
3672                         if (oldpte == 0) {
3673                                 if (pte_pv) {
3674                                         kprintf("Unexpected non-NULL pte_pv "
3675                                                 "%p pt_pv %p "
3676                                                 "*ptep = %016lx/%016lx\n",
3677                                                 pte_pv, pt_pv, *ptep, oldpte);
3678                                         panic("Unexpected non-NULL pte_pv");
3679                                 }
3680                                 sva += PAGE_SIZE;
3681                                 ++ptep;
3682                                 continue;
3683                         }
3684
3685                         /*
3686                          * Ready for the callback.  The locked pte_pv (if any)
3687                          * is consumed by the callback.  pte_pv will exist if
3688                          * the page is managed, and will not exist if it
3689                          * isn't.
3690                          */
3691                         if (pte_pv) {
3692                                 KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) ==
3693                                     (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX]),
3694                                     ("badC *ptep %016lx/%016lx sva %016lx "
3695                                     "pte_pv %p pm_generation %d/%d",
3696                                     *ptep, oldpte, sva, pte_pv,
3697                                     generation, pmap->pm_generation));
3698                                 info->func(pmap, info, pte_pv, pt_pv, 0,
3699                                     sva, ptep, info->arg);
3700                         } else {
3701                                 /*
3702                                  * Check for insertion race.  Since there is no
3703                                  * pte_pv to guard us it is possible for us
3704                                  * to race another thread doing an insertion.
3705                                  * Our lookup misses the pte_pv but our *ptep
3706                                  * check sees the inserted pte.
3707                                  *
3708                                  * XXX panic case seems to occur within a
3709                                  * vm_fork() of /bin/sh, which frankly
3710                                  * shouldn't happen since no other threads
3711                                  * should be inserting to our pmap in that
3712                                  * situation.  Removing, possibly.  Inserting,
3713                                  * shouldn't happen.
3714                                  */
3715                                 if ((oldpte & pmap->pmap_bits[PG_MANAGED_IDX]) &&
3716                                     pt_pv) {
3717                                         pte_pv = pv_find(pmap,
3718                                                          pmap_pte_pindex(sva));
3719                                         if (pte_pv) {
3720                                                 pv_drop(pte_pv);
3721                                                 kprintf("pmap_scan: RACE2 "
3722                                                         "%016jx, %016lx\n",
3723                                                         sva, oldpte);
3724                                                 continue;
3725                                         }
3726                                 }
3727
3728                                 /*
3729                                  * Didn't race
3730                                  */
3731                                 KASSERT((oldpte & (pmap->pmap_bits[PG_MANAGED_IDX] | pmap->pmap_bits[PG_V_IDX])) ==
3732                                     pmap->pmap_bits[PG_V_IDX],
3733                                     ("badD *ptep %016lx/%016lx sva %016lx "
3734                                     "pte_pv NULL pm_generation %d/%d",
3735                                      *ptep, oldpte, sva,
3736                                      generation, pmap->pm_generation));
3737                                 info->func(pmap, info, NULL, pt_pv, 0,
3738                                     sva, ptep, info->arg);
3739                         }
3740                         pte_pv = NULL;
3741                         sva += PAGE_SIZE;
3742                         ++ptep;
3743                 }
3744                 if ((++info->count & 7) == 0)
3745                         lwkt_user_yield();
3746         }
3747         if (pd_pv) {
3748                 pv_put(pd_pv);
3749                 pd_pv = NULL;
3750         }
3751         if (pt_pv) {
3752                 pv_put(pt_pv);
3753                 pt_pv = NULL;
3754         }
3755         if ((++info->count & 7) == 0)
3756                 lwkt_user_yield();
3757
3758         /*
3759          * Relock before returning.
3760          */
3761         spin_lock(&pmap->pm_spin);
3762         return (0);
3763 }
3764
3765 void
3766 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3767 {
3768         struct pmap_scan_info info;
3769
3770         info.pmap = pmap;
3771         info.sva = sva;
3772         info.eva = eva;
3773         info.func = pmap_remove_callback;
3774         info.arg = NULL;
3775         info.doinval = 1;       /* normal remove requires pmap inval */
3776         pmap_scan(&info);
3777 }
3778
3779 static void
3780 pmap_remove_noinval(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
3781 {
3782         struct pmap_scan_info info;
3783
3784         info.pmap = pmap;
3785         info.sva = sva;
3786         info.eva = eva;
3787         info.func = pmap_remove_callback;
3788         info.arg = NULL;
3789         info.doinval = 0;       /* caller is responsible for pmap inval */
3790         pmap_scan(&info);
3791 }
3792
3793 static void
3794 pmap_remove_callback(pmap_t pmap, struct pmap_scan_info *info,
3795                      pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
3796                      vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
3797 {
3798         pt_entry_t pte;
3799
3800         if (pte_pv) {
3801                 /*
3802                  * This will also drop pt_pv's wire_count. Note that
3803                  * terminal pages are not wired based on mmu presence.
3804                  */
3805                 if (info->doinval)
3806                         pmap_remove_pv_pte(pte_pv, pt_pv, &info->inval);
3807                 else
3808                         pmap_remove_pv_pte(pte_pv, pt_pv, NULL);
3809                 pmap_remove_pv_page(pte_pv);
3810                 pv_free(pte_pv);
3811         } else if (sharept == 0) {
3812                 /*
3813                  * Unmanaged page table (pt, pd, or pdp. Not pte).
3814                  *
3815                  * pt_pv's wire_count is still bumped by unmanaged pages
3816                  * so we must decrement it manually.
3817                  *
3818                  * We have to unwire the target page table page.
3819                  *
3820                  * It is unclear how we can invalidate a segment so we
3821  * invalidate -1 which invalidates the tlb.
3822                  */
3823                 if (info->doinval)
3824                         pmap_inval_interlock(&info->inval, pmap, -1);
3825                 pte = pte_load_clear(ptep);
3826                 if (info->doinval)
3827                         pmap_inval_deinterlock(&info->inval, pmap);
3828                 if (pte & pmap->pmap_bits[PG_W_IDX])
3829                         atomic_add_long(&pmap->pm_stats.wired_count, -1);
3830                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
3831                 if (vm_page_unwire_quick(pt_pv->pv_m))
3832                         panic("pmap_remove: insufficient wirecount");
3833         } else {
3834                 /*
3835                  * Unmanaged page table (pt, pd, or pdp. Not pte) for
3836                  * a shared page table.
3837                  *
3838                  * pt_pv is actually the pd_pv for our pmap (not the shared
3839                  * object pmap).
3840                  *
3841                  * We have to unwire the target page table page and we
3842                  * have to unwire our page directory page.
3843                  *
3844                  * It is unclear how we can invalidate a segment so we
3845  * invalidate -1 which invalidates the tlb.
3846                  */
3847                 if (info->doinval)
3848                         pmap_inval_interlock(&info->inval, pmap, -1);
3849                 pte = pte_load_clear(ptep);
3850                 if (info->doinval)
3851                         pmap_inval_deinterlock(&info->inval, pmap);
3852                 atomic_add_long(&pmap->pm_stats.resident_count, -1);
3853                 KKASSERT((pte & pmap->pmap_bits[PG_DEVICE_IDX]) == 0);
3854                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
3855                         panic("pmap_remove: shared pgtable1 bad wirecount");
3856                 if (vm_page_unwire_quick(pt_pv->pv_m))
3857                         panic("pmap_remove: shared pgtable2 bad wirecount");
3858         }
3859 }
3860
3861 /*
3862  * Removes this physical page from all physical maps in which it resides.
3863  * Reflects back modify bits to the pager.
3864  *
3865  * This routine may not be called from an interrupt.
3866  */
3867 static
3868 void
3869 pmap_remove_all(vm_page_t m)
3870 {
3871         struct pmap_inval_info info;
3872         pv_entry_t pv;
3873
3874         if (!pmap_initialized /* || (m->flags & PG_FICTITIOUS)*/)
3875                 return;
3876
3877         pmap_inval_init(&info);
3878         vm_page_spin_lock(m);
3879         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3880                 KKASSERT(pv->pv_m == m);
3881                 if (pv_hold_try(pv)) {
3882                         vm_page_spin_unlock(m);
3883                 } else {
3884                         vm_page_spin_unlock(m);
3885                         pv_lock(pv);
3886                 }
3887                 if (pv->pv_m != m) {
3888                         pv_put(pv);
3889                         vm_page_spin_lock(m);
3890                         continue;
3891                 }
3892
3893                 /*
3894                  * Holding no spinlocks, pv is locked.
3895                  */
3896                 pmap_remove_pv_pte(pv, NULL, &info);
3897                 pmap_remove_pv_page(pv);
3898                 pv_free(pv);
3899                 vm_page_spin_lock(m);
3900         }
3901         KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
3902         vm_page_spin_unlock(m);
3903         pmap_inval_done(&info);
3904 }
3905
3906 /*
3907  * Set the physical protection on the specified range of this map
3908  * as requested.  This function is typically only used for debug watchpoints
3909  * and COW pages.
3910  *
3911  * This function may not be called from an interrupt if the map is
3912  * not the kernel_pmap.
3913  *
3914  * NOTE!  For shared page table pages we just unmap the page.
3915  */
3916 void
3917 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3918 {
3919         struct pmap_scan_info info;
3920         /* JG review for NX */
3921
3922         if (pmap == NULL)
3923                 return;
3924         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
3925                 pmap_remove(pmap, sva, eva);
3926                 return;
3927         }
3928         if (prot & VM_PROT_WRITE)
3929                 return;
3930         info.pmap = pmap;
3931         info.sva = sva;
3932         info.eva = eva;
3933         info.func = pmap_protect_callback;
3934         info.arg = &prot;
3935         info.doinval = 1;
3936         pmap_scan(&info);
3937 }
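
/*
 * Illustrative use (a sketch; the addresses are assumptions): the common
 * COW-style call simply strips write permission from a page-aligned
 * range, leaving it mapped read-only:
 *
 *	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);
 */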
3938
3939 static
3940 void
3941 pmap_protect_callback(pmap_t pmap, struct pmap_scan_info *info,
3942                       pv_entry_t pte_pv, pv_entry_t pt_pv, int sharept,
3943                       vm_offset_t va, pt_entry_t *ptep, void *arg __unused)
3944 {
3945         pt_entry_t pbits;
3946         pt_entry_t cbits;
3947         pt_entry_t pte;
3948         vm_page_t m;
3949
3950         /*
3951          * XXX non-optimal.
3952          */
3953         pmap_inval_interlock(&info->inval, pmap, va);
3954 again:
3955         pbits = *ptep;
3956         cbits = pbits;
3957         if (pte_pv) {
3958                 m = NULL;
3959                 if (pbits & pmap->pmap_bits[PG_A_IDX]) {
3960                         if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
3961                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
3962                                 KKASSERT(m == pte_pv->pv_m);
3963                                 vm_page_flag_set(m, PG_REFERENCED);
3964                         }
3965                         cbits &= ~pmap->pmap_bits[PG_A_IDX];
3966                 }
3967                 if (pbits & pmap->pmap_bits[PG_M_IDX]) {
3968                         if (pmap_track_modified(pte_pv->pv_pindex)) {
3969                                 if ((pbits & pmap->pmap_bits[PG_DEVICE_IDX]) == 0) {
3970                                         if (m == NULL) {
3971                                                 m = PHYS_TO_VM_PAGE(pbits &
3972                                                                     PG_FRAME);
3973                                         }
3974                                         vm_page_dirty(m);
3975                                 }
3976                                 cbits &= ~pmap->pmap_bits[PG_M_IDX];
3977                         }
3978                 }
3979         } else if (sharept) {
3980                 /*
3981                  * Unmanaged page table, pt_pv is actually the pd_pv
3982                  * for our pmap (not the object's shared pmap).
3983                  *
3984                  * When asked to protect something in a shared page table
3985                  * page we just unmap the page table page.  We have to
3986                  * invalidate the tlb in this situation.
3987                  *
3988                  * XXX Warning, shared page tables will not be used for
3989                  * OBJT_DEVICE or OBJT_MGTDEVICE (PG_FICTITIOUS) mappings
3990                  * so PHYS_TO_VM_PAGE() should be safe here.
3991                  */
3992                 pte = pte_load_clear(ptep);
3993                 pmap_inval_invltlb(&info->inval);
3994                 if (vm_page_unwire_quick(PHYS_TO_VM_PAGE(pte & PG_FRAME)))
3995                         panic("pmap_protect: pgtable1 pg bad wirecount");
3996                 if (vm_page_unwire_quick(pt_pv->pv_m))
3997                         panic("pmap_protect: pgtable2 pg bad wirecount");
3998                 ptep = NULL;
3999         }
4000         /* else unmanaged page, adjust bits, no wire changes */
4001
4002         if (ptep) {
4003                 cbits &= ~pmap->pmap_bits[PG_RW_IDX];
4004 #ifdef PMAP_DEBUG2
4005                 if (pmap_enter_debug > 0) {
4006                         --pmap_enter_debug;
4007                         kprintf("pmap_protect va=%lx ptep=%p pte_pv=%p "
4008                                 "pt_pv=%p cbits=%08lx\n",
4009                                 va, ptep, pte_pv,
4010                                 pt_pv, cbits
4011                         );
4012                 }
4013 #endif
4014                 if (pbits != cbits && !atomic_cmpset_long(ptep, pbits, cbits)) {
4015                         goto again;
4016                 }
4017         }
4018         pmap_inval_deinterlock(&info->inval, pmap);
4019         if (pte_pv)
4020                 pv_put(pte_pv);
4021 }
4022
4023 /*
4024  * Insert the vm_page (m) at the virtual address (va), replacing any prior
4025  * mapping at that address.  Set protection and wiring as requested.
4026  *
4027  * If entry is non-NULL we check to see if the SEG_SIZE optimization is
4028  * possible.  If it is we enter the page into the appropriate shared pmap
4029  * hanging off the related VM object instead of the passed pmap, then we
4030  * share the page table page from the VM object's pmap into the current pmap.
4031  *
4032  * NOTE: This routine MUST insert the page into the pmap now, it cannot
4033  *       lazy-evaluate.
4034  */
4035 void
4036 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4037            boolean_t wired, vm_map_entry_t entry)
4038 {
4039         pmap_inval_info info;
4040         pv_entry_t pt_pv;       /* page table */
4041         pv_entry_t pte_pv;      /* page table entry */
4042         pt_entry_t *ptep;
4043         vm_paddr_t opa;
4044         pt_entry_t origpte, newpte;
4045         vm_paddr_t pa;
4046
4047         if (pmap == NULL)
4048                 return;
4049         va = trunc_page(va);
4050 #ifdef PMAP_DIAGNOSTIC
4051         if (va >= KvaEnd)
4052                 panic("pmap_enter: toobig");
4053         if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
4054                 panic("pmap_enter: invalid to pmap_enter page table "
4055                       "pages (va: 0x%lx)", va);
4056 #endif
4057         if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
4058                 kprintf("Warning: pmap_enter called on UVA with "
4059                         "kernel_pmap\n");
4060 #ifdef DDB
4061                 db_print_backtrace();
4062 #endif
4063         }
4064         if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
4065                 kprintf("Warning: pmap_enter called on KVA without "
4066                         "kernel_pmap\n");
4067 #ifdef DDB
4068                 db_print_backtrace();
4069 #endif
4070         }
4071
4072         /*
4073          * Get locked PV entries for our new page table entry (pte_pv)
4074          * and for its parent page table (pt_pv).  We need the parent
4075          * so we can resolve the location of the ptep.
4076          *
4077          * Only hardware MMU actions can modify the ptep out from
4078          * under us.
4079          *
4080          * if (m) is fictitious or unmanaged we do not create a managing
4081          * pte_pv for it.  Any pre-existing page's management state must
4082          * match (avoiding code complexity).
4083          *
4084          * If the pmap is still being initialized we assume existing
4085          * page tables.
4086          *
4087  * Kernel mappings do not track page table pages (i.e. pt_pv).
4088          */
4089         if (pmap_initialized == FALSE) {
4090                 pte_pv = NULL;
4091                 pt_pv = NULL;
4092                 ptep = vtopte(va);
4093                 origpte = *ptep;
4094         } else if (m->flags & (/*PG_FICTITIOUS |*/ PG_UNMANAGED)) { /* XXX */
4095                 pte_pv = NULL;
4096                 if (va >= VM_MAX_USER_ADDRESS) {
4097                         pt_pv = NULL;
4098                         ptep = vtopte(va);
4099                 } else {
4100                         pt_pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va),
4101                                                   NULL, entry, va);
4102                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4103                 }
4104                 origpte = *ptep;
4105                 cpu_ccfence();
4106                 KASSERT(origpte == 0 ||
4107                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0,
4108                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
4109         } else {
4110                 if (va >= VM_MAX_USER_ADDRESS) {
4111                         /*
4112                          * Kernel map, pv_entry-tracked.
4113                          */
4114                         pt_pv = NULL;
4115                         pte_pv = pmap_allocpte(pmap, pmap_pte_pindex(va), NULL);
4116                         ptep = vtopte(va);
4117                 } else {
4118                         /*
4119                          * User map
4120                          */
4121                         pte_pv = pmap_allocpte_seg(pmap, pmap_pte_pindex(va),
4122                                                    &pt_pv, entry, va);
4123                         ptep = pv_pte_lookup(pt_pv, pmap_pte_index(va));
4124                 }
4125                 origpte = *ptep;
4126                 cpu_ccfence();
4127                 KASSERT(origpte == 0 ||
4128                          (origpte & pmap->pmap_bits[PG_MANAGED_IDX]),
4129                          ("Invalid PTE 0x%016jx @ 0x%016jx\n", origpte, va));
4130         }
4131
4132         pa = VM_PAGE_TO_PHYS(m);
4133         opa = origpte & PG_FRAME;
4134
4135         newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) |
4136                  pmap->pmap_bits[PG_V_IDX] | pmap->pmap_bits[PG_A_IDX]);
4137         if (wired)
4138                 newpte |= pmap->pmap_bits[PG_W_IDX];
4139         if (va < VM_MAX_USER_ADDRESS)
4140                 newpte |= pmap->pmap_bits[PG_U_IDX];
4141         if (pte_pv)
4142                 newpte |= pmap->pmap_bits[PG_MANAGED_IDX];
4143 //      if (pmap == &kernel_pmap)
4144 //              newpte |= pgeflag;
4145         newpte |= pmap->pmap_cache_bits[m->pat_mode];
4146         if (m->flags & PG_FICTITIOUS)
4147                 newpte |= pmap->pmap_bits[PG_DEVICE_IDX];
4148
4149         /*
4150          * It is possible for multiple faults to occur in threaded
4151          * environments; the existing pte might be correct.
4152          */
4153         if (((origpte ^ newpte) & ~(pt_entry_t)(pmap->pmap_bits[PG_M_IDX] |
4154             pmap->pmap_bits[PG_A_IDX])) == 0)
4155                 goto done;
4156
4157         if ((prot & VM_PROT_NOSYNC) == 0)
4158                 pmap_inval_init(&info);
4159
4160         /*
4161          * Ok, either the address changed or the protection or wiring
4162          * changed.
4163          *
4164          * Clear the current entry, interlocking the removal.  For managed
4165          * pte's this will also flush the modified state to the vm_page.
4166          * Atomic ops are mandatory in order to ensure that PG_M events are
4167          * not lost during any transition.
4168          *
4169          * WARNING: The caller has busied the new page but not the original
4170          *          vm_page which we are trying to replace.  Because we hold
4171          *          the pte_pv lock, but have not busied the page, PG bits
4172          *          can be cleared out from under us.
4173          */
4174         if (opa) {
4175                 if (pte_pv) {
4176                         /*
4177                          * pmap_remove_pv_pte() unwires pt_pv and assumes
4178                          * we will free pte_pv, but since we are reusing
4179                          * pte_pv we want to retain the wire count.
4180                          *
4181                          * pt_pv won't exist for a kernel page (managed or
4182                          * otherwise).
4183                          */
4184                         if (pt_pv)
4185                                 vm_page_wire_quick(pt_pv->pv_m);
4186                         if (prot & VM_PROT_NOSYNC)
4187                                 pmap_remove_pv_pte(pte_pv, pt_pv, NULL);
4188                         else
4189                                 pmap_remove_pv_pte(pte_pv, pt_pv, &info);
4190                         if (pte_pv->pv_m)
4191                                 pmap_remove_pv_page(pte_pv);
4192                 } else if (prot & VM_PROT_NOSYNC) {
4193                         /*
4194                          * Unmanaged page, NOSYNC (no mmu sync) requested.
4195                          *
4196                          * Leave wire count on PT page intact.
4197                          */
4198                         (void)pte_load_clear(ptep);
4199                         cpu_invlpg((void *)va);
4200                         atomic_add_long(&pmap->pm_stats.resident_count, -1);
4201                 } else {
4202                         /*
4203                          * Unmanaged page, normal enter.
4204                          *
4205                          * Leave wire count on PT page intact.
4206                          */
4207                         pmap_inval_interlock(&info, pmap, va);
4208                         (void)pte_load_clear(ptep);
4209                         pmap_inval_deinterlock(&info, pmap);
4210                         atomic_add_long(&pmap->pm_stats.resident_count, -1);
4211                 }
4212                 KKASSERT(*ptep == 0);
4213         }
4214
4215 #ifdef PMAP_DEBUG2
4216         if (pmap_enter_debug > 0) {
4217                 --pmap_enter_debug;
4218                 kprintf("pmap_enter: va=%lx m=%p origpte=%lx newpte=%lx ptep=%p"
4219                         " pte_pv=%p pt_pv=%p opa=%lx prot=%02x\n",
4220                         va, m,
4221                         origpte, newpte, ptep,
4222                         pte_pv, pt_pv, opa, prot);
4223         }
4224 #endif
4225
4226         if (pte_pv) {
4227                 /*
4228                  * Enter on the PV list if part of our managed memory.
4229                  * Wiring of the PT page is already handled.
4230                  */
4231                 KKASSERT(pte_pv->pv_m == NULL);
4232                 vm_page_spin_lock(m);
4233                 pte_pv->pv_m = m;
4234                 pmap_page_stats_adding(m);
4235                 TAILQ_INSERT_TAIL(&m->md.pv_list, pte_pv, pv_list);
4236                 vm_page_flag_set(m, PG_MAPPED);
4237                 vm_page_spin_unlock(m);
4238         } else if (pt_pv && opa == 0) {
4239                 /*
4240                  * We have to adjust the wire count on the PT page ourselves
4241                  * for unmanaged entries.  If opa was non-zero we retained
4242                  * the existing wire count from the removal.
4243                  */
4244                 vm_page_wire_quick(pt_pv->pv_m);
4245         }
4246
4247         /*
4248          * Kernel VMAs (pt_pv == NULL) require pmap invalidation interlocks.
4249          *
4250          * User VMAs do not because those will be zero->non-zero, so no
4251          * stale entries to worry about at this point.
4252          *
4253          * For KVM there appear to still be issues.  Theoretically we
4254          * should be able to scrap the interlocks entirely but we
4255          * get crashes.
4256          */
4257         if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL)
4258                 pmap_inval_interlock(&info, pmap, va);
4259
4260         /*
4261          * Set the pte
4262          */
4263         *(volatile pt_entry_t *)ptep = newpte;
4264
4265         if ((prot & VM_PROT_NOSYNC) == 0 && pt_pv == NULL)
4266                 pmap_inval_deinterlock(&info, pmap);
4267         else if (pt_pv == NULL)
4268                 cpu_invlpg((void *)va);
4269
4270         if (wired) {
4271                 if (pte_pv) {
4272                         atomic_add_long(&pte_pv->pv_pmap->pm_stats.wired_count,
4273                                         1);
4274                 } else {
4275                         atomic_add_long(&pmap->pm_stats.wired_count, 1);
4276                 }
4277         }
4278         if (newpte & pmap->pmap_bits[PG_RW_IDX])
4279                 vm_page_flag_set(m, PG_WRITEABLE);
4280
4281         /*
4282          * Unmanaged pages need manual resident_count tracking.
4283          */
4284         if (pte_pv == NULL && pt_pv)
4285                 atomic_add_long(&pt_pv->pv_pmap->pm_stats.resident_count, 1);
4286
4287         /*
4288          * Cleanup
4289          */
4290         if ((prot & VM_PROT_NOSYNC) == 0 || pte_pv == NULL)
4291                 pmap_inval_done(&info);
4292 done:
4293         KKASSERT((newpte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0 ||
4294                  (m->flags & PG_MAPPED));
4295
4296         /*
4297          * Cleanup the pv entry, allowing other accessors.
4298          */
4299         if (pte_pv)
4300                 pv_put(pte_pv);
4301         if (pt_pv)
4302                 pv_put(pt_pv);
4303 }
4304
4305 /*
4306  * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
4307  * This code also assumes that the pmap has no pre-existing entry for this
4308  * VA.
4309  *
4310  * This code currently may only be used on user pmaps, not kernel_pmap.
4311  */
4312 void
4313 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
4314 {
4315         pmap_enter(pmap, va, m, VM_PROT_READ, FALSE, NULL);
4316 }
4317
4318 /*
4319  * Make a temporary mapping for a physical address.  This is only intended
4320  * to be used for panic dumps.
4321  *
4322  * The caller is responsible for calling smp_invltlb().
4323  */
4324 void *
4325 pmap_kenter_temporary(vm_paddr_t pa, long i)
4326 {
4327         pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
4328         return ((void *)crashdumpmap);
4329 }
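
/*
 * Illustrative sketch only (a hypothetical consumer, not part of this
 * file): a crash-dump style loop maps a physical window one page at a
 * time through pmap_kenter_temporary() and then flushes the TLB itself,
 * since this routine leaves smp_invltlb() to the caller.
 */
#if 0
static void
example_dump_window(vm_paddr_t pa, long npages)
{
	char *base = NULL;
	long i;

	for (i = 0; i < npages; ++i)
		base = pmap_kenter_temporary(pa + i * PAGE_SIZE, i);
	smp_invltlb();		/* caller's responsibility */
	/* 'base' now points at crashdumpmap; the window spans npages pages */
}
#endif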
4330
4331 #define MAX_INIT_PT (96)
4332
4333 /*
4334  * This routine preloads the ptes for a given object into the specified pmap.
4335  * This eliminates the blast of soft faults on process startup and
4336  * immediately after an mmap.
4337  */
4338 static int pmap_object_init_pt_callback(vm_page_t p, void *data);
4339
4340 void
4341 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
4342                     vm_object_t object, vm_pindex_t pindex,
4343                     vm_size_t size, int limit)
4344 {
4345         struct rb_vm_page_scan_info info;
4346         struct lwp *lp;
4347         vm_size_t psize;
4348
4349         /*
4350          * We can't preinit if read access isn't set or there is no pmap
4351          * or object.
4352          */
4353         if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
4354                 return;
4355
4356         /*
4357          * We can't preinit if the pmap is not the current pmap
4358          */
4359         lp = curthread->td_lwp;
4360         if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
4361                 return;
4362
4363         /*
4364          * Misc additional checks
4365          */
4366         psize = x86_64_btop(size);
4367
4368         if ((object->type != OBJT_VNODE) ||
4369                 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
4370                         (object->resident_page_count > MAX_INIT_PT))) {
4371                 return;
4372         }
4373
4374         if (pindex + psize > object->size) {
4375                 if (object->size < pindex)
4376                         return;
4377                 psize = object->size - pindex;
4378         }
4379
4380         if (psize == 0)
4381                 return;
4382
4383         /*
4384          * If everything is segment-aligned do not pre-init here.  Instead
4385          * allow the normal vm_fault path to pass a segment hint to
4386          * pmap_enter() which will then use an object-referenced shared
4387          * page table page.
4388          */
4389         if ((addr & SEG_MASK) == 0 &&
4390             (ctob(psize) & SEG_MASK) == 0 &&
4391             (ctob(pindex) & SEG_MASK) == 0) {
4392                 return;
4393         }
4394
4395         /*
4396          * Use a red-black scan to traverse the requested range and load
4397          * any valid pages found into the pmap.
4398          *
4399          * We cannot safely scan the object's memq without holding the
4400          * object token.
4401          */
4402         info.start_pindex = pindex;
4403         info.end_pindex = pindex + psize - 1;
4404         info.limit = limit;
4405         info.mpte = NULL;
4406         info.addr = addr;
4407         info.pmap = pmap;
4408
4409         vm_object_hold_shared(object);
4410         vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
4411                                 pmap_object_init_pt_callback, &info);
4412         vm_object_drop(object);
4413 }
4414
4415 static
4416 int
4417 pmap_object_init_pt_callback(vm_page_t p, void *data)
4418 {
4419         struct rb_vm_page_scan_info *info = data;
4420         vm_pindex_t rel_index;
4421
4422         /*
4423          * Don't let a madvise-triggered prefault consume the last of our
4424          * free pages by allocating pv entries for them.
4425          */
4426         if ((info->limit & MAP_PREFAULT_MADVISE) &&
4427                 vmstats.v_free_count < vmstats.v_free_reserved) {
4428                     return(-1);
4429         }
4430
4431         /*
4432          * Ignore list markers and ignore pages we cannot instantly
4433          * busy (while holding the object token).
4434          */
4435         if (p->flags & PG_MARKER)
4436                 return 0;
4437         if (vm_page_busy_try(p, TRUE))
4438                 return 0;
4439         if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
4440             (p->flags & PG_FICTITIOUS) == 0) {
4441                 if ((p->queue - p->pc) == PQ_CACHE)
4442                         vm_page_deactivate(p);
4443                 rel_index = p->pindex - info->start_pindex;
4444                 pmap_enter_quick(info->pmap,
4445                                  info->addr + x86_64_ptob(rel_index), p);
4446         }
4447         vm_page_wakeup(p);
4448         lwkt_yield();
4449         return(0);
4450 }
4451
4452 /*
4453  * Return TRUE if the pmap is in shape to trivially pre-fault the specified
4454  * address.
4455  *
4456  * Returns FALSE if it would be non-trivial or if a pte is already loaded
4457  * into the slot.
4458  *
4459  * XXX This is safe only because page table pages are not freed.
4460  */
4461 int
4462 pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
4463 {
4464         pt_entry_t *pte;
4465
4466         /*spin_lock(&pmap->pm_spin);*/
4467         if ((pte = pmap_pte(pmap, addr)) != NULL) {
4468                 if (*pte & pmap->pmap_bits[PG_V_IDX]) {
4469                         /*spin_unlock(&pmap->pm_spin);*/
4470                         return FALSE;
4471                 }
4472         }
4473         /*spin_unlock(&pmap->pm_spin);*/
4474         return TRUE;
4475 }
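
/*
 * Illustrative sketch only (the real prefault logic lives in the VM fault
 * path, not in this file): a caller is expected to test pmap_prefault_ok()
 * before spending effort looking up and entering a nearby page.  The
 * helper name below is hypothetical.
 */
#if 0
	if (pmap_prefault_ok(pmap, addr)) {
		vm_page_t m = example_lookup_resident_page(object, pindex);

		if (m && vm_page_busy_try(m, TRUE) == 0) {
			pmap_enter_quick(pmap, addr, m);
			vm_page_wakeup(m);
		}
	}
#endif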
4476
4477 /*
4478  * Change the wiring attribute for a pmap/va pair.  The mapping must already
4479  * exist in the pmap.  The mapping may or may not be managed.
4480  */
4481 void
4482 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired,
4483                    vm_map_entry_t entry)
4484 {
4485         pt_entry_t *ptep;
4486         pv_entry_t pv;
4487
4488         if (pmap == NULL)
4489                 return;
4490         lwkt_gettoken(&pmap->pm_token);
4491         pv = pmap_allocpte_seg(pmap, pmap_pt_pindex(va), NULL, entry, va);
4492         ptep = pv_pte_lookup(pv, pmap_pte_index(va));
4493
4494         if (wired && !pmap_pte_w(pmap, ptep))
4495                 atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, 1);
4496         else if (!wired && pmap_pte_w(pmap, ptep))
4497                 atomic_add_long(&pv->pv_pmap->pm_stats.wired_count, -1);
4498
4499         /*
4500          * Wiring is not a hardware characteristic so there is no need to
4501          * invalidate TLB.  However, in an SMP environment we must use
4502          * a locked bus cycle to update the pte (if we are not using 
4503          * the pmap_inval_*() API that is)... it's ok to do this for simple
4504          * wiring changes.
4505          */
4506         if (wired)
4507                 atomic_set_long(ptep, pmap->pmap_bits[PG_W_IDX]);
4508         else
4509                 atomic_clear_long(ptep, pmap->pmap_bits[PG_W_IDX]);
4510         pv_put(pv);
4511         lwkt_reltoken(&pmap->pm_token);
4512 }
4513
4514
4515
4516 /*
4517  * Copy the range specified by src_addr/len from the source map to
4518  * the range dst_addr/len in the destination map.
4519  *
4520  * This routine is only advisory and need not do anything.
4521  */
4522 void
4523 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 
4524           vm_size_t len, vm_offset_t src_addr)
4525 {
4526 }       
4527
4528 /*
4529  * pmap_zero_page:
4530  *
4531  *      Zero the specified physical page.
4532  *
4533  *      This function may be called from an interrupt and no locking is
4534  *      required.
4535  */
4536 void
4537 pmap_zero_page(vm_paddr_t phys)
4538 {
4539         vm_offset_t va = PHYS_TO_DMAP(phys);
4540
4541         pagezero((void *)va);
4542 }
4543
4544 /*
4545  * pmap_page_assertzero:
4546  *
4547  *      Assert that a page is empty, panic if it isn't.
4548  */
4549 void
4550 pmap_page_assertzero(vm_paddr_t phys)
4551 {
4552         vm_offset_t va = PHYS_TO_DMAP(phys);
4553         size_t i;
4554
4555         for (i = 0; i < PAGE_SIZE; i += sizeof(long)) {
4556                 if (*(long *)((char *)va + i) != 0) {
4557                         panic("pmap_page_assertzero() @ %p not zero!",
4558                               (void *)(intptr_t)va);
4559                 }
4560         }
4561 }
4562
4563 /*
4564  * pmap_zero_page_area:
4565  *
4566  *      Zero part of a physical page by mapping it into memory and clearing
4567  *      its contents with bzero.
4568  *
4569  *      off and size may not cover an area beyond a single hardware page.
4570  */
4571 void
4572 pmap_zero_page_area(vm_paddr_t phys, int off, int size)
4573 {
4574         vm_offset_t virt = PHYS_TO_DMAP(phys);
4575
4576         bzero((char *)virt + off, size);
4577 }
4578
4579 /*
4580  * pmap_copy_page:
4581  *
4582  *      Copy the physical page from the source PA to the target PA.
4583  *      This function may be called from an interrupt.  No locking
4584  *      is required.
4585  */
4586 void
4587 pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
4588 {
4589         vm_offset_t src_virt, dst_virt;
4590
4591         src_virt = PHYS_TO_DMAP(src);
4592         dst_virt = PHYS_TO_DMAP(dst);
4593         bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
4594 }
4595
4596 /*
4597  * pmap_copy_page_frag:
4598  *
4599  *      Copy part of a physical page from the source PA to the target PA.
4600  *      This function may be called from an interrupt.  No locking
4601  *      is required.
4602  */
4603 void
4604 pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
4605 {
4606         vm_offset_t src_virt, dst_virt;
4607
4608         src_virt = PHYS_TO_DMAP(src);
4609         dst_virt = PHYS_TO_DMAP(dst);
4610
4611         bcopy((char *)src_virt + (src & PAGE_MASK),
4612               (char *)dst_virt + (dst & PAGE_MASK),
4613               bytes);
4614 }
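
/*
 * Illustrative sketch only: the zero/copy helpers above all run through
 * the DMAP, so a partial-page copy-and-clear can be composed directly
 * from them.  'src_pa' and 'dst_pa' are hypothetical page-aligned
 * physical addresses.
 */
#if 0
	/* copy the first 'bytes' of src_pa into dst_pa, zero the remainder */
	pmap_copy_page_frag(src_pa, dst_pa, bytes);
	pmap_zero_page_area(dst_pa, bytes, PAGE_SIZE - bytes);
#endif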
4615
4616 /*
4617  * Returns true if the pmap's pv is one of the first 16 pvs linked to from
4618  * this page.  This count may be changed upwards or downwards in the future;
4619  * it is only necessary that true be returned for a small subset of pmaps
4620  * for proper page aging.
4621  */
4622 boolean_t
4623 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4624 {
4625         pv_entry_t pv;
4626         int loops = 0;
4627
4628         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4629                 return FALSE;
4630
4631         vm_page_spin_lock(m);
4632         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4633                 if (pv->pv_pmap == pmap) {
4634                         vm_page_spin_unlock(m);
4635                         return TRUE;
4636                 }
4637                 loops++;
4638                 if (loops >= 16)
4639                         break;
4640         }
4641         vm_page_spin_unlock(m);
4642         return (FALSE);
4643 }
4644
4645 /*
4646  * Remove all pages from the specified address space; this aids process
4647  * exit speed.  Also, this code may be special cased for the current
4648  * process only.
4649  */
4650 void
4651 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4652 {
4653         pmap_remove_noinval(pmap, sva, eva);
4654         cpu_invltlb();
4655 }
4656
4657 /*
4658  * pmap_testbit tests bits in ptes.  Note that the testbit/clearbit
4659  * routines are inline, so a lot of things compile-time evaluate.
4660  */
4661 static
4662 boolean_t
4663 pmap_testbit(vm_page_t m, int bit)
4664 {
4665         pv_entry_t pv;
4666         pt_entry_t *pte;
4667         pmap_t pmap;
4668
4669         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4670                 return FALSE;
4671
4672         if (TAILQ_FIRST(&m->md.pv_list) == NULL)
4673                 return FALSE;
4674         vm_page_spin_lock(m);
4675         if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
4676                 vm_page_spin_unlock(m);
4677                 return FALSE;
4678         }
4679
4680         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4681
4682 #if defined(PMAP_DIAGNOSTIC)
4683                 if (pv->pv_pmap == NULL) {
4684                         kprintf("Null pmap (tb) at pindex: %"PRIu64"\n",
4685                             pv->pv_pindex);
4686                         continue;
4687                 }
4688 #endif
4689                 pmap = pv->pv_pmap;
4690
4691                 /*
4692                  * If the bit being tested is the modified bit, then
4693                  * mark clean_map and ptes as never
4694                  * modified.
4695                  *
4696                  * WARNING!  Because we do not lock the pv, *pte can be in a
4697                  *           state of flux.  Despite this the value of *pte
4698                  *           will still be related to the vm_page in some way
4699                  *           because the pv cannot be destroyed as long as we
4700                  *           hold the vm_page spin lock.
4701                  */
4702                 if (bit == PG_A_IDX || bit == PG_M_IDX) {
4703                                 //& (pmap->pmap_bits[PG_A_IDX] | pmap->pmap_bits[PG_M_IDX])) {
4704                         if (!pmap_track_modified(pv->pv_pindex))
4705                                 continue;
4706                 }
4707
4708                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4709                 if (*pte & pmap->pmap_bits[bit]) {
4710                         vm_page_spin_unlock(m);
4711                         return TRUE;
4712                 }
4713         }
4714         vm_page_spin_unlock(m);
4715         return (FALSE);
4716 }
4717
4718 /*
4719  * This routine is used to modify bits in ptes.  Only one bit should be
4720  * specified.  PG_RW requires special handling.
4721  *
4722  * Caller must NOT hold any spin locks
4723  */
4724 static __inline
4725 void
4726 pmap_clearbit(vm_page_t m, int bit_index)
4727 {
4728         struct pmap_inval_info info;
4729         pv_entry_t pv;
4730         pt_entry_t *pte;
4731         pt_entry_t pbits;
4732         pmap_t pmap;
4733
4734         if (bit_index == PG_RW_IDX)
4735                 vm_page_flag_clear(m, PG_WRITEABLE);
4736         if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
4737                 return;
4738         }
4739
4740         /*
4741          * PG_M or PG_A case
4742          *
4743          * Loop over all current mappings, setting/clearing as appropriate.
4744          * If setting RO, do we need to clear the VAC?
4745          *
4746          * NOTE: When clearing PG_M we could also (not implemented) drop
4747          *       through to the PG_RW code and clear PG_RW too, forcing
4748          *       a fault on write to redetect PG_M for virtual kernels, but
4749          *       it isn't necessary since virtual kernels invalidate the
4750          *       pte when they clear the VPTE_M bit in their virtual page
4751          *       tables.
4752          *
4753          * NOTE: Does not re-dirty the page when clearing only PG_M.
4754          *
4755          * NOTE: Because we do not lock the pv, *pte can be in a state of
4756          *       flux.  Despite this the value of *pte is still somewhat
4757          *       related while we hold the vm_page spin lock.
4758          *
4759          *       *pte can be zero due to this race.  Since we are clearing
4760          *       bits we basically do no harm when this race occurs.
4761          */
4762         if (bit_index != PG_RW_IDX) {
4763                 vm_page_spin_lock(m);
4764                 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4765 #if defined(PMAP_DIAGNOSTIC)
4766                         if (pv->pv_pmap == NULL) {
4767                                 kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
4768                                     pv->pv_pindex);
4769                                 continue;
4770                         }
4771 #endif
4772                         pmap = pv->pv_pmap;
4773                         pte = pmap_pte_quick(pv->pv_pmap,
4774                                              pv->pv_pindex << PAGE_SHIFT);
4775                         pbits = *pte;
4776                         if (pbits & pmap->pmap_bits[bit_index])
4777                                 atomic_clear_long(pte, pmap->pmap_bits[bit_index]);
4778                 }
4779                 vm_page_spin_unlock(m);
4780                 return;
4781         }
4782
4783         /*
4784          * Clear PG_RW.  Also clears PG_M and marks the page dirty if PG_M
4785          * was set.
4786          */
4787         pmap_inval_init(&info);
4788
4789 restart:
4790         vm_page_spin_lock(m);
4791         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4792                 /*
4793                  * don't write protect pager mappings
4794                  */
4795                 if (!pmap_track_modified(pv->pv_pindex))
4796                         continue;
4797
4798 #if defined(PMAP_DIAGNOSTIC)
4799                 if (pv->pv_pmap == NULL) {
4800                         kprintf("Null pmap (cb) at pindex: %"PRIu64"\n",
4801                             pv->pv_pindex);
4802                         continue;
4803                 }
4804 #endif
4805                 pmap = pv->pv_pmap;
4806                 /*
4807                  * Skip pages which do not have PG_RW set.
4808                  */
4809                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4810                 if ((*pte & pmap->pmap_bits[PG_RW_IDX]) == 0)
4811                         continue;
4812
4813                 /*
4814                  * Lock the PV
4815                  */
4816                 if (pv_hold_try(pv)) {
4817                         vm_page_spin_unlock(m);
4818                 } else {
4819                         vm_page_spin_unlock(m);
4820                         pv_lock(pv);    /* held, now do a blocking lock */
4821                 }
4822                 if (pv->pv_pmap != pmap || pv->pv_m != m) {
4823                         pv_put(pv);     /* and release */
4824                         goto restart;   /* anything could have happened */
4825                 }
4826                 pmap_inval_interlock(&info, pmap,
4827                                      (vm_offset_t)pv->pv_pindex << PAGE_SHIFT);
4828                 KKASSERT(pv->pv_pmap == pmap);
4829                 for (;;) {
4830                         pbits = *pte;
4831                         cpu_ccfence();
4832                         if (atomic_cmpset_long(pte, pbits, pbits &
4833                             ~(pmap->pmap_bits[PG_RW_IDX] |
4834                             pmap->pmap_bits[PG_M_IDX]))) {
4835                                 break;
4836                         }
4837                 }
4838                 pmap_inval_deinterlock(&info, pmap);
4839                 vm_page_spin_lock(m);
4840
4841                 /*
4842                  * If PG_M was found to be set while we were clearing PG_RW
4843                  * we also clear PG_M (done above) and mark the page dirty.
4844                  * Callers expect this behavior.
4845                  */
4846                 if (pbits & pmap->pmap_bits[PG_M_IDX])
4847                         vm_page_dirty(m);
4848                 pv_put(pv);
4849         }
4850         vm_page_spin_unlock(m);
4851         pmap_inval_done(&info);
4852 }
4853
4854 /*
4855  * Lower the permission for all mappings to a given page.
4856  *
4857  * Page must be busied by caller.  Because page is busied by caller this
4858  * should not be able to race a pmap_enter().
4859  */
4860 void
4861 pmap_page_protect(vm_page_t m, vm_prot_t prot)
4862 {
4863         /* JG NX support? */
4864         if ((prot & VM_PROT_WRITE) == 0) {
4865                 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
4866                         /*
4867                          * NOTE: pmap_clearbit(.. PG_RW) also clears
4868                          *       the PG_WRITEABLE flag in (m).
4869                          */
4870                         pmap_clearbit(m, PG_RW_IDX);
4871                 } else {
4872                         pmap_remove_all(m);
4873                 }
4874         }
4875 }
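
/*
 * Illustrative sketch only (the real callers are in the VM pageout and
 * object code): downgrading to read-only write-protects every mapping and,
 * via pmap_clearbit(), folds any pending PG_M into the vm_page's dirty
 * state, while VM_PROT_NONE removes the mappings entirely.
 */
#if 0
	/* 'm' must be busied by the caller */
	pmap_page_protect(m, VM_PROT_READ);	/* write-protect all mappings */
	pmap_page_protect(m, VM_PROT_NONE);	/* remove all mappings */
#endif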
4876
4877 vm_paddr_t
4878 pmap_phys_address(vm_pindex_t ppn)
4879 {
4880         return (x86_64_ptob(ppn));
4881 }
4882
4883 /*
4884  * Return a count of reference bits for a page, clearing those bits.
4885  * It is not necessary for every reference bit to be cleared, but it
4886  * is necessary that 0 only be returned when there are truly no
4887  * reference bits set.
4888  *
4889  * XXX: The exact number of bits to check and clear is a matter that
4890  * should be tested and standardized at some point in the future for
4891  * optimal aging of shared pages.
4892  *
4893  * This routine may not block.
4894  */
4895 int
4896 pmap_ts_referenced(vm_page_t m)
4897 {
4898         pv_entry_t pv;
4899         pt_entry_t *pte;
4900         pmap_t pmap;
4901         int rtval = 0;
4902
4903         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
4904                 return (rtval);
4905
4906         vm_page_spin_lock(m);
4907         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4908                 if (!pmap_track_modified(pv->pv_pindex))
4909                         continue;
4910                 pmap = pv->pv_pmap;
4911                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_pindex << PAGE_SHIFT);
4912                 if (pte && (*pte & pmap->pmap_bits[PG_A_IDX])) {
4913                         atomic_clear_long(pte, pmap->pmap_bits[PG_A_IDX]);
4914                         rtval++;
4915                         if (rtval > 4)
4916                                 break;
4917                 }
4918         }
4919         vm_page_spin_unlock(m);
4920         return (rtval);
4921 }
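
/*
 * Illustrative sketch only (the usual consumer is the pageout daemon's
 * page-aging scan): the return value is a bounded count of cleared
 * reference bits, suitable for bumping an activity counter.  'act_delta'
 * is a hypothetical local.
 */
#if 0
	int act_delta = pmap_ts_referenced(m);

	if (m->flags & PG_REFERENCED) {
		vm_page_flag_clear(m, PG_REFERENCED);
		++act_delta;
	}
#endif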
4922
4923 /*
4924  *      pmap_is_modified:
4925  *
4926  *      Return whether or not the specified physical page was modified
4927  *      in any physical maps.
4928  */
4929 boolean_t
4930 pmap_is_modified(vm_page_t m)
4931 {
4932         boolean_t res;
4933
4934         res = pmap_testbit(m, PG_M_IDX);
4935         return (res);
4936 }
4937
4938 /*
4939  *      Clear the modify bits on the specified physical page.
4940  */
4941 void
4942 pmap_clear_modify(vm_page_t m)
4943 {
4944         pmap_clearbit(m, PG_M_IDX);
4945 }
4946
4947 /*
4948  *      pmap_clear_reference:
4949  *
4950  *      Clear the reference bit on the specified physical page.
4951  */
4952 void
4953 pmap_clear_reference(vm_page_t m)
4954 {
4955         pmap_clearbit(m, PG_A_IDX);
4956 }
4957
4958 /*
4959  * Miscellaneous support routines follow
4960  */
4961
4962 static
4963 void
4964 i386_protection_init(void)
4965 {
4966         int *kp, prot;
4967
4968         /* JG NX support may go here; No VM_PROT_EXECUTE ==> set NX bit  */
4969         kp = protection_codes;
4970         for (prot = 0; prot < PROTECTION_CODES_SIZE; prot++) {
4971                 switch (prot) {
4972                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
4973                         /*
4974                          * Read access is also 0. There isn't any execute bit,
4975                          * so just make it readable.
4976                          */
4977                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
4978                 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
4979                 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
4980                         *kp++ = 0;
4981                         break;
4982                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
4983                 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
4984                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
4985                 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
4986                         *kp++ = pmap_bits_default[PG_RW_IDX];
4987                         break;
4988                 }
4989         }
4990 }
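
/*
 * Illustrative sketch only, assuming the table is consulted the obvious
 * way: protection_codes[] is indexed by a VM_PROT_* bit mask and yields
 * either 0 (read/execute/none) or the RW bit for pte construction.
 * 'pa', 'prot' and 'newpte' are hypothetical locals.
 */
#if 0
	pt_entry_t newpte;

	newpte = pa | pmap->pmap_bits[PG_V_IDX] |
		 protection_codes[prot & (VM_PROT_READ |
					  VM_PROT_WRITE |
					  VM_PROT_EXECUTE)];
#endif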
4991
4992 /*
4993  * Map a set of physical memory pages into the kernel virtual
4994  * address space. Return a pointer to where it is mapped. This
4995  * routine is intended to be used for mapping device memory,
4996  * NOT real memory.
4997  *
4998  * NOTE: We can't use pgeflag unless we invalidate the pages one at
4999  *       a time.
5000  *
5001  * NOTE: The PAT attributes {WRITE_BACK, WRITE_THROUGH, UNCACHED, UNCACHEABLE}
5002  *       work whether the cpu supports PAT or not.  The remaining PAT
5003  *       attributes {WRITE_PROTECTED, WRITE_COMBINING} only work if the cpu
5004  *       supports PAT.
5005  */
5006 void *
5007 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
5008 {
5009         return(pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
5010 }
5011
5012 void *
5013 pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
5014 {
5015         return(pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
5016 }
5017
5018 void *
5019 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
5020 {
5021         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
5022 }
5023
5024 /*
5025  * Map a set of physical memory pages into the kernel virtual
5026  * address space. Return a pointer to where it is mapped. This
5027  * routine is intended to be used for mapping device memory,
5028  * NOT real memory.
5029  */
5030 void *
5031 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
5032 {
5033         vm_offset_t va, tmpva, offset;
5034         pt_entry_t *pte;
5035         vm_size_t tmpsize;
5036
5037         offset = pa & PAGE_MASK;
5038         size = roundup(offset + size, PAGE_SIZE);
5039
5040         va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
5041         if (va == 0)
5042                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
5043
5044         pa = pa & ~PAGE_MASK;
5045         for (tmpva = va, tmpsize = size; tmpsize > 0;) {
5046                 pte = vtopte(tmpva);
5047                 *pte = pa |
5048                     kernel_pmap.pmap_bits[PG_RW_IDX] |
5049                     kernel_pmap.pmap_bits[PG_V_IDX] | /* pgeflag | */
5050                     kernel_pmap.pmap_cache_bits[mode];
5051                 tmpsize -= PAGE_SIZE;
5052                 tmpva += PAGE_SIZE;
5053                 pa += PAGE_SIZE;
5054         }
5055         pmap_invalidate_range(&kernel_pmap, va, va + size);
5056         pmap_invalidate_cache_range(va, va + size);
5057
5058         return ((void *)(va + offset));
5059 }
5060
5061 void
5062 pmap_unmapdev(vm_offset_t va, vm_size_t size)
5063 {
5064         vm_offset_t base, offset;
5065
5066         base = va & ~PAGE_MASK;
5067         offset = va & PAGE_MASK;
5068         size = roundup(offset + size, PAGE_SIZE);
5069         pmap_qremove(va, size >> PAGE_SHIFT);
5070         kmem_free(&kernel_map, base, size);
5071 }
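
/*
 * Illustrative sketch only (a hypothetical driver attach path): map a
 * device register window uncached, access it, and tear the mapping down
 * again with pmap_unmapdev().  The physical base and size are made up.
 */
#if 0
	void *regs;

	regs = pmap_mapdev_attr(0xfed00000, 0x1000, PAT_UNCACHEABLE);
	/* ... access the device registers through 'regs' ... */
	pmap_unmapdev((vm_offset_t)regs, 0x1000);
#endif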
5072
5073 /*
5074  * Sets the memory attribute for the specified page.
5075  */
5076 void
5077 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5078 {
5079
5080     m->pat_mode = ma;
5081
5082     /*
5083      * If "m" is a normal page, update its direct mapping.  This update
5084      * can be relied upon to perform any cache operations that are
5085      * required for data coherence.
5086      */
5087     if ((m->flags & PG_FICTITIOUS) == 0)
5088         pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 1, m->pat_mode);
5089 }
5090
5091 /*
5092  * Change the PAT attribute on an existing kernel memory map.  Caller
5093  * must ensure that the virtual memory in question is not accessed
5094  * during the adjustment.
5095  */
5096 void
5097 pmap_change_attr(vm_offset_t va, vm_size_t count, int mode)
5098 {
5099         pt_entry_t *pte;
5100         vm_offset_t base;
5101         int changed = 0;
5102
5103         if (va == 0)
5104                 panic("pmap_change_attr: va is NULL");
5105         base = trunc_page(va);
5106
5107         while (count) {
5108                 pte = vtopte(va);
5109                 *pte = (*pte & ~(pt_entry_t)(kernel_pmap.pmap_cache_mask)) |
5110                        kernel_pmap.pmap_cache_bits[mode];
5111                 --count;
5112                 va += PAGE_SIZE;
5113         }
5114
5115         changed = 1;    /* XXX: not optimal */
5116
5117         /*
5118          * Flush CPU caches if required to make sure no stale or
5119          * incorrectly-attributed data remains cached.
5120          */
5121         if (changed) {
5122                 pmap_invalidate_range(&kernel_pmap, base, va);
5123                 pmap_invalidate_cache_range(base, va);
5124         }
5125 }
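
/*
 * Illustrative sketch only: switch an existing kernel mapping of 'npages'
 * pages starting at 'va' to uncacheable, e.g. before handing the memory
 * to a non-cache-coherent device.  Per the comment above, the caller must
 * ensure the range is not accessed while the attribute is being changed.
 */
#if 0
	pmap_change_attr(va, npages, PAT_UNCACHEABLE);
#endif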
5126
5127 /*
5128  * perform the pmap work for mincore
5129  */
5130 int
5131 pmap_mincore(pmap_t pmap, vm_offset_t addr)
5132 {
5133         pt_entry_t *ptep, pte;
5134         vm_page_t m;
5135         int val = 0;
5136         
5137         lwkt_gettoken(&pmap->pm_token);
5138         ptep = pmap_pte(pmap, addr);
5139
5140         if (ptep && (pte = *ptep) != 0) {
5141                 vm_offset_t pa;
5142
5143                 val = MINCORE_INCORE;
5144                 if ((pte & pmap->pmap_bits[PG_MANAGED_IDX]) == 0)
5145                         goto done;
5146
5147                 pa = pte & PG_FRAME;
5148
5149                 if (pte & pmap->pmap_bits[PG_DEVICE_IDX])
5150                         m = NULL;
5151                 else
5152                         m = PHYS_TO_VM_PAGE(pa);
5153
5154                 /*
5155                  * Modified by us
5156                  */
5157                 if (pte & pmap->pmap_bits[PG_M_IDX])
5158                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
5159                 /*
5160                  * Modified by someone
5161                  */
5162                 else if (m && (m->dirty || pmap_is_modified(m)))
5163                         val |= MINCORE_MODIFIED_OTHER;
5164                 /*
5165                  * Referenced by us
5166                  */
5167                 if (pte & pmap->pmap_bits[PG_A_IDX])
5168                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
5169
5170                 /*
5171                  * Referenced by someone
5172                  */
5173                 else if (m && ((m->flags & PG_REFERENCED) ||
5174                                 pmap_ts_referenced(m))) {
5175                         val |= MINCORE_REFERENCED_OTHER;
5176                         vm_page_flag_set(m, PG_REFERENCED);
5177                 }
5178         } 
5179 done:
5180         lwkt_reltoken(&pmap->pm_token);
5181
5182         return val;
5183 }
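
/*
 * Illustrative sketch only (the mincore(2) system call layer is the usual
 * consumer): the return value is a mask of MINCORE_* bits describing the
 * single page containing 'addr'.
 */
#if 0
	int val = pmap_mincore(pmap, addr);

	if (val & MINCORE_INCORE) {
		/* the page is resident */
		if (val & (MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER))
			;	/* dirty in this pmap, another pmap, or the vm_page */
	}
#endif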
5184
5185 /*
5186  * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
5187  * vmspace will be ref'd and the old one will be deref'd.
5188  *
5189  * The vmspace for all lwps associated with the process will be adjusted
5190  * and cr3 will be reloaded if any lwp is the current lwp.
5191  *
5192  * The process must hold the vmspace->vm_map.token for oldvm and newvm
5193  */
5194 void
5195 pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
5196 {
5197         struct vmspace *oldvm;
5198         struct lwp *lp;
5199
5200         oldvm = p->p_vmspace;
5201         if (oldvm != newvm) {
5202                 if (adjrefs)
5203                         vmspace_ref(newvm);
5204                 p->p_vmspace = newvm;
5205                 KKASSERT(p->p_nthreads == 1);
5206                 lp = RB_ROOT(&p->p_lwp_tree);
5207                 pmap_setlwpvm(lp, newvm);
5208                 if (adjrefs)
5209                         vmspace_rel(oldvm);
5210         }
5211 }
5212
5213 /*
5214  * Set the vmspace for a LWP.  The vmspace is almost universally set the
5215  * same as the process vmspace, but virtual kernels need to swap out contexts
5216  * on a per-lwp basis.
5217  *
5218  * Caller does not necessarily hold any vmspace tokens.  Caller must control
5219  * the lwp (typically be in the context of the lwp).  We use a critical
5220  * section to protect against statclock and hardclock (statistics collection).
5221  */
5222 void
5223 pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
5224 {
5225         struct vmspace *oldvm;
5226         struct pmap *pmap;
5227
5228         oldvm = lp->lwp_vmspace;
5229
5230         if (oldvm != newvm) {
5231                 crit_enter();
5232                 lp->lwp_vmspace = newvm;
5233                 if (curthread->td_lwp == lp) {
5234                         pmap = vmspace_pmap(newvm);
5235                         ATOMIC_CPUMASK_ORBIT(pmap->pm_active, mycpu->gd_cpuid);
5236                         if (pmap->pm_active_lock & CPULOCK_EXCL)
5237                                 pmap_interlock_wait(newvm);
5238 #if defined(SWTCH_OPTIM_STATS)
5239                         tlb_flush_count++;
5240 #endif
5241                         if (pmap->pmap_bits[TYPE_IDX] == REGULAR_PMAP) {
5242                                 curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
5243                         } else if (pmap->pmap_bits[TYPE_IDX] == EPT_PMAP) {
5244                                 curthread->td_pcb->pcb_cr3 = KPML4phys;
5245                         } else {
5246                                 panic("pmap_setlwpvm: unknown pmap type");
5247                         }
5248                         load_cr3(curthread->td_pcb->pcb_cr3);
5249                         pmap = vmspace_pmap(oldvm);
5250                         ATOMIC_CPUMASK_NANDBIT(pmap->pm_active,
5251                                                mycpu->gd_cpuid);
5252                 }
5253                 crit_exit();
5254         }
5255 }
5256
5257 /*
5258  * Called when switching to a locked pmap, used to interlock against pmaps
5259  * undergoing modifications to prevent us from activating the MMU for the
5260  * target pmap until all such modifications have completed.  We have to do
5261  * this because the thread making the modifications has already set up its
5262  * SMP synchronization mask.
5263  *
5264  * This function cannot sleep!
5265  *
5266  * No requirements.
5267  */
5268 void
5269 pmap_interlock_wait(struct vmspace *vm)
5270 {
5271         struct pmap *pmap = &vm->vm_pmap;
5272
5273         if (pmap->pm_active_lock & CPULOCK_EXCL) {
5274                 crit_enter();
5275                 KKASSERT(curthread->td_critcount >= 2);
5276                 DEBUG_PUSH_INFO("pmap_interlock_wait");
5277                 while (pmap->pm_active_lock & CPULOCK_EXCL) {
5278                         cpu_ccfence();
5279                         lwkt_process_ipiq();
5280                 }
5281                 DEBUG_POP_INFO();
5282                 crit_exit();
5283         }
5284 }
5285
5286 vm_offset_t
5287 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
5288 {
5289
5290         if ((obj == NULL) || (size < NBPDR) ||
5291             ((obj->type != OBJT_DEVICE) && (obj->type != OBJT_MGTDEVICE))) {
5292                 return addr;
5293         }
5294
5295         addr = roundup2(addr, NBPDR);
5296         return addr;
5297 }
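
/*
 * Worked example (illustrative only): for a device object of at least
 * NBPDR (2MB) in size, a requested hint of, say, 0x7f0000001000 is
 * rounded up to the next NBPDR boundary, 0x7f0000200000, so the mapping
 * starts 2MB-aligned.
 */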
5298
5299 /*
5300  * Used by kmalloc/kfree, page already exists at va
5301  */
5302 vm_page_t
5303 pmap_kvtom(vm_offset_t va)
5304 {
5305         pt_entry_t *ptep = vtopte(va);
5306
5307         KKASSERT((*ptep & kernel_pmap.pmap_bits[PG_DEVICE_IDX]) == 0);
5308         return(PHYS_TO_VM_PAGE(*ptep & PG_FRAME));
5309 }
5310
5311 /*
5312  * Initialize machine-specific shared page directory support.  This
5313  * is executed when a VM object is created.
5314  */
5315 void
5316 pmap_object_init(vm_object_t object)
5317 {
5318         object->md.pmap_rw = NULL;
5319         object->md.pmap_ro = NULL;
5320 }
5321
5322 /*
5323  * Clean up machine-specific shared page directory support.  This
5324  * is executed when a VM object is destroyed.
5325  */
5326 void
5327 pmap_object_free(vm_object_t object)
5328 {
5329         pmap_t pmap;
5330
5331         if ((pmap = object->md.pmap_rw) != NULL) {
5332                 object->md.pmap_rw = NULL;
5333                 pmap_remove_noinval(pmap,
5334                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
5335                 CPUMASK_ASSZERO(pmap->pm_active);
5336                 pmap_release(pmap);
5337                 pmap_puninit(pmap);
5338                 kfree(pmap, M_OBJPMAP);
5339         }
5340         if ((pmap = object->md.pmap_ro) != NULL) {
5341                 object->md.pmap_ro = NULL;
5342                 pmap_remove_noinval(pmap,
5343                                   VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
5344                 CPUMASK_ASSZERO(pmap->pm_active);
5345                 pmap_release(pmap);
5346                 pmap_puninit(pmap);
5347                 kfree(pmap, M_OBJPMAP);
5348         }
5349 }