amd64: Properly size an array for SMP.
[dragonfly.git] / sys / platform / pc64 / amd64 / pmap.c
d7f50089 1/*
d7f50089 2 * Copyright (c) 1991 Regents of the University of California.
d7f50089 3 * Copyright (c) 1994 John S. Dyson
d7f50089 4 * Copyright (c) 1994 David Greenman
48ffc236
JG
5 * Copyright (c) 2003 Peter Wemm
6 * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7 * Copyright (c) 2008, 2009 The DragonFly Project.
8 * Copyright (c) 2008, 2009 Jordan Gordeev.
d7f50089 9 * All rights reserved.
c8fe38ae
MD
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
d7f50089
YY
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
d7f50089
YY
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
c8fe38ae
MD
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
d7f50089 41 * SUCH DAMAGE.
c8fe38ae
MD
42 *
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
d7f50089 44 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
c8fe38ae 45 * $DragonFly: src/sys/platform/pc64/amd64/pmap.c,v 1.3 2008/08/29 17:07:10 dillon Exp $
d7f50089 46 */
c8fe38ae 47
d7f50089 48/*
c8fe38ae
MD
49 * Manages physical address maps.
50 *
51 * In addition to hardware address maps, this
52 * module is called upon to provide software-use-only
53 * maps which may or may not be stored in the same
54 * form as hardware maps. These pseudo-maps are
55 * used to store intermediate results from copy
56 * operations to and from address spaces.
57 *
58 * Since the information managed by this module is
59 * also stored by the logical address mapping module,
60 * this module may throw away valid virtual-to-physical
61 * mappings at almost any time. However, invalidations
62 * of virtual-to-physical mappings must be done as
63 * requested.
64 *
65 * In order to cope with hardware architectures which
66 * make virtual-to-physical map invalidates expensive,
67 * this module may delay invalidate or reduced protection
68 * operations until such time as they are actually
69 * necessary. This module is given full information as
70 * to which processors are currently using which maps,
71 * and to when physical maps must be made correct.
72 */
73
74#if JG
75#include "opt_disable_pse.h"
76#include "opt_pmap.h"
77#endif
78#include "opt_msgbuf.h"
d7f50089 79
c8fe38ae 80#include <sys/param.h>
d7f50089
YY
81#include <sys/systm.h>
82#include <sys/kernel.h>
d7f50089 83#include <sys/proc.h>
c8fe38ae
MD
84#include <sys/msgbuf.h>
85#include <sys/vmmeter.h>
86#include <sys/mman.h>
d7f50089 87
c8fe38ae
MD
88#include <vm/vm.h>
89#include <vm/vm_param.h>
90#include <sys/sysctl.h>
91#include <sys/lock.h>
d7f50089 92#include <vm/vm_kern.h>
c8fe38ae
MD
93#include <vm/vm_page.h>
94#include <vm/vm_map.h>
d7f50089 95#include <vm/vm_object.h>
c8fe38ae 96#include <vm/vm_extern.h>
d7f50089 97#include <vm/vm_pageout.h>
c8fe38ae
MD
98#include <vm/vm_pager.h>
99#include <vm/vm_zone.h>
100
101#include <sys/user.h>
102#include <sys/thread2.h>
103#include <sys/sysref2.h>
d7f50089 104
c8fe38ae 105#include <machine/cputypes.h>
d7f50089 106#include <machine/md_var.h>
c8fe38ae
MD
107#include <machine/specialreg.h>
108#include <machine/smp.h>
109#include <machine_base/apic/apicreg.h>
d7f50089 110#include <machine/globaldata.h>
c8fe38ae
MD
111#include <machine/pmap.h>
112#include <machine/pmap_inval.h>
113
48ffc236
JG
114#include <ddb/ddb.h>
115
c8fe38ae
MD
116#define PMAP_KEEP_PDIRS
117#ifndef PMAP_SHPGPERPROC
118#define PMAP_SHPGPERPROC 200
119#endif
120
121#if defined(DIAGNOSTIC)
122#define PMAP_DIAGNOSTIC
123#endif
124
125#define MINPV 2048
126
127#if !defined(PMAP_DIAGNOSTIC)
128#define PMAP_INLINE __inline
129#else
130#define PMAP_INLINE
131#endif
132
48ffc236
JG
133/* JGPMAP32 */
134#define PTDPTDI 0
135
136#define READY0
137#define READY1
138#define READY2
139#define READY3
140#define READY4
141#define READY5
142
c8fe38ae
MD
143/*
144 * Get PDEs and PTEs for user/kernel address space
145 */
48ffc236 146#if JGPMAP32
c8fe38ae 147#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
48ffc236
JG
148#endif
149static pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va);
c8fe38ae
MD
150#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
151
152#define pmap_pde_v(pte) ((*(pd_entry_t *)pte & PG_V) != 0)
153#define pmap_pte_w(pte) ((*(pt_entry_t *)pte & PG_W) != 0)
154#define pmap_pte_m(pte) ((*(pt_entry_t *)pte & PG_M) != 0)
155#define pmap_pte_u(pte) ((*(pt_entry_t *)pte & PG_A) != 0)
156#define pmap_pte_v(pte) ((*(pt_entry_t *)pte & PG_V) != 0)
157
158
159/*
160 * Given a map and a machine independent protection code,
161 * convert to a vax protection code.
162 */
163#define pte_prot(m, p) \
164 (protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
165static int protection_codes[8];
d7f50089
YY
166
167struct pmap kernel_pmap;
c8fe38ae 168static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
d7f50089 169
c8fe38ae
MD
170vm_paddr_t avail_start; /* PA of first available physical page */
171vm_paddr_t avail_end; /* PA of last available physical page */
172vm_offset_t virtual_start; /* VA of first avail page (after kernel bss) */
173vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
174vm_offset_t KvaStart; /* VA start of KVA space */
175vm_offset_t KvaEnd; /* VA end of KVA space (non-inclusive) */
176vm_offset_t KvaSize; /* max size of kernel virtual address space */
177static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
178static int pgeflag; /* PG_G or-in */
179static int pseflag; /* PG_PS or-in */
d7f50089 180
c8fe38ae
MD
181static vm_object_t kptobj;
182
48ffc236
JG
183static int ndmpdp;
184static vm_paddr_t dmaplimit;
c8fe38ae
MD
185static int nkpt;
186vm_offset_t kernel_vm_end;
d7f50089 187
48ffc236
JG
188static uint64_t KPDphys; /* phys addr of kernel level 2 */
189uint64_t KPDPphys; /* phys addr of kernel level 3 */
190uint64_t KPML4phys; /* phys addr of kernel level 4 */
191
192static uint64_t DMPDphys; /* phys addr of direct mapped level 2 */
193static uint64_t DMPDPphys; /* phys addr of direct mapped level 3 */
194
d7f50089 195/*
c8fe38ae 196 * Data for the pv entry allocation mechanism
d7f50089 197 */
c8fe38ae
MD
198static vm_zone_t pvzone;
199static struct vm_zone pvzone_store;
200static struct vm_object pvzone_obj;
201static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
202static int pmap_pagedaemon_waken = 0;
203static struct pv_entry *pvinit;
d7f50089
YY
204
205/*
c8fe38ae 206 * All those kernel PT submaps that BSD is so fond of
d7f50089 207 */
c8fe38ae
MD
208pt_entry_t *CMAP1 = 0, *ptmmap;
209caddr_t CADDR1 = 0, ptvmmap = 0;
210static pt_entry_t *msgbufmap;
211struct msgbuf *msgbufp=0;
d7f50089 212
c8fe38ae
MD
213/*
214 * Crashdump maps.
d7f50089 215 */
c8fe38ae
MD
216static pt_entry_t *pt_crashdumpmap;
217static caddr_t crashdumpmap;
218
219extern uint64_t KPTphys;
220extern pt_entry_t *SMPpt;
221extern uint64_t SMPptpa;
222
223#define DISABLE_PSE
224
225static PMAP_INLINE void free_pv_entry (pv_entry_t pv);
c8fe38ae
MD
226static pv_entry_t get_pv_entry (void);
227static void i386_protection_init (void);
228static __inline void pmap_clearbit (vm_page_t m, int bit);
229
230static void pmap_remove_all (vm_page_t m);
231static void pmap_enter_quick (pmap_t pmap, vm_offset_t va, vm_page_t m);
232static int pmap_remove_pte (struct pmap *pmap, pt_entry_t *ptq,
233 vm_offset_t sva, pmap_inval_info_t info);
234static void pmap_remove_page (struct pmap *pmap,
235 vm_offset_t va, pmap_inval_info_t info);
236static int pmap_remove_entry (struct pmap *pmap, vm_page_t m,
237 vm_offset_t va, pmap_inval_info_t info);
238static boolean_t pmap_testbit (vm_page_t m, int bit);
239static void pmap_insert_entry (pmap_t pmap, vm_offset_t va,
240 vm_page_t mpte, vm_page_t m);
241
242static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);
243
244static int pmap_release_free_page (pmap_t pmap, vm_page_t p);
245static vm_page_t _pmap_allocpte (pmap_t pmap, vm_pindex_t ptepindex);
246static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
247static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
48ffc236
JG
248static int pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
249 pmap_inval_info_t info);
c8fe38ae
MD
250static int pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
251static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
252
253static unsigned pdir4mb;
d7f50089
YY
254
255/*
c8fe38ae 256 * Move the kernel virtual free pointer to the next
f9cc0f15
JG
257 * 2MB. This is used to help improve performance
258 * by using a large (2MB) page for much of the kernel
c8fe38ae 259 * (.text, .data, .bss)
d7f50089 260 */
c8fe38ae
MD
261static vm_offset_t
262pmap_kmem_choose(vm_offset_t addr)
f9cc0f15 263READY2
d7f50089 264{
c8fe38ae 265 vm_offset_t newaddr = addr;
f9cc0f15
JG
266
267 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
c8fe38ae 268 return newaddr;
d7f50089
YY
269}
270
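/*
 * Illustrative sketch (added commentary, not part of the original file):
 * pmap_kmem_choose() above rounds the kernel virtual free pointer up to
 * the next 2MB boundary with the usual power-of-two round-up idiom.  The
 * stand-alone demonstration below assumes NBPDR = 2MB (1 << 21), the
 * customary amd64 large-page size.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

#define DEMO_NBPDR	(1UL << 21)	/* assumed 2MB large-page size */

static uint64_t
demo_kmem_choose(uint64_t addr)
{
	/* round up to the next NBPDR boundary (no-op if already aligned) */
	return (addr + (DEMO_NBPDR - 1)) & ~(DEMO_NBPDR - 1);
}

int
main(void)
{
	printf("%#lx -> %#lx\n", 0x200000UL,
	    (unsigned long)demo_kmem_choose(0x200000UL));
	printf("%#lx -> %#lx\n", 0x200001UL,
	    (unsigned long)demo_kmem_choose(0x200001UL));
	return 0;
}
#endif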
d7f50089 271/*
c8fe38ae 272 * pmap_pte_quick:
d7f50089 273 *
c8fe38ae
MD
274 * Super fast pmap_pte routine best used when scanning the pv lists.
275 * This eliminates many coarse-grained invltlb calls. Note that many of
276 * the pv list scans are across different pmaps and it is very wasteful
277 * to do an entire invltlb when checking a single mapping.
278 *
279 * Should only be called while in a critical section.
280 */
48ffc236
JG
281static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
282
c8fe38ae
MD
283static pt_entry_t *
284pmap_pte_quick(pmap_t pmap, vm_offset_t va)
48ffc236 285READY0
c8fe38ae 286{
48ffc236
JG
287 return pmap_pte(pmap, va);
288}
289
290/* Return a non-clipped PD index for a given VA */
291static __inline vm_pindex_t
292pmap_pde_pindex(vm_offset_t va)
293READY1
294{
295 return va >> PDRSHIFT;
296}
297
298/* Return various clipped indexes for a given VA */
299static __inline vm_pindex_t
300pmap_pte_index(vm_offset_t va)
301READY1
302{
303
304 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
305}
306
307static __inline vm_pindex_t
308pmap_pde_index(vm_offset_t va)
309READY1
310{
311
312 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
313}
314
315static __inline vm_pindex_t
316pmap_pdpe_index(vm_offset_t va)
317READY1
318{
319
320 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
321}
322
323static __inline vm_pindex_t
324pmap_pml4e_index(vm_offset_t va)
325READY1
326{
327
328 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
329}
330
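/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the index helpers above carve a canonical virtual address into four
 * 9-bit table indexes plus a 12-bit page offset.  The shift constants
 * used below are the usual amd64 values (PAGE_SHIFT=12, PDRSHIFT=21,
 * PDPSHIFT=30, PML4SHIFT=39) and are assumptions of this sketch.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t va = 0x00007f1234567abcUL;	/* arbitrary user VA */
	unsigned pml4i = (va >> 39) & 0x1ff;	/* pmap_pml4e_index() */
	unsigned pdpi  = (va >> 30) & 0x1ff;	/* pmap_pdpe_index() */
	unsigned pdi   = (va >> 21) & 0x1ff;	/* pmap_pde_index() */
	unsigned pti   = (va >> 12) & 0x1ff;	/* pmap_pte_index() */
	unsigned off   = va & 0xfff;		/* byte offset in the page */

	printf("pml4=%u pdp=%u pd=%u pt=%u off=%#x\n",
	       pml4i, pdpi, pdi, pti, off);
	return 0;
}
#endif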
331/* Return a pointer to the PML4 slot that corresponds to a VA */
332static __inline pml4_entry_t *
333pmap_pml4e(pmap_t pmap, vm_offset_t va)
334READY1
335{
336
337 return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
338}
339
340/* Return a pointer to the PDP slot that corresponds to a VA */
341static __inline pdp_entry_t *
342pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
343READY1
344{
345 pdp_entry_t *pdpe;
346
347 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
348 return (&pdpe[pmap_pdpe_index(va)]);
349}
350
351/* Return a pointer to the PDP slot that corresponds to a VA */
352static __inline pdp_entry_t *
353pmap_pdpe(pmap_t pmap, vm_offset_t va)
354READY1
355{
356 pml4_entry_t *pml4e;
357
358 pml4e = pmap_pml4e(pmap, va);
359 if ((*pml4e & PG_V) == 0)
360 return NULL;
361 return (pmap_pml4e_to_pdpe(pml4e, va));
362}
363
364/* Return a pointer to the PD slot that corresponds to a VA */
365static __inline pd_entry_t *
366pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
367READY1
368{
369 pd_entry_t *pde;
370
371 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
372 return (&pde[pmap_pde_index(va)]);
373}
374
375/* Return a pointer to the PD slot that corresponds to a VA */
376static __inline pd_entry_t *
377pmap_pde(pmap_t pmap, vm_offset_t va)
378READY1
379{
380 pdp_entry_t *pdpe;
381
382 pdpe = pmap_pdpe(pmap, va);
383 if (pdpe == NULL || (*pdpe & PG_V) == 0)
384 return NULL;
385 return (pmap_pdpe_to_pde(pdpe, va));
386}
387
388/* Return a pointer to the PT slot that corresponds to a VA */
389static __inline pt_entry_t *
390pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
391READY1
392{
393 pt_entry_t *pte;
394
395 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
396 return (&pte[pmap_pte_index(va)]);
397}
398
399/* Return a pointer to the PT slot that corresponds to a VA */
400static __inline pt_entry_t *
401pmap_pte(pmap_t pmap, vm_offset_t va)
402READY1
403{
404 pd_entry_t *pde;
405
406 pde = pmap_pde(pmap, va);
407 if (pde == NULL || (*pde & PG_V) == 0)
408 return NULL;
409 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
410 return ((pt_entry_t *)pde);
411 return (pmap_pde_to_pte(pde, va));
412}
413
414
415PMAP_INLINE pt_entry_t *
416vtopte(vm_offset_t va)
417READY1
418{
419 uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
420
421 return (PTmap + ((va >> PAGE_SHIFT) & mask));
c8fe38ae 422}
d7f50089 423
48ffc236
JG
424static __inline pd_entry_t *
425vtopde(vm_offset_t va)
426READY1
427{
428 uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
429
430 return (PDmap + ((va >> PDRSHIFT) & mask));
431}
c8fe38ae 432
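/*
 * Illustrative sketch (added commentary, not part of the original file):
 * vtopte()/vtopde() rely on the recursive PML4 entry: every PTE appears
 * in one linear window at PTmap, indexed by the low 36 bits (4 x 9) of
 * the virtual page number.  The mask arithmetic is reproduced below; the
 * 9-bit-per-level widths are the standard amd64 values and are assumed.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t pte_mask = (1UL << (9 + 9 + 9 + 9)) - 1;	/* vtopte() */
	uint64_t pde_mask = (1UL << (9 + 9 + 9)) - 1;		/* vtopde() */
	uint64_t va = 0xffffffff80201234UL;			/* sample KVA */

	printf("PTE index = %#lx\n", (unsigned long)((va >> 12) & pte_mask));
	printf("PDE index = %#lx\n", (unsigned long)((va >> 21) & pde_mask));
	return 0;
}
#endif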
48ffc236 433static uint64_t
c8fe38ae 434allocpages(vm_paddr_t *firstaddr, int n)
48ffc236 435READY1
d7f50089 436{
48ffc236 437 uint64_t ret;
c8fe38ae
MD
438
439 ret = *firstaddr;
440 bzero((void *)ret, n * PAGE_SIZE);
441 *firstaddr += n * PAGE_SIZE;
442 return (ret);
d7f50089
YY
443}
444
c8fe38ae
MD
445void
446create_pagetables(vm_paddr_t *firstaddr)
48ffc236 447READY0
c8fe38ae
MD
448{
449 int i;
450 int count;
451 uint64_t cpu0pp, cpu0idlestk;
452 int idlestk_page_offset = offsetof(struct privatespace, idlestack) / PAGE_SIZE;
453
454 /* we are running (mostly) V=P at this point */
455
48ffc236
JG
456 /* Allocate pages */
457 KPTphys = allocpages(firstaddr, NKPT);
458 KPML4phys = allocpages(firstaddr, 1);
459 KPDPphys = allocpages(firstaddr, NKPML4E);
460 KPDphys = allocpages(firstaddr, NKPDPE);
461
462 ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
463 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
464 ndmpdp = 4;
465 DMPDPphys = allocpages(firstaddr, NDMPML4E);
466 if ((amd_feature & AMDID_PAGE1GB) == 0)
467 DMPDphys = allocpages(firstaddr, ndmpdp);
468 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
469
470 /* Fill in the underlying page table pages */
471 /* Read-only from zero to physfree */
472 /* XXX not fully used, underneath 2M pages */
473 for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
474 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
475 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
476 }
477
478 /* Now map the page tables at their location within PTmap */
479 for (i = 0; i < NKPT; i++) {
480 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
481 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
482 }
483
484 /* Map from zero to end of allocations under 2M pages */
485 /* This replaces some of the KPTphys entries above */
486 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
487 ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
488 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
489 }
490
491 /* And connect up the PD to the PDP */
492 for (i = 0; i < NKPDPE; i++) {
493 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys +
494 (i << PAGE_SHIFT);
495 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
496 }
497
498 /* Now set up the direct map space using either 2MB or 1GB pages */
499 /* Preset PG_M and PG_A because demotion expects it */
500 if ((amd_feature & AMDID_PAGE1GB) == 0) {
501 for (i = 0; i < NPDEPG * ndmpdp; i++) {
502 ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
503 ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS |
504 PG_G | PG_M | PG_A;
505 }
506 /* And the direct map space's PDP */
507 for (i = 0; i < ndmpdp; i++) {
508 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
509 (i << PAGE_SHIFT);
510 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
511 }
512 } else {
513 for (i = 0; i < ndmpdp; i++) {
514 ((pdp_entry_t *)DMPDPphys)[i] =
515 (vm_paddr_t)i << PDPSHIFT;
516 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS |
517 PG_G | PG_M | PG_A;
518 }
519 }
520
521 /* And recursively map PML4 to itself in order to get PTmap */
522 ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
523 ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
524
525 /* Connect the Direct Map slot up to the PML4 */
526 ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
527 ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
528
529 /* Connect the KVA slot up to the PML4 */
530 ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
531 ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
532#if JGPMAP32
c8fe38ae
MD
533 common_lvl4_phys = allocpages(firstaddr, 1); /* 512 512G mappings */
534 common_lvl3_phys = allocpages(firstaddr, 1); /* 512 1G mappings */
535 KPTphys = allocpages(firstaddr, NKPT); /* kernel page table */
536 IdlePTD = allocpages(firstaddr, 1); /* kernel page dir */
537 cpu0pp = allocpages(firstaddr, MDGLOBALDATA_BASEALLOC_PAGES);
538 cpu0idlestk = allocpages(firstaddr, UPAGES);
539 SMPptpa = allocpages(firstaddr, 1);
540 SMPpt = (void *)(SMPptpa + KERNBASE);
541
542
543 /*
544 * Load kernel page table with kernel memory mappings
545 */
546 for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
547 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
548 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V;
549 }
550
551#ifndef JG
552 for (i = 0; i < NKPT; i++) {
553 ((pd_entry_t *)IdlePTD)[i] = KPTphys + (i << PAGE_SHIFT);
554 ((pd_entry_t *)IdlePTD)[i] |= PG_RW | PG_V;
555 }
556#endif
557
558 /*
559 * Set up the kernel page table itself.
560 */
561 for (i = 0; i < NKPT; i++) {
562 ((pd_entry_t *)IdlePTD)[KPTDI + i] = KPTphys + (i << PAGE_SHIFT);
563 ((pd_entry_t *)IdlePTD)[KPTDI + i] |= PG_RW | PG_V;
564 }
565
566#ifndef JG
567 count = ISA_HOLE_LENGTH >> PAGE_SHIFT;
568 for (i = 0; i < count; i++) {
569 ((pt_entry_t *)KPTphys)[amd64_btop(ISA_HOLE_START) + i] = \
570 (ISA_HOLE_START + i * PAGE_SIZE) | PG_RW | PG_V;
571 }
572#endif
573
574 /*
575 * Self-mapping
576 */
577 ((pd_entry_t *)IdlePTD)[PTDPTDI] = (pd_entry_t)IdlePTD | PG_RW | PG_V;
578
579 /*
580 * Map CPU_prvspace[0].mdglobaldata
581 */
582 for (i = 0; i < MDGLOBALDATA_BASEALLOC_PAGES; i++) {
583 ((pt_entry_t *)SMPptpa)[i] = \
584 (cpu0pp + i * PAGE_SIZE) | PG_RW | PG_V;
585 }
586
587 /*
588 * Map CPU_prvspace[0].idlestack
589 */
590 for (i = 0; i < UPAGES; i++) {
591 ((pt_entry_t *)SMPptpa)[idlestk_page_offset + i] = \
592 (cpu0idlestk + i * PAGE_SIZE) | PG_RW | PG_V;
593 }
594
595 /*
596 * Link SMPpt.
597 */
598 ((pd_entry_t *)IdlePTD)[MPPTDI] = SMPptpa | PG_RW | PG_V;
599
600 /*
601 * PML4 maps level 3
602 */
603 ((pml4_entry_t *)common_lvl4_phys)[LINKPML4I] = common_lvl3_phys | PG_RW | PG_V | PG_U;
604
605 /*
606 * location of "virtual CR3" - a PDP entry that is loaded
607 * with a PD physical address (+ page attributes).
608 * Matt: location of user page directory entry (representing 1G)
609 */
610 link_pdpe = &((pdp_entry_t *)common_lvl3_phys)[LINKPDPI];
48ffc236 611#endif /* JGPMAP32 */
c8fe38ae
MD
612}
613
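/*
 * Illustrative sketch (added commentary, not part of the original file):
 * create_pagetables() sizes the direct map in whole 1GB (NBPDP) units:
 * ndmpdp = roundup(physical memory, 1GB) / 1GB, with a 4GB floor.  The
 * constants used below (PDPSHIFT=30, PAGE_SHIFT=12) are the usual amd64
 * values and are assumptions of this sketch.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

static int
demo_ndmpdp(uint64_t maxmem_pages)
{
	uint64_t bytes = maxmem_pages << 12;		/* ptoa(Maxmem) */
	int n = (int)((bytes + (1UL << 30) - 1) >> 30);	/* 1GB units */

	if (n < 4)					/* minimum 4GB dirmap */
		n = 4;
	return n;
}

int
main(void)
{
	/* 2GB of RAM still gets a 4GB direct map; 9GB needs 9 entries */
	printf("2GB -> %d PDP entries\n", demo_ndmpdp(2UL << (30 - 12)));
	printf("9GB -> %d PDP entries\n", demo_ndmpdp(9UL << (30 - 12)));
	return 0;
}
#endif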
48ffc236 614READY0
c8fe38ae
MD
615void
616init_paging(vm_paddr_t *firstaddr) {
617 create_pagetables(firstaddr);
618
48ffc236 619#if JGPMAP32
c8fe38ae
MD
620 /* switch to the newly created page table */
621 *link_pdpe = IdlePTD | PG_RW | PG_V | PG_U;
622 load_cr3(common_lvl4_phys);
623 link_pdpe = (void *)((char *)link_pdpe + KERNBASE);
624
625 KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
626 KvaEnd = (vm_offset_t)VADDR(APTDPTDI, 0);
627 KvaSize = KvaEnd - KvaStart;
48ffc236 628#endif
d7f50089
YY
629}
630
631/*
c8fe38ae
MD
632 * Bootstrap the system enough to run with virtual memory.
633 *
634 * On the i386 this is called after mapping has already been enabled
635 * and just syncs the pmap module with what has already been done.
636 * [We can't call it easily with mapping off since the kernel is not
637 * mapped with PA == VA, hence we would have to relocate every address
638 * from the linked base (virtual) address "KERNBASE" to the actual
639 * (physical) address starting relative to 0]
d7f50089
YY
640 */
641void
48ffc236
JG
642pmap_bootstrap(vm_paddr_t *firstaddr)
643READY0
c8fe38ae
MD
644{
645 vm_offset_t va;
646 pt_entry_t *pte;
647 struct mdglobaldata *gd;
648 int i;
649 int pg;
650
48ffc236
JG
651 KvaStart = VM_MIN_KERNEL_ADDRESS;
652 KvaEnd = VM_MAX_KERNEL_ADDRESS;
653 KvaSize = KvaEnd - KvaStart;
654
c8fe38ae
MD
655 avail_start = *firstaddr;
656
657 /*
48ffc236 658 * Create an initial set of page tables to run the kernel in.
c8fe38ae 659 */
48ffc236
JG
660 create_pagetables(firstaddr);
661
c8fe38ae
MD
662 virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
663 virtual_start = pmap_kmem_choose(virtual_start);
48ffc236
JG
664
665 virtual_end = VM_MAX_KERNEL_ADDRESS;
666
667 /* XXX do %cr0 as well */
668 load_cr4(rcr4() | CR4_PGE | CR4_PSE);
669 load_cr3(KPML4phys);
c8fe38ae
MD
670
671 /*
672 * Initialize protection array.
673 */
674 i386_protection_init();
675
676 /*
677 * The kernel's pmap is statically allocated so we don't have to use
678 * pmap_create, which is unlikely to work correctly at this part of
679 * the boot sequence (XXX and which no longer exists).
680 */
48ffc236 681#if JGPMAP32
c8fe38ae 682 kernel_pmap.pm_pdir = (pd_entry_t *)(PTOV_OFFSET + (uint64_t)IdlePTD);
48ffc236
JG
683#endif
684 kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
c8fe38ae
MD
685 kernel_pmap.pm_count = 1;
686 kernel_pmap.pm_active = (cpumask_t)-1; /* don't allow deactivation */
687 TAILQ_INIT(&kernel_pmap.pm_pvlist);
688 nkpt = NKPT;
689
690 /*
691 * Reserve some special page table entries/VA space for temporary
692 * mapping of pages.
693 */
694#define SYSMAP(c, p, v, n) \
695 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
696
697 va = virtual_start;
48ffc236 698#ifdef JG
c8fe38ae 699 pte = (pt_entry_t *) pmap_pte(&kernel_pmap, va);
48ffc236
JG
700#else
701 pte = vtopte(va);
702#endif
c8fe38ae
MD
703
704 /*
705 * CMAP1/CMAP2 are used for zeroing and copying pages.
706 */
707 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
708
709 /*
710 * Crashdump maps.
711 */
712 SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
713
714 /*
715 * ptvmmap is used for reading arbitrary physical pages via
716 * /dev/mem.
717 */
718 SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
719
720 /*
721 * msgbufp is used to map the system message buffer.
722 * XXX msgbufmap is not used.
723 */
724 SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
725 atop(round_page(MSGBUF_SIZE)))
726
727 virtual_start = va;
728
729 *CMAP1 = 0;
48ffc236 730#if JGPMAP32
c8fe38ae
MD
731 for (i = 0; i < NKPT; i++)
732 PTD[i] = 0;
48ffc236 733#endif
c8fe38ae
MD
734
735 /*
736 * PG_G is terribly broken on SMP because we IPI invltlb's in some
737 * cases rather than invlpg. Actually, I don't even know why it
738 * works under UP, given the self-referential page table mappings.
739 */
740#ifdef SMP
741 pgeflag = 0;
742#else
743 if (cpu_feature & CPUID_PGE)
744 pgeflag = PG_G;
745#endif
746
747/*
748 * Initialize the 4MB page size flag
749 */
750 pseflag = 0;
751/*
752 * The 4MB page version of the initial
753 * kernel page mapping.
754 */
755 pdir4mb = 0;
756
757#if !defined(DISABLE_PSE)
758 if (cpu_feature & CPUID_PSE) {
759 pt_entry_t ptditmp;
760 /*
761 * Note that we have enabled PSE mode
762 */
763 pseflag = PG_PS;
764 ptditmp = *(PTmap + amd64_btop(KERNBASE));
765 ptditmp &= ~(NBPDR - 1);
766 ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
767 pdir4mb = ptditmp;
768
769#ifndef SMP
770 /*
771 * Enable the PSE mode. If we are SMP we can't do this
772 * now because the APs will not be able to use it when
773 * they boot up.
774 */
775 load_cr4(rcr4() | CR4_PSE);
776
777 /*
778 * We can do the mapping here for the single processor
779 * case. We simply ignore the old page table page from
780 * now on.
781 */
782 /*
783 * For SMP, we still need 4K pages to bootstrap APs,
784 * PSE will be enabled as soon as all APs are up.
785 */
786 PTD[KPTDI] = (pd_entry_t)ptditmp;
48ffc236 787#if JGPMAP32
c8fe38ae 788 kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
48ffc236 789#endif
c8fe38ae
MD
790 cpu_invltlb();
791#endif
792 }
793#endif
794#ifdef SMP
795 if (cpu_apic_address == 0)
796 panic("pmap_bootstrap: no local apic!");
797
057877ac 798#if JGPMAP32
c8fe38ae
MD
799 /* local apic is mapped on last page */
800 SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
801 (cpu_apic_address & PG_FRAME));
057877ac 802#endif
c8fe38ae
MD
803#endif
804
805 /*
806 * We need to finish setting up the globaldata page for the BSP.
807 * locore has already populated the page table for the mdglobaldata
808 * portion.
809 */
810 pg = MDGLOBALDATA_BASEALLOC_PAGES;
811 gd = &CPU_prvspace[0].mdglobaldata;
812 gd->gd_CMAP1 = &SMPpt[pg + 0];
813 gd->gd_CMAP2 = &SMPpt[pg + 1];
814 gd->gd_CMAP3 = &SMPpt[pg + 2];
815 gd->gd_PMAP1 = &SMPpt[pg + 3];
816 gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
817 gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
818 gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
819 gd->gd_PADDR1 = (pt_entry_t *)CPU_prvspace[0].PPAGE1;
820
821 cpu_invltlb();
d7f50089
YY
822}
823
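/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the SYSMAP() macro used in pmap_bootstrap() simply advances a VA
 * cursor by n pages and a PTE cursor by n entries, recording the start
 * VA in `v' and the start PTE pointer in `p'.  The stand-alone model
 * below uses hypothetical demo_* names, a fake PTE array, and an assumed
 * 4096-byte page size.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_SYSMAP(c, p, v, n) \
	do { v = (c)va; va += (n) * DEMO_PAGE_SIZE; p = pte; pte += (n); } while (0)

int
main(void)
{
	uint64_t fake_ptes[16];
	uint64_t va = 0xffffffff81000000UL;	/* pretend virtual_start */
	uint64_t *pte = fake_ptes;		/* pretend vtopte(va) */
	char *demo_CADDR1, *demo_crashdumpmap;
	uint64_t *demo_CMAP1, *demo_pt_crashdumpmap;

	DEMO_SYSMAP(char *, demo_CMAP1, demo_CADDR1, 1);
	DEMO_SYSMAP(char *, demo_pt_crashdumpmap, demo_crashdumpmap, 4);

	printf("CADDR1=%p (pte %ld), crashdumpmap=%p (pte %ld)\n",
	       (void *)demo_CADDR1, (long)(demo_CMAP1 - fake_ptes),
	       (void *)demo_crashdumpmap,
	       (long)(demo_pt_crashdumpmap - fake_ptes));
	return 0;
}
#endif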
c8fe38ae 824#ifdef SMP
d7f50089 825/*
c8fe38ae 826 * Set 4mb pdir for mp startup
d7f50089
YY
827 */
828void
c8fe38ae 829pmap_set_opt(void)
48ffc236 830READY0
c8fe38ae
MD
831{
832 if (pseflag && (cpu_feature & CPUID_PSE)) {
833 load_cr4(rcr4() | CR4_PSE);
834 if (pdir4mb && mycpu->gd_cpuid == 0) { /* only on BSP */
48ffc236 835#if JGPMAP32
c8fe38ae
MD
836 kernel_pmap.pm_pdir[KPTDI] =
837 PTD[KPTDI] = (pd_entry_t)pdir4mb;
48ffc236 838#endif
c8fe38ae
MD
839 cpu_invltlb();
840 }
841 }
d7f50089 842}
c8fe38ae 843#endif
d7f50089 844
c8fe38ae
MD
845/*
846 * Initialize the pmap module.
847 * Called by vm_init, to initialize any structures that the pmap
848 * system needs to map virtual memory.
849 * pmap_init has been enhanced to support discontiguous physical
850 * memory in a fairly consistent way.
d7f50089
YY
851 */
852void
c8fe38ae 853pmap_init(void)
48ffc236 854READY0
d7f50089 855{
c8fe38ae
MD
856 int i;
857 int initial_pvs;
858
859 /*
860 * object for kernel page table pages
861 */
48ffc236
JG
862 /* JG I think the number can be arbitrary */
863 kptobj = vm_object_allocate(OBJT_DEFAULT, 5);
c8fe38ae
MD
864
865 /*
866 * Allocate memory for random pmap data structures. Includes the
867 * pv_head_table.
868 */
869
870 for(i = 0; i < vm_page_array_size; i++) {
871 vm_page_t m;
872
873 m = &vm_page_array[i];
874 TAILQ_INIT(&m->md.pv_list);
875 m->md.pv_list_count = 0;
876 }
877
878 /*
879 * init the pv free list
880 */
881 initial_pvs = vm_page_array_size;
882 if (initial_pvs < MINPV)
883 initial_pvs = MINPV;
884 pvzone = &pvzone_store;
885 pvinit = (struct pv_entry *) kmem_alloc(&kernel_map,
886 initial_pvs * sizeof (struct pv_entry));
887 zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
888 initial_pvs);
889
890 /*
891 * Now it is safe to enable pv_table recording.
892 */
893 pmap_initialized = TRUE;
057877ac 894 lapic = pmap_mapdev_uncacheable(cpu_apic_address, sizeof(struct LAPIC));
d7f50089
YY
895}
896
c8fe38ae
MD
897/*
898 * Initialize the address space (zone) for the pv_entries. Set a
899 * high water mark so that the system can recover from excessive
900 * numbers of pv entries.
901 */
d7f50089 902void
c8fe38ae 903pmap_init2(void)
48ffc236 904READY0
d7f50089 905{
c8fe38ae
MD
906 int shpgperproc = PMAP_SHPGPERPROC;
907
908 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
909 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
910 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
911 pv_entry_high_water = 9 * (pv_entry_max / 10);
912 zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
d7f50089
YY
913}
914
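/*
 * Illustrative sketch (added commentary, not part of the original file):
 * pmap_init2() sizes the pv_entry zone from the PMAP_SHPGPERPROC tunable
 * and the page count, then sets the high-water mark at 90% of the
 * maximum.  The input numbers below are made-up example values.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>

int
main(void)
{
	int shpgperproc = 200;		/* PMAP_SHPGPERPROC default */
	int maxproc = 6164;		/* example value */
	int vm_page_array_size = 262144;/* example: 1GB of 4K pages */
	int pv_entry_max, pv_entry_high_water;

	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	printf("pv_entry_max=%d high_water=%d\n",
	       pv_entry_max, pv_entry_high_water);
	return 0;
}
#endif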
c8fe38ae
MD
915
916/***************************************************
917 * Low level helper routines.....
918 ***************************************************/
919
920#if defined(PMAP_DIAGNOSTIC)
d7f50089
YY
921
922/*
c8fe38ae
MD
923 * This code checks for non-writeable/modified pages.
924 * This should be an invalid condition.
d7f50089 925 */
c8fe38ae 926static int
48ffc236
JG
927pmap_nw_modified(pt_entry_t pte)
928READY1
d7f50089 929{
c8fe38ae
MD
930 if ((pte & (PG_M|PG_RW)) == PG_M)
931 return 1;
932 else
933 return 0;
d7f50089 934}
c8fe38ae
MD
935#endif
936
d7f50089 937
c8fe38ae
MD
938/*
939 * this routine defines the region(s) of memory that should
940 * not be tested for the modified bit.
941 */
942static PMAP_INLINE int
943pmap_track_modified(vm_offset_t va)
48ffc236 944READY0
d7f50089 945{
c8fe38ae
MD
946 if ((va < clean_sva) || (va >= clean_eva))
947 return 1;
948 else
949 return 0;
d7f50089
YY
950}
951
d7f50089 952/*
c8fe38ae
MD
953 * pmap_extract:
954 *
955 * Extract the physical page address associated with the map/VA pair.
956 *
957 * This function may not be called from an interrupt if the pmap is
958 * not kernel_pmap.
d7f50089 959 */
c8fe38ae
MD
960vm_paddr_t
961pmap_extract(pmap_t pmap, vm_offset_t va)
48ffc236 962READY1
d7f50089 963{
48ffc236
JG
964 vm_paddr_t rtval;
965 pt_entry_t *pte;
966 pd_entry_t pde, *pdep;
c8fe38ae 967
48ffc236
JG
968 rtval = 0;
969 pdep = pmap_pde(pmap, va);
970 if (pdep != NULL) {
971 pde = *pdep;
972 if (pde) {
973 if ((pde & PG_PS) != 0) {
974 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
975 } else {
976 pte = pmap_pde_to_pte(pdep, va);
977 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
978 }
c8fe38ae 979 }
c8fe38ae 980 }
48ffc236
JG
981 return rtval;
982}
983
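/*
 * Illustrative sketch (added commentary, not part of the original file):
 * pmap_extract() composes the physical address differently for a 2MB
 * mapping (keep the PDE frame, add the low 21 VA bits) and for a 4KB
 * mapping (keep the PTE frame, add the low 12 VA bits).  The mask values
 * below follow the conventional amd64 PG_PS_FRAME/PG_FRAME layout and
 * are assumptions of this sketch.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PG_FRAME	 0x000ffffffffff000UL	/* 4K frame bits */
#define DEMO_PG_PS_FRAME 0x000fffffffe00000UL	/* 2M frame bits */

int
main(void)
{
	uint64_t va  = 0x00007f00002c5678UL;
	uint64_t pde = 0x40000000UL | 0x80 | 0x63;	/* PG_PS mapping */
	uint64_t pte = 0x12345000UL | 0x63;		/* 4K mapping */

	/* 2MB (PG_PS) case: PDE frame + low 21 bits of the VA */
	printf("2M: %#lx\n",
	    (unsigned long)((pde & DEMO_PG_PS_FRAME) | (va & 0x1fffffUL)));
	/* 4KB case: PTE frame + low 12 bits of the VA */
	printf("4K: %#lx\n",
	    (unsigned long)((pte & DEMO_PG_FRAME) | (va & 0xfffUL)));
	return 0;
}
#endif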
984/*
985 * Routine: pmap_kextract
986 * Function:
987 * Extract the physical page address associated
988 * kernel virtual address.
989 */
990vm_paddr_t
991pmap_kextract(vm_offset_t va)
992READY1
993{
994 pd_entry_t pde;
995 vm_paddr_t pa;
996
997 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
998 pa = DMAP_TO_PHYS(va);
999 } else {
1000 pde = *vtopde(va);
1001 if (pde & PG_PS) {
1002 pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
1003 } else {
1004 /*
1005 * Beware of a concurrent promotion that changes the
1006 * PDE at this point! For example, vtopte() must not
1007 * be used to access the PTE because it would use the
1008 * new PDE. It is, however, safe to use the old PDE
1009 * because the page table page is preserved by the
1010 * promotion.
1011 */
1012 pa = *pmap_pde_to_pte(&pde, va);
1013 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1014 }
1015 }
1016 return pa;
d7f50089
YY
1017}
1018
c8fe38ae
MD
1019/***************************************************
1020 * Low level mapping routines.....
1021 ***************************************************/
1022
d7f50089 1023/*
c8fe38ae
MD
1024 * Routine: pmap_kenter
1025 * Function:
1026 * Add a wired page to the KVA
1027 * NOTE! In order for the mapping to take effect you
1028 * should do an invltlb after doing the pmap_kenter().
d7f50089 1029 */
c8fe38ae 1030void
d7f50089 1031pmap_kenter(vm_offset_t va, vm_paddr_t pa)
48ffc236 1032READY1
d7f50089 1033{
c8fe38ae
MD
1034 pt_entry_t *pte;
1035 pt_entry_t npte;
1036 pmap_inval_info info;
1037
1038 pmap_inval_init(&info);
1039 npte = pa | PG_RW | PG_V | pgeflag;
1040 pte = vtopte(va);
1041 pmap_inval_add(&info, &kernel_pmap, va);
1042 *pte = npte;
1043 pmap_inval_flush(&info);
d7f50089
YY
1044}
1045
1046/*
c8fe38ae
MD
1047 * Routine: pmap_kenter_quick
1048 * Function:
1049 * Similar to pmap_kenter(), except we only invalidate the
1050 * mapping on the current CPU.
d7f50089 1051 */
c8fe38ae
MD
1052void
1053pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
48ffc236 1054READY1
c8fe38ae
MD
1055{
1056 pt_entry_t *pte;
1057 pt_entry_t npte;
1058
1059 npte = pa | PG_RW | PG_V | pgeflag;
1060 pte = vtopte(va);
1061 *pte = npte;
1062 cpu_invlpg((void *)va);
1063}
1064
d7f50089
YY
1065void
1066pmap_kenter_sync(vm_offset_t va)
48ffc236 1067READY1
d7f50089 1068{
c8fe38ae
MD
1069 pmap_inval_info info;
1070
1071 pmap_inval_init(&info);
1072 pmap_inval_add(&info, &kernel_pmap, va);
1073 pmap_inval_flush(&info);
d7f50089
YY
1074}
1075
d7f50089
YY
1076void
1077pmap_kenter_sync_quick(vm_offset_t va)
48ffc236 1078READY1
d7f50089 1079{
c8fe38ae 1080 cpu_invlpg((void *)va);
d7f50089
YY
1081}
1082
d7f50089 1083/*
c8fe38ae 1084 * remove a page from the kernel pagetables
d7f50089
YY
1085 */
1086void
c8fe38ae 1087pmap_kremove(vm_offset_t va)
48ffc236 1088READY1
d7f50089 1089{
c8fe38ae
MD
1090 pt_entry_t *pte;
1091 pmap_inval_info info;
1092
1093 pmap_inval_init(&info);
1094 pte = vtopte(va);
1095 pmap_inval_add(&info, &kernel_pmap, va);
1096 *pte = 0;
1097 pmap_inval_flush(&info);
1098}
1099
1100void
1101pmap_kremove_quick(vm_offset_t va)
48ffc236 1102READY1
c8fe38ae
MD
1103{
1104 pt_entry_t *pte;
1105 pte = vtopte(va);
1106 *pte = 0;
1107 cpu_invlpg((void *)va);
d7f50089
YY
1108}
1109
1110/*
c8fe38ae 1111 * XXX these need to be recoded. They are not used in any critical path.
d7f50089
YY
1112 */
1113void
c8fe38ae 1114pmap_kmodify_rw(vm_offset_t va)
48ffc236 1115READY1
d7f50089 1116{
c8fe38ae
MD
1117 *vtopte(va) |= PG_RW;
1118 cpu_invlpg((void *)va);
d7f50089
YY
1119}
1120
c8fe38ae
MD
1121void
1122pmap_kmodify_nc(vm_offset_t va)
48ffc236 1123READY1
c8fe38ae
MD
1124{
1125 *vtopte(va) |= PG_N;
1126 cpu_invlpg((void *)va);
1127}
d7f50089
YY
1128
1129/*
c8fe38ae
MD
1130 * Used to map a range of physical addresses into kernel
1131 * virtual address space.
1132 *
1133 * For now, VM is already on, we only need to map the
1134 * specified memory.
d7f50089
YY
1135 */
1136vm_offset_t
1137pmap_map(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
8fdd3267 1138READY3
d7f50089 1139{
8fdd3267 1140 return PHYS_TO_DMAP(start);
d7f50089
YY
1141}
1142
c8fe38ae 1143
d7f50089 1144/*
c8fe38ae
MD
1145 * Add a list of wired pages to the kva.
1146 * This routine is only used for temporary
1147 * kernel mappings that do not need to have
1148 * page modification or references recorded.
1149 * Note that old mappings are simply written
1150 * over. The page *must* be wired.
d7f50089
YY
1151 */
1152void
c8fe38ae 1153pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
48ffc236 1154READY1
d7f50089 1155{
c8fe38ae
MD
1156 vm_offset_t end_va;
1157
1158 end_va = va + count * PAGE_SIZE;
1159
1160 while (va < end_va) {
1161 pt_entry_t *pte;
1162
1163 pte = vtopte(va);
1164 *pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
1165 cpu_invlpg((void *)va);
1166 va += PAGE_SIZE;
1167 m++;
1168 }
1169#ifdef SMP
1170 smp_invltlb(); /* XXX */
1171#endif
1172}
1173
1174void
1175pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
48ffc236 1176READY1
c8fe38ae
MD
1177{
1178 vm_offset_t end_va;
1179 cpumask_t cmask = mycpu->gd_cpumask;
1180
1181 end_va = va + count * PAGE_SIZE;
1182
1183 while (va < end_va) {
1184 pt_entry_t *pte;
1185 pt_entry_t pteval;
1186
1187 /*
1188 * Install the new PTE. If the pte changed from the prior
1189 * mapping we must reset the cpu mask and invalidate the page.
1190 * If the pte is the same but we have not seen it on the
1191 * current cpu, invlpg the existing mapping. Otherwise the
1192 * entry is optimal and no invalidation is required.
1193 */
1194 pte = vtopte(va);
1195 pteval = VM_PAGE_TO_PHYS(*m) | PG_A | PG_RW | PG_V | pgeflag;
1196 if (*pte != pteval) {
1197 *mask = 0;
1198 *pte = pteval;
1199 cpu_invlpg((void *)va);
1200 } else if ((*mask & cmask) == 0) {
1201 cpu_invlpg((void *)va);
1202 }
1203 va += PAGE_SIZE;
1204 m++;
1205 }
1206 *mask |= cmask;
d7f50089
YY
1207}
1208
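/*
 * Illustrative sketch (added commentary, not part of the original file):
 * pmap_qenter2() keeps a cpumask of CPUs known to have seen the current
 * mapping.  A changed PTE resets the mask and invalidates; an unchanged
 * PTE only needs an invlpg on CPUs that have not seen it yet.  The toy
 * function below models that decision, with cpumask_t reduced to a plain
 * unsigned long.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

typedef unsigned long demo_cpumask_t;

/* Returns 1 when the caller must issue cpu_invlpg() for this VA. */
static int
demo_install_pte(uint64_t *pte, uint64_t pteval,
		 demo_cpumask_t *mask, demo_cpumask_t cmask)
{
	if (*pte != pteval) {		/* mapping changed: restart tracking */
		*mask = 0;
		*pte = pteval;
		return 1;
	}
	if ((*mask & cmask) == 0)	/* same mapping, unseen on this cpu */
		return 1;
	return 0;			/* already optimal */
}

int
main(void)
{
	uint64_t pte = 0;
	demo_cpumask_t mask = 0, cpu0 = 1UL << 0, cpu1 = 1UL << 1;

	printf("%d", demo_install_pte(&pte, 0x1000 | 0x63, &mask, cpu0));
	mask |= cpu0;
	printf("%d", demo_install_pte(&pte, 0x1000 | 0x63, &mask, cpu0));
	printf("%d\n", demo_install_pte(&pte, 0x1000 | 0x63, &mask, cpu1));
	return 0;
}
#endif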
1209/*
c8fe38ae
MD
1210 * this routine jerks page mappings from the
1211 * kernel -- it is meant only for temporary mappings.
d7f50089 1212 */
c8fe38ae
MD
1213void
1214pmap_qremove(vm_offset_t va, int count)
48ffc236 1215READY1
d7f50089 1216{
c8fe38ae
MD
1217 vm_offset_t end_va;
1218
48ffc236 1219 end_va = va + count * PAGE_SIZE;
c8fe38ae
MD
1220
1221 while (va < end_va) {
1222 pt_entry_t *pte;
1223
1224 pte = vtopte(va);
1225 *pte = 0;
1226 cpu_invlpg((void *)va);
1227 va += PAGE_SIZE;
1228 }
1229#ifdef SMP
1230 smp_invltlb();
1231#endif
d7f50089
YY
1232}
1233
1234/*
c8fe38ae
MD
1235 * This routine works like vm_page_lookup() but also blocks as long as the
1236 * page is busy. This routine does not busy the page it returns.
1237 *
1238 * Unless the caller is managing objects whose pages are in a known state,
1239 * the call should be made with a critical section held so the page's object
1240 * association remains valid on return.
d7f50089 1241 */
c8fe38ae
MD
1242static vm_page_t
1243pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
48ffc236 1244READY1
d7f50089 1245{
c8fe38ae
MD
1246 vm_page_t m;
1247
1248 do {
1249 m = vm_page_lookup(object, pindex);
1250 } while (m && vm_page_sleep_busy(m, FALSE, "pplookp"));
1251
1252 return(m);
d7f50089
YY
1253}
1254
1255/*
c8fe38ae
MD
1256 * Create a new thread and optionally associate it with a (new) process.
1257 * NOTE! the new thread's cpu may not equal the current cpu.
d7f50089
YY
1258 */
1259void
c8fe38ae 1260pmap_init_thread(thread_t td)
48ffc236 1261READY1
d7f50089 1262{
c8fe38ae
MD
1263 /* enforce pcb placement */
1264 td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
1265 td->td_savefpu = &td->td_pcb->pcb_save;
48ffc236 1266 td->td_sp = (char *)td->td_pcb - 16; /* JG is -16 needed on amd64? */
d7f50089
YY
1267}
1268
1269/*
c8fe38ae 1270 * This routine directly affects the fork perf for a process.
d7f50089
YY
1271 */
1272void
c8fe38ae 1273pmap_init_proc(struct proc *p)
48ffc236 1274READY1
d7f50089
YY
1275{
1276}
1277
1278/*
c8fe38ae
MD
1279 * Dispose the UPAGES for a process that has exited.
1280 * This routine directly impacts the exit perf of a process.
d7f50089
YY
1281 */
1282void
c8fe38ae 1283pmap_dispose_proc(struct proc *p)
48ffc236 1284READY1
d7f50089 1285{
c8fe38ae 1286 KASSERT(p->p_lock == 0, ("attempt to dispose referenced proc! %p", p));
d7f50089
YY
1287}
1288
c8fe38ae
MD
1289/***************************************************
1290 * Page table page management routines.....
1291 ***************************************************/
1292
d7f50089 1293/*
c8fe38ae
MD
1294 * This routine unholds page table pages, and if the hold count
1295 * drops to zero, then it decrements the wire count.
d7f50089 1296 */
c8fe38ae 1297static int
48ffc236
JG
1298_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, pmap_inval_info_t info)
1299READY1
c8fe38ae
MD
1300{
1301 /*
1302 * Wait until we can busy the page ourselves. We cannot have
1303 * any active flushes if we block.
1304 */
1305 if (m->flags & PG_BUSY) {
1306 pmap_inval_flush(info);
1307 while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
1308 ;
1309 }
1310 KASSERT(m->queue == PQ_NONE,
1311 ("_pmap_unwire_pte_hold: %p->queue != PQ_NONE", m));
1312
1313 if (m->hold_count == 1) {
1314 /*
1315 * Unmap the page table page
1316 */
1317 vm_page_busy(m);
1318 pmap_inval_add(info, pmap, -1);
48ffc236
JG
1319
1320 if (m->pindex >= (NUPDE + NUPDPE)) {
1321 /* PDP page */
1322 pml4_entry_t *pml4;
1323 pml4 = pmap_pml4e(pmap, va);
1324 *pml4 = 0;
1325 } else if (m->pindex >= NUPDE) {
1326 /* PD page */
1327 pdp_entry_t *pdp;
1328 pdp = pmap_pdpe(pmap, va);
1329 *pdp = 0;
1330 } else {
3535204a 1331 /* PT page */
48ffc236
JG
1332 pd_entry_t *pd;
1333 pd = pmap_pde(pmap, va);
1334 *pd = 0;
1335 }
c8fe38ae
MD
1336
1337 KKASSERT(pmap->pm_stats.resident_count > 0);
1338 --pmap->pm_stats.resident_count;
1339
1340 if (pmap->pm_ptphint == m)
1341 pmap->pm_ptphint = NULL;
1342
48ffc236
JG
1343 if (m->pindex < NUPDE) {
1344 /* We just released a PT, unhold the matching PD */
1345 vm_page_t pdpg;
1346
1347 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
1348 pmap_unwire_pte_hold(pmap, va, pdpg, info);
1349 }
1350 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
1351 /* We just released a PD, unhold the matching PDP */
1352 vm_page_t pdppg;
1353
1354 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
1355 pmap_unwire_pte_hold(pmap, va, pdppg, info);
1356 }
48ffc236 1357
c8fe38ae
MD
1358 /*
1359 * This was our last hold, the page had better be unwired
1360 * after we decrement wire_count.
1361 *
1362 * FUTURE NOTE: shared page directory page could result in
1363 * multiple wire counts.
1364 */
1365 vm_page_unhold(m);
1366 --m->wire_count;
1367 KKASSERT(m->wire_count == 0);
1368 --vmstats.v_wire_count;
1369 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1370 vm_page_flash(m);
1371 vm_page_free_zero(m);
1372 return 1;
1373 } else {
1b2e0b92 1374 /* JG Can we get here? */
c8fe38ae
MD
1375 KKASSERT(m->hold_count > 1);
1376 vm_page_unhold(m);
1377 return 0;
1378 }
1379}
1380
1381static PMAP_INLINE int
48ffc236
JG
1382pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, pmap_inval_info_t info)
1383READY1
d7f50089 1384{
c8fe38ae
MD
1385 KKASSERT(m->hold_count > 0);
1386 if (m->hold_count > 1) {
1387 vm_page_unhold(m);
1388 return 0;
1389 } else {
48ffc236 1390 return _pmap_unwire_pte_hold(pmap, va, m, info);
c8fe38ae 1391 }
d7f50089
YY
1392}
1393
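/*
 * Illustrative sketch (added commentary, not part of the original file):
 * page table pages live in the pmap's VM object at pindexes laid out as
 * [0, NUPDE) for PT pages, [NUPDE, NUPDE+NUPDPE) for PD pages, and
 * higher indexes for PDP pages; that is how _pmap_unwire_pte_hold()
 * decides which upper-level entry to clear.  The NUPDE/NUPDPE values
 * below follow the usual amd64 layout (256 user PML4 slots, 512 entries
 * per table) and are assumptions of this sketch.
 */
#if 0	/* user-space demonstration only */
#include <stdio.h>
#include <stdint.h>

#define DEMO_NUPML4E	256UL			/* user PML4 entries */
#define DEMO_NUPDPE	(DEMO_NUPML4E * 512)	/* user PDP entries */
#define DEMO_NUPDE	(DEMO_NUPDPE * 512)	/* user PD entries */

static const char *
demo_classify(uint64_t pindex)
{
	if (pindex >= DEMO_NUPDE + DEMO_NUPDPE)
		return "PDP page (clear a PML4 entry)";
	if (pindex >= DEMO_NUPDE)
		return "PD page (clear a PDP entry)";
	return "PT page (clear a PD entry)";
}

int
main(void)
{
	printf("%s\n", demo_classify(10));
	printf("%s\n", demo_classify(DEMO_NUPDE + 3));
	printf("%s\n", demo_classify(DEMO_NUPDE + DEMO_NUPDPE + 1));
	return 0;
}
#endif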
c8fe38ae
MD
1394/*
1395 * After removing a page table entry, this routine is used to
1396 * conditionally free the page, and manage the hold/wire counts.
d7f50089 1397 */
c8fe38ae
MD
1398static int
1399pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
1400 pmap_inval_info_t info)
48ffc236 1401READY1
c8fe38ae 1402{
48ffc236 1403 /* JG Use FreeBSD/amd64 or FreeBSD/i386 ptepde approaches? */
c8fe38ae 1404 vm_pindex_t ptepindex;
48ffc236 1405 if (va >= VM_MAX_USER_ADDRESS)
c8fe38ae
MD
1406 return 0;
1407
1408 if (mpte == NULL) {
48ffc236
JG
1409 ptepindex = pmap_pde_pindex(va);
1410#if JGHINT
c8fe38ae
MD
1411 if (pmap->pm_ptphint &&
1412 (pmap->pm_ptphint->pindex == ptepindex)) {
1413 mpte = pmap->pm_ptphint;
1414 } else {
48ffc236 1415#endif
c8fe38ae 1416 pmap_inval_flush(info);
48ffc236 1417 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
c8fe38ae 1418 pmap->pm_ptphint = mpte;
48ffc236 1419#if JGHINT
c8fe38ae 1420 }
48ffc236 1421#endif
c8fe38ae
MD
1422 }
1423
48ffc236 1424 return pmap_unwire_pte_hold(pmap, va, mpte, info);
c8fe38ae 1425}
d7f50089
YY
1426
1427/*
c8fe38ae
MD
1428 * Initialize pmap0/vmspace0. This pmap is not added to pmap_list because
1429 * it, and IdlePTD, represents the template used to update all other pmaps.
1430 *
1431 * On architectures where the kernel pmap is not integrated into the user
1432 * process pmap, this pmap represents the process pmap, not the kernel pmap.
1433 * kernel_pmap should be used to directly access the kernel_pmap.
d7f50089
YY
1434 */
1435void
c8fe38ae 1436pmap_pinit0(struct pmap *pmap)
48ffc236 1437READY1
d7f50089 1438{
48ffc236 1439#if JGPMAP32
c8fe38ae
MD
1440 pmap->pm_pdir =
1441 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
1442 pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
48ffc236
JG
1443#endif
1444 pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
c8fe38ae
MD
1445 pmap->pm_count = 1;
1446 pmap->pm_active = 0;
1447 pmap->pm_ptphint = NULL;
1448 TAILQ_INIT(&pmap->pm_pvlist);
1449 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
d7f50089
YY
1450}
1451
1452/*
c8fe38ae
MD
1453 * Initialize a preallocated and zeroed pmap structure,
1454 * such as one in a vmspace structure.
d7f50089
YY
1455 */
1456void
c8fe38ae 1457pmap_pinit(struct pmap *pmap)
48ffc236 1458READY1
d7f50089 1459{
c8fe38ae
MD
1460 vm_page_t ptdpg;
1461
1462 /*
1463 * No need to allocate page table space yet but we do need a valid
1464 * page directory table.
1465 */
48ffc236
JG
1466 if (pmap->pm_pml4 == NULL) {
1467 pmap->pm_pml4 =
1468 (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
c8fe38ae
MD
1469 }
1470
1471 /*
1472 * Allocate an object for the ptes
1473 */
1474 if (pmap->pm_pteobj == NULL)
0a5c555b 1475 pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, NUPDE + NUPDPE + PML4PML4I + 1);
c8fe38ae
MD
1476
1477 /*
1478 * Allocate the page directory page, unless we already have
1479 * one cached. If we used the cached page the wire_count will
1480 * already be set appropriately.
1481 */
1482 if ((ptdpg = pmap->pm_pdirm) == NULL) {
0a5c555b 1483 ptdpg = vm_page_grab(pmap->pm_pteobj, NUPDE + NUPDPE + PML4PML4I,
c8fe38ae
MD
1484 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1485 pmap->pm_pdirm = ptdpg;
1486 vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY);
1487 ptdpg->valid = VM_PAGE_BITS_ALL;
1488 ptdpg->wire_count = 1;
1489 ++vmstats.v_wire_count;
48ffc236 1490 pmap_kenter((vm_offset_t)pmap->pm_pml4, VM_PAGE_TO_PHYS(ptdpg));
c8fe38ae
MD
1491 }
1492 if ((ptdpg->flags & PG_ZERO) == 0)
48ffc236 1493 bzero(pmap->pm_pml4, PAGE_SIZE);
c8fe38ae 1494
48ffc236
JG
1495 pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
1496 pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
c8fe38ae
MD
1497
1498 /* install self-referential address mapping entry */
48ffc236 1499 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
c8fe38ae
MD
1500
1501 pmap->pm_count = 1;
1502 pmap->pm_active = 0;
1503 pmap->pm_ptphint = NULL;
1504 TAILQ_INIT(&pmap->pm_pvlist);
1505 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1506 pmap->pm_stats.resident_count = 1;
d7f50089
YY
1507}
1508
1509/*
c8fe38ae
MD
1510 * Clean up a pmap structure so it can be physically freed. This routine
1511 * is called by the vmspace dtor function. A great deal of pmap data is
1512 * left passively mapped to improve vmspace management so we have a bit
1513 * of cleanup work to do here.
d7f50089
YY
1514 */
1515void
c8fe38ae 1516pmap_puninit(pmap_t pmap)
48ffc236 1517READY1
d7f50089 1518{
c8fe38ae
MD
1519 vm_page_t p;
1520
1521 KKASSERT(pmap->pm_active == 0);
1522 if ((p = pmap->pm_pdirm) != NULL) {
48ffc236
JG
1523 KKASSERT(pmap->pm_pml4 != NULL);
1524 KKASSERT(pmap->pm_pml4 != (PTOV_OFFSET + KPML4phys));
1525 pmap_kremove((vm_offset_t)pmap->pm_pml4);
c8fe38ae
MD
1526 p->wire_count--;
1527 vmstats.v_wire_count--;
1528 KKASSERT((p->flags & PG_BUSY) == 0);
1529 vm_page_busy(p);
1530 vm_page_free_zero(p);
1531 pmap->pm_pdirm = NULL;
1532 }
48ffc236
JG
1533 if (pmap->pm_pml4) {
1534 KKASSERT(pmap->pm_pml4 != (PTOV_OFFSET + KPML4phys));
1535 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE);
1536 pmap->pm_pml4 = NULL;
c8fe38ae
MD
1537 }
1538 if (pmap->pm_pteobj) {
1539 vm_object_deallocate(pmap->pm_pteobj);
1540 pmap->pm_pteobj = NULL;
1541 }
d7f50089
YY
1542}
1543
1544/*
c8fe38ae
MD
1545 * Wire in kernel global address entries. To avoid a race condition
1546 * between pmap initialization and pmap_growkernel, this procedure
1547 * adds the pmap to the master list (which growkernel scans to update),
1548 * then copies the template.
d7f50089
YY
1549 */
1550void
c8fe38ae 1551pmap_pinit2(struct pmap *pmap)
48ffc236 1552READY0
d7f50089 1553{
c8fe38ae
MD
1554 crit_enter();
1555 TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
1556 /* XXX copies current process, does not fill in MPPTDI */
48ffc236 1557#if JGPMAP32
c8fe38ae 1558 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
48ffc236 1559#endif
c8fe38ae 1560 crit_exit();
d7f50089
YY
1561}
1562
1563/*
c8fe38ae
MD
1564 * Attempt to release and free a vm_page in a pmap. Returns 1 on success,
1565 * 0 on failure (if the procedure had to sleep).
d7f50089 1566 *
c8fe38ae
MD
1567 * When asked to remove the page directory page itself, we actually just
1568 * leave it cached so we do not have to incur the SMP inval overhead of
1569 * removing the kernel mapping. pmap_puninit() will take care of it.
d7f50089
YY
1570 */
1571static int
c8fe38ae 1572pmap_release_free_page(struct pmap *pmap, vm_page_t p)
48ffc236 1573READY1
d7f50089 1574{
48ffc236 1575 pml4_entry_t *pml4 = pmap->pm_pml4;
c8fe38ae
MD
1576 /*
1577 * This code optimizes the case of freeing non-busy
1578 * page-table pages. Those pages are zero now, and
1579 * might as well be placed directly into the zero queue.
1580 */
1581 if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
d7f50089 1582 return 0;
d7f50089 1583
c8fe38ae
MD
1584 vm_page_busy(p);
1585
1586 /*
1587 * Remove the page table page from the processes address space.
1588 */
1b2e0b92
JG
1589 if (p->pindex >= (NUPDE + NUPDPE) && p->pindex != (NUPDE + NUPDPE + PML4PML4I)) {
1590 /*
1591 * We are a PDP page.
1592 * We look for the PML4 entry that points to us.
1593 */
1594 vm_page_t m4 = vm_page_lookup(pmap->pm_pteobj, NUPDE + NUPDPE + PML4PML4I);
1595 KKASSERT(m4 != NULL);
1596 pml4_entry_t *pml4 = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m4));
1597 int idx = (p->pindex - (NUPDE + NUPDPE)) % NPML4EPG;
1598 KKASSERT(pml4[idx] != 0);
1599 pml4[idx] = 0;
1600 m4->hold_count--;
1601 /* JG What about wire_count? */
1602 } else if (p->pindex >= NUPDE) {
1603 /*
1604 * We are a PD page.
1605 * We look for the PDP entry that points to us.
1606 */
1607 vm_page_t m3 = vm_page_lookup(pmap->pm_pteobj, NUPDE + NUPDPE + (p->pindex - NUPDE) / NPDPEPG);
1608 KKASSERT(m3 != NULL);
1609 pdp_entry_t *pdp = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m3));
1610 int idx = (p->pindex - NUPDE) % NPDPEPG;
1611 KKASSERT(pdp[idx] != 0);
1612 pdp[idx] = 0;
1613 m3->hold_count--;
1614 /* JG What about wire_count? */
1615 } else {
1616 /* We are a PT page.
1617 * We look for the PD entry that points to us.
1618 */
1619 vm_page_t m2 = vm_page_lookup(pmap->pm_pteobj, NUPDE + p->pindex / NPDEPG);
1620 KKASSERT(m2 != NULL);
1621 pd_entry_t *pd = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m2));
1622 int idx = p->pindex % NPDEPG;
1623 pd[idx] = 0;
1624 m2->hold_count--;
1625 /* JG What about wire_count? */
1626 }
c8fe38ae
MD
1627 KKASSERT(pmap->pm_stats.resident_count > 0);
1628 --pmap->pm_stats.resident_count;
1629
1630 if (p->hold_count) {
1631 panic("pmap_release: freeing held page table page");
1632 }
1633 if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
1634 pmap->pm_ptphint = NULL;
1635
1b2e0b92
JG
1636 /*
1637 * We leave the top-level page table page cached, wired, and mapped in
1638 * the pmap until the dtor function (pmap_puninit()) gets called.
1639 * However, still clean it up so we can set PG_ZERO.
1640 */
1641 if (p->pindex == NUPDE + NUPDPE + PML4PML4I) {
1642 bzero(pmap->pm_pml4, PAGE_SIZE);
1643 vm_page_flag_set(p, PG_ZERO);
1644 vm_page_wakeup(p);
1645 } else {
1646 p->wire_count--;
1647 vmstats.v_wire_count--;
1648 /* JG eventually revert to using vm_page_free_zero() */
1649 vm_page_free(p);
1650 }
c8fe38ae
MD
1651 return 1;
1652}
d7f50089
YY
1653
1654/*
c8fe38ae
MD
1655 * this routine is called if the page table page is not
1656 * mapped correctly.
d7f50089
YY
1657 */
1658static vm_page_t
c8fe38ae 1659_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex)
48ffc236 1660READY1
c8fe38ae 1661{
48ffc236 1662 vm_page_t m, pdppg, pdpg;
c8fe38ae
MD
1663
1664 /*
1665 * Find or fabricate a new pagetable page
1666 */
1667 m = vm_page_grab(pmap->pm_pteobj, ptepindex,
1668 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
1669
48ffc236
JG
1670
1671 if ((m->flags & PG_ZERO) == 0) {
1672 pmap_zero_page(VM_PAGE_TO_PHYS(m));
1673 }
1674
c8fe38ae
MD
1675 KASSERT(m->queue == PQ_NONE,
1676 ("_pmap_allocpte: %p->queue != PQ_NONE", m));
1677
1678 /*
1679 * Increment the hold count for the page we will be returning to
1680 * the caller.
1681 */
1682 m->hold_count++;
1683
1684 /*
1685 * It is possible that someone else got in and mapped the page
1686 * directory page while we were blocked; if so, just unbusy and
1687 * return the held page.
1688 */
48ffc236 1689#if JGPMAP32
c8fe38ae
MD
1690 if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
1691 KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
1692 vm_page_wakeup(m);
1693 return(m);
1694 }
48ffc236 1695#endif
c8fe38ae
MD
1696
1697 if (m->wire_count == 0)
1698 vmstats.v_wire_count++;
1699 m->wire_count++;
1700
1701
1702 /*
1703 * Map the pagetable page into the process address space, if
1704 * it isn't already there.
1705 */
1706
1707 ++pmap->pm_stats.resident_count;
1708
48ffc236 1709#if JGPMAP32
c8fe38ae
MD
1710 ptepa = VM_PAGE_TO_PHYS(m);
1711 pmap->pm_pdir[ptepindex] =
1712 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
48ffc236
JG
1713#endif
1714 if (ptepindex >= (NUPDE + NUPDPE)) {
1715 pml4_entry_t *pml4;
1716 vm_pindex_t pml4index;
1717
3535204a 1718 /* Wire up a new PDP page */
48ffc236
JG
1719 pml4index = ptepindex - (NUPDE + NUPDPE);
1720 pml4 = &pmap->pm_pml4[pml4index];
1721 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1722
1723 } else if (ptepindex >= NUPDE) {
1724 vm_pindex_t pml4index;
1725 vm_pindex_t pdpindex;
1726 pml4_entry_t *pml4;
1727 pdp_entry_t *pdp;
1728
3535204a 1729 /* Wire up a new PD page */
48ffc236
JG
1730 pdpindex = ptepindex - NUPDE;
1731 pml4index = pdpindex >> NPML4EPGSHIFT;
1732
1733 pml4 = &pmap->pm_pml4[pml4index];
1734 if ((*pml4 & PG_V) == 0) {
9f5109e6 1735 /* Have to allocate a new PDP page, recurse */
48ffc236
JG
1736 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index)
1737 == NULL) {
1738 --m->wire_count;
1739 vm_page_free(m);
1740 return (NULL);
1741 }
1742 } else {
9f5109e6 1743 /* Add reference to the PDP page */
48ffc236 1744 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
1b2e0b92 1745 pdppg->hold_count++;
48ffc236
JG
1746 }
1747 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
c8fe38ae 1748
48ffc236
JG
1749 /* Now find the pdp page */
1750 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1b2e0b92 1751 KKASSERT(*pdp == 0); /* JG DEBUG64 */
48ffc236 1752 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
c8fe38ae 1753
48ffc236
JG
1754 } else {
1755 vm_pindex_t pml4index;
1756 vm_pindex_t pdpindex;
1757 pml4_entry_t *pml4;
1758 pdp_entry_t *pdp;
1759 pd_entry_t *pd;
1760
3535204a 1761 /* Wire up a new PT page */
48ffc236
JG
1762 pdpindex = ptepindex >> NPDPEPGSHIFT;
1763 pml4index = pdpindex >> NPML4EPGSHIFT;
1764
1765 /* First, find the pdp and check that it's valid. */
1766 pml4 = &pmap->pm_pml4[pml4index];
1767 if ((*pml4 & PG_V) == 0) {
9f5109e6
JG
1768 /* The PDP page is missing. We ultimately need a PD page.
1769 * Recursively allocating a PD page will allocate
1770 * the missing PDP page and will also allocate
1771 * the PD page we need.
1772 */
1773 /* Have to allocate a new PD page, recurse */
48ffc236
JG
1774 if (_pmap_allocpte(pmap, NUPDE + pdpindex)
1775 == NULL) {
1776 --m->wire_count;
1777 vm_page_free(m);
1778 return (NULL);
1779 }
1780 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1781 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
c8fe38ae 1782 } else {
48ffc236
JG
1783 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1784 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1785 if ((*pdp & PG_V) == 0) {
9f5109e6 1786 /* Have to allocate a new PD page, recurse */
48ffc236
JG
1787 if (_pmap_allocpte(pmap, NUPDE + pdpindex)
1788 == NULL) {
1789 --m->wire_count;
1790 vm_page_free(m);
1791 return (NULL);
1792 }
1793 } else {
9f5109e6 1794 /* Add reference to the PD page */
48ffc236 1795 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
1b2e0b92 1796 pdpg->hold_count++;
48ffc236 1797 }
c8fe38ae 1798 }
48ffc236
JG
1799 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
1800
1801 /* Now we know where the page directory page is */
1802 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
1b2e0b92 1803 KKASSERT(*pd == 0); /* JG DEBUG64 */
48ffc236 1804 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
c8fe38ae
MD
1805 }
1806
48ffc236
JG
1807
1808 /*
1809 * Set the page table hint
1810 */
1811 pmap->pm_ptphint = m;
1812
c8fe38ae
MD
1813 m->valid = VM_PAGE_BITS_ALL;
1814 vm_page_flag_clear(m, PG_ZERO);
1815 vm_page_flag_set(m, PG_MAPPED);
1816 vm_page_wakeup(m);
1817
1818 return m;
1819}
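/*
 * Editorial sketch (not part of the original source): _pmap_allocpte()
 * above treats its single pindex argument as three contiguous ranges,
 * one per page-table level.  The minimal helper below only illustrates
 * that partitioning, using the NUPDE/NUPDPE constants already used in
 * this file; the function name is hypothetical and it is never called.
 */
static __inline int
_pmap_allocpte_level_sketch(vm_pindex_t ptepindex)
{
	if (ptepindex >= NUPDE + NUPDPE)
		return (3);	/* page backs a PDP page, hung off the PML4 */
	else if (ptepindex >= NUPDE)
		return (2);	/* page backs a PD page, hung off a PDP */
	else
		return (1);	/* page backs a PT page, hung off a PD */
}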
1820
1821static vm_page_t
1822pmap_allocpte(pmap_t pmap, vm_offset_t va)
48ffc236 1823READY1
d7f50089 1824{
c8fe38ae 1825 vm_pindex_t ptepindex;
48ffc236 1826 pd_entry_t *pd;
c8fe38ae
MD
1827 vm_page_t m;
1828
1829 /*
1830 * Calculate pagetable page index
1831 */
48ffc236 1832 ptepindex = pmap_pde_pindex(va);
c8fe38ae
MD
1833
1834 /*
1835 * Get the page directory entry
1836 */
48ffc236 1837 pd = pmap_pde(pmap, va);
c8fe38ae
MD
1838
1839 /*
48ffc236 1840 * This supports switching from a 2MB page to a
c8fe38ae
MD
1841 * normal 4K page.
1842 */
48ffc236 1843 if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
1b2e0b92 1844 panic("no promotion/demotion yet");
48ffc236
JG
1845 *pd = 0;
1846 pd = NULL;
c8fe38ae
MD
1847 cpu_invltlb();
1848 smp_invltlb();
1849 }
1850
1851 /*
1852 * If the page table page is mapped, we just increment the
1853 * hold count, and activate it.
1854 */
48ffc236
JG
1855 if (pd != NULL && (*pd & PG_V) != 0) {
1856 /* YYY hint is used here on i386 */
1857 m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
1858 pmap->pm_ptphint = m;
c8fe38ae
MD
1859 m->hold_count++;
1860 return m;
1861 }
1862 /*
1863 * Here if the pte page isn't mapped, or if it has been deallocated.
1864 */
1865 return _pmap_allocpte(pmap, ptepindex);
d7f50089
YY
1866}
1867
c8fe38ae
MD
1868
1869/***************************************************
1870 * Pmap allocation/deallocation routines.
1871 ***************************************************/
1872
d7f50089 1873/*
c8fe38ae
MD
1874 * Release any resources held by the given physical map.
1875 * Called when a pmap initialized by pmap_pinit is being released.
1876 * Should only be called if the map contains no valid mappings.
d7f50089 1877 */
c8fe38ae 1878static int pmap_release_callback(struct vm_page *p, void *data);
d7f50089 1879
c8fe38ae
MD
1880void
1881pmap_release(struct pmap *pmap)
48ffc236 1882READY1
d7f50089 1883{
c8fe38ae
MD
1884 vm_object_t object = pmap->pm_pteobj;
1885 struct rb_vm_page_scan_info info;
1886
1887 KASSERT(pmap->pm_active == 0, ("pmap still active! %08x", pmap->pm_active));
1888#if defined(DIAGNOSTIC)
1889 if (object->ref_count != 1)
1890 panic("pmap_release: pteobj reference count != 1");
1891#endif
1892
1893 info.pmap = pmap;
1894 info.object = object;
1895 crit_enter();
1896 TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
1897 crit_exit();
1898
1899 do {
1900 crit_enter();
1901 info.error = 0;
1902 info.mpte = NULL;
1903 info.limit = object->generation;
1904
1905 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1906 pmap_release_callback, &info);
1907 if (info.error == 0 && info.mpte) {
1908 if (!pmap_release_free_page(pmap, info.mpte))
1909 info.error = 1;
1910 }
1911 crit_exit();
1912 } while (info.error);
d7f50089
YY
1913}
1914
d7f50089 1915static int
c8fe38ae 1916pmap_release_callback(struct vm_page *p, void *data)
48ffc236 1917READY1
d7f50089 1918{
c8fe38ae
MD
1919 struct rb_vm_page_scan_info *info = data;
1920
0a5c555b 1921 if (p->pindex == NUPDE + NUPDPE + PML4PML4I) {
c8fe38ae
MD
1922 info->mpte = p;
1923 return(0);
1924 }
1925 if (!pmap_release_free_page(info->pmap, p)) {
1926 info->error = 1;
1927 return(-1);
1928 }
1929 if (info->object->generation != info->limit) {
1930 info->error = 1;
1931 return(-1);
1932 }
1933 return(0);
d7f50089
YY
1934}
1935
1936/*
c8fe38ae 1937 * Grow the number of kernel page table entries, if needed.
d7f50089 1938 */
c8fe38ae
MD
1939
1940void
1941pmap_growkernel(vm_offset_t addr)
48ffc236 1942READY1
d7f50089 1943{
48ffc236 1944 vm_paddr_t paddr;
c8fe38ae
MD
1945 struct pmap *pmap;
1946 vm_offset_t ptppaddr;
1947 vm_page_t nkpg;
48ffc236
JG
1948 pd_entry_t *pde, newpdir;
1949 pdp_entry_t newpdp;
c8fe38ae
MD
1950
1951 crit_enter();
1952 if (kernel_vm_end == 0) {
1953 kernel_vm_end = KERNBASE;
1954 nkpt = 0;
48ffc236 1955 while ((*pmap_pde(&kernel_pmap, kernel_vm_end) & PG_V) != 0) {
c8fe38ae
MD
1956 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1957 nkpt++;
48ffc236
JG
1958 if (kernel_vm_end - 1 >= kernel_map.max_offset) {
1959 kernel_vm_end = kernel_map.max_offset;
1960 break;
1961 }
c8fe38ae
MD
1962 }
1963 }
48ffc236
JG
1964 addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1965 if (addr - 1 >= kernel_map.max_offset)
1966 addr = kernel_map.max_offset;
c8fe38ae 1967 while (kernel_vm_end < addr) {
48ffc236
JG
1968 pde = pmap_pde(&kernel_pmap, kernel_vm_end);
1969 if (pde == NULL) {
1970 /* We need a new PDP entry */
1971 nkpg = vm_page_alloc(kptobj, nkpt,
1972 VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM
1973 | VM_ALLOC_INTERRUPT);
1974 if (nkpg == NULL)
1975 panic("pmap_growkernel: no memory to grow kernel");
1976 if ((nkpg->flags & PG_ZERO) == 0)
1977 pmap_zero_page(nkpg);
1978 paddr = VM_PAGE_TO_PHYS(nkpg);
1979 newpdp = (pdp_entry_t)
1980 (paddr | PG_V | PG_RW | PG_A | PG_M);
1981 *pmap_pdpe(&kernel_pmap, kernel_vm_end) = newpdp;
1982 continue; /* try again */
1983 }
1984 if ((*pde & PG_V) != 0) {
c8fe38ae 1985 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
48ffc236
JG
1986 if (kernel_vm_end - 1 >= kernel_map.max_offset) {
1987 kernel_vm_end = kernel_map.max_offset;
1988 break;
1989 }
c8fe38ae
MD
1990 continue;
1991 }
1992
1993 /*
1994 * This index is bogus, but out of the way
1995 */
48ffc236 1996 nkpg = vm_page_alloc(kptobj, nkpt,
c8fe38ae
MD
1997 VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT);
1998 if (nkpg == NULL)
1999 panic("pmap_growkernel: no memory to grow kernel");
2000
2001 vm_page_wire(nkpg);
2002 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
2003 pmap_zero_page(ptppaddr);
2004 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
c8fe38ae
MD
2005 *pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;
2006 nkpt++;
2007
48ffc236
JG
2008 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
2009 if (kernel_vm_end - 1 >= kernel_map.max_offset) {
2010 kernel_vm_end = kernel_map.max_offset;
2011 break;
c8fe38ae 2012 }
c8fe38ae
MD
2013 }
2014 crit_exit();
d7f50089
YY
2015}
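/*
 * Editorial sketch (not part of the original source): pmap_growkernel()
 * above extends the kernel page tables one page-table page at a time,
 * so kernel_vm_end always advances in units of PAGE_SIZE * NPTEPG
 * (2MB of KVA on amd64).  The helper name is hypothetical and the
 * function is never called; it only restates the rounding used above.
 */
static __inline vm_offset_t
pmap_growkernel_step_sketch(vm_offset_t va)
{
	/* round up to the start of the next page-table page worth of KVA */
	return ((va + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1));
}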
2016
2017/*
c8fe38ae
MD
2018 * Retire the given physical map from service.
2019 * Should only be called if the map contains
2020 * no valid mappings.
d7f50089 2021 */
c8fe38ae
MD
2022void
2023pmap_destroy(pmap_t pmap)
48ffc236 2024READY0
d7f50089 2025{
c8fe38ae
MD
2026 int count;
2027
2028 if (pmap == NULL)
2029 return;
2030
2031 count = --pmap->pm_count;
2032 if (count == 0) {
2033 pmap_release(pmap);
2034 panic("destroying a pmap is not yet implemented");
2035 }
d7f50089
YY
2036}
2037
2038/*
c8fe38ae 2039 * Add a reference to the specified pmap.
d7f50089 2040 */
c8fe38ae
MD
2041void
2042pmap_reference(pmap_t pmap)
48ffc236 2043READY2
d7f50089 2044{
c8fe38ae
MD
2045 if (pmap != NULL) {
2046 pmap->pm_count++;
2047 }
d7f50089
YY
2048}
2049
c8fe38ae
MD
2050/***************************************************
2051 * Page management routines.
2052 ***************************************************/
d7f50089
YY
2053
2054/*
2055 * free the pv_entry back to the free list. This function may be
2056 * called from an interrupt.
2057 */
c8fe38ae 2058static PMAP_INLINE void
d7f50089 2059free_pv_entry(pv_entry_t pv)
48ffc236 2060READY2
d7f50089 2061{
c8fe38ae 2062 pv_entry_count--;
48ffc236 2063 KKASSERT(pv_entry_count >= 0);
c8fe38ae 2064 zfree(pvzone, pv);
d7f50089
YY
2065}
2066
2067/*
2068 * get a new pv_entry, allocating a block from the system
2069 * when needed. This function may be called from an interrupt.
2070 */
2071static pv_entry_t
2072get_pv_entry(void)
48ffc236 2073READY2
d7f50089 2074{
c8fe38ae
MD
2075 pv_entry_count++;
2076 if (pv_entry_high_water &&
48ffc236
JG
2077 (pv_entry_count > pv_entry_high_water) &&
2078 (pmap_pagedaemon_waken == 0)) {
c8fe38ae 2079 pmap_pagedaemon_waken = 1;
48ffc236 2080 wakeup(&vm_pages_needed);
c8fe38ae
MD
2081 }
2082 return zalloc(pvzone);
d7f50089
YY
2083}
2084
2085/*
2086 * This routine is very drastic, but can save the system
2087 * in a pinch.
2088 */
2089void
2090pmap_collect(void)
48ffc236 2091READY0
d7f50089 2092{
c8fe38ae
MD
2093 int i;
2094 vm_page_t m;
2095 static int warningdone=0;
2096
2097 if (pmap_pagedaemon_waken == 0)
2098 return;
2099
2100 if (warningdone < 5) {
2101 kprintf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
2102 warningdone++;
2103 }
2104
2105 for(i = 0; i < vm_page_array_size; i++) {
2106 m = &vm_page_array[i];
2107 if (m->wire_count || m->hold_count || m->busy ||
2108 (m->flags & PG_BUSY))
2109 continue;
2110 pmap_remove_all(m);
2111 }
48ffc236 2112 pmap_pagedaemon_waken = 0;
d7f50089
YY
2113}
2114
c8fe38ae 2115
d7f50089
YY
2116/*
2117 * If it is the first entry on the list, it is actually
2118 * in the header and we must copy the following entry up
2119 * to the header. Otherwise we must search the list for
2120 * the entry. In either case we free the now unused entry.
2121 */
2122static int
c8fe38ae
MD
2123pmap_remove_entry(struct pmap *pmap, vm_page_t m,
2124 vm_offset_t va, pmap_inval_info_t info)
48ffc236 2125READY1
c8fe38ae
MD
2126{
2127 pv_entry_t pv;
2128 int rtval;
2129
2130 crit_enter();
2131 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
2132 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2133 if (pmap == pv->pv_pmap && va == pv->pv_va)
2134 break;
2135 }
2136 } else {
2137 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
2138 if (va == pv->pv_va)
2139 break;
2140 }
2141 }
2142
2143 rtval = 0;
48ffc236 2144 /* JGXXX When can 'pv' be NULL? */
c8fe38ae
MD
2145 if (pv) {
2146 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2147 m->md.pv_list_count--;
48ffc236 2148 KKASSERT(m->md.pv_list_count >= 0);
c8fe38ae
MD
2149 if (TAILQ_EMPTY(&m->md.pv_list))
2150 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
2151 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2152 ++pmap->pm_generation;
2153 rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
2154 free_pv_entry(pv);
2155 }
2156 crit_exit();
2157 return rtval;
d7f50089
YY
2158}
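/*
 * Editorial note (not part of the original source): pmap_remove_entry()
 * above looks the pv_entry up on whichever list is expected to be
 * shorter -- the page's pv_list when the page has fewer entries than
 * the pmap has resident pages, otherwise the pmap's pm_pvlist -- and
 * then unlinks it from both lists before dropping the page-table-page
 * reference via pmap_unuse_pt().
 */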
2159
2160/*
c8fe38ae
MD
2161 * Create a pv entry for page at pa for
2162 * (pmap, va).
d7f50089
YY
2163 */
2164static void
2165pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
48ffc236 2166READY1
d7f50089 2167{
c8fe38ae
MD
2168 pv_entry_t pv;
2169
2170 crit_enter();
2171 pv = get_pv_entry();
2172 pv->pv_va = va;
2173 pv->pv_pmap = pmap;
2174 pv->pv_ptem = mpte;
2175
2176 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
2177 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2178 m->md.pv_list_count++;
2179
2180 crit_exit();
d7f50089
YY
2181}
2182
2183/*
2184 * pmap_remove_pte: do the things to unmap a page in a process
2185 */
2186static int
c8fe38ae
MD
2187pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
2188 pmap_inval_info_t info)
48ffc236 2189READY1
c8fe38ae
MD
2190{
2191 pt_entry_t oldpte;
2192 vm_page_t m;
2193
2194 pmap_inval_add(info, pmap, va);
2195 oldpte = pte_load_clear(ptq);
2196 if (oldpte & PG_W)
2197 pmap->pm_stats.wired_count -= 1;
2198 /*
 2199 * Machines that don't support invlpg also don't support
2200 * PG_G. XXX PG_G is disabled for SMP so don't worry about
2201 * the SMP case.
2202 */
2203 if (oldpte & PG_G)
2204 cpu_invlpg((void *)va);
2205 KKASSERT(pmap->pm_stats.resident_count > 0);
2206 --pmap->pm_stats.resident_count;
2207 if (oldpte & PG_MANAGED) {
2208 m = PHYS_TO_VM_PAGE(oldpte);
2209 if (oldpte & PG_M) {
2210#if defined(PMAP_DIAGNOSTIC)
2211 if (pmap_nw_modified((pt_entry_t) oldpte)) {
2212 kprintf(
48ffc236 2213 "pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
c8fe38ae
MD
2214 va, oldpte);
2215 }
2216#endif
2217 if (pmap_track_modified(va))
2218 vm_page_dirty(m);
2219 }
2220 if (oldpte & PG_A)
2221 vm_page_flag_set(m, PG_REFERENCED);
2222 return pmap_remove_entry(pmap, m, va, info);
2223 } else {
2224 return pmap_unuse_pt(pmap, va, NULL, info);
2225 }
2226
d7f50089
YY
2227 return 0;
2228}
2229
2230/*
2231 * pmap_remove_page:
2232 *
2233 * Remove a single page from a process address space.
2234 *
2235 * This function may not be called from an interrupt if the pmap is
2236 * not kernel_pmap.
2237 */
2238static void
c8fe38ae 2239pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
48ffc236 2240READY1
c8fe38ae 2241{
48ffc236 2242 pt_entry_t *pte;
c8fe38ae 2243
48ffc236
JG
2244 pte = pmap_pte(pmap, va);
2245 if (pte == NULL)
2246 return;
2247 if ((*pte & PG_V) == 0)
2248 return;
2249 pmap_remove_pte(pmap, pte, va, info);
d7f50089
YY
2250}
2251
2252/*
2253 * pmap_remove:
2254 *
2255 * Remove the given range of addresses from the specified map.
2256 *
2257 * It is assumed that the start and end are properly
2258 * rounded to the page size.
2259 *
2260 * This function may not be called from an interrupt if the pmap is
2261 * not kernel_pmap.
2262 */
2263void
2264pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
48ffc236 2265READY1
d7f50089 2266{
48ffc236
JG
2267 vm_offset_t va_next;
2268 pml4_entry_t *pml4e;
2269 pdp_entry_t *pdpe;
2270 pd_entry_t ptpaddr, *pde;
2271 pt_entry_t *pte;
c8fe38ae
MD
2272 struct pmap_inval_info info;
2273
2274 if (pmap == NULL)
2275 return;
2276
2277 if (pmap->pm_stats.resident_count == 0)
2278 return;
2279
2280 pmap_inval_init(&info);
2281
2282 /*
 2283 * Special handling for removing a single page, a very
 2284 * common operation for which it is easy to short-circuit
 2285 * some code.
2286 */
48ffc236
JG
2287 if (sva + PAGE_SIZE == eva) {
2288 pde = pmap_pde(pmap, sva);
2289 if (pde && (*pde & PG_PS) == 0) {
2290 pmap_remove_page(pmap, sva, &info);
2291 pmap_inval_flush(&info);
2292 return;
2293 }
c8fe38ae
MD
2294 }
2295
48ffc236
JG
2296 for (; sva < eva; sva = va_next) {
2297 pml4e = pmap_pml4e(pmap, sva);
2298 if ((*pml4e & PG_V) == 0) {
2299 va_next = (sva + NBPML4) & ~PML4MASK;
2300 if (va_next < sva)
2301 va_next = eva;
2302 continue;
2303 }
c8fe38ae 2304
48ffc236
JG
2305 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
2306 if ((*pdpe & PG_V) == 0) {
2307 va_next = (sva + NBPDP) & ~PDPMASK;
2308 if (va_next < sva)
2309 va_next = eva;
2310 continue;
2311 }
c8fe38ae
MD
2312
2313 /*
2314 * Calculate index for next page table.
2315 */
48ffc236
JG
2316 va_next = (sva + NBPDR) & ~PDRMASK;
2317 if (va_next < sva)
2318 va_next = eva;
c8fe38ae 2319
48ffc236
JG
2320 pde = pmap_pdpe_to_pde(pdpe, sva);
2321 ptpaddr = *pde;
c8fe38ae
MD
2322
2323 /*
48ffc236 2324 * Weed out invalid mappings.
c8fe38ae
MD
2325 */
2326 if (ptpaddr == 0)
2327 continue;
2328
48ffc236
JG
2329 /*
2330 * Check for large page.
2331 */
2332 if ((ptpaddr & PG_PS) != 0) {
2333 /* JG FreeBSD has more complex treatment here */
2334 pmap_inval_add(&info, pmap, -1);
2335 *pde = 0;
2336 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2337 continue;
2338 }
2339
c8fe38ae
MD
2340 /*
2341 * Limit our scan to either the end of the va represented
2342 * by the current page table page, or to the end of the
2343 * range being removed.
2344 */
48ffc236
JG
2345 if (va_next > eva)
2346 va_next = eva;
c8fe38ae
MD
2347
2348 /*
2349 * NOTE: pmap_remove_pte() can block.
2350 */
48ffc236
JG
2351 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2352 sva += PAGE_SIZE) {
2353 if (*pte == 0)
c8fe38ae 2354 continue;
48ffc236 2355 if (pmap_remove_pte(pmap, pte, sva, &info))
c8fe38ae
MD
2356 break;
2357 }
2358 }
2359 pmap_inval_flush(&info);
d7f50089
YY
2360}
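/*
 * Editorial note (not part of the original source): in pmap_remove()
 * above, va_next is rounded up to the next NBPML4 (512GB), NBPDP (1GB)
 * or NBPDR (2MB) boundary, so a missing PML4, PDP or PD entry lets the
 * loop skip the whole unmapped region in one step instead of visiting
 * it page by page.
 */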
2361
2362/*
2363 * pmap_remove_all:
2364 *
c8fe38ae
MD
2365 * Removes this physical page from all physical maps in which it resides.
2366 * Reflects back modify bits to the pager.
d7f50089 2367 *
c8fe38ae 2368 * This routine may not be called from an interrupt.
d7f50089 2369 */
c8fe38ae 2370
d7f50089
YY
2371static void
2372pmap_remove_all(vm_page_t m)
48ffc236 2373READY1
d7f50089 2374{
c8fe38ae
MD
2375 struct pmap_inval_info info;
2376 pt_entry_t *pte, tpte;
2377 pv_entry_t pv;
2378
2379 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2380 return;
2381
2382 pmap_inval_init(&info);
2383 crit_enter();
2384 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2385 KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
2386 --pv->pv_pmap->pm_stats.resident_count;
2387
2388 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
2389 pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
2390 tpte = pte_load_clear(pte);
2391
2392 if (tpte & PG_W)
2393 pv->pv_pmap->pm_stats.wired_count--;
2394
2395 if (tpte & PG_A)
2396 vm_page_flag_set(m, PG_REFERENCED);
2397
2398 /*
2399 * Update the vm_page_t clean and reference bits.
2400 */
2401 if (tpte & PG_M) {
2402#if defined(PMAP_DIAGNOSTIC)
48ffc236 2403 if (pmap_nw_modified(tpte)) {
c8fe38ae 2404 kprintf(
48ffc236 2405 "pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
c8fe38ae
MD
2406 pv->pv_va, tpte);
2407 }
2408#endif
2409 if (pmap_track_modified(pv->pv_va))
2410 vm_page_dirty(m);
2411 }
2412 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2413 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2414 ++pv->pv_pmap->pm_generation;
2415 m->md.pv_list_count--;
48ffc236 2416 KKASSERT(m->md.pv_list_count >= 0);
c8fe38ae
MD
2417 if (TAILQ_EMPTY(&m->md.pv_list))
2418 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
2419 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
2420 free_pv_entry(pv);
2421 }
2422 crit_exit();
2423 KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
2424 pmap_inval_flush(&info);
d7f50089
YY
2425}
2426
2427/*
2428 * pmap_protect:
2429 *
2430 * Set the physical protection on the specified range of this map
2431 * as requested.
2432 *
2433 * This function may not be called from an interrupt if the map is
2434 * not the kernel_pmap.
2435 */
2436void
2437pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
48ffc236 2438READY1
d7f50089 2439{
48ffc236
JG
2440 vm_offset_t va_next;
2441 pml4_entry_t *pml4e;
2442 pdp_entry_t *pdpe;
2443 pd_entry_t ptpaddr, *pde;
2444 pt_entry_t *pte;
c8fe38ae
MD
2445 pmap_inval_info info;
2446
48ffc236
JG
2447 /* JG review for NX */
2448
c8fe38ae
MD
2449 if (pmap == NULL)
2450 return;
2451
2452 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2453 pmap_remove(pmap, sva, eva);
2454 return;
2455 }
2456
2457 if (prot & VM_PROT_WRITE)
2458 return;
2459
2460 pmap_inval_init(&info);
2461
48ffc236 2462 for (; sva < eva; sva = va_next) {
c8fe38ae 2463
48ffc236
JG
2464 pml4e = pmap_pml4e(pmap, sva);
2465 if ((*pml4e & PG_V) == 0) {
2466 va_next = (sva + NBPML4) & ~PML4MASK;
2467 if (va_next < sva)
2468 va_next = eva;
2469 continue;
2470 }
c8fe38ae 2471
48ffc236
JG
2472 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
2473 if ((*pdpe & PG_V) == 0) {
2474 va_next = (sva + NBPDP) & ~PDPMASK;
2475 if (va_next < sva)
2476 va_next = eva;
2477 continue;
2478 }
c8fe38ae 2479
48ffc236
JG
2480 va_next = (sva + NBPDR) & ~PDRMASK;
2481 if (va_next < sva)
2482 va_next = eva;
c8fe38ae 2483
48ffc236
JG
2484 pde = pmap_pdpe_to_pde(pdpe, sva);
2485 ptpaddr = *pde;
c8fe38ae 2486
48ffc236
JG
2487 /*
2488 * Check for large page.
2489 */
2490 if ((ptpaddr & PG_PS) != 0) {
c8fe38ae 2491 pmap_inval_add(&info, pmap, -1);
48ffc236 2492 *pde &= ~(PG_M|PG_RW);
c8fe38ae
MD
2493 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2494 continue;
2495 }
2496
2497 /*
2498 * Weed out invalid mappings. Note: we assume that the page
2499 * directory table is always allocated, and in kernel virtual.
2500 */
2501 if (ptpaddr == 0)
2502 continue;
2503
48ffc236
JG
2504 if (va_next > eva)
2505 va_next = eva;
c8fe38ae 2506
48ffc236
JG
2507 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2508 sva += PAGE_SIZE) {
2509 pt_entry_t obits, pbits;
c8fe38ae
MD
2510 vm_page_t m;
2511
2512 /*
2513 * XXX non-optimal. Note also that there can be
2514 * no pmap_inval_flush() calls until after we modify
2515 * ptbase[sindex] (or otherwise we have to do another
2516 * pmap_inval_add() call).
2517 */
48ffc236
JG
2518 pmap_inval_add(&info, pmap, sva);
2519 obits = pbits = *pte;
2520 if ((pbits & PG_V) == 0)
2521 continue;
c8fe38ae
MD
2522 if (pbits & PG_MANAGED) {
2523 m = NULL;
2524 if (pbits & PG_A) {
48ffc236 2525 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
c8fe38ae
MD
2526 vm_page_flag_set(m, PG_REFERENCED);
2527 pbits &= ~PG_A;
2528 }
2529 if (pbits & PG_M) {
48ffc236 2530 if (pmap_track_modified(sva)) {
c8fe38ae 2531 if (m == NULL)
3cfe1a9f 2532 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
c8fe38ae
MD
2533 vm_page_dirty(m);
2534 pbits &= ~PG_M;
2535 }
2536 }
2537 }
2538
2539 pbits &= ~PG_RW;
2540
48ffc236
JG
2541 if (pbits != obits) {
2542 *pte = pbits;
c8fe38ae
MD
2543 }
2544 }
2545 }
2546 pmap_inval_flush(&info);
d7f50089
YY
2547}
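/*
 * Editorial note (not part of the original source): pmap_protect()
 * above only handles downgrades.  A request that removes read access
 * is turned into pmap_remove(), a request that still includes write
 * access returns immediately, and everything else clears PG_RW on the
 * affected PTEs, transferring any hardware-set PG_M state to the
 * vm_page via vm_page_dirty() before the bit is cleared.
 */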
2548
2549/*
c8fe38ae
MD
2550 * Insert the given physical page (p) at
2551 * the specified virtual address (v) in the
2552 * target physical map with the protection requested.
d7f50089 2553 *
c8fe38ae
MD
2554 * If specified, the page will be wired down, meaning
2555 * that the related pte can not be reclaimed.
d7f50089 2556 *
c8fe38ae
MD
2557 * NB: This is the only routine which MAY NOT lazy-evaluate
2558 * or lose information. That is, this routine must actually
2559 * insert this page into the given map NOW.
d7f50089
YY
2560 */
2561void
2562pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2563 boolean_t wired)
48ffc236 2564READY1
d7f50089 2565{
c8fe38ae 2566 vm_paddr_t pa;
48ffc236 2567 pd_entry_t *pde;
c8fe38ae
MD
2568 pt_entry_t *pte;
2569 vm_paddr_t opa;
48ffc236 2570 pt_entry_t origpte, newpte;
c8fe38ae
MD
2571 vm_page_t mpte;
2572 pmap_inval_info info;
2573
2574 if (pmap == NULL)
2575 return;
2576
48ffc236 2577 va = trunc_page(va);
c8fe38ae
MD
2578#ifdef PMAP_DIAGNOSTIC
2579 if (va >= KvaEnd)
2580 panic("pmap_enter: toobig");
2581 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
48ffc236 2582 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va);
c8fe38ae
MD
2583#endif
2584 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2585 kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
48ffc236
JG
2586#ifdef DDB
2587 db_print_backtrace();
2588#endif
c8fe38ae
MD
2589 }
2590 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2591 kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
48ffc236
JG
2592#ifdef DDB
2593 db_print_backtrace();
2594#endif
c8fe38ae
MD
2595 }
2596
2597 /*
2598 * In the case that a page table page is not
2599 * resident, we are creating it here.
2600 */
48ffc236 2601 if (va < VM_MAX_USER_ADDRESS)
c8fe38ae
MD
2602 mpte = pmap_allocpte(pmap, va);
2603 else
2604 mpte = NULL;
2605
2606 pmap_inval_init(&info);
48ffc236
JG
2607 pde = pmap_pde(pmap, va);
2608 if (pde != NULL && (*pde & PG_V) != 0) {
2609 if ((*pde & PG_PS) != 0)
2610 panic("pmap_enter: attempted pmap_enter on 2MB page");
2611 pte = pmap_pde_to_pte(pde, va);
2612 } else
2613 panic("pmap_enter: invalid page directory va=%#lx", va);
2614
2615 KKASSERT(pte != NULL);
2616 pa = VM_PAGE_TO_PHYS(m);
48ffc236 2617 origpte = *pte;
c8fe38ae
MD
2618 opa = origpte & PG_FRAME;
2619
c8fe38ae
MD
2620 /*
2621 * Mapping has not changed, must be protection or wiring change.
2622 */
2623 if (origpte && (opa == pa)) {
2624 /*
2625 * Wiring change, just update stats. We don't worry about
2626 * wiring PT pages as they remain resident as long as there
2627 * are valid mappings in them. Hence, if a user page is wired,
2628 * the PT page will be also.
2629 */
2630 if (wired && ((origpte & PG_W) == 0))
2631 pmap->pm_stats.wired_count++;
2632 else if (!wired && (origpte & PG_W))
2633 pmap->pm_stats.wired_count--;
2634
2635#if defined(PMAP_DIAGNOSTIC)
48ffc236 2636 if (pmap_nw_modified(origpte)) {
c8fe38ae 2637 kprintf(
48ffc236 2638 "pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
c8fe38ae
MD
2639 va, origpte);
2640 }
2641#endif
2642
2643 /*
2644 * Remove the extra pte reference. Note that we cannot
2645 * optimize the RO->RW case because we have adjusted the
2646 * wiring count above and may need to adjust the wiring
2647 * bits below.
2648 */
2649 if (mpte)
2650 mpte->hold_count--;
2651
2652 /*
2653 * We might be turning off write access to the page,
2654 * so we go ahead and sense modify status.
2655 */
2656 if (origpte & PG_MANAGED) {
2657 if ((origpte & PG_M) && pmap_track_modified(va)) {
2658 vm_page_t om;
2659 om = PHYS_TO_VM_PAGE(opa);
2660 vm_page_dirty(om);
2661 }
2662 pa |= PG_MANAGED;
2663 KKASSERT(m->flags & PG_MAPPED);
2664 }
2665 goto validate;
2666 }
2667 /*
2668 * Mapping has changed, invalidate old range and fall through to
2669 * handle validating new mapping.
2670 */
2671 if (opa) {
2672 int err;
2673 err = pmap_remove_pte(pmap, pte, va, &info);
2674 if (err)
48ffc236 2675 panic("pmap_enter: pte vanished, va: 0x%lx", va);
c8fe38ae
MD
2676 }
2677
2678 /*
2679 * Enter on the PV list if part of our managed memory. Note that we
2680 * raise IPL while manipulating pv_table since pmap_enter can be
2681 * called at interrupt time.
2682 */
2683 if (pmap_initialized &&
2684 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2685 pmap_insert_entry(pmap, va, mpte, m);
2686 pa |= PG_MANAGED;
2687 vm_page_flag_set(m, PG_MAPPED);
2688 }
2689
2690 /*
2691 * Increment counters
2692 */
2693 ++pmap->pm_stats.resident_count;
2694 if (wired)
2695 pmap->pm_stats.wired_count++;
2696
2697validate:
2698 /*
2699 * Now validate mapping with desired protection/wiring.
2700 */
48ffc236 2701 newpte = (pt_entry_t) (pa | pte_prot(pmap, prot) | PG_V);
c8fe38ae
MD
2702
2703 if (wired)
2704 newpte |= PG_W;
48ffc236 2705 if (va < VM_MAX_USER_ADDRESS)
c8fe38ae
MD
2706 newpte |= PG_U;
2707 if (pmap == &kernel_pmap)
2708 newpte |= pgeflag;
2709
2710 /*
2711 * if the mapping or permission bits are different, we need
2712 * to update the pte.
2713 */
2714 if ((origpte & ~(PG_M|PG_A)) != newpte) {
2715 pmap_inval_add(&info, pmap, va);
2716 *pte = newpte | PG_A;
2717 if (newpte & PG_RW)
2718 vm_page_flag_set(m, PG_WRITEABLE);
2719 }
2720 KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
2721 pmap_inval_flush(&info);
d7f50089
YY
2722}
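/*
 * Editorial note (not part of the original source): the final test in
 * pmap_enter() above compares origpte with PG_M and PG_A masked off,
 * so a mapping whose only difference from the requested one is
 * hardware-set accessed/dirty bits is left untouched and no
 * invalidation is queued for it.
 */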
2723
2724/*
c8fe38ae
MD
2725 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
2726 * This code also assumes that the pmap has no pre-existing entry for this
2727 * VA.
d7f50089 2728 *
c8fe38ae 2729 * This code currently may only be used on user pmaps, not kernel_pmap.
d7f50089 2730 */
c8fe38ae
MD
2731static void
2732pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
48ffc236 2733READY1
d7f50089 2734{
c8fe38ae
MD
2735 pt_entry_t *pte;
2736 vm_paddr_t pa;
2737 vm_page_t mpte;
2738 vm_pindex_t ptepindex;
48ffc236 2739 pd_entry_t *ptepa;
c8fe38ae
MD
2740 pmap_inval_info info;
2741
2742 pmap_inval_init(&info);
2743
2744 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2745 kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
48ffc236
JG
2746#ifdef DDB
2747 db_print_backtrace();
2748#endif
c8fe38ae
MD
2749 }
2750 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2751 kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
48ffc236
JG
2752#ifdef DDB
2753 db_print_backtrace();
2754#endif
c8fe38ae
MD
2755 }
2756
2757 KKASSERT(va < UPT_MIN_ADDRESS); /* assert used on user pmaps only */
2758
2759 /*
2760 * Calculate the page table page (mpte), allocating it if necessary.
2761 *
2762 * A held page table page (mpte), or NULL, is passed onto the
2763 * section following.
2764 */
48ffc236 2765 if (va < VM_MAX_USER_ADDRESS) {
c8fe38ae
MD
2766 /*
2767 * Calculate pagetable page index
2768 */
48ffc236 2769 ptepindex = pmap_pde_pindex(va);
c8fe38ae
MD
2770
2771 do {
2772 /*
2773 * Get the page directory entry
2774 */
48ffc236 2775 ptepa = pmap_pde(pmap, va);
c8fe38ae
MD
2776
2777 /*
2778 * If the page table page is mapped, we just increment
2779 * the hold count, and activate it.
2780 */
48ffc236
JG
2781 if (ptepa && (*ptepa & PG_V) != 0) {
2782 if (*ptepa & PG_PS)
2783 panic("pmap_enter_quick: unexpected mapping into 2MB page");
2784// if (pmap->pm_ptphint &&
2785// (pmap->pm_ptphint->pindex == ptepindex)) {
2786// mpte = pmap->pm_ptphint;
2787// } else {
c8fe38ae
MD
2788 mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
2789 pmap->pm_ptphint = mpte;
48ffc236 2790// }
c8fe38ae
MD
2791 if (mpte)
2792 mpte->hold_count++;
2793 } else {
2794 mpte = _pmap_allocpte(pmap, ptepindex);
2795 }
2796 } while (mpte == NULL);
2797 } else {
2798 mpte = NULL;
2799 /* this code path is not yet used */
2800 }
2801
2802 /*
2803 * With a valid (and held) page directory page, we can just use
2804 * vtopte() to get to the pte. If the pte is already present
2805 * we do not disturb it.
2806 */
2807 pte = vtopte(va);
2808 if (*pte & PG_V) {
2809 if (mpte)
48ffc236 2810 pmap_unwire_pte_hold(pmap, va, mpte, &info);
c8fe38ae
MD
2811 pa = VM_PAGE_TO_PHYS(m);
2812 KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
2813 return;
2814 }
2815
2816 /*
2817 * Enter on the PV list if part of our managed memory
2818 */
2819 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2820 pmap_insert_entry(pmap, va, mpte, m);
2821 vm_page_flag_set(m, PG_MAPPED);
2822 }
2823
2824 /*
2825 * Increment counters
2826 */
2827 ++pmap->pm_stats.resident_count;
2828
2829 pa = VM_PAGE_TO_PHYS(m);
2830
2831 /*
2832 * Now validate mapping with RO protection
2833 */
2834 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2835 *pte = pa | PG_V | PG_U;
2836 else
2837 *pte = pa | PG_V | PG_U | PG_MANAGED;
2838/* pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
2839 pmap_inval_flush(&info);
d7f50089
YY
2840}
2841
2842/*
c8fe38ae
MD
2843 * Make a temporary mapping for a physical address. This is only intended
2844 * to be used for panic dumps.
d7f50089 2845 */
48ffc236 2846/* JG Needed on amd64? */
c8fe38ae
MD
2847void *
2848pmap_kenter_temporary(vm_paddr_t pa, int i)
48ffc236 2849READY2
d7f50089 2850{
c8fe38ae
MD
2851 pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
2852 return ((void *)crashdumpmap);
d7f50089
YY
2853}
2854
c8fe38ae
MD
2855#define MAX_INIT_PT (96)
2856
d7f50089
YY
2857/*
2858 * This routine preloads the ptes for a given object into the specified pmap.
2859 * This eliminates the blast of soft faults on process startup and
2860 * immediately after an mmap.
2861 */
2862static int pmap_object_init_pt_callback(vm_page_t p, void *data);
2863
2864void
2865pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
2866 vm_object_t object, vm_pindex_t pindex,
2867 vm_size_t size, int limit)
48ffc236 2868READY1
d7f50089 2869{
c8fe38ae
MD
2870 struct rb_vm_page_scan_info info;
2871 struct lwp *lp;
48ffc236 2872 vm_size_t psize;
c8fe38ae
MD
2873
2874 /*
2875 * We can't preinit if read access isn't set or there is no pmap
2876 * or object.
2877 */
2878 if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
2879 return;
2880
2881 /*
2882 * We can't preinit if the pmap is not the current pmap
2883 */
2884 lp = curthread->td_lwp;
2885 if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
2886 return;
2887
2888 psize = amd64_btop(size);
2889
2890 if ((object->type != OBJT_VNODE) ||
2891 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
2892 (object->resident_page_count > MAX_INIT_PT))) {
2893 return;
2894 }
2895
2896 if (psize + pindex > object->size) {
2897 if (object->size < pindex)
2898 return;
2899 psize = object->size - pindex;
2900 }
2901
2902 if (psize == 0)
2903 return;
2904
2905 /*
2906 * Use a red-black scan to traverse the requested range and load
2907 * any valid pages found into the pmap.
2908 *
2909 * We cannot safely scan the object's memq unless we are in a
2910 * critical section since interrupts can remove pages from objects.
2911 */
2912 info.start_pindex = pindex;
2913 info.end_pindex = pindex + psize - 1;
2914 info.limit = limit;
2915 info.mpte = NULL;
2916 info.addr = addr;
2917 info.pmap = pmap;
2918
2919 crit_enter();
2920 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2921 pmap_object_init_pt_callback, &info);
2922 crit_exit();
d7f50089
YY
2923}
2924
2925static
2926int
2927pmap_object_init_pt_callback(vm_page_t p, void *data)
48ffc236 2928READY1
d7f50089 2929{
c8fe38ae
MD
2930 struct rb_vm_page_scan_info *info = data;
2931 vm_pindex_t rel_index;
2932 /*
 2933 * Don't allow a madvise to blow away our really
 2934 * free pages by allocating pv entries.
2935 */
2936 if ((info->limit & MAP_PREFAULT_MADVISE) &&
2937 vmstats.v_free_count < vmstats.v_free_reserved) {
2938 return(-1);
2939 }
2940 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2941 (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2942 if ((p->queue - p->pc) == PQ_CACHE)
2943 vm_page_deactivate(p);
2944 vm_page_busy(p);
2945 rel_index = p->pindex - info->start_pindex;
2946 pmap_enter_quick(info->pmap,
2947 info->addr + amd64_ptob(rel_index), p);
2948 vm_page_wakeup(p);
2949 }
d7f50089
YY
2950 return(0);
2951}
2952
2953/*
2954 * pmap_prefault provides a quick way of clustering pagefaults into a
 2955 * process's address space. It is a "cousin" of pmap_object_init_pt,
2956 * except it runs at page fault time instead of mmap time.
2957 */
2958#define PFBAK 4
2959#define PFFOR 4
2960#define PAGEORDER_SIZE (PFBAK+PFFOR)
2961
2962static int pmap_prefault_pageorder[] = {
2963 -PAGE_SIZE, PAGE_SIZE,
2964 -2 * PAGE_SIZE, 2 * PAGE_SIZE,
2965 -3 * PAGE_SIZE, 3 * PAGE_SIZE,
2966 -4 * PAGE_SIZE, 4 * PAGE_SIZE
2967};
2968
2969void
2970pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
48ffc236 2971READY0
d7f50089 2972{
c8fe38ae
MD
2973 int i;
2974 vm_offset_t starta;
2975 vm_offset_t addr;
2976 vm_pindex_t pindex;
2977 vm_page_t m;
2978 vm_object_t object;
2979 struct lwp *lp;
2980
2981 /*
2982 * We do not currently prefault mappings that use virtual page
2983 * tables. We do not prefault foreign pmaps.
2984 */
2985 if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2986 return;
2987 lp = curthread->td_lwp;
2988 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2989 return;
2990
2991 object = entry->object.vm_object;
2992
2993 starta = addra - PFBAK * PAGE_SIZE;
2994 if (starta < entry->start)
2995 starta = entry->start;
2996 else if (starta > addra)
2997 starta = 0;
2998
2999 /*
3000 * critical section protection is required to maintain the
3001 * page/object association, interrupts can free pages and remove
3002 * them from their objects.
3003 */
3004 crit_enter();
3005 for (i = 0; i < PAGEORDER_SIZE; i++) {
3006 vm_object_t lobject;
3007 pt_entry_t *pte;
3008
3009 addr = addra + pmap_prefault_pageorder[i];
3010 if (addr > addra + (PFFOR * PAGE_SIZE))
3011 addr = 0;
3012
3013 if (addr < starta || addr >= entry->end)
3014 continue;
3015
3016 if ((*pmap_pde(pmap, addr)) == 0)
3017 continue;
3018
3019 pte = vtopte(addr);
3020 if (*pte)
3021 continue;
3022
3023 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
3024 lobject = object;
3025
3026 for (m = vm_page_lookup(lobject, pindex);
3027 (!m && (lobject->type == OBJT_DEFAULT) &&
3028 (lobject->backing_object));
3029 lobject = lobject->backing_object
3030 ) {
3031 if (lobject->backing_object_offset & PAGE_MASK)
3032 break;
3033 pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
3034 m = vm_page_lookup(lobject->backing_object, pindex);
3035 }
3036
3037 /*
 3038 * Give up when a page is not in memory.
3039 */
3040 if (m == NULL)
3041 break;
3042
3043 if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
3044 (m->busy == 0) &&
3045 (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
3046
3047 if ((m->queue - m->pc) == PQ_CACHE) {
3048 vm_page_deactivate(m);
3049 }
3050 vm_page_busy(m);
3051 pmap_enter_quick(pmap, addr, m);
3052 vm_page_wakeup(m);
3053 }
3054 }
3055 crit_exit();
d7f50089
YY
3056}
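/*
 * Editorial note (not part of the original source): pmap_prefault()
 * above walks pmap_prefault_pageorder[], which alternates offsets of
 * -1/+1, -2/+2, -3/+3 and -4/+4 pages around the faulting address, so
 * up to PFBAK pages before and PFFOR pages after the fault are entered
 * when they are resident, unbusied and fully valid.
 */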
3057
3058/*
3059 * Routine: pmap_change_wiring
3060 * Function: Change the wiring attribute for a map/virtual-address
3061 * pair.
3062 * In/out conditions:
3063 * The mapping must already exist in the pmap.
3064 */
3065void
3066pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
48ffc236 3067READY0
d7f50089 3068{
c8fe38ae
MD
3069 pt_entry_t *pte;
3070
3071 if (pmap == NULL)
3072 return;
3073
3074 pte = pmap_pte(pmap, va);
3075
3076 if (wired && !pmap_pte_w(pte))
3077 pmap->pm_stats.wired_count++;
3078 else if (!wired && pmap_pte_w(pte))
3079 pmap->pm_stats.wired_count--;
3080
3081 /*
3082 * Wiring is not a hardware characteristic so there is no need to
3083 * invalidate TLB. However, in an SMP environment we must use
3084 * a locked bus cycle to update the pte (if we are not using
3085 * the pmap_inval_*() API that is)... it's ok to do this for simple
3086 * wiring changes.
3087 */
3088#ifdef SMP
3089 if (wired)
3090 atomic_set_int(pte, PG_W);
3091 else
3092 atomic_clear_int(pte, PG_W);
3093#else
3094 if (wired)
3095 atomic_set_int_nonlocked(pte, PG_W);
3096 else
3097 atomic_clear_int_nonlocked(pte, PG_W);
3098#endif
d7f50089
YY
3099}
3100
c8fe38ae
MD
3101
3102
d7f50089
YY
3103/*
3104 * Copy the range specified by src_addr/len
3105 * from the source map to the range dst_addr/len
3106 * in the destination map.
3107 *
3108 * This routine is only advisory and need not do anything.
3109 */
3110void
3111pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
3112 vm_size_t len, vm_offset_t src_addr)
48ffc236 3113READY0
d7f50089 3114{
c8fe38ae
MD
3115 pmap_inval_info info;
3116 vm_offset_t addr;
3117 vm_offset_t end_addr = src_addr + len;
3118 vm_offset_t pdnxt;
3119 pd_entry_t src_frame, dst_frame;
3120 vm_page_t m;
3121
3122 if (dst_addr != src_addr)
3123 return;
3124 /*
 3125 * XXX BUGGY. Among other things srcmpte is assumed to remain
3126 * valid through blocking calls, and that's just not going to
3127 * be the case.
3128 *
3129 * FIXME!
3130 */
3131 return;
3132
48ffc236 3133#if JGPMAP32
c8fe38ae
MD
3134 src_frame = src_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
3135 if (src_frame != (PTDpde & PG_FRAME)) {
3136 return;
3137 }
3138
3139 dst_frame = dst_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
3140 if (dst_frame != (APTDpde & PG_FRAME)) {
3141 APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
3142 /* The page directory is not shared between CPUs */
3143 cpu_invltlb();
3144 }
48ffc236 3145#endif
c8fe38ae
MD
3146 pmap_inval_init(&info);
3147 pmap_inval_add(&info, dst_pmap, -1);
3148 pmap_inval_add(&info, src_pmap, -1);
3149
3150 /*
3151 * critical section protection is required to maintain the page/object
3152 * association, interrupts can free pages and remove them from
3153 * their objects.
3154 */
3155 crit_enter();
3156 for (addr = src_addr; addr < end_addr; addr = pdnxt) {
3157 pt_entry_t *src_pte, *dst_pte;
3158 vm_page_t dstmpte, srcmpte;
3159 vm_offset_t srcptepaddr;
3160 vm_pindex_t ptepindex;
3161
3162 if (addr >= UPT_MIN_ADDRESS)
3163 panic("pmap_copy: invalid to pmap_copy page tables\n");
3164
3165 /*
3166 * Don't let optional prefaulting of pages make us go
3167 * way below the low water mark of free pages or way
3168 * above high water mark of used pv entries.
3169 */
3170 if (vmstats.v_free_count < vmstats.v_free_reserved ||
3171 pv_entry_count > pv_entry_high_water)
3172 break;
3173
3174 pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
3175 ptepindex = addr >> PDRSHIFT;
3176
48ffc236 3177#if JGPMAP32
c8fe38ae 3178 srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
48ffc236 3179#endif
c8fe38ae
MD
3180 if (srcptepaddr == 0)
3181 continue;
3182
3183 if (srcptepaddr & PG_PS) {
48ffc236 3184#if JGPMAP32
c8fe38ae
MD
3185 if (dst_pmap->pm_pdir[ptepindex] == 0) {
3186 dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
3187 dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
3188 }
48ffc236 3189#endif
c8fe38ae
MD
3190 continue;
3191 }
3192
3193 srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
3194 if ((srcmpte == NULL) || (srcmpte->hold_count == 0) ||
3195 (srcmpte->flags & PG_BUSY)) {
3196 continue;
3197 }
3198
3199 if (pdnxt > end_addr)
3200 pdnxt = end_addr;
3201
3202 src_pte = vtopte(addr);
48ffc236 3203#if JGPMAP32
c8fe38ae 3204 dst_pte = avtopte(addr);
48ffc236 3205#endif
c8fe38ae
MD
3206 while (addr < pdnxt) {
3207 pt_entry_t ptetemp;
3208
3209 ptetemp = *src_pte;
3210 /*
3211 * we only virtual copy managed pages
3212 */
3213 if ((ptetemp & PG_MANAGED) != 0) {
3214 /*
3215 * We have to check after allocpte for the
3216 * pte still being around... allocpte can
3217 * block.
3218 *
3219 * pmap_allocpte() can block. If we lose
3220 * our page directory mappings we stop.
3221 */
3222 dstmpte = pmap_allocpte(dst_pmap, addr);
3223
48ffc236 3224#if JGPMAP32
c8fe38ae
MD
3225 if (src_frame != (PTDpde & PG_FRAME) ||
3226 dst_frame != (APTDpde & PG_FRAME)
3227 ) {
3228 kprintf("WARNING: pmap_copy: detected and corrected race\n");
3229 pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
3230 goto failed;
3231 } else if ((*dst_pte == 0) &&
3232 (ptetemp = *src_pte) != 0 &&
3233 (ptetemp & PG_MANAGED)) {
3234 /*
3235 * Clear the modified and
3236 * accessed (referenced) bits
3237 * during the copy.
3238 */
3239 m = PHYS_TO_VM_PAGE(ptetemp);
3240 *dst_pte = ptetemp & ~(PG_M | PG_A);
3241 ++dst_pmap->pm_stats.resident_count;
3242 pmap_insert_entry(dst_pmap, addr,
3243 dstmpte, m);
3244 KKASSERT(m->flags & PG_MAPPED);
3245 } else {
3246 kprintf("WARNING: pmap_copy: dst_pte race detected and corrected\n");
3247 pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
3248 goto failed;
3249 }
48ffc236 3250#endif
c8fe38ae
MD
3251 if (dstmpte->hold_count >= srcmpte->hold_count)
3252 break;
3253 }
3254 addr += PAGE_SIZE;
3255 src_pte++;
3256 dst_pte++;
3257 }
3258 }
3259failed:
3260 crit_exit();
3261 pmap_inval_flush(&info);
d7f50089
YY
3262}
3263
3264/*
3265 * pmap_zero_page:
3266 *
48ffc236 3267 * Zero the specified physical page.
d7f50089
YY
3268 *
3269 * This function may be called from an interrupt and no locking is
3270 * required.
3271 */
3272void
3273pmap_zero_page(vm_paddr_t phys)
48ffc236 3274READY1
d7f50089 3275{
48ffc236 3276 vm_offset_t va = PHYS_TO_DMAP(phys);
c8fe38ae 3277
48ffc236 3278 pagezero((void *)va);
d7f50089
YY
3279}
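/*
 * Editorial note (not part of the original source): the zero/copy
 * helpers here go through PHYS_TO_DMAP(), the amd64 direct map of
 * physical memory, so unlike the i386 pmap no temporary per-CPU
 * mapping has to be set up or invalidated for these operations.
 */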
3280
3281/*
3282 * pmap_page_assertzero:
3283 *
3284 * Assert that a page is empty, panic if it isn't.
3285 */
3286void
3287pmap_page_assertzero(vm_paddr_t phys)
48ffc236 3288READY1
d7f50089 3289{
c8fe38ae
MD
3290 struct mdglobaldata *gd = mdcpu;
3291 int i;
3292
3293 crit_enter();
48ffc236
JG
3294 vm_offset_t virt = PHYS_TO_DMAP(phys);
3295
c8fe38ae 3296 for (i = 0; i < PAGE_SIZE; i += sizeof(int)) {
48ffc236 3297 if (*(int *)((char *)virt + i) != 0) {
c8fe38ae 3298 panic("pmap_page_assertzero() @ %p not zero!\n",
48ffc236 3299 (void *)virt);
c8fe38ae
MD
3300 }
3301 }
c8fe38ae 3302 crit_exit();
d7f50089
YY
3303}
3304
3305/*
 3306 * pmap_zero_page_area:
3307 *
3308 * Zero part of a physical page by mapping it into memory and clearing
3309 * its contents with bzero.
3310 *
3311 * off and size may not cover an area beyond a single hardware page.
3312 */
3313void
3314pmap_zero_page_area(vm_paddr_t phys, int off, int size)
48ffc236 3315READY1
d7f50089 3316{
c8fe38ae
MD
3317 struct mdglobaldata *gd = mdcpu;
3318
3319 crit_enter();
48ffc236
JG
3320 vm_offset_t virt = PHYS_TO_DMAP(phys);
3321 bzero((char *)virt + off, size);
c8fe38ae 3322 crit_exit();
d7f50089
YY
3323}
3324
3325/*
3326 * pmap_copy_page:
3327 *
3328 * Copy the physical page from the source PA to the target PA.
3329 * This function may be called from an interrupt. No locking
3330 * is required.
3331 */
3332void
3333pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
48ffc236 3334READY1
d7f50089 3335{
48ffc236 3336 vm_offset_t src_virt, dst_virt;
c8fe38ae
MD
3337
3338 crit_enter();
48ffc236
JG
3339 src_virt = PHYS_TO_DMAP(src);
3340 dst_virt = PHYS_TO_DMAP(dst);
3341 bcopy(src_virt, dst_virt, PAGE_SIZE);
c8fe38ae 3342 crit_exit();
d7f50089
YY
3343}
3344
3345/*
3346 * pmap_copy_page_frag:
3347 *
3348 * Copy the physical page from the source PA to the target PA.
3349 * This function may be called from an interrupt. No locking
3350 * is required.
3351 */
3352void
3353pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
48ffc236 3354READY1
d7f50089 3355{
48ffc236 3356 vm_offset_t src_virt, dst_virt;
c8fe38ae
MD
3357
3358 crit_enter();
48ffc236
JG
3359 src_virt = PHYS_TO_DMAP(src);
3360 dst_virt = PHYS_TO_DMAP(dst);
3361 bcopy((char *)src_virt + (src & PAGE_MASK),
3362 (char *)dst_virt + (dst & PAGE_MASK),
c8fe38ae 3363 bytes);
c8fe38ae 3364 crit_exit();
d7f50089
YY
3365}
3366
3367/*
3368 * Returns true if the pmap's pv is one of the first
3369 * 16 pvs linked to from this page. This count may
3370 * be changed upwards or downwards in the future; it
3371 * is only necessary that true be returned for a small
3372 * subset of pmaps for proper page aging.
3373 */
3374boolean_t
3375pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
48ffc236 3376READY2
d7f50089 3377{
c8fe38ae
MD
3378 pv_entry_t pv;
3379 int loops = 0;
3380
3381 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3382 return FALSE;
3383
3384 crit_enter();
3385
3386 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3387 if (pv->pv_pmap == pmap) {
3388 crit_exit();
3389 return TRUE;
3390 }
3391 loops++;
3392 if (loops >= 16)
3393 break;
3394 }
3395 crit_exit();
d7f50089
YY
3396 return (FALSE);
3397}
3398
3399/*
3400 * Remove all pages from specified address space
3401 * this aids process exit speeds. Also, this code
3402 * is special cased for current process only, but
3403 * can have the more generic (and slightly slower)
3404 * mode enabled. This is much faster than pmap_remove
3405 * in the case of running down an entire address space.
3406 */
3407void
3408pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
48ffc236 3409READY1
d7f50089 3410{
c8fe38ae
MD
3411 struct lwp *lp;
3412 pt_entry_t *pte, tpte;
3413 pv_entry_t pv, npv;
3414 vm_page_t m;
3415 pmap_inval_info info;
3416 int iscurrentpmap;
48ffc236 3417 int save_generation;
c8fe38ae
MD
3418
3419 lp = curthread->td_lwp;
3420 if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
3421 iscurrentpmap = 1;
3422 else
3423 iscurrentpmap = 0;
3424
3425 pmap_inval_init(&info);
3426 crit_enter();
3427 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
3428 if (pv->pv_va >= eva || pv->pv_va < sva) {
3429 npv = TAILQ_NEXT(pv, pv_plist);
3430 continue;
3431 }
3432
3433 KKASSERT(pmap == pv->pv_pmap);
3434
3435 if (iscurrentpmap)
3436 pte = vtopte(pv->pv_va);
3437 else
3438 pte = pmap_pte_quick(pmap, pv->pv_va);
3439 if (pmap->pm_active)
3440 pmap_inval_add(&info, pmap, pv->pv_va);
3441
3442 /*
3443 * We cannot remove wired pages from a process' mapping
3444 * at this time
3445 */
3446 if (*pte & PG_W) {
3447 npv = TAILQ_NEXT(pv, pv_plist);
3448 continue;
3449 }
3450 tpte = pte_load_clear(pte);
3451
48ffc236 3452 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
c8fe38ae
MD
3453
3454 KASSERT(m < &vm_page_array[vm_page_array_size],
48ffc236 3455 ("pmap_remove_pages: bad tpte %lx", tpte));
c8fe38ae
MD
3456
3457 KKASSERT(pmap->pm_stats.resident_count > 0);
3458 --pmap->pm_stats.resident_count;
3459
3460 /*
3461 * Update the vm_page_t clean and reference bits.
3462 */
3463 if (tpte & PG_M) {
3464 vm_page_dirty(m);
3465 }
3466
3467 npv = TAILQ_NEXT(pv, pv_plist);
3468 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
3469 save_generation = ++pmap->pm_generation;
3470
3471 m->md.pv_list_count--;
3472 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3473 if (TAILQ_EMPTY(&m->md.pv_list))
3474 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3475
3476 pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
3477 free_pv_entry(pv);
3478
3479 /*
3480 * Restart the scan if we blocked during the unuse or free
3481 * calls and other removals were made.
3482 */
3483 if (save_generation != pmap->pm_generation) {
3484 kprintf("Warning: pmap_remove_pages race-A avoided\n");
3485 pv = TAILQ_FIRST(&pmap->pm_pvlist);
3486 }
3487 }
3488 pmap_inval_flush(&info);
3489 crit_exit();
d7f50089
YY
3490}
3491
3492/*
c8fe38ae
MD
 3493 * pmap_testbit tests bits in PTEs.
 3494 * Note that the testbit/clearbit routines are inline,
3495 * and a lot of things compile-time evaluate.
d7f50089
YY
3496 */
3497static boolean_t
3498pmap_testbit(vm_page_t m, int bit)
48ffc236 3499READY1
d7f50089 3500{
c8fe38ae
MD
3501 pv_entry_t pv;
3502 pt_entry_t *pte;
3503
3504 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3505 return FALSE;
3506
3507 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
3508 return FALSE;
3509
3510 crit_enter();
3511
3512 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3513 /*
3514 * if the bit being tested is the modified bit, then
3515 * mark clean_map and ptes as never
3516 * modified.
3517 */
3518 if (bit & (PG_A|PG_M)) {
3519 if (!pmap_track_modified(pv->pv_va))
3520 continue;
3521 }
3522
3523#if defined(PMAP_DIAGNOSTIC)
48ffc236
JG
3524 if (pv->pv_pmap == NULL) {
3525 kprintf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
c8fe38ae
MD
3526 continue;
3527 }
3528#endif
3529 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3530 if (*pte & bit) {
3531 crit_exit();
3532 return TRUE;
3533 }
3534 }
3535 crit_exit();
d7f50089
YY
3536 return (FALSE);
3537}
3538
3539/*
c8fe38ae 3540 * this routine is used to modify bits in ptes
d7f50089
YY
3541 */
3542static __inline void
3543pmap_clearbit(vm_page_t m, int bit)
48ffc236 3544READY1
d7f50089 3545{
c8fe38ae
MD
3546 struct pmap_inval_info info;
3547 pv_entry_t pv;
3548 pt_entry_t *pte;
3549 pt_entry_t pbits;
3550
3551 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3552 return;
3553
3554 pmap_inval_init(&info);
3555 crit_enter();
3556
3557 /*
 3558 * Loop over all current mappings, setting/clearing as appropriate. If
3559 * setting RO do we need to clear the VAC?
3560 */
3561 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3562 /*
3563 * don't write protect pager mappings
3564 */
3565 if (bit == PG_RW) {
3566 if (!pmap_track_modified(pv->pv_va))
3567 continue;
3568 }
3569
3570#if defined(PMAP_DIAGNOSTIC)
48ffc236
JG
3571 if (pv->pv_pmap == NULL) {
3572 kprintf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va);
c8fe38ae
MD
3573 continue;
3574 }
3575#endif
3576
3577 /*
3578 * Careful here. We can use a locked bus instruction to
3579 * clear PG_A or PG_M safely but we need to synchronize
3580 * with the target cpus when we mess with PG_RW.
3581 *
3582 * We do not have to force synchronization when clearing
3583 * PG_M even for PTEs generated via virtual memory maps,
3584 * because the virtual kernel will invalidate the pmap
3585 * entry when/if it needs to resynchronize the Modify bit.
3586 */
3587 if (bit & PG_RW)
3588 pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
3589 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3590again:
3591 pbits = *pte;
3592 if (pbits & bit) {
3593 if (bit == PG_RW) {
3594 if (pbits & PG_M) {
3595 vm_page_dirty(m);
48ffc236 3596 atomic_clear_long(pte, PG_M|PG_RW);
c8fe38ae
MD
3597 } else {
3598 /*
3599 * The cpu may be trying to set PG_M
 3600 * simultaneously with our clearing
3601 * of PG_RW.
3602 */
48ffc236 3603 if (!atomic_cmpset_long(pte, pbits,
c8fe38ae
MD
3604 pbits & ~PG_RW))
3605 goto again;
3606 }
3607 } else if (bit == PG_M) {
3608 /*
3609 * We could also clear PG_RW here to force
3610 * a fault on write to redetect PG_M for
3611 * virtual kernels, but it isn't necessary
3612 * since virtual kernels invalidate the pte
3613 * when they clear the VPTE_M bit in their
3614 * virtual page tables.
3615 */
48ffc236 3616 atomic_clear_long(pte, PG_M);
c8fe38ae 3617 } else {
48ffc236 3618 atomic_clear_long(pte, bit);
c8fe38ae
MD
3619 }
3620 }
3621 }
3622 pmap_inval_flush(&info);
3623 crit_exit();
d7f50089
YY
3624}
3625
3626/*
3627 * pmap_page_protect:
3628 *
3629 * Lower the permission for all mappings to a given page.