amd64: pmap: Improve comments.
[dragonfly.git] / sys / platform / pc64 / amd64 / pmap.c
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * Copyright (c) 1994 John S. Dyson
4 * Copyright (c) 1994 David Greenman
5 * Copyright (c) 2003 Peter Wemm
6 * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
7 * Copyright (c) 2008, 2009 The DragonFly Project.
8 * Copyright (c) 2008, 2009 Jordan Gordeev.
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
44 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
45 * $DragonFly: src/sys/platform/pc64/amd64/pmap.c,v 1.3 2008/08/29 17:07:10 dillon Exp $
46 */
47
48/*
49 * Manages physical address maps.
50 *
51 * In addition to hardware address maps, this
52 * module is called upon to provide software-use-only
53 * maps which may or may not be stored in the same
54 * form as hardware maps. These pseudo-maps are
55 * used to store intermediate results from copy
56 * operations to and from address spaces.
57 *
58 * Since the information managed by this module is
59 * also stored by the logical address mapping module,
60 * this module may throw away valid virtual-to-physical
61 * mappings at almost any time. However, invalidations
62 * of virtual-to-physical mappings must be done as
63 * requested.
64 *
65 * In order to cope with hardware architectures which
66 * make virtual-to-physical map invalidates expensive,
67 * this module may delay invalidate or reduced protection
68 * operations until such time as they are actually
69 * necessary. This module is given full information as
70 * to which processors are currently using which maps,
71 * and to when physical maps must be made correct.
72 */
73
74#if JG
75#include "opt_disable_pse.h"
76#include "opt_pmap.h"
77#endif
78#include "opt_msgbuf.h"
79
80#include <sys/param.h>
81#include <sys/systm.h>
82#include <sys/kernel.h>
83#include <sys/proc.h>
84#include <sys/msgbuf.h>
85#include <sys/vmmeter.h>
86#include <sys/mman.h>
87
88#include <vm/vm.h>
89#include <vm/vm_param.h>
90#include <sys/sysctl.h>
91#include <sys/lock.h>
92#include <vm/vm_kern.h>
93#include <vm/vm_page.h>
94#include <vm/vm_map.h>
95#include <vm/vm_object.h>
96#include <vm/vm_extern.h>
97#include <vm/vm_pageout.h>
98#include <vm/vm_pager.h>
99#include <vm/vm_zone.h>
100
101#include <sys/user.h>
102#include <sys/thread2.h>
103#include <sys/sysref2.h>
104
105#include <machine/cputypes.h>
106#include <machine/md_var.h>
107#include <machine/specialreg.h>
108#include <machine/smp.h>
109#include <machine_base/apic/apicreg.h>
110#include <machine/globaldata.h>
111#include <machine/pmap.h>
112#include <machine/pmap_inval.h>
113
114#include <ddb/ddb.h>
115
116#define PMAP_KEEP_PDIRS
117#ifndef PMAP_SHPGPERPROC
118#define PMAP_SHPGPERPROC 200
119#endif
120
121#if defined(DIAGNOSTIC)
122#define PMAP_DIAGNOSTIC
123#endif
124
125#define MINPV 2048
126
127#if !defined(PMAP_DIAGNOSTIC)
128#define PMAP_INLINE __inline
129#else
130#define PMAP_INLINE
131#endif
132
133/* JGPMAP32 */
134#define PTDPTDI 0
135
136#define READY0
137#define READY1
138#define READY2
139#define READY3
140#define READY4
141#define READY5
142
143/*
144 * Get PDEs and PTEs for user/kernel address space
145 */
146#if JGPMAP32
147#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
148#endif
149static pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va);
150#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
151
152#define pmap_pde_v(pte) ((*(pd_entry_t *)pte & PG_V) != 0)
153#define pmap_pte_w(pte) ((*(pt_entry_t *)pte & PG_W) != 0)
154#define pmap_pte_m(pte) ((*(pt_entry_t *)pte & PG_M) != 0)
155#define pmap_pte_u(pte) ((*(pt_entry_t *)pte & PG_A) != 0)
156#define pmap_pte_v(pte) ((*(pt_entry_t *)pte & PG_V) != 0)
157
158
159/*
160 * Given a map and a machine independent protection code,
161 * convert to a machine dependent protection code.
162 */
163#define pte_prot(m, p) \
164 (protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
165static int protection_codes[8];
166
167struct pmap kernel_pmap;
168static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
169
170vm_paddr_t avail_start; /* PA of first available physical page */
171vm_paddr_t avail_end; /* PA of last available physical page */
172vm_offset_t virtual_start; /* VA of first avail page (after kernel bss) */
173vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
174vm_offset_t KvaStart; /* VA start of KVA space */
175vm_offset_t KvaEnd; /* VA end of KVA space (non-inclusive) */
176vm_offset_t KvaSize; /* max size of kernel virtual address space */
177static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
178static int pgeflag; /* PG_G or-in */
179static int pseflag; /* PG_PS or-in */
180
181static vm_object_t kptobj;
182
183static int ndmpdp;
184static vm_paddr_t dmaplimit;
185static int nkpt;
186vm_offset_t kernel_vm_end;
187
188static uint64_t KPDphys; /* phys addr of kernel level 2 */
189uint64_t KPDPphys; /* phys addr of kernel level 3 */
190uint64_t KPML4phys; /* phys addr of kernel level 4 */
191
192static uint64_t DMPDphys; /* phys addr of direct mapped level 2 */
193static uint64_t DMPDPphys; /* phys addr of direct mapped level 3 */
194
195/*
196 * Data for the pv entry allocation mechanism
197 */
198static vm_zone_t pvzone;
199static struct vm_zone pvzone_store;
200static struct vm_object pvzone_obj;
201static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
202static int pmap_pagedaemon_waken = 0;
203static struct pv_entry *pvinit;
204
205/*
206 * All those kernel PT submaps that BSD is so fond of
207 */
208pt_entry_t *CMAP1 = 0, *ptmmap;
209caddr_t CADDR1 = 0, ptvmmap = 0;
210static pt_entry_t *msgbufmap;
211struct msgbuf *msgbufp=0;
212
213/*
214 * Crashdump maps.
215 */
216static pt_entry_t *pt_crashdumpmap;
217static caddr_t crashdumpmap;
218
219extern uint64_t KPTphys;
220extern pt_entry_t *SMPpt;
221extern uint64_t SMPptpa;
222
223#define DISABLE_PSE
224
225static PMAP_INLINE void free_pv_entry (pv_entry_t pv);
226static pv_entry_t get_pv_entry (void);
227static void i386_protection_init (void);
228static __inline void pmap_clearbit (vm_page_t m, int bit);
229
230static void pmap_remove_all (vm_page_t m);
231static void pmap_enter_quick (pmap_t pmap, vm_offset_t va, vm_page_t m);
232static int pmap_remove_pte (struct pmap *pmap, pt_entry_t *ptq,
233 vm_offset_t sva, pmap_inval_info_t info);
234static void pmap_remove_page (struct pmap *pmap,
235 vm_offset_t va, pmap_inval_info_t info);
236static int pmap_remove_entry (struct pmap *pmap, vm_page_t m,
237 vm_offset_t va, pmap_inval_info_t info);
238static boolean_t pmap_testbit (vm_page_t m, int bit);
239static void pmap_insert_entry (pmap_t pmap, vm_offset_t va,
240 vm_page_t mpte, vm_page_t m);
241
242static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);
243
244static int pmap_release_free_page (pmap_t pmap, vm_page_t p);
245static vm_page_t _pmap_allocpte (pmap_t pmap, vm_pindex_t ptepindex);
246static pt_entry_t * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
247static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
248static int pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
249 pmap_inval_info_t info);
250static int pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
251static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
252
253static unsigned pdir4mb;
254
255/*
256 * Move the kernel virtual free pointer to the next
257 * 2MB boundary. This is used to help improve performance
258 * by using a large (2MB) page for much of the kernel
259 * (.text, .data, .bss).
260 */
261static vm_offset_t
262pmap_kmem_choose(vm_offset_t addr)
263READY2
264{
265 vm_offset_t newaddr = addr;
266
267 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
268 return newaddr;
269}
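/*
 * Example (assuming NBPDR is the usual 2MB large-page size): an addr of
 * 0xffffffff80201000 rounds up to 0xffffffff80400000, the next 2MB
 * boundary, via the (addr + (NBPDR - 1)) & ~(NBPDR - 1) arithmetic above.
 */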
270
271/*
272 * pmap_pte_quick:
273 *
274 * Super fast pmap_pte routine best used when scanning the pv lists.
275 * This eliminates many coarse-grained invltlb calls. Note that many of
276 * the pv list scans are across different pmaps and it is very wasteful
277 * to do an entire invltlb when checking a single mapping.
278 *
279 * Should only be called while in a critical section.
280 */
281static __inline pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t va);
282
283static pt_entry_t *
284pmap_pte_quick(pmap_t pmap, vm_offset_t va)
285READY0
286{
287 return pmap_pte(pmap, va);
288}
289
290/* Return a non-clipped PD index for a given VA */
291static __inline vm_pindex_t
292pmap_pde_pindex(vm_offset_t va)
293READY1
294{
295 return va >> PDRSHIFT;
296}
297
298/* Return various clipped indexes for a given VA */
299static __inline vm_pindex_t
300pmap_pte_index(vm_offset_t va)
301READY1
302{
303
304 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
305}
306
307static __inline vm_pindex_t
308pmap_pde_index(vm_offset_t va)
309READY1
310{
311
312 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
313}
314
315static __inline vm_pindex_t
316pmap_pdpe_index(vm_offset_t va)
317READY1
318{
319
320 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
321}
322
323static __inline vm_pindex_t
324pmap_pml4e_index(vm_offset_t va)
325READY1
326{
327
328 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
329}
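/*
 * Illustration of the clipped indexes above, assuming the usual amd64
 * shifts (PAGE_SHIFT 12, PDRSHIFT 21, PDPSHIFT 30, PML4SHIFT 39): for a
 * given VA, bits 12-20 select the PT entry, bits 21-29 the PD entry,
 * bits 30-38 the PDP entry, and bits 39-47 the PML4 entry, each masked
 * down to a 9-bit (512-entry) index.
 */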
330
331/* Return a pointer to the PML4 slot that corresponds to a VA */
332static __inline pml4_entry_t *
333pmap_pml4e(pmap_t pmap, vm_offset_t va)
334READY1
335{
336
337 return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
338}
339
340/* Return a pointer to the PDP slot that corresponds to a VA */
341static __inline pdp_entry_t *
342pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
343READY1
344{
345 pdp_entry_t *pdpe;
346
347 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
348 return (&pdpe[pmap_pdpe_index(va)]);
349}
350
351/* Return a pointer to the PDP slot that corresponds to a VA */
352static __inline pdp_entry_t *
353pmap_pdpe(pmap_t pmap, vm_offset_t va)
354READY1
355{
356 pml4_entry_t *pml4e;
357
358 pml4e = pmap_pml4e(pmap, va);
359 if ((*pml4e & PG_V) == 0)
360 return NULL;
361 return (pmap_pml4e_to_pdpe(pml4e, va));
362}
363
364/* Return a pointer to the PD slot that corresponds to a VA */
365static __inline pd_entry_t *
366pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
367READY1
368{
369 pd_entry_t *pde;
370
371 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
372 return (&pde[pmap_pde_index(va)]);
373}
374
375/* Return a pointer to the PD slot that corresponds to a VA */
376static __inline pd_entry_t *
377pmap_pde(pmap_t pmap, vm_offset_t va)
378READY1
379{
380 pdp_entry_t *pdpe;
381
382 pdpe = pmap_pdpe(pmap, va);
383 if (pdpe == NULL || (*pdpe & PG_V) == 0)
384 return NULL;
385 return (pmap_pdpe_to_pde(pdpe, va));
386}
387
388/* Return a pointer to the PT slot that corresponds to a VA */
389static __inline pt_entry_t *
390pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
391READY1
392{
393 pt_entry_t *pte;
394
395 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
396 return (&pte[pmap_pte_index(va)]);
397}
398
399/* Return a pointer to the PT slot that corresponds to a VA */
400static __inline pt_entry_t *
401pmap_pte(pmap_t pmap, vm_offset_t va)
402READY1
403{
404 pd_entry_t *pde;
405
406 pde = pmap_pde(pmap, va);
407 if (pde == NULL || (*pde & PG_V) == 0)
408 return NULL;
409 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
410 return ((pt_entry_t *)pde);
411 return (pmap_pde_to_pte(pde, va));
412}
413
414
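/*
 * vtopte()/vtopde() translate a VA directly to the address of its PTE/PDE
 * inside the recursive PTmap/PDmap windows created by the self-referential
 * PML4 entry (see create_pagetables()), avoiding a software page table walk
 * through pmap_pde().
 */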
415PMAP_INLINE pt_entry_t *
416vtopte(vm_offset_t va)
417READY1
418{
419 uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
420
421 return (PTmap + ((va >> PAGE_SHIFT) & mask));
422}
423
424static __inline pd_entry_t *
425vtopde(vm_offset_t va)
426READY1
427{
428 uint64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
429
430 return (PDmap + ((va >> PDRSHIFT) & mask));
431}
432
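/*
 * Bump-allocate 'n' physically contiguous pages starting at *firstaddr,
 * zero them (we are still running essentially V=P at this point so the
 * physical address is directly dereferenceable), and advance *firstaddr.
 */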
433static uint64_t
434allocpages(vm_paddr_t *firstaddr, int n)
435READY1
436{
437 uint64_t ret;
438
439 ret = *firstaddr;
440 bzero((void *)ret, n * PAGE_SIZE);
441 *firstaddr += n * PAGE_SIZE;
442 return (ret);
443}
444
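/*
 * Build the initial kernel page tables while still running (mostly) V=P:
 * the 4K kernel page table pages (KPTphys), the kernel PD/PDP/PML4
 * (KPDphys/KPDPphys/KPML4phys), the direct map (DMPDphys/DMPDPphys, using
 * 1GB pages when the CPU supports them), and the recursive PML4
 * self-mapping used for PTmap.
 */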
445void
446create_pagetables(vm_paddr_t *firstaddr)
447READY0
448{
449 int i;
450 int count;
451 uint64_t cpu0pp, cpu0idlestk;
452 int idlestk_page_offset = offsetof(struct privatespace, idlestack) / PAGE_SIZE;
453
454 /* we are running (mostly) V=P at this point */
455
456 /* Allocate pages */
457 KPTphys = allocpages(firstaddr, NKPT);
458 KPML4phys = allocpages(firstaddr, 1);
459 KPDPphys = allocpages(firstaddr, NKPML4E);
460 KPDphys = allocpages(firstaddr, NKPDPE);
461
462 ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
463 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
464 ndmpdp = 4;
465 DMPDPphys = allocpages(firstaddr, NDMPML4E);
466 if ((amd_feature & AMDID_PAGE1GB) == 0)
467 DMPDphys = allocpages(firstaddr, ndmpdp);
468 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
469
470 /* Fill in the underlying page table pages */
471 /* Read-only from zero to physfree */
472 /* XXX not fully used, underneath 2M pages */
473 for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
474 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
475 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
476 }
477
478 /* Now map the page tables at their location within PTmap */
479 for (i = 0; i < NKPT; i++) {
480 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
481 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
482 }
483
484 /* Map from zero to end of allocations under 2M pages */
485 /* This replaces some of the KPTphys entries above */
486 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
487 ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
488 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
489 }
490
491 /* And connect up the PD to the PDP */
492 for (i = 0; i < NKPDPE; i++) {
493 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys +
494 (i << PAGE_SHIFT);
495 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
496 }
497
498 /* Now set up the direct map space using either 2MB or 1GB pages */
499 /* Preset PG_M and PG_A because demotion expects it */
500 if ((amd_feature & AMDID_PAGE1GB) == 0) {
501 for (i = 0; i < NPDEPG * ndmpdp; i++) {
502 ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
503 ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS |
504 PG_G | PG_M | PG_A;
505 }
506 /* And the direct map space's PDP */
507 for (i = 0; i < ndmpdp; i++) {
508 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
509 (i << PAGE_SHIFT);
510 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
511 }
512 } else {
513 for (i = 0; i < ndmpdp; i++) {
514 ((pdp_entry_t *)DMPDPphys)[i] =
515 (vm_paddr_t)i << PDPSHIFT;
516 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS |
517 PG_G | PG_M | PG_A;
518 }
519 }
520
521 /* And recursively map PML4 to itself in order to get PTmap */
522 ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
523 ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
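	/*
	 * With this self-referential entry in place every page table page
	 * becomes visible through the linear PTmap/PDmap windows that
	 * vtopte() and vtopde() index into.
	 */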
524
525 /* Connect the Direct Map slot up to the PML4 */
526 ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
527 ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
528
529 /* Connect the KVA slot up to the PML4 */
530 ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
531 ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
532#if JGPMAP32
533 common_lvl4_phys = allocpages(firstaddr, 1); /* 512 512G mappings */
534 common_lvl3_phys = allocpages(firstaddr, 1); /* 512 1G mappings */
535 KPTphys = allocpages(firstaddr, NKPT); /* kernel page table */
536 IdlePTD = allocpages(firstaddr, 1); /* kernel page dir */
537 cpu0pp = allocpages(firstaddr, MDGLOBALDATA_BASEALLOC_PAGES);
538 cpu0idlestk = allocpages(firstaddr, UPAGES);
539 SMPptpa = allocpages(firstaddr, 1);
540 SMPpt = (void *)(SMPptpa + KERNBASE);
541
542
543 /*
544 * Load kernel page table with kernel memory mappings
545 */
546 for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
547 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
548 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V;
549 }
550
551#ifndef JG
552 for (i = 0; i < NKPT; i++) {
553 ((pd_entry_t *)IdlePTD)[i] = KPTphys + (i << PAGE_SHIFT);
554 ((pd_entry_t *)IdlePTD)[i] |= PG_RW | PG_V;
555 }
556#endif
557
558 /*
559 * Set up the kernel page table itself.
560 */
561 for (i = 0; i < NKPT; i++) {
562 ((pd_entry_t *)IdlePTD)[KPTDI + i] = KPTphys + (i << PAGE_SHIFT);
563 ((pd_entry_t *)IdlePTD)[KPTDI + i] |= PG_RW | PG_V;
564 }
565
566#ifndef JG
567 count = ISA_HOLE_LENGTH >> PAGE_SHIFT;
568 for (i = 0; i < count; i++) {
569 ((pt_entry_t *)KPTphys)[amd64_btop(ISA_HOLE_START) + i] = \
570 (ISA_HOLE_START + i * PAGE_SIZE) | PG_RW | PG_V;
571 }
572#endif
573
574 /*
575 * Self-mapping
576 */
577 ((pd_entry_t *)IdlePTD)[PTDPTDI] = (pd_entry_t)IdlePTD | PG_RW | PG_V;
578
579 /*
580 * Map CPU_prvspace[0].mdglobaldata
581 */
582 for (i = 0; i < MDGLOBALDATA_BASEALLOC_PAGES; i++) {
583 ((pt_entry_t *)SMPptpa)[i] = \
584 (cpu0pp + i * PAGE_SIZE) | PG_RW | PG_V;
585 }
586
587 /*
588 * Map CPU_prvspace[0].idlestack
589 */
590 for (i = 0; i < UPAGES; i++) {
591 ((pt_entry_t *)SMPptpa)[idlestk_page_offset + i] = \
592 (cpu0idlestk + i * PAGE_SIZE) | PG_RW | PG_V;
593 }
594
595 /*
596 * Link SMPpt.
597 */
598 ((pd_entry_t *)IdlePTD)[MPPTDI] = SMPptpa | PG_RW | PG_V;
599
600 /*
601 * PML4 maps level 3
602 */
603 ((pml4_entry_t *)common_lvl4_phys)[LINKPML4I] = common_lvl3_phys | PG_RW | PG_V | PG_U;
604
605 /*
606 * location of "virtual CR3" - a PDP entry that is loaded
607 * with a PD physical address (+ page attributes).
608 * Matt: location of user page directory entry (representing 1G)
609 */
610 link_pdpe = &((pdp_entry_t *)common_lvl3_phys)[LINKPDPI];
611#endif /* JGPMAP32 */
612}
613
614READY0
615void
616init_paging(vm_paddr_t *firstaddr) {
617 create_pagetables(firstaddr);
618
619#if JGPMAP32
620 /* switch to the newly created page table */
621 *link_pdpe = IdlePTD | PG_RW | PG_V | PG_U;
622 load_cr3(common_lvl4_phys);
623 link_pdpe = (void *)((char *)link_pdpe + KERNBASE);
624
625 KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
626 KvaEnd = (vm_offset_t)VADDR(APTDPTDI, 0);
627 KvaSize = KvaEnd - KvaStart;
628#endif
629}
630
631/*
632 * Bootstrap the system enough to run with virtual memory.
633 *
634 * On the i386 this is called after mapping has already been enabled
635 * and just syncs the pmap module with what has already been done.
636 * [We can't call it easily with mapping off since the kernel is not
637 * mapped with PA == VA, hence we would have to relocate every address
638 * from the linked base (virtual) address "KERNBASE" to the actual
639 * (physical) address starting relative to 0]
640 */
641void
642pmap_bootstrap(vm_paddr_t *firstaddr)
643READY0
644{
645 vm_offset_t va;
646 pt_entry_t *pte;
647 struct mdglobaldata *gd;
648 int i;
649 int pg;
650
651 KvaStart = VM_MIN_KERNEL_ADDRESS;
652 KvaEnd = VM_MAX_KERNEL_ADDRESS;
653 KvaSize = KvaEnd - KvaStart;
654
655 avail_start = *firstaddr;
656
657 /*
658 * Create an initial set of page tables to run the kernel in.
659 */
660 create_pagetables(firstaddr);
661
662 virtual_start = (vm_offset_t) PTOV_OFFSET + *firstaddr;
663 virtual_start = pmap_kmem_choose(virtual_start);
664
665 virtual_end = VM_MAX_KERNEL_ADDRESS;
666
667 /* XXX do %cr0 as well */
668 load_cr4(rcr4() | CR4_PGE | CR4_PSE);
669 load_cr3(KPML4phys);
670
671 /*
672 * Initialize protection array.
673 */
674 i386_protection_init();
675
676 /*
677 * The kernel's pmap is statically allocated so we don't have to use
678 * pmap_create, which is unlikely to work correctly at this part of
679 * the boot sequence (XXX and which no longer exists).
680 */
681#if JGPMAP32
682 kernel_pmap.pm_pdir = (pd_entry_t *)(PTOV_OFFSET + (uint64_t)IdlePTD);
683#endif
684 kernel_pmap.pm_pml4 = (pdp_entry_t *) (PTOV_OFFSET + KPML4phys);
685 kernel_pmap.pm_count = 1;
686 kernel_pmap.pm_active = (cpumask_t)-1; /* don't allow deactivation */
687 TAILQ_INIT(&kernel_pmap.pm_pvlist);
688 nkpt = NKPT;
689
690 /*
691 * Reserve some special page table entries/VA space for temporary
692 * mapping of pages.
693 */
694#define SYSMAP(c, p, v, n) \
695 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
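/*
 * SYSMAP hands out the next 'n' pages of KVA starting at 'va' (storing the
 * base in 'v') together with a pointer to the first of the corresponding
 * PTEs (stored in 'p'), advancing both cursors.
 */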
696
697 va = virtual_start;
698#ifdef JG
699 pte = (pt_entry_t *) pmap_pte(&kernel_pmap, va);
700#else
701 pte = vtopte(va);
702#endif
703
704 /*
705 * CMAP1/CMAP2 are used for zeroing and copying pages.
706 */
707 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
708
709 /*
710 * Crashdump maps.
711 */
712 SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
713
714 /*
715 * ptvmmap is used for reading arbitrary physical pages via
716 * /dev/mem.
717 */
718 SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
719
720 /*
721 * msgbufp is used to map the system message buffer.
722 * XXX msgbufmap is not used.
723 */
724 SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
725 atop(round_page(MSGBUF_SIZE)))
726
727 virtual_start = va;
728
729 *CMAP1 = 0;
730#if JGPMAP32
731 for (i = 0; i < NKPT; i++)
732 PTD[i] = 0;
733#endif
734
735 /*
736 * PG_G is terribly broken on SMP because we IPI invltlb's in some
737 * cases rather than invlpg. Actually, I don't even know why it
738 * works under UP because self-referential page table mappings
739 */
740#ifdef SMP
741 pgeflag = 0;
742#else
743 if (cpu_feature & CPUID_PGE)
744 pgeflag = PG_G;
745#endif
746
747/*
748 * Initialize the 4MB page size flag
749 */
750 pseflag = 0;
751/*
752 * The 4MB page version of the initial
753 * kernel page mapping.
754 */
755 pdir4mb = 0;
756
757#if !defined(DISABLE_PSE)
758 if (cpu_feature & CPUID_PSE) {
759 pt_entry_t ptditmp;
760 /*
761 * Note that we have enabled PSE mode
762 */
763 pseflag = PG_PS;
764 ptditmp = *(PTmap + amd64_btop(KERNBASE));
765 ptditmp &= ~(NBPDR - 1);
766 ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
767 pdir4mb = ptditmp;
768
769#ifndef SMP
770 /*
771 * Enable the PSE mode. If we are SMP we can't do this
772 * now because the APs will not be able to use it when
773 * they boot up.
774 */
775 load_cr4(rcr4() | CR4_PSE);
776
777 /*
778 * We can do the mapping here for the single processor
779 * case. We simply ignore the old page table page from
780 * now on.
781 */
782 /*
783 * For SMP, we still need 4K pages to bootstrap APs,
784 * PSE will be enabled as soon as all APs are up.
785 */
786 PTD[KPTDI] = (pd_entry_t)ptditmp;
787#if JGPMAP32
788 kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
789#endif
790 cpu_invltlb();
791#endif
792 }
793#endif
794#ifdef SMP
795 if (cpu_apic_address == 0)
796 panic("pmap_bootstrap: no local apic!");
797
798 /* local apic is mapped on last page */
799 SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
800 (cpu_apic_address & PG_FRAME));
801#endif
802
803 /*
804 * We need to finish setting up the globaldata page for the BSP.
805 * locore has already populated the page table for the mdglobaldata
806 * portion.
807 */
808 pg = MDGLOBALDATA_BASEALLOC_PAGES;
809 gd = &CPU_prvspace[0].mdglobaldata;
810 gd->gd_CMAP1 = &SMPpt[pg + 0];
811 gd->gd_CMAP2 = &SMPpt[pg + 1];
812 gd->gd_CMAP3 = &SMPpt[pg + 2];
813 gd->gd_PMAP1 = &SMPpt[pg + 3];
814 gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
815 gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
816 gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
817 gd->gd_PADDR1 = (pt_entry_t *)CPU_prvspace[0].PPAGE1;
818
819 cpu_invltlb();
820}
821
822#ifdef SMP
823/*
824 * Set 4mb pdir for mp startup
825 */
826void
827pmap_set_opt(void)
828READY0
829{
830 if (pseflag && (cpu_feature & CPUID_PSE)) {
831 load_cr4(rcr4() | CR4_PSE);
832 if (pdir4mb && mycpu->gd_cpuid == 0) { /* only on BSP */
833#if JGPMAP32
834 kernel_pmap.pm_pdir[KPTDI] =
835 PTD[KPTDI] = (pd_entry_t)pdir4mb;
836#endif
837 cpu_invltlb();
838 }
839 }
840}
841#endif
842
843/*
844 * Initialize the pmap module.
845 * Called by vm_init to initialize any structures that the pmap
846 * system needs to map virtual memory.
847 * pmap_init has been enhanced to support, in a fairly consistent
848 * way, discontiguous physical memory.
849 */
850void
851pmap_init(void)
852READY0
853{
854 int i;
855 int initial_pvs;
856
857 /*
858 * object for kernel page table pages
859 */
860 /* JG I think the number can be arbitrary */
861 kptobj = vm_object_allocate(OBJT_DEFAULT, 5);
862
863 /*
864 * Allocate memory for random pmap data structures. Includes the
865 * pv_head_table.
866 */
867
868 for(i = 0; i < vm_page_array_size; i++) {
869 vm_page_t m;
870
871 m = &vm_page_array[i];
872 TAILQ_INIT(&m->md.pv_list);
873 m->md.pv_list_count = 0;
874 }
875
876 /*
877 * init the pv free list
878 */
879 initial_pvs = vm_page_array_size;
880 if (initial_pvs < MINPV)
881 initial_pvs = MINPV;
882 pvzone = &pvzone_store;
883 pvinit = (struct pv_entry *) kmem_alloc(&kernel_map,
884 initial_pvs * sizeof (struct pv_entry));
885 zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
886 initial_pvs);
887
888 /*
889 * Now it is safe to enable pv_table recording.
890 */
891 pmap_initialized = TRUE;
892}
893
894/*
895 * Initialize the address space (zone) for the pv_entries. Set a
896 * high water mark so that the system can recover from excessive
897 * numbers of pv entries.
898 */
899void
900pmap_init2(void)
901READY0
902{
903 int shpgperproc = PMAP_SHPGPERPROC;
904
905 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
906 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
907 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
908 pv_entry_high_water = 9 * (pv_entry_max / 10);
909 zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
910}
911
912
913/***************************************************
914 * Low level helper routines.....
915 ***************************************************/
916
917#if defined(PMAP_DIAGNOSTIC)
918
919/*
920 * This code checks for non-writeable/modified pages.
921 * This should be an invalid condition.
922 */
923static int
924pmap_nw_modified(pt_entry_t pte)
925READY1
926{
927 if ((pte & (PG_M|PG_RW)) == PG_M)
928 return 1;
929 else
930 return 0;
931}
932#endif
933
934
935/*
936 * this routine defines the region(s) of memory that should
937 * not be tested for the modified bit.
938 */
939static PMAP_INLINE int
940pmap_track_modified(vm_offset_t va)
941READY0
942{
943 if ((va < clean_sva) || (va >= clean_eva))
944 return 1;
945 else
946 return 0;
947}
948
949/*
950 * pmap_extract:
951 *
952 * Extract the physical page address associated with the map/VA pair.
953 *
954 * This function may not be called from an interrupt if the pmap is
955 * not kernel_pmap.
956 */
957vm_paddr_t
958pmap_extract(pmap_t pmap, vm_offset_t va)
959READY1
960{
961 vm_paddr_t rtval;
962 pt_entry_t *pte;
963 pd_entry_t pde, *pdep;
964
965 rtval = 0;
966 pdep = pmap_pde(pmap, va);
967 if (pdep != NULL) {
968 pde = *pdep;
969 if (pde) {
970 if ((pde & PG_PS) != 0) {
971 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
972 } else {
973 pte = pmap_pde_to_pte(pdep, va);
974 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
975 }
976 }
977 }
978 return rtval;
979}
980
981/*
982 * Routine: pmap_kextract
983 * Function:
984 * Extract the physical page address associated with the given
985 * kernel virtual address.
986 */
987vm_paddr_t
988pmap_kextract(vm_offset_t va)
989READY1
990{
991 pd_entry_t pde;
992 vm_paddr_t pa;
993
994 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
995 pa = DMAP_TO_PHYS(va);
996 } else {
997 pde = *vtopde(va);
998 if (pde & PG_PS) {
999 pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
1000 } else {
1001 /*
1002 * Beware of a concurrent promotion that changes the
1003 * PDE at this point! For example, vtopte() must not
1004 * be used to access the PTE because it would use the
1005 * new PDE. It is, however, safe to use the old PDE
1006 * because the page table page is preserved by the
1007 * promotion.
1008 */
1009 pa = *pmap_pde_to_pte(&pde, va);
1010 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1011 }
1012 }
1013 return pa;
1014}
1015
1016/***************************************************
1017 * Low level mapping routines.....
1018 ***************************************************/
1019
1020/*
1021 * Routine: pmap_kenter
1022 * Function:
1023 * Add a wired page to the KVA
1024 * NOTE! In order for the mapping to take effect you should do
1025 * an invltlb after calling pmap_kenter().
1026 */
1027void
1028pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1029READY1
1030{
1031 pt_entry_t *pte;
1032 pt_entry_t npte;
1033 pmap_inval_info info;
1034
1035 pmap_inval_init(&info);
1036 npte = pa | PG_RW | PG_V | pgeflag;
1037 pte = vtopte(va);
1038 pmap_inval_add(&info, &kernel_pmap, va);
1039 *pte = npte;
1040 pmap_inval_flush(&info);
1041}
1042
1043/*
1044 * Routine: pmap_kenter_quick
1045 * Function:
1046 * Similar to pmap_kenter(), except we only invalidate the
1047 * mapping on the current CPU.
1048 */
1049void
1050pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
1051READY1
1052{
1053 pt_entry_t *pte;
1054 pt_entry_t npte;
1055
1056 npte = pa | PG_RW | PG_V | pgeflag;
1057 pte = vtopte(va);
1058 *pte = npte;
1059 cpu_invlpg((void *)va);
1060}
1061
1062void
1063pmap_kenter_sync(vm_offset_t va)
1064READY1
1065{
1066 pmap_inval_info info;
1067
1068 pmap_inval_init(&info);
1069 pmap_inval_add(&info, &kernel_pmap, va);
1070 pmap_inval_flush(&info);
1071}
1072
1073void
1074pmap_kenter_sync_quick(vm_offset_t va)
1075READY1
1076{
1077 cpu_invlpg((void *)va);
1078}
1079
1080/*
1081 * remove a page from the kernel pagetables
1082 */
1083void
1084pmap_kremove(vm_offset_t va)
1085READY1
1086{
1087 pt_entry_t *pte;
1088 pmap_inval_info info;
1089
1090 pmap_inval_init(&info);
1091 pte = vtopte(va);
1092 pmap_inval_add(&info, &kernel_pmap, va);
1093 *pte = 0;
1094 pmap_inval_flush(&info);
1095}
1096
1097void
1098pmap_kremove_quick(vm_offset_t va)
1099READY1
1100{
1101 pt_entry_t *pte;
1102 pte = vtopte(va);
1103 *pte = 0;
1104 cpu_invlpg((void *)va);
1105}
1106
1107/*
1108 * XXX these need to be recoded. They are not used in any critical path.
1109 */
1110void
1111pmap_kmodify_rw(vm_offset_t va)
1112READY1
1113{
1114 *vtopte(va) |= PG_RW;
1115 cpu_invlpg((void *)va);
1116}
1117
1118void
1119pmap_kmodify_nc(vm_offset_t va)
1120READY1
1121{
1122 *vtopte(va) |= PG_N;
1123 cpu_invlpg((void *)va);
1124}
1125
1126/*
1127 * Used to map a range of physical addresses into kernel
1128 * virtual address space.
1129 *
1130 * For now, VM is already on; we only need to map the
1131 * specified memory.
1132 */
1133vm_offset_t
1134pmap_map(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
1135READY1
1136{
1137 /*
1138 * JG Are callers prepared to get an address in the DMAP,
1139 * instead of the passed-in virt?
1140 */
1141 while (start < end) {
1142 pmap_kenter(virt, start);
1143 virt += PAGE_SIZE;
1144 start += PAGE_SIZE;
1145 }
1146 return (virt);
1147}
1148
1149
1150/*
1151 * Add a list of wired pages to the kva. This routine is only
1152 * used for temporary kernel mappings that do not need to have
1153 * page modification or references recorded.
1154 *
1155 * Note that old mappings are simply written over.
1156 * The pages *must* be wired.
1157 */
1158void
1159pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
1160READY1
1161{
1162 vm_offset_t end_va;
1163
1164 end_va = va + count * PAGE_SIZE;
1165
1166 while (va < end_va) {
1167 pt_entry_t *pte;
1168
1169 pte = vtopte(va);
1170 *pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
1171 cpu_invlpg((void *)va);
1172 va += PAGE_SIZE;
1173 m++;
1174 }
1175#ifdef SMP
1176 smp_invltlb(); /* XXX */
1177#endif
1178}
1179
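/*
 * Like pmap_qenter() but uses the caller-supplied cpumask to avoid
 * redundant invalidations: the mask records which cpus have already
 * seen the current set of mappings and is reset whenever a pte
 * actually changes.
 */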
1180void
1181pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
1182READY1
1183{
1184 vm_offset_t end_va;
1185 cpumask_t cmask = mycpu->gd_cpumask;
1186
1187 end_va = va + count * PAGE_SIZE;
1188
1189 while (va < end_va) {
1190 pt_entry_t *pte;
1191 pt_entry_t pteval;
1192
1193 /*
1194 * Install the new PTE. If the pte changed from the prior
1195 * mapping we must reset the cpu mask and invalidate the page.
1196 * If the pte is the same but we have not seen it on the
1197 * current cpu, invlpg the existing mapping. Otherwise the
1198 * entry is optimal and no invalidation is required.
1199 */
1200 pte = vtopte(va);
1201 pteval = VM_PAGE_TO_PHYS(*m) | PG_A | PG_RW | PG_V | pgeflag;
1202 if (*pte != pteval) {
1203 *mask = 0;
1204 *pte = pteval;
1205 cpu_invlpg((void *)va);
1206 } else if ((*mask & cmask) == 0) {
1207 cpu_invlpg((void *)va);
1208 }
1209 va += PAGE_SIZE;
1210 m++;
1211 }
1212 *mask |= cmask;
1213}
1214
1215/*
1216 * this routine jerks page mappings from the
1217 * kernel -- it is meant only for temporary mappings.
1218 */
1219void
1220pmap_qremove(vm_offset_t va, int count)
1221READY1
1222{
1223 vm_offset_t end_va;
1224
1225 end_va = va + count * PAGE_SIZE;
1226
1227 while (va < end_va) {
1228 pt_entry_t *pte;
1229
1230 pte = vtopte(va);
1231 *pte = 0;
1232 cpu_invlpg((void *)va);
1233 va += PAGE_SIZE;
1234 }
1235#ifdef SMP
1236 smp_invltlb();
1237#endif
1238}
1239
1240/*
1241 * This routine works like vm_page_lookup() but also blocks as long as the
1242 * page is busy. This routine does not busy the page it returns.
1243 *
1244 * Unless the caller is managing objects whose pages are in a known state,
1245 * the call should be made with a critical section held so the page's object
1246 * association remains valid on return.
1247 */
1248static vm_page_t
1249pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
1250READY1
1251{
1252 vm_page_t m;
1253
1254 do {
1255 m = vm_page_lookup(object, pindex);
1256 } while (m && vm_page_sleep_busy(m, FALSE, "pplookp"));
1257
1258 return(m);
1259}
1260
1261/*
1262 * Initialize the md portions of a new thread: the pcb is placed at the top
1263 * of the kernel stack. NOTE! the new thread's cpu may not equal the current cpu.
1264 */
1265void
1266pmap_init_thread(thread_t td)
1267READY1
1268{
1269 /* enforce pcb placement */
1270 td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
1271 td->td_savefpu = &td->td_pcb->pcb_save;
1272 td->td_sp = (char *)td->td_pcb - 16; /* JG is -16 needed on amd64? */
1273}
1274
1275/*
1276 * This routine directly affects the fork perf for a process.
1277 */
1278void
1279pmap_init_proc(struct proc *p)
1280READY1
1281{
1282}
1283
1284/*
1285 * Dispose of the UPAGES for a process that has exited.
1286 * This routine directly impacts the exit perf of a process.
1287 */
1288void
1289pmap_dispose_proc(struct proc *p)
1290READY1
1291{
1292 KASSERT(p->p_lock == 0, ("attempt to dispose referenced proc! %p", p));
1293}
1294
1295/***************************************************
1296 * Page table page management routines.....
1297 ***************************************************/
1298
1299/*
1300 * This routine unholds page table pages, and if the hold count
1301 * drops to zero, then it decrements the wire count.
1302 */
1303static int
1304_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, pmap_inval_info_t info)
1305READY1
1306{
1307 /*
1308 * Wait until we can busy the page ourselves. We cannot have
1309 * any active flushes if we block.
1310 */
1311 if (m->flags & PG_BUSY) {
1312 pmap_inval_flush(info);
1313 while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
1314 ;
1315 }
1316 KASSERT(m->queue == PQ_NONE,
1317 ("_pmap_unwire_pte_hold: %p->queue != PQ_NONE", m));
1318
1319 if (m->hold_count == 1) {
1320 /*
1321 * Unmap the page table page
1322 */
1323 vm_page_busy(m);
1324 pmap_inval_add(info, pmap, -1);
1325
1326 if (m->pindex >= (NUPDE + NUPDPE)) {
1327 /* PDP page */
1328 pml4_entry_t *pml4;
1329 pml4 = pmap_pml4e(pmap, va);
1330 *pml4 = 0;
1331 } else if (m->pindex >= NUPDE) {
1332 /* PD page */
1333 pdp_entry_t *pdp;
1334 pdp = pmap_pdpe(pmap, va);
1335 *pdp = 0;
1336 } else {
1337 /* PT page */
1338 pd_entry_t *pd;
1339 pd = pmap_pde(pmap, va);
1340 *pd = 0;
1341 }
1342
1343 KKASSERT(pmap->pm_stats.resident_count > 0);
1344 --pmap->pm_stats.resident_count;
1345
1346 if (pmap->pm_ptphint == m)
1347 pmap->pm_ptphint = NULL;
1348
1349#if JG
1350 if (m->pindex < NUPDE) {
1351 /* We just released a PT, unhold the matching PD */
1352 vm_page_t pdpg;
1353
1354 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
1355 pmap_unwire_pte_hold(pmap, va, pdpg, info);
1356 }
1357 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
1358 /* We just released a PD, unhold the matching PDP */
1359 vm_page_t pdppg;
1360
1361 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
1362 pmap_unwire_pte_hold(pmap, va, pdppg, info);
1363 }
1364#endif
1365
1366 /*
1367 * This was our last hold, the page had better be unwired
1368 * after we decrement wire_count.
1369 *
1370 * FUTURE NOTE: shared page directory page could result in
1371 * multiple wire counts.
1372 */
1373 vm_page_unhold(m);
1374 --m->wire_count;
1375 KKASSERT(m->wire_count == 0);
1376 --vmstats.v_wire_count;
1377 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1378 vm_page_flash(m);
1379 vm_page_free_zero(m);
1380 return 1;
1381 } else {
1382 KKASSERT(m->hold_count > 1);
1383 vm_page_unhold(m);
1384 return 0;
1385 }
1386}
1387
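/*
 * Drop one hold on a page table page. The common case (hold_count > 1)
 * is handled inline; the final hold falls into _pmap_unwire_pte_hold(),
 * which tears the page down.
 */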
1388static PMAP_INLINE int
1389pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, pmap_inval_info_t info)
1390READY1
1391{
1392 KKASSERT(m->hold_count > 0);
1393 if (m->hold_count > 1) {
1394 vm_page_unhold(m);
1395 return 0;
1396 } else {
1397 return _pmap_unwire_pte_hold(pmap, va, m, info);
1398 }
1399}
1400
1401/*
1402 * After removing a page table entry, this routine is used to
1403 * conditionally free the page, and manage the hold/wire counts.
1404 */
1405static int
1406pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
1407 pmap_inval_info_t info)
1408READY1
1409{
1410 /* JG Use FreeBSD/amd64 or FreeBSD/i386 ptepde approaches? */
1411 vm_pindex_t ptepindex;
1412 if (va >= VM_MAX_USER_ADDRESS)
1413 return 0;
1414
1415 if (mpte == NULL) {
1416 ptepindex = pmap_pde_pindex(va);
1417#if JGHINT
1418 if (pmap->pm_ptphint &&
1419 (pmap->pm_ptphint->pindex == ptepindex)) {
1420 mpte = pmap->pm_ptphint;
1421 } else {
1422#endif
1423 pmap_inval_flush(info);
1424 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
1425 pmap->pm_ptphint = mpte;
1426#if JGHINT
1427 }
1428#endif
1429 }
1430
1431 return pmap_unwire_pte_hold(pmap, va, mpte, info);
1432}
1433
1434/*
1435 * Initialize pmap0/vmspace0. This pmap is not added to pmap_list because
1436 * it, and IdlePTD, represent the template used to update all other pmaps.
1437 *
1438 * On architectures where the kernel pmap is not integrated into the user
1439 * process pmap, this pmap represents the process pmap, not the kernel pmap.
1440 * kernel_pmap should be used to directly access the kernel_pmap.
1441 */
1442void
1443pmap_pinit0(struct pmap *pmap)
1444READY1
1445{
1446#if JGPMAP32
1447 pmap->pm_pdir =
1448 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
1449 pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
1450#endif
1451 pmap->pm_pml4 = (pml4_entry_t *)(PTOV_OFFSET + KPML4phys);
1452 pmap->pm_count = 1;
1453 pmap->pm_active = 0;
1454 pmap->pm_ptphint = NULL;
1455 TAILQ_INIT(&pmap->pm_pvlist);
1456 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1457}
1458
1459/*
1460 * Initialize a preallocated and zeroed pmap structure,
1461 * such as one in a vmspace structure.
1462 */
1463void
1464pmap_pinit(struct pmap *pmap)
1465READY1
1466{
1467 vm_page_t ptdpg;
1468
1469 /*
1470 * No need to allocate page table space yet but we do need a valid
1471 * page directory table.
1472 */
1473 if (pmap->pm_pml4 == NULL) {
1474 pmap->pm_pml4 =
1475 (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
1476 }
1477
1478 /*
1479 * Allocate an object for the ptes
1480 */
1481 if (pmap->pm_pteobj == NULL)
1482 pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PML4PML4I + 1);
1483
1484 /*
1485 * Allocate the page directory page, unless we already have
1486 * one cached. If we used the cached page the wire_count will
1487 * already be set appropriately.
1488 */
1489 if ((ptdpg = pmap->pm_pdirm) == NULL) {
1490 ptdpg = vm_page_grab(pmap->pm_pteobj, PML4PML4I,
1491 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1492 pmap->pm_pdirm = ptdpg;
1493 vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY);
1494 ptdpg->valid = VM_PAGE_BITS_ALL;
1495 ptdpg->wire_count = 1;
1496 ++vmstats.v_wire_count;
1497 pmap_kenter((vm_offset_t)pmap->pm_pml4, VM_PAGE_TO_PHYS(ptdpg));
1498 }
1499 if ((ptdpg->flags & PG_ZERO) == 0)
1500 bzero(pmap->pm_pml4, PAGE_SIZE);
1501
1502 pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
1503 pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
1504
1505 /* install self-referential address mapping entry */
1506 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
1507
1508 pmap->pm_count = 1;
1509 pmap->pm_active = 0;
1510 pmap->pm_ptphint = NULL;
1511 TAILQ_INIT(&pmap->pm_pvlist);
1512 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1513 pmap->pm_stats.resident_count = 1;
1514}
1515
1516/*
1517 * Clean up a pmap structure so it can be physically freed. This routine
1518 * is called by the vmspace dtor function. A great deal of pmap data is
1519 * left passively mapped to improve vmspace management so we have a bit
1520 * of cleanup work to do here.
1521 */
1522void
1523pmap_puninit(pmap_t pmap)
1524READY1
1525{
1526 vm_page_t p;
1527
1528 KKASSERT(pmap->pm_active == 0);
1529 if ((p = pmap->pm_pdirm) != NULL) {
1530 KKASSERT(pmap->pm_pml4 != NULL);
1531 KKASSERT(pmap->pm_pml4 != (PTOV_OFFSET + KPML4phys));
1532 pmap_kremove((vm_offset_t)pmap->pm_pml4);
1533 p->wire_count--;
1534 vmstats.v_wire_count--;
1535 KKASSERT((p->flags & PG_BUSY) == 0);
1536 vm_page_busy(p);
1537 vm_page_free_zero(p);
1538 pmap->pm_pdirm = NULL;
1539 }
1540 if (pmap->pm_pml4) {
1541 KKASSERT(pmap->pm_pml4 != (PTOV_OFFSET + KPML4phys));
1542 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE);
1543 pmap->pm_pml4 = NULL;
1544 }
1545 if (pmap->pm_pteobj) {
1546 vm_object_deallocate(pmap->pm_pteobj);
1547 pmap->pm_pteobj = NULL;
1548 }
1549}
1550
1551/*
1552 * Wire in kernel global address entries. To avoid a race condition
1553 * between pmap initialization and pmap_growkernel, this procedure
1554 * adds the pmap to the master list (which growkernel scans to update),
1555 * then copies the template.
1556 */
1557void
1558pmap_pinit2(struct pmap *pmap)
1559READY0
1560{
1561 crit_enter();
1562 TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
1563 /* XXX copies current process, does not fill in MPPTDI */
1564#if JGPMAP32
1565 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
1566#endif
1567 crit_exit();
1568}
1569
1570/*
1571 * Attempt to release and free a vm_page in a pmap. Returns 1 on success,
1572 * 0 on failure (if the procedure had to sleep).
1573 *
1574 * When asked to remove the page directory page itself, we actually just
1575 * leave it cached so we do not have to incur the SMP inval overhead of
1576 * removing the kernel mapping. pmap_puninit() will take care of it.
1577 */
1578static int
1579pmap_release_free_page(struct pmap *pmap, vm_page_t p)
1580READY1
1581{
1582 pml4_entry_t *pml4 = pmap->pm_pml4;
1583 /*
1584 * This code optimizes the case of freeing non-busy
1585 * page-table pages. Those pages are zero now, and
1586 * might as well be placed directly into the zero queue.
1587 */
1588 if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
1589 return 0;
1590
1591 vm_page_busy(p);
1592
1593 /*
1594 * Remove the page table page from the processes address space.
1595 */
1596 /* JG XXX we need to turn 'pindex' into a page table level
1597 * (PML4, PDP, PD, PT) and index within the page table page
1598 */
1599#if JGPMAP32
1600 pde[p->pindex] = 0;
1601#endif
1602 KKASSERT(pmap->pm_stats.resident_count > 0);
1603 --pmap->pm_stats.resident_count;
1604
1605 if (p->hold_count) {
1606 panic("pmap_release: freeing held page table page");
1607 }
1608 if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
1609 pmap->pm_ptphint = NULL;
1610
1611 p->wire_count--;
1612 vmstats.v_wire_count--;
1613 vm_page_free_zero(p);
1614 return 1;
1615}
1616
1617/*
1618 * This routine is called when the required page table page is not mapped;
1619 * it allocates and wires it, recursing to fill in missing upper levels.
1620 */
1621static vm_page_t
1622_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex)
1623READY1
1624{
1625 vm_page_t m, pdppg, pdpg;
1626
1627 /*
1628 * Find or fabricate a new pagetable page
1629 */
1630 m = vm_page_grab(pmap->pm_pteobj, ptepindex,
1631 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
1632
1633
1634 if ((m->flags & PG_ZERO) == 0) {
1635 pmap_zero_page(VM_PAGE_TO_PHYS(m));
1636 }
1637
1638 KASSERT(m->queue == PQ_NONE,
1639 ("_pmap_allocpte: %p->queue != PQ_NONE", m));
1640
1641 /*
1642 * Increment the hold count for the page we will be returning to
1643 * the caller.
1644 */
1645 m->hold_count++;
1646
1647 /*
1648 * It is possible that someone else got in and mapped the page
1649 * directory page while we were blocked; if so, just unbusy and
1650 * return the held page.
1651 */
1652#if JGPMAP32
1653 if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
1654 KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
1655 vm_page_wakeup(m);
1656 return(m);
1657 }
1658#endif
1659
1660 if (m->wire_count == 0)
1661 vmstats.v_wire_count++;
1662 m->wire_count++;
1663
1664
1665 /*
1666 * Map the pagetable page into the process address space, if
1667 * it isn't already there.
1668 */
1669
1670 ++pmap->pm_stats.resident_count;
1671
1672#if JGPMAP32
1673 ptepa = VM_PAGE_TO_PHYS(m);
1674 pmap->pm_pdir[ptepindex] =
1675 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
1676#endif
1677 if (ptepindex >= (NUPDE + NUPDPE)) {
1678 pml4_entry_t *pml4;
1679 vm_pindex_t pml4index;
1680
1681 /* Wire up a new PDP page */
1682 pml4index = ptepindex - (NUPDE + NUPDPE);
1683 pml4 = &pmap->pm_pml4[pml4index];
1684 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1685
1686 } else if (ptepindex >= NUPDE) {
1687 vm_pindex_t pml4index;
1688 vm_pindex_t pdpindex;
1689 pml4_entry_t *pml4;
1690 pdp_entry_t *pdp;
1691
1692 /* Wire up a new PD page */
1693 pdpindex = ptepindex - NUPDE;
1694 pml4index = pdpindex >> NPML4EPGSHIFT;
1695
1696 pml4 = &pmap->pm_pml4[pml4index];
1697 if ((*pml4 & PG_V) == 0) {
1698 /* Have to allocate a new PDP page, recurse */
1699 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index)
1700 == NULL) {
1701 --m->wire_count;
1702 vm_page_free(m);
1703 return (NULL);
1704 }
1705 } else {
1706 /* Add reference to the PDP page */
1707 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
1708 pdppg->wire_count++;
1709 }
1710 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1711
1712 /* Now find the pdp page */
1713 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1714 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1715
1716 } else {
1717 vm_pindex_t pml4index;
1718 vm_pindex_t pdpindex;
1719 pml4_entry_t *pml4;
1720 pdp_entry_t *pdp;
1721 pd_entry_t *pd;
1722
1723 /* Wire up a new PT page */
1724 pdpindex = ptepindex >> NPDPEPGSHIFT;
1725 pml4index = pdpindex >> NPML4EPGSHIFT;
1726
1727 /* First, find the pdp and check that it's valid. */
1728 pml4 = &pmap->pm_pml4[pml4index];
1729 if ((*pml4 & PG_V) == 0) {
1730 /* We are missing a PDP page. We ultimately need a PD page.
1731 * Recursively allocating a PD page will allocate
1732 * the missing PDP page and will also allocate
1733 * the PD page we need.
1734 */
1735 /* Have to allocate a new PD page, recurse */
1736 if (_pmap_allocpte(pmap, NUPDE + pdpindex)
1737 == NULL) {
1738 --m->wire_count;
1739 vm_page_free(m);
1740 return (NULL);
1741 }
1742 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1743 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1744 } else {
1745 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1746 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1747 if ((*pdp & PG_V) == 0) {
1748 /* Have to allocate a new PD page, recurse */
1749 if (_pmap_allocpte(pmap, NUPDE + pdpindex)
1750 == NULL) {
1751 --m->wire_count;
1752 vm_page_free(m);
1753 return (NULL);
1754 }
1755 } else {
1756 /* Add reference to the PD page */
1757 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
1758 pdpg->wire_count++;
1759 }
1760 }
1761 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
1762
1763 /* Now we know where the page directory page is */
1764 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
1765 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1766 }
1767
1768
1769 /*
1770 * Set the page table hint
1771 */
1772 pmap->pm_ptphint = m;
1773
1774 m->valid = VM_PAGE_BITS_ALL;
1775 vm_page_flag_clear(m, PG_ZERO);
1776 vm_page_flag_set(m, PG_MAPPED);
1777 vm_page_wakeup(m);
1778
1779 return m;
1780}
1781
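/*
 * Return the page table page backing 'va' with an additional hold,
 * allocating it (and any missing intermediate levels) via
 * _pmap_allocpte() if necessary. A 2MB page mapping found at the PDE
 * is blown away and replaced by a normal page table.
 */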
1782static vm_page_t
1783pmap_allocpte(pmap_t pmap, vm_offset_t va)
1784READY1
1785{
1786 vm_pindex_t ptepindex;
1787 pd_entry_t *pd;
1788 vm_page_t m;
1789
1790 /*
1791 * Calculate pagetable page index
1792 */
1793 ptepindex = pmap_pde_pindex(va);
1794
1795 /*
1796 * Get the page directory entry
1797 */
1798 pd = pmap_pde(pmap, va);
1799
1800 /*
1801 * This supports switching from a 2MB page to a
1802 * normal 4K page.
1803 */
1804 if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
1805 *pd = 0;
1806 pd = NULL;
1807 cpu_invltlb();
1808 smp_invltlb();
1809 }
1810
1811 /*
1812 * If the page table page is mapped, we just increment the
1813 * hold count, and activate it.
1814 */
1815 if (pd != NULL && (*pd & PG_V) != 0) {
1816 /* YYY hint is used here on i386 */
1817 m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
1818 pmap->pm_ptphint = m;
1819 m->hold_count++;
1820 return m;
1821 }
1822 /*
1823 * Here if the pte page isn't mapped, or if it has been deallocated.
1824 */
1825 return _pmap_allocpte(pmap, ptepindex);
1826}
1827
1828
1829/***************************************************
1830 * Pmap allocation/deallocation routines.
1831 ***************************************************/
1832
1833/*
1834 * Release any resources held by the given physical map.
1835 * Called when a pmap initialized by pmap_pinit is being released.
1836 * Should only be called if the map contains no valid mappings.
1837 */
1838static int pmap_release_callback(struct vm_page *p, void *data);
1839
1840void
1841pmap_release(struct pmap *pmap)
1842READY1
1843{
1844 vm_object_t object = pmap->pm_pteobj;
1845 struct rb_vm_page_scan_info info;
1846
1847 KASSERT(pmap->pm_active == 0, ("pmap still active! %08x", pmap->pm_active));
1848#if defined(DIAGNOSTIC)
1849 if (object->ref_count != 1)
1850 panic("pmap_release: pteobj reference count != 1");
1851#endif
1852
1853 info.pmap = pmap;
1854 info.object = object;
1855 crit_enter();
1856 TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
1857 crit_exit();
1858
1859 do {
1860 crit_enter();
1861 info.error = 0;
1862 info.mpte = NULL;
1863 info.limit = object->generation;
1864
1865 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1866 pmap_release_callback, &info);
1867 if (info.error == 0 && info.mpte) {
1868 if (!pmap_release_free_page(pmap, info.mpte))
1869 info.error = 1;
1870 }
1871 crit_exit();
1872 } while (info.error);
1873}
1874
1875static int
1876pmap_release_callback(struct vm_page *p, void *data)
1877READY1
1878{
1879 struct rb_vm_page_scan_info *info = data;
1880
1881 if (p->pindex == PML4PML4I) {
1882 info->mpte = p;
1883 return(0);
1884 }
1885 if (!pmap_release_free_page(info->pmap, p)) {
1886 info->error = 1;
1887 return(-1);
1888 }
1889 if (info->object->generation != info->limit) {
1890 info->error = 1;
1891 return(-1);
1892 }
1893 return(0);
1894}
1895
1896/*
1897 * Grow the number of kernel page table entries, if needed.
1898 */
1899
1900void
1901pmap_growkernel(vm_offset_t addr)
1902READY1
1903{
1904 vm_paddr_t paddr;
1905 struct pmap *pmap;
1906 vm_offset_t ptppaddr;
1907 vm_page_t nkpg;
1908 pd_entry_t *pde, newpdir;
1909 pdp_entry_t newpdp;
1910
1911 crit_enter();
1912 if (kernel_vm_end == 0) {
1913 kernel_vm_end = KERNBASE;
1914 nkpt = 0;
1915 while ((*pmap_pde(&kernel_pmap, kernel_vm_end) & PG_V) != 0) {
1916 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1917 nkpt++;
1918 if (kernel_vm_end - 1 >= kernel_map.max_offset) {
1919 kernel_vm_end = kernel_map.max_offset;
1920 break;
1921 }
1922 }
1923 }
1924 addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1925 if (addr - 1 >= kernel_map.max_offset)
1926 addr = kernel_map.max_offset;
1927 while (kernel_vm_end < addr) {
1928 pde = pmap_pde(&kernel_pmap, kernel_vm_end);
1929 if (pde == NULL) {
1930 /* We need a new PDP entry */
1931 nkpg = vm_page_alloc(kptobj, nkpt,
1932 VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM
1933 | VM_ALLOC_INTERRUPT);
1934 if (nkpg == NULL)
1935 panic("pmap_growkernel: no memory to grow kernel");
1936 if ((nkpg->flags & PG_ZERO) == 0)
1937 pmap_zero_page(nkpg);
1938 paddr = VM_PAGE_TO_PHYS(nkpg);
1939 newpdp = (pdp_entry_t)
1940 (paddr | PG_V | PG_RW | PG_A | PG_M);
1941 *pmap_pdpe(&kernel_pmap, kernel_vm_end) = newpdp;
1942 continue; /* try again */
1943 }
1944 if ((*pde & PG_V) != 0) {
1945 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1946 if (kernel_vm_end - 1 >= kernel_map.max_offset) {
1947 kernel_vm_end = kernel_map.max_offset;
1948 break;
1949 }
1950 continue;
1951 }
1952
1953 /*
1954 * This index is bogus, but out of the way
1955 */
1956 nkpg = vm_page_alloc(kptobj, nkpt,
1957 VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT);
1958 if (nkpg == NULL)
1959 panic("pmap_growkernel: no memory to grow kernel");
1960
1961 vm_page_wire(nkpg);
1962 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1963 pmap_zero_page(ptppaddr);
1964 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1965 *pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;
1966 nkpt++;
1967
1968 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1969 if (kernel_vm_end - 1 >= kernel_map.max_offset) {
1970 kernel_vm_end = kernel_map.max_offset;
1971 break;
1972 }
1973 }
1974 crit_exit();
1975}
1976
1977/*
1978 * Retire the given physical map from service.
1979 * Should only be called if the map contains
1980 * no valid mappings.
1981 */
1982void
1983pmap_destroy(pmap_t pmap)
1984READY0
1985{
1986 int count;
1987
1988 if (pmap == NULL)
1989 return;
1990
1991 count = --pmap->pm_count;
1992 if (count == 0) {
1993 pmap_release(pmap);
1994 panic("destroying a pmap is not yet implemented");
1995 }
1996}
1997
1998/*
1999 * Add a reference to the specified pmap.
2000 */
2001void
2002pmap_reference(pmap_t pmap)
2003READY2
2004{
2005 if (pmap != NULL) {
2006 pmap->pm_count++;
2007 }
2008}
2009
2010/***************************************************
2011 * Page management routines.
2012 ***************************************************/
2013
2014/*
2015 * free the pv_entry back to the free list. This function may be
2016 * called from an interrupt.
2017 */
2018static PMAP_INLINE void
2019free_pv_entry(pv_entry_t pv)
2020READY2
2021{
2022 pv_entry_count--;
2023 KKASSERT(pv_entry_count >= 0);
2024 zfree(pvzone, pv);
2025}
2026
2027/*
2028 * get a new pv_entry, allocating a block from the system
2029 * when needed. This function may be called from an interrupt.
2030 */
2031static pv_entry_t
2032get_pv_entry(void)
2033READY2
2034{
2035 pv_entry_count++;
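	/*
	 * Wake the pagedaemon when the number of pv entries exceeds the
	 * high water mark so it can start reclaiming memory.
	 */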
2036 if (pv_entry_high_water &&
2037 (pv_entry_count > pv_entry_high_water) &&
2038 (pmap_pagedaemon_waken == 0)) {
2039 pmap_pagedaemon_waken = 1;
2040 wakeup(&vm_pages_needed);
2041 }
2042 return zalloc(pvzone);
2043}
2044
2045/*
2046 * This routine is very drastic, but can save the system
2047 * in a pinch.
2048 */
2049void
2050pmap_collect(void)
2051READY0
2052{
2053 int i;
2054 vm_page_t m;
2055 static int warningdone=0;
2056
2057 if (pmap_pagedaemon_waken == 0)
2058 return;
2059
2060 if (warningdone < 5) {
2061 kprintf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
2062 warningdone++;
2063 }
2064
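	/*
	 * Scan every physical page and forcibly remove all mappings from
	 * any page that is not busy, held, or wired, freeing its pv entries.
	 */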
2065 for(i = 0; i < vm_page_array_size; i++) {
2066 m = &vm_page_array[i];
2067 if (m->wire_count || m->hold_count || m->busy ||
2068 (m->flags & PG_BUSY))
2069 continue;
2070 pmap_remove_all(m);
2071 }
2072 pmap_pagedaemon_waken = 0;
2073}
2074
2075
/*
 * Remove the pv entry for the given (pmap, va) from the page's pv list
 * and the pmap's pv list, then free the now unused pv entry and drop
 * the reference on its page table page.
 */
2082static int
2083pmap_remove_entry(struct pmap *pmap, vm_page_t m,
2084 vm_offset_t va, pmap_inval_info_t info)
2085READY1
2086{
2087 pv_entry_t pv;
2088 int rtval;
2089
2090 crit_enter();
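	/*
	 * Search whichever pv list is likely to be shorter: the page's
	 * list when it has fewer entries than the pmap has resident
	 * pages, otherwise the pmap's per-pmap list.
	 */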
2091 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
2092 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2093 if (pmap == pv->pv_pmap && va == pv->pv_va)
2094 break;
2095 }
2096 } else {
2097 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
2098 if (va == pv->pv_va)
2099 break;
2100 }
2101 }
2102
2103 rtval = 0;
2104 /* JGXXX When can 'pv' be NULL? */
2105 if (pv) {
2106 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2107 m->md.pv_list_count--;
2108 KKASSERT(m->md.pv_list_count >= 0);
2109 if (TAILQ_EMPTY(&m->md.pv_list))
2110 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
2111 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2112 ++pmap->pm_generation;
2113 rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
2114 free_pv_entry(pv);
2115 }
2116 crit_exit();
2117 return rtval;
2118}
2119
2120/*
2121 * Create a pv entry for page at pa for
2122 * (pmap, va).
2123 */
2124static void
2125pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
2126READY1
2127{
2128 pv_entry_t pv;
2129
2130 crit_enter();
2131 pv = get_pv_entry();
2132 pv->pv_va = va;
2133 pv->pv_pmap = pmap;
2134 pv->pv_ptem = mpte;
2135
2136 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
2137 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2138 m->md.pv_list_count++;
2139
2140 crit_exit();
2141}
2142
2143/*
 * pmap_remove_pte: unmap a single page from a process
2145 */
2146static int
2147pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
2148 pmap_inval_info_t info)
2149READY1
2150{
2151 pt_entry_t oldpte;
2152 vm_page_t m;
2153
2154 pmap_inval_add(info, pmap, va);
2155 oldpte = pte_load_clear(ptq);
2156 if (oldpte & PG_W)
2157 pmap->pm_stats.wired_count -= 1;
2158 /*
	 * Machines that don't support invlpg also don't support
2160 * PG_G. XXX PG_G is disabled for SMP so don't worry about
2161 * the SMP case.
2162 */
2163 if (oldpte & PG_G)
2164 cpu_invlpg((void *)va);
2165 KKASSERT(pmap->pm_stats.resident_count > 0);
2166 --pmap->pm_stats.resident_count;
2167 if (oldpte & PG_MANAGED) {
2168 m = PHYS_TO_VM_PAGE(oldpte);
2169 if (oldpte & PG_M) {
2170#if defined(PMAP_DIAGNOSTIC)
2171 if (pmap_nw_modified((pt_entry_t) oldpte)) {
2172 kprintf(
2173 "pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
2174 va, oldpte);
2175 }
2176#endif
2177 if (pmap_track_modified(va))
2178 vm_page_dirty(m);
2179 }
2180 if (oldpte & PG_A)
2181 vm_page_flag_set(m, PG_REFERENCED);
2182 return pmap_remove_entry(pmap, m, va, info);
2183 } else {
2184 return pmap_unuse_pt(pmap, va, NULL, info);
2185 }
2186
2187 return 0;
2188}
2189
2190/*
2191 * pmap_remove_page:
2192 *
2193 * Remove a single page from a process address space.
2194 *
2195 * This function may not be called from an interrupt if the pmap is
2196 * not kernel_pmap.
2197 */
2198static void
2199pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
2200READY1
2201{
2202 pt_entry_t *pte;
2203
2204 pte = pmap_pte(pmap, va);
2205 if (pte == NULL)
2206 return;
2207 if ((*pte & PG_V) == 0)
2208 return;
2209 pmap_remove_pte(pmap, pte, va, info);
2210}
2211
2212/*
2213 * pmap_remove:
2214 *
2215 * Remove the given range of addresses from the specified map.
2216 *
2217 * It is assumed that the start and end are properly
2218 * rounded to the page size.
2219 *
2220 * This function may not be called from an interrupt if the pmap is
2221 * not kernel_pmap.
2222 */
2223void
2224pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
2225READY1
2226{
2227 vm_offset_t va_next;
2228 pml4_entry_t *pml4e;
2229 pdp_entry_t *pdpe;
2230 pd_entry_t ptpaddr, *pde;
2231 pt_entry_t *pte;
2232 struct pmap_inval_info info;
2233
2234 if (pmap == NULL)
2235 return;
2236
2237 if (pmap->pm_stats.resident_count == 0)
2238 return;
2239
2240 pmap_inval_init(&info);
2241
2242 /*
	 * Special handling for removing a single page: this is a very
	 * common operation and lets us short-circuit some code.
2246 */
2247 if (sva + PAGE_SIZE == eva) {
2248 pde = pmap_pde(pmap, sva);
2249 if (pde && (*pde & PG_PS) == 0) {
2250 pmap_remove_page(pmap, sva, &info);
2251 pmap_inval_flush(&info);
2252 return;
2253 }
2254 }
2255
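	/*
	 * Walk the range one page-directory (2MB) span at a time, skipping
	 * PML4 and PDP levels that are not populated.
	 */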
2256 for (; sva < eva; sva = va_next) {
2257 pml4e = pmap_pml4e(pmap, sva);
2258 if ((*pml4e & PG_V) == 0) {
2259 va_next = (sva + NBPML4) & ~PML4MASK;
2260 if (va_next < sva)
2261 va_next = eva;
2262 continue;
2263 }
2264
2265 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
2266 if ((*pdpe & PG_V) == 0) {
2267 va_next = (sva + NBPDP) & ~PDPMASK;
2268 if (va_next < sva)
2269 va_next = eva;
2270 continue;
2271 }
2272
2273 /*
2274 * Calculate index for next page table.
2275 */
2276 va_next = (sva + NBPDR) & ~PDRMASK;
2277 if (va_next < sva)
2278 va_next = eva;
2279
2280 pde = pmap_pdpe_to_pde(pdpe, sva);
2281 ptpaddr = *pde;
2282
2283 /*
2284 * Weed out invalid mappings.
2285 */
2286 if (ptpaddr == 0)
2287 continue;
2288
2289 /*
2290 * Check for large page.
2291 */
2292 if ((ptpaddr & PG_PS) != 0) {
2293 /* JG FreeBSD has more complex treatment here */
2294 pmap_inval_add(&info, pmap, -1);
2295 *pde = 0;
2296 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2297 continue;
2298 }
2299
2300 /*
2301 * Limit our scan to either the end of the va represented
2302 * by the current page table page, or to the end of the
2303 * range being removed.
2304 */
2305 if (va_next > eva)
2306 va_next = eva;
2307
2308 /*
2309 * NOTE: pmap_remove_pte() can block.
2310 */
2311 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2312 sva += PAGE_SIZE) {
2313 if (*pte == 0)
2314 continue;
2315 if (pmap_remove_pte(pmap, pte, sva, &info))
2316 break;
2317 }
2318 }
2319 pmap_inval_flush(&info);
2320}
2321
2322/*
2323 * pmap_remove_all:
2324 *
2325 * Removes this physical page from all physical maps in which it resides.
2326 * Reflects back modify bits to the pager.
2327 *
2328 * This routine may not be called from an interrupt.
2329 */
2330
2331static void
2332pmap_remove_all(vm_page_t m)
2333READY1
2334{
2335 struct pmap_inval_info info;
2336 pt_entry_t *pte, tpte;
2337 pv_entry_t pv;
2338
2339 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2340 return;
2341
2342 pmap_inval_init(&info);
2343 crit_enter();
2344 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2345 KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
2346 --pv->pv_pmap->pm_stats.resident_count;
2347
2348 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
2349 pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
2350 tpte = pte_load_clear(pte);
2351
2352 if (tpte & PG_W)
2353 pv->pv_pmap->pm_stats.wired_count--;
2354
2355 if (tpte & PG_A)
2356 vm_page_flag_set(m, PG_REFERENCED);
2357
2358 /*
2359 * Update the vm_page_t clean and reference bits.
2360 */
2361 if (tpte & PG_M) {
2362#if defined(PMAP_DIAGNOSTIC)
2363 if (pmap_nw_modified(tpte)) {
2364 kprintf(
2365 "pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
2366 pv->pv_va, tpte);
2367 }
2368#endif
2369 if (pmap_track_modified(pv->pv_va))
2370 vm_page_dirty(m);
2371 }
2372 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2373 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2374 ++pv->pv_pmap->pm_generation;
2375 m->md.pv_list_count--;
2376 KKASSERT(m->md.pv_list_count >= 0);
2377 if (TAILQ_EMPTY(&m->md.pv_list))
2378 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
2379 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
2380 free_pv_entry(pv);
2381 }
2382 crit_exit();
2383 KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
2384 pmap_inval_flush(&info);
2385}
2386
2387/*
2388 * pmap_protect:
2389 *
2390 * Set the physical protection on the specified range of this map
2391 * as requested.
2392 *
2393 * This function may not be called from an interrupt if the map is
2394 * not the kernel_pmap.
2395 */
2396void
2397pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2398READY1
2399{
2400 vm_offset_t va_next;
2401 pml4_entry_t *pml4e;
2402 pdp_entry_t *pdpe;
2403 pd_entry_t ptpaddr, *pde;
2404 pt_entry_t *pte;
2405 pmap_inval_info info;
2406
2407 /* JG review for NX */
2408
2409 if (pmap == NULL)
2410 return;
2411
2412 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2413 pmap_remove(pmap, sva, eva);
2414 return;
2415 }
2416
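	/*
	 * If write access is being retained there is nothing for this
	 * routine to do; it only ever revokes write access.
	 */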
2417 if (prot & VM_PROT_WRITE)
2418 return;
2419
2420 pmap_inval_init(&info);
2421
2422 for (; sva < eva; sva = va_next) {
2423
2424 pml4e = pmap_pml4e(pmap, sva);
2425 if ((*pml4e & PG_V) == 0) {
2426 va_next = (sva + NBPML4) & ~PML4MASK;
2427 if (va_next < sva)
2428 va_next = eva;
2429 continue;
2430 }
2431
2432 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
2433 if ((*pdpe & PG_V) == 0) {
2434 va_next = (sva + NBPDP) & ~PDPMASK;
2435 if (va_next < sva)
2436 va_next = eva;
2437 continue;
2438 }
2439
2440 va_next = (sva + NBPDR) & ~PDRMASK;
2441 if (va_next < sva)
2442 va_next = eva;
2443
2444 pde = pmap_pdpe_to_pde(pdpe, sva);
2445 ptpaddr = *pde;
2446
2447 /*
2448 * Check for large page.
2449 */
2450 if ((ptpaddr & PG_PS) != 0) {
2451 pmap_inval_add(&info, pmap, -1);
2452 *pde &= ~(PG_M|PG_RW);
2453 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2454 continue;
2455 }
2456
2457 /*
		 * Weed out invalid mappings.  Note: we assume that the page
		 * directory table is always allocated and mapped in kernel
		 * virtual memory.
2460 */
2461 if (ptpaddr == 0)
2462 continue;
2463
2464 if (va_next > eva)
2465 va_next = eva;
2466
2467 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2468 sva += PAGE_SIZE) {
2469 pt_entry_t obits, pbits;
2470 vm_page_t m;
2471
2472 /*
2473 * XXX non-optimal. Note also that there can be
2474 * no pmap_inval_flush() calls until after we modify
			 * *pte (or otherwise we have to do another
2476 * pmap_inval_add() call).
2477 */
2478 pmap_inval_add(&info, pmap, sva);
2479 obits = pbits = *pte;
2480 if ((pbits & PG_V) == 0)
2481 continue;
2482 if (pbits & PG_MANAGED) {
2483 m = NULL;
2484 if (pbits & PG_A) {
2485 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
2486 vm_page_flag_set(m, PG_REFERENCED);
2487 pbits &= ~PG_A;
2488 }
2489 if (pbits & PG_M) {
2490 if (pmap_track_modified(sva)) {
2491 if (m == NULL)
2492 KKASSERT(pbits == (pbits & PG_FRAME));
2493 m = PHYS_TO_VM_PAGE(pbits);
2494 vm_page_dirty(m);
2495 pbits &= ~PG_M;
2496 }
2497 }
2498 }
2499
2500 pbits &= ~PG_RW;
2501
2502 if (pbits != obits) {
2503 *pte = pbits;
2504 }
2505 }
2506 }
2507 pmap_inval_flush(&info);
2508}
2509
2510/*
2511 * Insert the given physical page (p) at
2512 * the specified virtual address (v) in the
2513 * target physical map with the protection requested.
2514 *
2515 * If specified, the page will be wired down, meaning
2516 * that the related pte can not be reclaimed.
2517 *
2518 * NB: This is the only routine which MAY NOT lazy-evaluate
2519 * or lose information. That is, this routine must actually
2520 * insert this page into the given map NOW.
2521 */
2522void
2523pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2524 boolean_t wired)
2525READY1
2526{
2527 vm_paddr_t pa;
2528 pd_entry_t *pde;
2529 pt_entry_t *pte;
2530 vm_paddr_t opa;
2531 pt_entry_t origpte, newpte;
2532 vm_page_t mpte;
2533 pmap_inval_info info;
2534
2535 if (pmap == NULL)
2536 return;
2537
2538 va = trunc_page(va);
2539#ifdef PMAP_DIAGNOSTIC
2540 if (va >= KvaEnd)
2541 panic("pmap_enter: toobig");
2542 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
2543 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va);
2544#endif
2545 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2546 kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
2547#ifdef DDB
2548 db_print_backtrace();
2549#endif
2550 }
2551 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2552 kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
2553#ifdef DDB
2554 db_print_backtrace();
2555#endif
2556 }
2557
2558 /*
2559 * In the case that a page table page is not
2560 * resident, we are creating it here.
2561 */
2562 if (va < VM_MAX_USER_ADDRESS)
2563 mpte = pmap_allocpte(pmap, va);
2564 else
2565 mpte = NULL;
2566
2567 pmap_inval_init(&info);
2568 pde = pmap_pde(pmap, va);
2569 if (pde != NULL && (*pde & PG_V) != 0) {
2570 if ((*pde & PG_PS) != 0)
2571 panic("pmap_enter: attempted pmap_enter on 2MB page");
2572 pte = pmap_pde_to_pte(pde, va);
2573 } else
2574 panic("pmap_enter: invalid page directory va=%#lx", va);
2575
2576 KKASSERT(pte != NULL);
2577 pa = VM_PAGE_TO_PHYS(m);
2578 KKASSERT(pa == (pa & PG_FRAME));
2579 origpte = *pte;
2580 opa = origpte & PG_FRAME;
2581
2582 /*
2583 * Mapping has not changed, must be protection or wiring change.
2584 */
2585 if (origpte && (opa == pa)) {
2586 /*
2587 * Wiring change, just update stats. We don't worry about
2588 * wiring PT pages as they remain resident as long as there
2589 * are valid mappings in them. Hence, if a user page is wired,
2590 * the PT page will be also.
2591 */
2592 if (wired && ((origpte & PG_W) == 0))
2593 pmap->pm_stats.wired_count++;
2594 else if (!wired && (origpte & PG_W))
2595 pmap->pm_stats.wired_count--;
2596
2597#if defined(PMAP_DIAGNOSTIC)
2598 if (pmap_nw_modified(origpte)) {
2599 kprintf(
2600 "pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
2601 va, origpte);
2602 }
2603#endif
2604
2605 /*
2606 * Remove the extra pte reference. Note that we cannot
2607 * optimize the RO->RW case because we have adjusted the
2608 * wiring count above and may need to adjust the wiring
2609 * bits below.
2610 */
2611 if (mpte)
2612 mpte->hold_count--;
2613
2614 /*
2615 * We might be turning off write access to the page,
2616 * so we go ahead and sense modify status.
2617 */
2618 if (origpte & PG_MANAGED) {
2619 if ((origpte & PG_M) && pmap_track_modified(va)) {
2620 vm_page_t om;
2621 om = PHYS_TO_VM_PAGE(opa);
2622 vm_page_dirty(om);
2623 }
2624 pa |= PG_MANAGED;
2625 KKASSERT(m->flags & PG_MAPPED);
2626 }
2627 goto validate;
2628 }
2629 /*
2630 * Mapping has changed, invalidate old range and fall through to
2631 * handle validating new mapping.
2632 */
2633 if (opa) {
2634 int err;
2635 err = pmap_remove_pte(pmap, pte, va, &info);
2636 if (err)
2637 panic("pmap_enter: pte vanished, va: 0x%lx", va);
2638 }
2639
2640 /*
2641 * Enter on the PV list if part of our managed memory. Note that we
2642 * raise IPL while manipulating pv_table since pmap_enter can be
2643 * called at interrupt time.
2644 */
2645 if (pmap_initialized &&
2646 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2647 pmap_insert_entry(pmap, va, mpte, m);
2648 pa |= PG_MANAGED;
2649 vm_page_flag_set(m, PG_MAPPED);
2650 }
2651
2652 /*
2653 * Increment counters
2654 */
2655 ++pmap->pm_stats.resident_count;
2656 if (wired)
2657 pmap->pm_stats.wired_count++;
2658
2659validate:
2660 /*
2661 * Now validate mapping with desired protection/wiring.
2662 */
2663 newpte = (pt_entry_t) (pa | pte_prot(pmap, prot) | PG_V);
2664
2665 if (wired)
2666 newpte |= PG_W;
2667 if (va < VM_MAX_USER_ADDRESS)
2668 newpte |= PG_U;
2669 if (pmap == &kernel_pmap)
2670 newpte |= pgeflag;
2671
2672 /*
2673 * if the mapping or permission bits are different, we need
2674 * to update the pte.
2675 */
2676 if ((origpte & ~(PG_M|PG_A)) != newpte) {
2677 pmap_inval_add(&info, pmap, va);
2678 *pte = newpte | PG_A;
2679 if (newpte & PG_RW)
2680 vm_page_flag_set(m, PG_WRITEABLE);
2681 }
2682 KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
2683 pmap_inval_flush(&info);
2684}
2685
2686/*
2687 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
2688 * This code also assumes that the pmap has no pre-existing entry for this
2689 * VA.
2690 *
2691 * This code currently may only be used on user pmaps, not kernel_pmap.
2692 */
2693static void
2694pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
2695READY1
2696{
2697 pt_entry_t *pte;
2698 vm_paddr_t pa;
2699 vm_page_t mpte;
2700 vm_pindex_t ptepindex;
2701 pd_entry_t *ptepa;
2702 pmap_inval_info info;
2703
2704 pmap_inval_init(&info);
2705
2706 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2707 kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
2708#ifdef DDB
2709 db_print_backtrace();
2710#endif
2711 }
2712 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2713 kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
2714#ifdef DDB
2715 db_print_backtrace();
2716#endif
2717 }
2718
2719 KKASSERT(va < UPT_MIN_ADDRESS); /* assert used on user pmaps only */
2720
2721 /*
2722 * Calculate the page table page (mpte), allocating it if necessary.
2723 *
2724 * A held page table page (mpte), or NULL, is passed onto the
2725 * section following.
2726 */
2727 if (va < VM_MAX_USER_ADDRESS) {
2728 /*
2729 * Calculate pagetable page index
2730 */
2731 ptepindex = pmap_pde_pindex(va);
2732
2733 do {
2734 /*
2735 * Get the page directory entry
2736 */
2737 ptepa = pmap_pde(pmap, va);
2738
2739 /*
2740 * If the page table page is mapped, we just increment
2741 * the hold count, and activate it.
2742 */
2743 if (ptepa && (*ptepa & PG_V) != 0) {
2744 if (*ptepa & PG_PS)
2745 panic("pmap_enter_quick: unexpected mapping into 2MB page");
2746// if (pmap->pm_ptphint &&
2747// (pmap->pm_ptphint->pindex == ptepindex)) {
2748// mpte = pmap->pm_ptphint;
2749// } else {
2750 mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
2751 pmap->pm_ptphint = mpte;
2752// }
2753 if (mpte)
2754 mpte->hold_count++;
2755 } else {
2756 mpte = _pmap_allocpte(pmap, ptepindex);
2757 }
2758 } while (mpte == NULL);
2759 } else {
2760 mpte = NULL;
2761 /* this code path is not yet used */
2762 }
2763
2764 /*
2765 * With a valid (and held) page directory page, we can just use
2766 * vtopte() to get to the pte. If the pte is already present
2767 * we do not disturb it.
2768 */
2769 pte = vtopte(va);
2770 if (*pte & PG_V) {
2771 if (mpte)
2772 pmap_unwire_pte_hold(pmap, va, mpte, &info);
2773 pa = VM_PAGE_TO_PHYS(m);
2774 KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
2775 return;
2776 }
2777
2778 /*
2779 * Enter on the PV list if part of our managed memory
2780 */
2781 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2782 pmap_insert_entry(pmap, va, mpte, m);
2783 vm_page_flag_set(m, PG_MAPPED);
2784 }
2785
2786 /*
2787 * Increment counters
2788 */
2789 ++pmap->pm_stats.resident_count;
2790
2791 pa = VM_PAGE_TO_PHYS(m);
2792
2793 /*
2794 * Now validate mapping with RO protection
2795 */
2796 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2797 *pte = pa | PG_V | PG_U;
2798 else
2799 *pte = pa | PG_V | PG_U | PG_MANAGED;
/* pmap_inval_add(&info, pmap, va); not needed, the transition is invalid->valid */
2801 pmap_inval_flush(&info);
2802}
2803
2804/*
2805 * Make a temporary mapping for a physical address. This is only intended
2806 * to be used for panic dumps.
2807 */
2808/* JG Needed on amd64? */
2809void *
2810pmap_kenter_temporary(vm_paddr_t pa, int i)
2811READY2
2812{
2813 pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
2814 return ((void *)crashdumpmap);
2815}
2816
2817#define MAX_INIT_PT (96)
2818
2819/*
2820 * This routine preloads the ptes for a given object into the specified pmap.
2821 * This eliminates the blast of soft faults on process startup and
2822 * immediately after an mmap.
2823 */
2824static int pmap_object_init_pt_callback(vm_page_t p, void *data);
2825
2826void
2827pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
2828 vm_object_t object, vm_pindex_t pindex,
2829 vm_size_t size, int limit)
2830READY1
2831{
2832 struct rb_vm_page_scan_info info;
2833 struct lwp *lp;
2834 vm_size_t psize;
2835
2836 /*
2837 * We can't preinit if read access isn't set or there is no pmap
2838 * or object.
2839 */
2840 if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
2841 return;
2842
2843 /*
2844 * We can't preinit if the pmap is not the current pmap
2845 */
2846 lp = curthread->td_lwp;
2847 if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
2848 return;
2849
2850 psize = amd64_btop(size);
2851
2852 if ((object->type != OBJT_VNODE) ||
2853 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
2854 (object->resident_page_count > MAX_INIT_PT))) {
2855 return;
2856 }
2857
2858 if (psize + pindex > object->size) {
2859 if (object->size < pindex)
2860 return;
2861 psize = object->size - pindex;
2862 }
2863
2864 if (psize == 0)
2865 return;
2866
2867 /*
2868 * Use a red-black scan to traverse the requested range and load
2869 * any valid pages found into the pmap.
2870 *
2871 * We cannot safely scan the object's memq unless we are in a
2872 * critical section since interrupts can remove pages from objects.
2873 */
2874 info.start_pindex = pindex;
2875 info.end_pindex = pindex + psize - 1;
2876 info.limit = limit;
2877 info.mpte = NULL;
2878 info.addr = addr;
2879 info.pmap = pmap;
2880
2881 crit_enter();
2882 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2883 pmap_object_init_pt_callback, &info);
2884 crit_exit();
2885}
2886
2887static
2888int
2889pmap_object_init_pt_callback(vm_page_t p, void *data)
2890READY1
2891{
2892 struct rb_vm_page_scan_info *info = data;
2893 vm_pindex_t rel_index;
2894 /*
	 * Don't allow an madvise to blow away our really
	 * free pages by allocating pv entries.
2897 */
2898 if ((info->limit & MAP_PREFAULT_MADVISE) &&
2899 vmstats.v_free_count < vmstats.v_free_reserved) {
2900 return(-1);
2901 }
2902 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2903 (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2904 if ((p->queue - p->pc) == PQ_CACHE)
2905 vm_page_deactivate(p);
2906 vm_page_busy(p);
2907 rel_index = p->pindex - info->start_pindex;
2908 pmap_enter_quick(info->pmap,
2909 info->addr + amd64_ptob(rel_index), p);
2910 vm_page_wakeup(p);
2911 }
2912 return(0);
2913}
2914
2915/*
2916 * pmap_prefault provides a quick way of clustering pagefaults into a
 * process's address space.  It is a "cousin" of pmap_object_init_pt,
2918 * except it runs at page fault time instead of mmap time.
2919 */
2920#define PFBAK 4
2921#define PFFOR 4
2922#define PAGEORDER_SIZE (PFBAK+PFFOR)
2923
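/*
 * Page offsets to try around the faulting address, nearest first,
 * alternating behind and ahead of the fault.
 */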
2924static int pmap_prefault_pageorder[] = {
2925 -PAGE_SIZE, PAGE_SIZE,
2926 -2 * PAGE_SIZE, 2 * PAGE_SIZE,
2927 -3 * PAGE_SIZE, 3 * PAGE_SIZE,
2928 -4 * PAGE_SIZE, 4 * PAGE_SIZE
2929};
2930
2931void
2932pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
2933READY0
2934{
2935 int i;
2936 vm_offset_t starta;
2937 vm_offset_t addr;
2938 vm_pindex_t pindex;
2939 vm_page_t m;
2940 vm_object_t object;
2941 struct lwp *lp;
2942
2943 /*
2944 * We do not currently prefault mappings that use virtual page
2945 * tables. We do not prefault foreign pmaps.
2946 */
2947 if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2948 return;
2949 lp = curthread->td_lwp;
2950 if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2951 return;
2952
2953 object = entry->object.vm_object;
2954
2955 starta = addra - PFBAK * PAGE_SIZE;
2956 if (starta < entry->start)
2957 starta = entry->start;
2958 else if (starta > addra)
2959 starta = 0;
2960
2961 /*
	 * Critical section protection is required to maintain the
	 * page/object association; interrupts can free pages and remove
	 * them from their objects.
2965 */
2966 crit_enter();
2967 for (i = 0; i < PAGEORDER_SIZE; i++) {
2968 vm_object_t lobject;
2969 pt_entry_t *pte;
2970
2971 addr = addra + pmap_prefault_pageorder[i];
2972 if (addr > addra + (PFFOR * PAGE_SIZE))
2973 addr = 0;
2974
2975 if (addr < starta || addr >= entry->end)
2976 continue;
2977
2978 if ((*pmap_pde(pmap, addr)) == 0)
2979 continue;
2980
2981 pte = vtopte(addr);
2982 if (*pte)
2983 continue;
2984
2985 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2986 lobject = object;
2987
2988 for (m = vm_page_lookup(lobject, pindex);
2989 (!m && (lobject->type == OBJT_DEFAULT) &&
2990 (lobject->backing_object));
2991 lobject = lobject->backing_object
2992 ) {
2993 if (lobject->backing_object_offset & PAGE_MASK)
2994 break;
2995 pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
2996 m = vm_page_lookup(lobject->backing_object, pindex);
2997 }
2998
2999 /*
		 * Give up when a page is not in memory.
3001 */
3002 if (m == NULL)
3003 break;
3004
3005 if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
3006 (m->busy == 0) &&
3007 (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
3008
3009 if ((m->queue - m->pc) == PQ_CACHE) {
3010 vm_page_deactivate(m);
3011 }
3012 vm_page_busy(m);
3013 pmap_enter_quick(pmap, addr, m);
3014 vm_page_wakeup(m);
3015 }
3016 }
3017 crit_exit();
3018}
3019
3020/*
3021 * Routine: pmap_change_wiring
3022 * Function: Change the wiring attribute for a map/virtual-address
3023 * pair.
3024 * In/out conditions:
3025 * The mapping must already exist in the pmap.
3026 */
3027void
3028pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
3029READY0
3030{
3031 pt_entry_t *pte;
3032
3033 if (pmap == NULL)
3034 return;
3035
3036 pte = pmap_pte(pmap, va);
3037
3038 if (wired && !pmap_pte_w(pte))
3039 pmap->pm_stats.wired_count++;
3040 else if (!wired && pmap_pte_w(pte))
3041 pmap->pm_stats.wired_count--;
3042
3043 /*
3044 * Wiring is not a hardware characteristic so there is no need to
3045 * invalidate TLB. However, in an SMP environment we must use
3046 * a locked bus cycle to update the pte (if we are not using
3047 * the pmap_inval_*() API that is)... it's ok to do this for simple
3048 * wiring changes.
3049 */
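	/*
	 * XXX pt_entry_t is 64 bits on amd64; the 32-bit int atomics below
	 * assume PG_W lives in the low word of the pte.
	 */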
3050#ifdef SMP
3051 if (wired)
3052 atomic_set_int(pte, PG_W);
3053 else
3054 atomic_clear_int(pte, PG_W);
3055#else
3056 if (wired)
3057 atomic_set_int_nonlocked(pte, PG_W);
3058 else
3059 atomic_clear_int_nonlocked(pte, PG_W);
3060#endif
3061}
3062
3063
3064
3065/*
3066 * Copy the range specified by src_addr/len
3067 * from the source map to the range dst_addr/len
3068 * in the destination map.
3069 *
3070 * This routine is only advisory and need not do anything.
3071 */
3072void
3073pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
3074 vm_size_t len, vm_offset_t src_addr)
3075READY0
3076{
3077 pmap_inval_info info;
3078 vm_offset_t addr;
3079 vm_offset_t end_addr = src_addr + len;
3080 vm_offset_t pdnxt;
3081 pd_entry_t src_frame, dst_frame;
3082 vm_page_t m;
3083
3084 if (dst_addr != src_addr)
3085 return;
3086 /*
	 * XXX BUGGY.  Among other things srcmpte is assumed to remain
3088 * valid through blocking calls, and that's just not going to
3089 * be the case.
3090 *
3091 * FIXME!
3092 */
3093 return;
3094
3095#if JGPMAP32
3096 src_frame = src_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
3097 if (src_frame != (PTDpde & PG_FRAME)) {
3098 return;
3099 }
3100
3101 dst_frame = dst_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
3102 if (dst_frame != (APTDpde & PG_FRAME)) {
3103 APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
3104 /* The page directory is not shared between CPUs */
3105 cpu_invltlb();
3106 }
3107#endif
3108 pmap_inval_init(&info);
3109 pmap_inval_add(&info, dst_pmap, -1);
3110 pmap_inval_add(&info, src_pmap, -1);
3111
3112 /*
	 * Critical section protection is required to maintain the page/object
	 * association; interrupts can free pages and remove them from
	 * their objects.
3116 */
3117 crit_enter();
3118 for (addr = src_addr; addr < end_addr; addr = pdnxt) {
3119 pt_entry_t *src_pte, *dst_pte;
3120 vm_page_t dstmpte, srcmpte;
3121 vm_offset_t srcptepaddr;
3122 vm_pindex_t ptepindex;
3123
3124 if (addr >= UPT_MIN_ADDRESS)
3125 panic("pmap_copy: invalid to pmap_copy page tables\n");
3126
3127 /*
3128 * Don't let optional prefaulting of pages make us go
3129 * way below the low water mark of free pages or way
3130 * above high water mark of used pv entries.
3131 */
3132 if (vmstats.v_free_count < vmstats.v_free_reserved ||
3133 pv_entry_count > pv_entry_high_water)
3134 break;
3135
3136 pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
3137 ptepindex = addr >> PDRSHIFT;
3138
3139#if JGPMAP32
3140 srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
3141#endif
3142 if (srcptepaddr == 0)
3143 continue;
3144
3145 if (srcptepaddr & PG_PS) {
3146#if JGPMAP32
3147 if (dst_pmap->pm_pdir[ptepindex] == 0) {
3148 dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
3149 dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
3150 }
3151#endif
3152 continue;
3153 }
3154
3155 srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
3156 if ((srcmpte == NULL) || (srcmpte->hold_count == 0) ||
3157 (srcmpte->flags & PG_BUSY)) {
3158 continue;
3159 }
3160
3161 if (pdnxt > end_addr)
3162 pdnxt = end_addr;
3163
3164 src_pte = vtopte(addr);
3165#if JGPMAP32
3166 dst_pte = avtopte(addr);
3167#endif
3168 while (addr < pdnxt) {
3169 pt_entry_t ptetemp;
3170
3171 ptetemp = *src_pte;
3172 /*
			 * We only virtually copy managed pages.
3174 */
3175 if ((ptetemp & PG_MANAGED) != 0) {
3176 /*
3177 * We have to check after allocpte for the
3178 * pte still being around... allocpte can
3179 * block.
3180 *
3181 * pmap_allocpte() can block. If we lose
3182 * our page directory mappings we stop.
3183 */
3184 dstmpte = pmap_allocpte(dst_pmap, addr);
3185
3186#if JGPMAP32
3187 if (src_frame != (PTDpde & PG_FRAME) ||
3188 dst_frame != (APTDpde & PG_FRAME)
3189 ) {
3190 kprintf("WARNING: pmap_copy: detected and corrected race\n");
3191 pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
3192 goto failed;
3193 } else if ((*dst_pte == 0) &&
3194 (ptetemp = *src_pte) != 0 &&
3195 (ptetemp & PG_MANAGED)) {
3196 /*
3197 * Clear the modified and
3198 * accessed (referenced) bits
3199 * during the copy.
3200 */
3201 m = PHYS_TO_VM_PAGE(ptetemp);
3202 *dst_pte = ptetemp & ~(PG_M | PG_A);
3203 ++dst_pmap->pm_stats.resident_count;
3204 pmap_insert_entry(dst_pmap, addr,
3205 dstmpte, m);
3206 KKASSERT(m->flags & PG_MAPPED);
3207 } else {
3208 kprintf("WARNING: pmap_copy: dst_pte race detected and corrected\n");
3209 pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
3210 goto failed;
3211 }
3212#endif
3213 if (dstmpte->hold_count >= srcmpte->hold_count)
3214 break;
3215 }
3216 addr += PAGE_SIZE;
3217 src_pte++;
3218 dst_pte++;
3219 }
3220 }
3221failed:
3222 crit_exit();
3223 pmap_inval_flush(&info);
3224}
3225
3226/*
3227 * pmap_zero_page:
3228 *
3229 * Zero the specified physical page.
3230 *
3231 * This function may be called from an interrupt and no locking is
3232 * required.
3233 */
3234void
3235pmap_zero_page(vm_paddr_t phys)
3236READY1
3237{
3238 vm_offset_t va = PHYS_TO_DMAP(phys);
3239
3240 pagezero((void *)va);
3241}
3242
3243/*
3244 * pmap_page_assertzero:
3245 *
3246 * Assert that a page is empty, panic if it isn't.
3247 */
3248void
3249pmap_page_assertzero(vm_paddr_t phys)
3250READY1
3251{
3252 struct mdglobaldata *gd = mdcpu;
3253 int i;
3254
3255 crit_enter();
3256 vm_offset_t virt = PHYS_TO_DMAP(phys);
3257
3258 for (i = 0; i < PAGE_SIZE; i += sizeof(int)) {
3259 if (*(int *)((char *)virt + i) != 0) {
3260 panic("pmap_page_assertzero() @ %p not zero!\n",
3261 (void *)virt);
3262 }
3263 }
3264 crit_exit();
3265}
3266
3267/*
 * pmap_zero_page_area:
3269 *
3270 * Zero part of a physical page by mapping it into memory and clearing
3271 * its contents with bzero.
3272 *
3273 * off and size may not cover an area beyond a single hardware page.
3274 */
3275void
3276pmap_zero_page_area(vm_paddr_t phys, int off, int size)
3277READY1
3278{
3279 struct mdglobaldata *gd = mdcpu;
3280
3281 crit_enter();
3282 vm_offset_t virt = PHYS_TO_DMAP(phys);
3283 bzero((char *)virt + off, size);
3284 crit_exit();
3285}
3286
3287/*
3288 * pmap_copy_page:
3289 *
3290 * Copy the physical page from the source PA to the target PA.
3291 * This function may be called from an interrupt. No locking
3292 * is required.
3293 */
3294void
3295pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
3296READY1
3297{
3298 vm_offset_t src_virt, dst_virt;
3299
3300 crit_enter();
3301 src_virt = PHYS_TO_DMAP(src);
3302 dst_virt = PHYS_TO_DMAP(dst);
	bcopy((void *)src_virt, (void *)dst_virt, PAGE_SIZE);
3304 crit_exit();
3305}
3306
3307/*
3308 * pmap_copy_page_frag:
3309 *
 *	Copy 'bytes' of data from the source PA to the target PA, starting
 *	at the page offset of each address.
 *
 *	This function may be called from an interrupt.  No locking
 *	is required.
3313 */
3314void
3315pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
3316READY1
3317{
3318 vm_offset_t src_virt, dst_virt;
3319
3320 crit_enter();
3321 src_virt = PHYS_TO_DMAP(src);
3322 dst_virt = PHYS_TO_DMAP(dst);
3323 bcopy((char *)src_virt + (src & PAGE_MASK),
3324 (char *)dst_virt + (dst & PAGE_MASK),
3325 bytes);
3326 crit_exit();
3327}
3328
3329/*
3330 * Returns true if the pmap's pv is one of the first
3331 * 16 pvs linked to from this page. This count may
3332 * be changed upwards or downwards in the future; it
3333 * is only necessary that true be returned for a small
3334 * subset of pmaps for proper page aging.
3335 */
3336boolean_t
3337pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3338READY2
3339{
3340 pv_entry_t pv;
3341 int loops = 0;
3342
3343 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3344 return FALSE;
3345
3346 crit_enter();
3347
3348 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3349 if (pv->pv_pmap == pmap) {
3350 crit_exit();
3351 return TRUE;
3352 }
3353 loops++;
3354 if (loops >= 16)
3355 break;
3356 }
3357 crit_exit();
3358 return (FALSE);
3359}
3360
3361/*
 * Remove all pages from the specified address space; this aids process
 * exit speeds.  Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
3368 */
3369void
3370pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3371READY1
3372{
3373 struct lwp *lp;
3374 pt_entry_t *pte, tpte;
3375 pv_entry_t pv, npv;
3376 vm_page_t m;
3377 pmap_inval_info info;
3378 int iscurrentpmap;
3379 int save_generation;
3380
3381 lp = curthread->td_lwp;
3382 if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
3383 iscurrentpmap = 1;
3384 else
3385 iscurrentpmap = 0;
3386
3387 pmap_inval_init(&info);
3388 crit_enter();
3389 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
3390 if (pv->pv_va >= eva || pv->pv_va < sva) {
3391 npv = TAILQ_NEXT(pv, pv_plist);
3392 continue;
3393 }
3394
3395 KKASSERT(pmap == pv->pv_pmap);
3396
3397 if (iscurrentpmap)
3398 pte = vtopte(pv->pv_va);
3399 else
3400 pte = pmap_pte_quick(pmap, pv->pv_va);
3401 if (pmap->pm_active)
3402 pmap_inval_add(&info, pmap, pv->pv_va);
3403
3404 /*
3405 * We cannot remove wired pages from a process' mapping
3406 * at this time
3407 */
3408 if (*pte & PG_W) {
3409 npv = TAILQ_NEXT(pv, pv_plist);
3410 continue;
3411 }
3412 tpte = pte_load_clear(pte);
3413
3414 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
3415
3416 KASSERT(m < &vm_page_array[vm_page_array_size],
3417 ("pmap_remove_pages: bad tpte %lx", tpte));
3418
3419 KKASSERT(pmap->pm_stats.resident_count > 0);
3420 --pmap->pm_stats.resident_count;
3421
3422 /*
3423 * Update the vm_page_t clean and reference bits.
3424 */
3425 if (tpte & PG_M) {
3426 vm_page_dirty(m);
3427 }
3428
3429 npv = TAILQ_NEXT(pv, pv_plist);
3430 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
3431 save_generation = ++pmap->pm_generation;
3432
3433 m->md.pv_list_count--;
3434 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3435 if (TAILQ_EMPTY(&m->md.pv_list))
3436 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3437
3438 pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
3439 free_pv_entry(pv);
3440
3441 /*
3442 * Restart the scan if we blocked during the unuse or free
3443 * calls and other removals were made.
3444 */
3445 if (save_generation != pmap->pm_generation) {
3446 kprintf("Warning: pmap_remove_pages race-A avoided\n");
3447 pv = TAILQ_FIRST(&pmap->pm_pvlist);
3448 }
3449 }
3450 pmap_inval_flush(&info);
3451 crit_exit();
3452}
3453
3454/*
 * pmap_testbit tests bits in ptes.  Note that the testbit/clearbit
 * routines are inline, and a lot of things compile-time evaluate.
3458 */
3459static boolean_t
3460pmap_testbit(vm_page_t m, int bit)
3461READY1
3462{
3463 pv_entry_t pv;
3464 pt_entry_t *pte;
3465
3466 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3467 return FALSE;
3468
3469 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
3470 return FALSE;
3471
3472 crit_enter();
3473
3474 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3475 /*
		 * If the bit being tested is the modified or accessed bit,
		 * skip addresses in clean_map whose modified state we do
		 * not track.
3479 */
3480 if (bit & (PG_A|PG_M)) {
3481 if (!pmap_track_modified(pv->pv_va))
3482 continue;
3483 }
3484
3485#if defined(PMAP_DIAGNOSTIC)
3486 if (pv->pv_pmap == NULL) {
3487 kprintf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
3488 continue;
3489 }
3490#endif
3491 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3492 if (*pte & bit) {
3493 crit_exit();
3494 return TRUE;
3495 }
3496 }
3497 crit_exit();
3498 return (FALSE);
3499}
3500
3501/*
3502 * this routine is used to modify bits in ptes
3503 */
3504static __inline void
3505pmap_clearbit(vm_page_t m, int bit)
3506READY1
3507{
3508 struct pmap_inval_info info;
3509 pv_entry_t pv;
3510 pt_entry_t *pte;
3511 pt_entry_t pbits;
3512
3513 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3514 return;
3515
3516 pmap_inval_init(&info);
3517 crit_enter();
3518
3519 /*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 * If setting RO, do we need to clear the VAC?
3522 */
3523 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3524 /*
3525 * don't write protect pager mappings
3526 */
3527 if (bit == PG_RW) {
3528 if (!pmap_track_modified(pv->pv_va))
3529 continue;
3530 }
3531
3532#if defined(PMAP_DIAGNOSTIC)
3533 if (pv->pv_pmap == NULL) {
3534 kprintf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va);
3535 continue;
3536 }
3537#endif
3538
3539 /*
3540 * Careful here. We can use a locked bus instruction to
3541 * clear PG_A or PG_M safely but we need to synchronize
3542 * with the target cpus when we mess with PG_RW.
3543 *
3544 * We do not have to force synchronization when clearing
3545 * PG_M even for PTEs generated via virtual memory maps,
3546 * because the virtual kernel will invalidate the pmap
3547 * entry when/if it needs to resynchronize the Modify bit.
3548 */
3549 if (bit & PG_RW)
3550 pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
3551 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3552again:
3553 pbits = *pte;
3554 if (pbits & bit) {
3555 if (bit == PG_RW) {
3556 if (pbits & PG_M) {
3557 vm_page_dirty(m);
3558 atomic_clear_long(pte, PG_M|PG_RW);
3559 } else {
3560 /*
3561 * The cpu may be trying to set PG_M
					 * simultaneously with our clearing
3563 * of PG_RW.
3564 */
3565 if (!atomic_cmpset_long(pte, pbits,
3566 pbits & ~PG_RW))
3567 goto again;
3568 }
3569 } else if (bit == PG_M) {
3570 /*
3571 * We could also clear PG_RW here to force
3572 * a fault on write to redetect PG_M for
3573 * virtual kernels, but it isn't necessary
3574 * since virtual kernels invalidate the pte
3575 * when they clear the VPTE_M bit in their
3576 * virtual page tables.
3577 */
3578 atomic_clear_long(pte, PG_M);
3579 } else {
3580 atomic_clear_long(pte, bit);
3581 }
3582 }
3583 }
3584 pmap_inval_flush(&info);
3585 crit_exit();
3586}
3587
3588/*
3589 * pmap_page_protect:
3590 *
3591 * Lower the permission for all mappings to a given page.
3592 */
3593void
3594pmap_page_protect(vm_page_t m, vm_prot_t prot)
3595READY1
3596{
3597 /* JG NX support? */
3598 if ((prot & VM_PROT_WRITE) == 0) {
3599 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
3600 pmap_clearbit(m, PG_RW);
3601 vm_page_flag_clear(m, PG_WRITEABLE);
3602 } else {
3603 pmap_remove_all(m);
3604 }
3605 }
3606}
3607
3608vm_paddr_t
3609pmap_phys_address(vm_pindex_t ppn)
3610READY2
3611{
3612 return (amd64_ptob(ppn));
3613}
3614
3615/*
3616 * pmap_ts_referenced:
3617 *
3618 * Return a count of reference bits for a page, clearing those bits.
3619 * It is not necessary for every reference bit to be cleared, but it
3620 * is necessary that 0 only be returned when there are truly no
3621 * reference bits set.
3622 *
3623 * XXX: The exact number of bits to check and clear is a matter that
3624 * should be tested and standardized at some point in the future for
3625 * optimal aging of shared pages.
3626 */
3627int
3628pmap_ts_referenced(vm_page_t m)
3629READY1
3630{
3631 pv_entry_t pv, pvf, pvn;
3632 pt_entry_t *pte;
3633 int rtval = 0;
3634
3635 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3636 return (rtval);
3637
3638 crit_enter();
3639
3640 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3641
3642 pvf = pv;
3643
3644 do {
3645 pvn = TAILQ_NEXT(pv, pv_list);
3646
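			/*
			 * Rotate the pv entry to the tail of the list so
			 * repeated calls spread the reference checks over
			 * all mappings of the page.
			 */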
3647 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3648
3649 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
3650
3651 if (!pmap_track_modified(pv->pv_va))
3652 continue;
3653
3654 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3655
3656 if (pte && (*pte & PG_A)) {
3657#ifdef SMP
3658 atomic_clear_long(pte, PG_A);
3659#else
3660 atomic_clear_long_nonlocked(pte, PG_A);
3661#endif
3662 rtval++;
3663 if (rtval > 4) {
3664 break;
3665 }
3666 }
3667 } while ((pv = pvn) != NULL && pv != pvf);
3668 }
3669 crit_exit();
3670
3671 return (rtval);
3672}
3673
3674/*
3675 * pmap_is_modified:
3676 *
3677 * Return whether or not the specified physical page was modified
3678 * in any physical maps.
3679 */
3680boolean_t
3681pmap_is_modified(vm_page_t m)
3682READY2
3683{
3684 return pmap_testbit(m, PG_M);
3685}
3686
3687/*
3688 * Clear the modify bits on the specified physical page.
3689 */
3690void
3691pmap_clear_modify(vm_page_t m)
3692READY2
3693{
3694 pmap_clearbit(m, PG_M);
3695}
3696
3697/*
3698 * pmap_clear_reference:
3699 *
3700 * Clear the reference bit on the specified physical page.
3701 */
3702void
3703pmap_clear_reference(vm_page_t m)
3704READY2
3705{
3706 pmap_clearbit(m, PG_A);
3707}
3708
3709/*
3710 * Miscellaneous support routines follow
3711 */
3712
3713static void
3714i386_protection_init(void)
3715READY0
3716{
3717 int *kp, prot;
3718
3719 /* JG NX support may go here; No VM_PROT_EXECUTE ==> set NX bit */
3720 kp = protection_codes;
3721 for (prot = 0; prot < 8; prot++) {
3722 switch (prot) {
3723 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
3724 /*
3725 * Read access is also 0. There isn't any execute bit,
3726 * so just make it readable.
3727 */
3728 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
3729 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
3730 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
3731 *kp++ = 0;
3732 break;
3733 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
3734 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
3735 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
3736 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
3737 *kp++ = PG_RW;
3738 break;
3739 }
3740 }
3741}
3742
3743/*
3744 * Map a set of physical memory pages into the kernel virtual
3745 * address space. Return a pointer to where it is mapped. This
3746 * routine is intended to be used for mapping device memory,
3747 * NOT real memory.
3748 *
3749 * NOTE: we can't use pgeflag unless we invalidate the pages one at
3750 * a time.
3751 */
3752void *
3753pmap_mapdev(vm_paddr_t pa, vm_size_t size)
3754READY1
3755{
3756 vm_offset_t va, tmpva, offset;
3757 pt_entry_t *pte;
3758
3759 offset = pa & PAGE_MASK;
3760 size = roundup(offset + size, PAGE_SIZE);
3761
3762 va = kmem_alloc_nofault(&kernel_map, size);
3763 if (va == 0)
3764 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
3765
3766 pa = pa & ~PAGE_MASK;
3767 for (tmpva = va; size > 0;) {
3768 pte = vtopte(tmpva);
3769 *pte = pa | PG_RW | PG_V; /* | pgeflag; */
3770 size -= PAGE_SIZE;
3771 tmpva += PAGE_SIZE;
3772 pa += PAGE_SIZE;
3773 }
3774 cpu_invltlb();
3775 smp_invltlb();
3776
3777 return ((void *)(va + offset));
3778}
3779
3780void
3781pmap_unmapdev(vm_offset_t va, vm_size_t size)
3782READY1
3783{
3784 vm_offset_t base, offset;
3785
3786 base = va & ~PAGE_MASK;
3787 offset = va & PAGE_MASK;
3788 size = roundup(offset + size, PAGE_SIZE);
3789 pmap_qremove(va, size >> PAGE_SHIFT);
3790 kmem_free(&kernel_map, base, size);
3791}
3792
3793/*
3794 * perform the pmap work for mincore
3795 */
3796int
3797pmap_mincore(pmap_t pmap, vm_offset_t addr)
3798READY0
3799{
3800 pt_entry_t *ptep, pte;
3801 vm_page_t m;
3802 int val = 0;
3803
3804 ptep = pmap_pte(pmap, addr);
3805 if (ptep == 0) {
3806 return 0;
3807 }
3808
3809 if ((pte = *ptep) != 0) {
3810 vm_offset_t pa;
3811
3812 val = MINCORE_INCORE;
3813 if ((pte & PG_MANAGED) == 0)
3814 return val;
3815
3816 pa = pte & PG_FRAME;
3817
3818 m = PHYS_TO_VM_PAGE(pa);
3819
3820 /*
3821 * Modified by us
3822 */
3823 if (pte & PG_M)
3824 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
3825 /*
3826 * Modified by someone
3827 */
3828 else if (m->dirty || pmap_is_modified(m))
3829 val |= MINCORE_MODIFIED_OTHER;
3830 /*
3831 * Referenced by us
3832 */
3833 if (pte & PG_A)
3834 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
3835
3836 /*
3837 * Referenced by someone
3838 */
3839 else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
3840 val |= MINCORE_REFERENCED_OTHER;
3841 vm_page_flag_set(m, PG_REFERENCED);
3842 }
3843 }
3844 return val;
3845}
3846
3847/*
3848 * Replace p->p_vmspace with a new one. If adjrefs is non-zero the new
3849 * vmspace will be ref'd and the old one will be deref'd.
3850 *
3851 * The vmspace for all lwps associated with the process will be adjusted
3852 * and cr3 will be reloaded if any lwp is the current lwp.
3853 */
3854void
3855pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
3856READY2
3857{
3858 struct vmspace *oldvm;
3859 struct lwp *lp;
3860
3861 crit_enter();
3862 oldvm = p->p_vmspace;
3863 if (oldvm != newvm) {
3864 p->p_vmspace = newvm;
3865 KKASSERT(p->p_nthreads == 1);
3866 lp = RB_ROOT(&p->p_lwp_tree);
3867 pmap_setlwpvm(lp, newvm);
3868 if (adjrefs) {
3869 sysref_get(&newvm->vm_sysref);
3870 sysref_put(&oldvm->vm_sysref);
3871 }
3872 }
3873 crit_exit();
3874}
3875
3876/*
3877 * Set the vmspace for a LWP. The vmspace is almost universally set the
3878 * same as the process vmspace, but virtual kernels need to swap out contexts
3879 * on a per-lwp basis.
3880 */
3881void
3882pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
3883READY1
3884{
3885 struct vmspace *oldvm;
3886 struct pmap *pmap;
3887
3888 crit_enter();
3889 oldvm = lp->lwp_vmspace;
3890
3891 if (oldvm != newvm) {
3892 lp->lwp_vmspace = newvm;
3893 if (curthread->td_lwp == lp) {
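			/*
			 * Mark the new pmap active on this cpu and load its
			 * page table root into %cr3 before deactivating the
			 * old pmap.
			 */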
3894 pmap = vmspace_pmap(newvm);
3895#if defined(SMP)
3896 atomic_set_int(&pmap->pm_active, 1 << mycpu->gd_cpuid);
3897#else
3898 pmap->pm_active |= 1;
3899#endif
3900#if defined(SWTCH_OPTIM_STATS)
3901 tlb_flush_count++;
3902#endif
3903 curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pml4);
3904 load_cr3(curthread->td_pcb->pcb_cr3);
3905 pmap = vmspace_pmap(oldvm);
3906#if defined(SMP)
3907 atomic_clear_int(&pmap->pm_active,
3908 1 << mycpu->gd_cpuid);
3909#else
3910 pmap->pm_active &= ~1;
3911#endif
3912 }
3913 }
3914 crit_exit();
3915}
3916
3917vm_offset_t
3918pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
3919READY0
3920{
3921
3922 if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
3923 return addr;
3924 }
3925
3926 addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
3927 return addr;
3928}
3929
3930
3931#if defined(DEBUG)
3932
3933static void pads (pmap_t pm);
3934void pmap_pvdump (vm_paddr_t pa);
3935
/* print address space of pmap */
3937static void
3938pads(pmap_t pm)
3939READY0
3940{
3941 vm_offset_t va;
3942 unsigned i, j;
3943 pt_entry_t *ptep;
3944
3945 if (pm == &kernel_pmap)
3946 return;
3947 crit_enter();
3948 for (i = 0; i < NPDEPG; i++) {
3949#if JGPMAP32
3950 if (pm->pm_pdir[i]) {
3951 for (j = 0; j < NPTEPG; j++) {
3952 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
3953 if (pm == &kernel_pmap && va < KERNBASE)
3954 continue;
3955 if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
3956 continue;
3957 ptep = pmap_pte_quick(pm, va);
3958 if (pmap_pte_v(ptep))
3959 kprintf("%lx:%lx ", va, *ptep);
3960 };
3961 }
3962#endif
3963 }
3964 crit_exit();
3965
3966}
3967
3968void
3969pmap_pvdump(vm_paddr_t pa)
3970READY0
3971{
3972 pv_entry_t pv;
3973 vm_page_t m;
3974
3975 kprintf("pa %08llx", (long long)pa);
3976 m = PHYS_TO_VM_PAGE(pa);
3977 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3978#ifdef used_to_be
3979 kprintf(" -> pmap %p, va %x, flags %x",
3980 (void *)pv->pv_pmap, pv->pv_va, pv->pv_flags);
3981#endif
		kprintf(" -> pmap %p, va %lx", (void *)pv->pv_pmap, pv->pv_va);
3983 pads(pv->pv_pmap);
3984 }
3985 kprintf(" ");
3986}
3987#endif