kernel - Check PG_MARKER in pmap_object_init_pt_callback()
[dragonfly.git] sys/platform/pc32/i386/pmap.c
984263bc 1/*
4107b0c0
MD
2 * (MPSAFE)
3 *
984263bc
MD
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
44 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
45 */
46
47/*
4107b0c0 48 * Manages physical address maps.
984263bc 49 *
b12defdc 50 * In most cases we hold page table pages busy in order to manipulate them.
984263bc 51 */
5926987a
MD
52/*
53 * PMAP_DEBUG - see platform/pc32/include/pmap.h
54 */
984263bc
MD
55
56#include "opt_disable_pse.h"
57#include "opt_pmap.h"
58#include "opt_msgbuf.h"
984263bc
MD
59
60#include <sys/param.h>
61#include <sys/systm.h>
62#include <sys/kernel.h>
63#include <sys/proc.h>
64#include <sys/msgbuf.h>
65#include <sys/vmmeter.h>
66#include <sys/mman.h>
b12defdc 67#include <sys/thread.h>
984263bc
MD
68
69#include <vm/vm.h>
70#include <vm/vm_param.h>
71#include <sys/sysctl.h>
72#include <sys/lock.h>
73#include <vm/vm_kern.h>
74#include <vm/vm_page.h>
75#include <vm/vm_map.h>
76#include <vm/vm_object.h>
77#include <vm/vm_extern.h>
78#include <vm/vm_pageout.h>
79#include <vm/vm_pager.h>
80#include <vm/vm_zone.h>
81
82#include <sys/user.h>
e0e69b7d 83#include <sys/thread2.h>
e3161323 84#include <sys/sysref2.h>
b12defdc 85#include <sys/spinlock2.h>
90244566 86#include <vm/vm_page2.h>
984263bc
MD
87
88#include <machine/cputypes.h>
89#include <machine/md_var.h>
90#include <machine/specialreg.h>
984263bc 91#include <machine/smp.h>
a9295349 92#include <machine_base/apic/apicreg.h>
85100692 93#include <machine/globaldata.h>
0f7a3396
MD
94#include <machine/pmap.h>
95#include <machine/pmap_inval.h>
984263bc
MD
96
97#define PMAP_KEEP_PDIRS
98#ifndef PMAP_SHPGPERPROC
99#define PMAP_SHPGPERPROC 200
948209ce 100#define PMAP_PVLIMIT 1400000 /* i386 kvm problems */
984263bc
MD
101#endif
102
103#if defined(DIAGNOSTIC)
104#define PMAP_DIAGNOSTIC
105#endif
106
107#define MINPV 2048
108
109#if !defined(PMAP_DIAGNOSTIC)
110#define PMAP_INLINE __inline
111#else
112#define PMAP_INLINE
113#endif
114
115/*
116 * Get PDEs and PTEs for user/kernel address space
117 */
118#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
119#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
120
121#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
122#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
123#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
124#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
125#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
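/*
 * Worked example (illustrative, not part of the original source): with
 * PDRSHIFT == 22, a va of 0xbfbfe000 gives 0xbfbfe000 >> 22 == 0x2fe, so
 * pmap_pde() returns &pm_pdir[0x2fe], the directory entry covering the 4MB
 * region 0xbf800000 - 0xbfbfffff.  The pmap_pte_*() tests above simply read
 * the 32 bit pte word and check one hardware bit each (PG_V, PG_W, PG_M,
 * PG_A).
 */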
126
984263bc
MD
127/*
128 * Given a map and a machine independent protection code,
129 * convert to a vax protection code.
130 */
639a9b43
MD
131#define pte_prot(m, p) \
132 (protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
984263bc
MD
133static int protection_codes[8];
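/*
 * protection_codes[] is indexed by the low three VM_PROT_* bits (see the
 * pte_prot() macro above) and is filled in by i386_protection_init() during
 * pmap_bootstrap(); each slot supplies the hardware pte bits (e.g. PG_RW
 * for a writable protection) matching the machine independent code.
 */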
134
fbbaeba3 135struct pmap kernel_pmap;
54a764e8
MD
136static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
137
e880033d 138vm_paddr_t avail_start; /* PA of first available physical page */
6ef943a3 139vm_paddr_t avail_end; /* PA of last available physical page */
e880033d 140vm_offset_t virtual_start; /* VA of first avail page (after kernel bss) */
984263bc 141vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
791c6551
MD
142vm_offset_t virtual2_start;
143vm_offset_t virtual2_end;
c439ad8f
MD
144vm_offset_t KvaStart; /* VA start of KVA space */
145vm_offset_t KvaEnd; /* VA end of KVA space (non-inclusive) */
146vm_offset_t KvaSize; /* max size of kernel virtual address space */
984263bc
MD
147static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
148static int pgeflag; /* PG_G or-in */
149static int pseflag; /* PG_PS or-in */
150
151static vm_object_t kptobj;
152
153static int nkpt;
154vm_offset_t kernel_vm_end;
155
156/*
157 * Data for the pv entry allocation mechanism
158 */
159static vm_zone_t pvzone;
160static struct vm_zone pvzone_store;
161static struct vm_object pvzone_obj;
162static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
163static int pmap_pagedaemon_waken = 0;
164static struct pv_entry *pvinit;
165
166/*
a93980ab
MD
167 * Considering all the issues I'm having with pmap caching, if breakage
168 * continues to occur, and for debugging, I've added a sysctl that will
169 * just do an unconditional invltlb.
170 */
171static int dreadful_invltlb;
172
173SYSCTL_INT(_vm, OID_AUTO, dreadful_invltlb,
9733f757 174 CTLFLAG_RW, &dreadful_invltlb, 0, "Debugging sysctl to force invltlb on pmap operations");
a93980ab
MD
175
176/*
984263bc
MD
177 * All those kernel PT submaps that BSD is so fond of
178 */
e731d345 179pt_entry_t *CMAP1 = 0, *ptmmap;
984263bc 180caddr_t CADDR1 = 0, ptvmmap = 0;
984263bc
MD
181static pt_entry_t *msgbufmap;
182struct msgbuf *msgbufp=0;
183
184/*
185 * Crashdump maps.
186 */
187static pt_entry_t *pt_crashdumpmap;
188static caddr_t crashdumpmap;
189
984263bc 190extern pt_entry_t *SMPpt;
984263bc 191
3ae0cd58
RG
192static PMAP_INLINE void free_pv_entry (pv_entry_t pv);
193static unsigned * get_ptbase (pmap_t pmap);
194static pv_entry_t get_pv_entry (void);
195static void i386_protection_init (void);
5e8d0349 196static __inline void pmap_clearbit (vm_page_t m, int bit);
3ae0cd58
RG
197
198static void pmap_remove_all (vm_page_t m);
0f7a3396
MD
199static int pmap_remove_pte (struct pmap *pmap, unsigned *ptq,
200 vm_offset_t sva, pmap_inval_info_t info);
201static void pmap_remove_page (struct pmap *pmap,
202 vm_offset_t va, pmap_inval_info_t info);
3ae0cd58 203static int pmap_remove_entry (struct pmap *pmap, vm_page_t m,
0f7a3396 204 vm_offset_t va, pmap_inval_info_t info);
3ae0cd58
RG
205static boolean_t pmap_testbit (vm_page_t m, int bit);
206static void pmap_insert_entry (pmap_t pmap, vm_offset_t va,
207 vm_page_t mpte, vm_page_t m);
208
209static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);
210
211static int pmap_release_free_page (pmap_t pmap, vm_page_t p);
212static vm_page_t _pmap_allocpte (pmap_t pmap, unsigned ptepindex);
213static unsigned * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
214static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
0f7a3396 215static int pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
984263bc
MD
216static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
217
218static unsigned pdir4mb;
219
220/*
840de426
MD
221 * Move the kernel virtual free pointer to the next
222 * 4MB. This is used to help improve performance
223 * by using a large (4MB) page for much of the kernel
224 * (.text, .data, .bss)
225 */
4107b0c0
MD
226static
227vm_offset_t
840de426
MD
228pmap_kmem_choose(vm_offset_t addr)
229{
230 vm_offset_t newaddr = addr;
231#ifndef DISABLE_PSE
232 if (cpu_feature & CPUID_PSE) {
233 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
234 }
235#endif
236 return newaddr;
237}
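/*
 * Example (illustrative): with PSE enabled NBPDR is 4MB (0x400000), so an
 * addr of 0xc0553000 is rounded up to the next 4MB boundary, 0xc0800000.
 * Without CPUID_PSE the address is returned unchanged.
 */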
238
239/*
4107b0c0
MD
240 * This function returns a pointer to the pte entry in the pmap and has
241 * the side effect of potentially retaining a cached mapping of the pmap.
e0e69b7d 242 *
4107b0c0
MD
243 * The caller must hold vm_token and the returned value is only valid
244 * until the caller blocks or releases the token.
984263bc 245 */
4107b0c0
MD
246static
247unsigned *
840de426 248pmap_pte(pmap_t pmap, vm_offset_t va)
984263bc
MD
249{
250 unsigned *pdeaddr;
251
4107b0c0 252 ASSERT_LWKT_TOKEN_HELD(&vm_token);
984263bc
MD
253 if (pmap) {
254 pdeaddr = (unsigned *) pmap_pde(pmap, va);
255 if (*pdeaddr & PG_PS)
256 return pdeaddr;
4107b0c0 257 if (*pdeaddr)
984263bc 258 return get_ptbase(pmap) + i386_btop(va);
984263bc
MD
259 }
260 return (0);
261}
262
263/*
4107b0c0
MD
264 * pmap_pte using the kernel_pmap
265 *
266 * Used for debugging, no requirements.
267 */
268unsigned *
269pmap_kernel_pte(vm_offset_t va)
270{
271 unsigned *pdeaddr;
272
273 pdeaddr = (unsigned *) pmap_pde(&kernel_pmap, va);
274 if (*pdeaddr & PG_PS)
275 return pdeaddr;
276 if (*pdeaddr)
277 return (unsigned *)vtopte(va);
278 return(0);
279}
280
281/*
e0e69b7d
MD
282 * pmap_pte_quick:
283 *
c1692ddf
MD
284 * Super fast pmap_pte routine best used when scanning the pv lists.
285 * This eliminates many coarse-grained invltlb calls. Note that many of
286 * the pv list scans are across different pmaps and it is very wasteful
287 * to do an entire invltlb when checking a single mapping.
e0e69b7d 288 *
c1692ddf
MD
289 * Should only be called while in a critical section.
290 *
4107b0c0
MD
291 * The caller must hold vm_token and the returned value is only valid
292 * until the caller blocks or releases the token.
984263bc 293 */
4107b0c0
MD
294static
295unsigned *
840de426 296pmap_pte_quick(pmap_t pmap, vm_offset_t va)
984263bc 297{
840de426
MD
298 struct mdglobaldata *gd = mdcpu;
299 unsigned pde, newpf;
300
4107b0c0 301 ASSERT_LWKT_TOKEN_HELD(&vm_token);
840de426
MD
302 if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
303 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
304 unsigned index = i386_btop(va);
305 /* are we current address space or kernel? */
fbbaeba3 306 if ((pmap == &kernel_pmap) ||
840de426
MD
307 (frame == (((unsigned) PTDpde) & PG_FRAME))) {
308 return (unsigned *) PTmap + index;
309 }
310 newpf = pde & PG_FRAME;
4107b0c0
MD
311 if (((*(unsigned *)gd->gd_PMAP1) & PG_FRAME) != newpf) {
312 *(unsigned *)gd->gd_PMAP1 = newpf | PG_RW | PG_V;
840de426
MD
313 cpu_invlpg(gd->gd_PADDR1);
314 }
315 return gd->gd_PADDR1 + ((unsigned) index & (NPTEPG - 1));
984263bc 316 }
840de426 317 return (0);
984263bc
MD
318}
319
840de426 320
984263bc 321/*
4107b0c0 322 * Bootstrap the system enough to run with virtual memory.
984263bc 323 *
4107b0c0
MD
324 * On the i386 this is called after mapping has already been enabled
325 * and just syncs the pmap module with what has already been done.
326 * [We can't call it easily with mapping off since the kernel is not
327 * mapped with PA == VA, hence we would have to relocate every address
328 * from the linked base (virtual) address "KERNBASE" to the actual
329 * (physical) address starting relative to 0]
984263bc
MD
330 */
331void
f123d5a1 332pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
984263bc
MD
333{
334 vm_offset_t va;
335 pt_entry_t *pte;
85100692 336 struct mdglobaldata *gd;
984263bc 337 int i;
81c04d07 338 int pg;
984263bc 339
c439ad8f
MD
340 KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
341 KvaSize = (vm_offset_t)VADDR(APTDPTDI, 0) - KvaStart;
342 KvaEnd = KvaStart + KvaSize;
343
984263bc
MD
344 avail_start = firstaddr;
345
346 /*
e880033d
MD
347 * XXX The calculation of virtual_start is wrong. It's NKPT*PAGE_SIZE
348 * too large. It should instead be correctly calculated in locore.s and
984263bc
MD
349 * not based on 'first' (which is a physical address, not a virtual
350 * address, for the start of unused physical memory). The kernel
351 * page tables are NOT double mapped and thus should not be included
352 * in this calculation.
353 */
e880033d
MD
354 virtual_start = (vm_offset_t) KERNBASE + firstaddr;
355 virtual_start = pmap_kmem_choose(virtual_start);
c439ad8f 356 virtual_end = VADDR(KPTDI+NKPDE-1, NPTEPG-1);
984263bc
MD
357
358 /*
359 * Initialize protection array.
360 */
361 i386_protection_init();
362
363 /*
364 * The kernel's pmap is statically allocated so we don't have to use
365 * pmap_create, which is unlikely to work correctly at this part of
366 * the boot sequence (XXX and which no longer exists).
b12defdc
MD
367 *
368 * The kernel_pmap's pm_pteobj is used only for locking and not
369 * for mmu pages.
984263bc 370 */
fbbaeba3
MD
371 kernel_pmap.pm_pdir = (pd_entry_t *)(KERNBASE + (u_int)IdlePTD);
372 kernel_pmap.pm_count = 1;
c2fb025d 373 kernel_pmap.pm_active = (cpumask_t)-1 & ~CPUMASK_LOCK;
b12defdc 374 kernel_pmap.pm_pteobj = &kernel_object;
fbbaeba3 375 TAILQ_INIT(&kernel_pmap.pm_pvlist);
b12defdc
MD
376 TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
377 spin_init(&kernel_pmap.pm_spin);
378 lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
984263bc
MD
379 nkpt = NKPT;
380
381 /*
382 * Reserve some special page table entries/VA space for temporary
383 * mapping of pages.
384 */
385#define SYSMAP(c, p, v, n) \
386 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
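/*
 * For reference, the first use below, SYSMAP(caddr_t, CMAP1, CADDR1, 1),
 * expands to:
 *
 *	CADDR1 = (caddr_t)va; va += ((1)*PAGE_SIZE); CMAP1 = pte; pte += (1);
 *
 * i.e. each invocation reserves 'n' pages of KVA starting at 'va' and
 * remembers the matching kernel pte so the mapping can be changed later.
 */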
387
e880033d 388 va = virtual_start;
4107b0c0 389 pte = (pt_entry_t *) pmap_kernel_pte(va);
984263bc
MD
390
391 /*
392 * CMAP1/CMAP2 are used for zeroing and copying pages.
393 */
394 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
984263bc
MD
395
396 /*
397 * Crashdump maps.
398 */
399 SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
400
401 /*
e731d345
MD
402 * ptvmmap is used for reading arbitrary physical pages via
403 * /dev/mem.
404 */
405 SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
406
407 /*
984263bc
MD
408 * msgbufp is used to map the system message buffer.
409 * XXX msgbufmap is not used.
410 */
411 SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
412 atop(round_page(MSGBUF_SIZE)))
413
e880033d 414 virtual_start = va;
984263bc 415
17a9f566 416 *(int *) CMAP1 = 0;
984263bc
MD
417 for (i = 0; i < NKPT; i++)
418 PTD[i] = 0;
419
a2a5ad0d
MD
420 /*
421 * PG_G is terribly broken on SMP because we IPI invltlb's in some
422 * cases rather than invlpg. Actually, I don't even know why it
423 * works under UP, given the self-referential page table mappings.
424 */
425#ifdef SMP
426 pgeflag = 0;
427#else
428 if (cpu_feature & CPUID_PGE)
984263bc 429 pgeflag = PG_G;
a2a5ad0d 430#endif
984263bc
MD
431
432/*
433 * Initialize the 4MB page size flag
434 */
435 pseflag = 0;
436/*
437 * The 4MB page version of the initial
438 * kernel page mapping.
439 */
440 pdir4mb = 0;
441
442#if !defined(DISABLE_PSE)
443 if (cpu_feature & CPUID_PSE) {
444 unsigned ptditmp;
445 /*
446 * Note that we have enabled PSE mode
447 */
448 pseflag = PG_PS;
449 ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
450 ptditmp &= ~(NBPDR - 1);
451 ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
452 pdir4mb = ptditmp;
453
8a8d5d85
MD
454#ifndef SMP
455 /*
456 * Enable the PSE mode. If we are SMP we can't do this
457 * now because the APs will not be able to use it when
458 * they boot up.
459 */
460 load_cr4(rcr4() | CR4_PSE);
984263bc 461
8a8d5d85
MD
462 /*
463 * We can do the mapping here for the single processor
464 * case. We simply ignore the old page table page from
465 * now on.
466 */
467 /*
468 * For SMP, we still need 4K pages to bootstrap APs,
469 * PSE will be enabled as soon as all APs are up.
470 */
b5b32410 471 PTD[KPTDI] = (pd_entry_t)ptditmp;
fbbaeba3 472 kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
0f7a3396 473 cpu_invltlb();
8a8d5d85 474#endif
984263bc
MD
475 }
476#endif
984263bc 477
81c04d07
MD
478 /*
479 * We need to finish setting up the globaldata page for the BSP.
480 * locore has already populated the page table for the mdglobaldata
481 * portion.
482 */
483 pg = MDGLOBALDATA_BASEALLOC_PAGES;
85100692 484 gd = &CPU_prvspace[0].mdglobaldata;
81c04d07
MD
485 gd->gd_CMAP1 = &SMPpt[pg + 0];
486 gd->gd_CMAP2 = &SMPpt[pg + 1];
487 gd->gd_CMAP3 = &SMPpt[pg + 2];
488 gd->gd_PMAP1 = &SMPpt[pg + 3];
9388fcaa 489 gd->gd_GDMAP1 = &PTD[APTDPTDI];
85100692
MD
490 gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
491 gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
492 gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
493 gd->gd_PADDR1 = (unsigned *)CPU_prvspace[0].PPAGE1;
9388fcaa 494 gd->gd_GDADDR1= (unsigned *)VADDR(APTDPTDI, 0);
984263bc 495
0f7a3396 496 cpu_invltlb();
984263bc
MD
497}
498
499#ifdef SMP
500/*
501 * Set 4mb pdir for mp startup
502 */
503void
504pmap_set_opt(void)
505{
506 if (pseflag && (cpu_feature & CPUID_PSE)) {
507 load_cr4(rcr4() | CR4_PSE);
72740893 508 if (pdir4mb && mycpu->gd_cpuid == 0) { /* only on BSP */
fbbaeba3 509 kernel_pmap.pm_pdir[KPTDI] =
984263bc
MD
510 PTD[KPTDI] = (pd_entry_t)pdir4mb;
511 cpu_invltlb();
512 }
513 }
514}
515#endif
516
517/*
4107b0c0
MD
518 * Initialize the pmap module, called by vm_init()
519 *
520 * Called from the low level boot code only.
984263bc
MD
521 */
522void
e7252eda 523pmap_init(void)
984263bc
MD
524{
525 int i;
526 int initial_pvs;
527
528 /*
529 * object for kernel page table pages
530 */
531 kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);
532
533 /*
534 * Allocate memory for random pmap data structures. Includes the
535 * pv_head_table.
536 */
537
538 for(i = 0; i < vm_page_array_size; i++) {
539 vm_page_t m;
540
541 m = &vm_page_array[i];
542 TAILQ_INIT(&m->md.pv_list);
543 m->md.pv_list_count = 0;
544 }
545
546 /*
547 * init the pv free list
548 */
549 initial_pvs = vm_page_array_size;
550 if (initial_pvs < MINPV)
551 initial_pvs = MINPV;
552 pvzone = &pvzone_store;
948209ce
MD
553 pvinit = (void *)kmem_alloc(&kernel_map,
554 initial_pvs * sizeof (struct pv_entry));
555 zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
556 pvinit, initial_pvs);
984263bc
MD
557
558 /*
559 * Now it is safe to enable pv_table recording.
560 */
561 pmap_initialized = TRUE;
562}
563
564/*
565 * Initialize the address space (zone) for the pv_entries. Set a
566 * high water mark so that the system can recover from excessive
567 * numbers of pv entries.
4107b0c0
MD
568 *
569 * Called from the low level boot code only.
984263bc
MD
570 */
571void
f123d5a1 572pmap_init2(void)
984263bc
MD
573{
574 int shpgperproc = PMAP_SHPGPERPROC;
948209ce 575 int entry_max;
984263bc
MD
576
577 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
578 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
948209ce
MD
579
580#ifdef PMAP_PVLIMIT
581 /*
582 * Horrible hack for systems with a lot of memory running i386.
583 * The calculated pv_entry_max can wind up eating a ton of KVM
584 * so put a cap on the number of entries if the user did not
585 * change any of the values. This saves about 44MB of KVM on
586 * boxes with 3+GB of ram.
587 *
588 * On the flip side, this makes it more likely that some setups
589 * will run out of pv entries. Those sysads will have to bump
590 * the limit up with vm.pmap.pv_entries or vm.pmap.shpgperproc.
591 */
592 if (shpgperproc == PMAP_SHPGPERPROC) {
593 if (pv_entry_max > PMAP_PVLIMIT)
594 pv_entry_max = PMAP_PVLIMIT;
595 }
596#endif
984263bc
MD
597 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
598 pv_entry_high_water = 9 * (pv_entry_max / 10);
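	/*
	 * Illustrative sizing with hypothetical numbers: shpgperproc = 200,
	 * maxproc = 6000 and ~750000 managed pages give a pv_entry_max of
	 * roughly 1.95 million entries and a high water mark of about 1.75
	 * million (90% of the maximum).
	 */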
948209ce
MD
599
600 /*
601 * Subtract out pages already installed in the zone (hack)
602 */
603 entry_max = pv_entry_max - vm_page_array_size;
604 if (entry_max <= 0)
605 entry_max = 1;
606
607 zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
984263bc
MD
608}
609
610
611/***************************************************
612 * Low level helper routines.....
613 ***************************************************/
614
5926987a
MD
615#ifdef PMAP_DEBUG
616
617static void
618test_m_maps_pv(vm_page_t m, pv_entry_t pv)
619{
620 pv_entry_t spv;
621
74b9d1ec 622 crit_enter();
5926987a
MD
623#ifdef PMAP_DEBUG
624 KKASSERT(pv->pv_m == m);
625#endif
626 TAILQ_FOREACH(spv, &m->md.pv_list, pv_list) {
74b9d1ec
MD
627 if (pv == spv) {
628 crit_exit();
5926987a 629 return;
74b9d1ec 630 }
5926987a 631 }
74b9d1ec 632 crit_exit();
5926987a
MD
633 panic("test_m_maps_pv: failed m %p pv %p\n", m, pv);
634}
635
636static void
637ptbase_assert(struct pmap *pmap)
638{
639 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
640
641 /* are we current address space or kernel? */
4107b0c0 642 if (pmap == &kernel_pmap || frame == (((unsigned)PTDpde) & PG_FRAME))
5926987a 643 return;
3558dcda 644 KKASSERT(frame == (*mdcpu->gd_GDMAP1 & PG_FRAME));
5926987a
MD
645}
646
647#else
648
649#define test_m_maps_pv(m, pv)
650#define ptbase_assert(pmap)
651
652#endif
653
984263bc
MD
654#if defined(PMAP_DIAGNOSTIC)
655
656/*
657 * This code checks for non-writeable/modified pages.
658 * This should be an invalid condition.
659 */
660static int
661pmap_nw_modified(pt_entry_t ptea)
662{
663 int pte;
664
665 pte = (int) ptea;
666
667 if ((pte & (PG_M|PG_RW)) == PG_M)
668 return 1;
669 else
670 return 0;
671}
672#endif
673
674
675/*
4107b0c0
MD
676 * This routine defines the region(s) of memory that should not be tested
677 * for the modified bit.
678 *
679 * No requirements.
984263bc
MD
680 */
681static PMAP_INLINE int
682pmap_track_modified(vm_offset_t va)
683{
684 if ((va < clean_sva) || (va >= clean_eva))
685 return 1;
686 else
687 return 0;
688}
689
c1692ddf
MD
690/*
691 * Retrieve the mapped page table base for a particular pmap. Use our self
692 * mapping for the kernel_pmap or our current pmap.
693 *
694 * For foreign pmaps we use the per-cpu page table map. Since this involves
695 * installing a ptd it's actually (per-process x per-cpu). However, we
696 * still cannot depend on our mapping to survive thread switches because
697 * the process might be threaded and switching to another thread for the
698 * same process on the same cpu will allow that other thread to make its
699 * own mapping.
700 *
701 * This could be a bit confusing, but the gist is that for something like
702 * the vkernel, which uses foreign pmaps all the time, this represents a
703 * pretty good cache that avoids unnecessary invltlb()s.
4107b0c0
MD
704 *
705 * The caller must hold vm_token and the returned value is only valid
706 * until the caller blocks or releases the token.
c1692ddf 707 */
984263bc 708static unsigned *
e0e69b7d 709get_ptbase(pmap_t pmap)
984263bc
MD
710{
711 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
c1692ddf 712 struct mdglobaldata *gd = mdcpu;
984263bc 713
4107b0c0
MD
714 ASSERT_LWKT_TOKEN_HELD(&vm_token);
715
5926987a
MD
716 /*
717 * We can use PTmap if the pmap is our current address space or
718 * the kernel address space.
719 */
fbbaeba3 720 if (pmap == &kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
984263bc
MD
721 return (unsigned *) PTmap;
722 }
e0e69b7d 723
5926987a 724 /*
c1692ddf
MD
725 * Otherwise we use the per-cpu alternative page table map. Each
726 * cpu gets its own map. Because of this we cannot use this map
727 * from interrupts or threads which can preempt.
be3aecf7
MD
728 *
729 * Even if we already have the map cached we may still have to
730 * invalidate the TLB if another cpu modified a PDE in the map.
5926987a 731 */
c1692ddf
MD
732 KKASSERT(gd->mi.gd_intr_nesting_level == 0 &&
733 (gd->mi.gd_curthread->td_flags & TDF_INTTHREAD) == 0);
e0e69b7d 734
c1692ddf
MD
735 if ((*gd->gd_GDMAP1 & PG_FRAME) != frame) {
736 *gd->gd_GDMAP1 = frame | PG_RW | PG_V;
be3aecf7
MD
737 pmap->pm_cached |= gd->mi.gd_cpumask;
738 cpu_invltlb();
739 } else if ((pmap->pm_cached & gd->mi.gd_cpumask) == 0) {
740 pmap->pm_cached |= gd->mi.gd_cpumask;
984263bc 741 cpu_invltlb();
a93980ab
MD
742 } else if (dreadful_invltlb) {
743 cpu_invltlb();
984263bc 744 }
c1692ddf 745 return ((unsigned *)gd->gd_GDADDR1);
984263bc
MD
746}
747
748/*
e0e69b7d
MD
749 * pmap_extract:
750 *
4107b0c0 751 * Extract the physical page address associated with the map/VA pair.
e0e69b7d 752 *
4107b0c0 753 * The caller may hold vm_token if it desires non-blocking operation.
984263bc 754 */
6ef943a3 755vm_paddr_t
840de426 756pmap_extract(pmap_t pmap, vm_offset_t va)
984263bc
MD
757{
758 vm_offset_t rtval;
759 vm_offset_t pdirindex;
840de426 760
4107b0c0 761 lwkt_gettoken(&vm_token);
984263bc
MD
762 pdirindex = va >> PDRSHIFT;
763 if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
764 unsigned *pte;
765 if ((rtval & PG_PS) != 0) {
766 rtval &= ~(NBPDR - 1);
767 rtval |= va & (NBPDR - 1);
4107b0c0
MD
768 } else {
769 pte = get_ptbase(pmap) + i386_btop(va);
770 rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
984263bc 771 }
4107b0c0
MD
772 } else {
773 rtval = 0;
984263bc 774 }
4107b0c0
MD
775 lwkt_reltoken(&vm_token);
776 return rtval;
f6bf3af1
MD
777}
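/*
 * Example (illustrative): for a va covered by a 4MB PG_PS mapping the
 * result is the 4MB frame plus (va & (NBPDR - 1)); for a regular 4K
 * mapping it is the pte frame plus (va & PAGE_MASK).  If there is no
 * page directory entry for the va, 0 is returned.
 */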
778
984263bc
MD
779/***************************************************
780 * Low level mapping routines.....
781 ***************************************************/
782
783/*
4107b0c0
MD
784 * Map a wired VM page to a KVA, fully SMP synchronized.
785 *
786 * No requirements, non blocking.
984263bc 787 */
24712b90 788void
6ef943a3 789pmap_kenter(vm_offset_t va, vm_paddr_t pa)
984263bc 790{
840de426 791 unsigned *pte;
0f7a3396
MD
792 unsigned npte;
793 pmap_inval_info info;
984263bc 794
0f7a3396 795 pmap_inval_init(&info);
984263bc
MD
796 npte = pa | PG_RW | PG_V | pgeflag;
797 pte = (unsigned *)vtopte(va);
c2fb025d 798 pmap_inval_interlock(&info, &kernel_pmap, va);
984263bc 799 *pte = npte;
c2fb025d
MD
800 pmap_inval_deinterlock(&info, &kernel_pmap);
801 pmap_inval_done(&info);
984263bc
MD
802}
803
6d1ec6fa 804/*
4107b0c0
MD
805 * Map a wired VM page to a KVA, synchronized on current cpu only.
806 *
807 * No requirements, non blocking.
6d1ec6fa 808 */
24712b90
MD
809void
810pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
811{
812 unsigned *pte;
813 unsigned npte;
814
815 npte = pa | PG_RW | PG_V | pgeflag;
816 pte = (unsigned *)vtopte(va);
817 *pte = npte;
818 cpu_invlpg((void *)va);
819}
820
4107b0c0
MD
821/*
822 * Synchronize a previously entered VA on all cpus.
823 *
824 * No requirements, non blocking.
825 */
24712b90
MD
826void
827pmap_kenter_sync(vm_offset_t va)
828{
829 pmap_inval_info info;
830
831 pmap_inval_init(&info);
c2fb025d
MD
832 pmap_inval_interlock(&info, &kernel_pmap, va);
833 pmap_inval_deinterlock(&info, &kernel_pmap);
834 pmap_inval_done(&info);
24712b90
MD
835}
836
4107b0c0
MD
837/*
838 * Synchronize a previously entered VA on the current cpu only.
839 *
840 * No requirements, non blocking.
841 */
24712b90
MD
842void
843pmap_kenter_sync_quick(vm_offset_t va)
844{
845 cpu_invlpg((void *)va);
846}
847
984263bc 848/*
4107b0c0
MD
849 * Remove a page from the kernel pagetables, fully SMP synchronized.
850 *
851 * No requirements, non blocking.
984263bc 852 */
24712b90 853void
840de426 854pmap_kremove(vm_offset_t va)
984263bc 855{
840de426 856 unsigned *pte;
0f7a3396 857 pmap_inval_info info;
984263bc 858
0f7a3396 859 pmap_inval_init(&info);
984263bc 860 pte = (unsigned *)vtopte(va);
c2fb025d 861 pmap_inval_interlock(&info, &kernel_pmap, va);
984263bc 862 *pte = 0;
c2fb025d
MD
863 pmap_inval_deinterlock(&info, &kernel_pmap);
864 pmap_inval_done(&info);
984263bc
MD
865}
866
4107b0c0
MD
867/*
868 * Remove a page from the kernel pagetables, synchronized on current cpu only.
869 *
870 * No requirements, non blocking.
871 */
24712b90
MD
872void
873pmap_kremove_quick(vm_offset_t va)
874{
875 unsigned *pte;
876 pte = (unsigned *)vtopte(va);
877 *pte = 0;
878 cpu_invlpg((void *)va);
879}
880
984263bc 881/*
4107b0c0
MD
882 * Adjust the permissions of a page in the kernel page table,
883 * synchronized on the current cpu only.
884 *
885 * No requirements, non blocking.
9ad680a3
MD
886 */
887void
888pmap_kmodify_rw(vm_offset_t va)
889{
4107b0c0 890 atomic_set_int(vtopte(va), PG_RW);
9ad680a3
MD
891 cpu_invlpg((void *)va);
892}
893
4107b0c0
MD
894/*
895 * Adjust the permissions of a page in the kernel page table,
896 * synchronized on the current cpu only.
897 *
898 * No requirements, non blocking.
899 */
9ad680a3
MD
900void
901pmap_kmodify_nc(vm_offset_t va)
902{
4107b0c0 903 atomic_set_int(vtopte(va), PG_N);
9ad680a3
MD
904 cpu_invlpg((void *)va);
905}
906
907/*
4107b0c0 908 * Map a range of physical addresses into kernel virtual address space.
984263bc 909 *
4107b0c0 910 * No requirements, non blocking.
984263bc
MD
911 */
912vm_offset_t
8e5e6f1b 913pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
984263bc 914{
8e5e6f1b
AH
915 vm_offset_t sva, virt;
916
917 sva = virt = *virtp;
984263bc
MD
918 while (start < end) {
919 pmap_kenter(virt, start);
920 virt += PAGE_SIZE;
921 start += PAGE_SIZE;
922 }
8e5e6f1b
AH
923 *virtp = virt;
924 return (sva);
984263bc
MD
925}
926
984263bc 927/*
4107b0c0
MD
928 * Add a list of wired pages to the kva, fully SMP synchronized.
929 *
930 * No requirements, non blocking.
984263bc
MD
931 */
932void
840de426 933pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
984263bc
MD
934{
935 vm_offset_t end_va;
936
937 end_va = va + count * PAGE_SIZE;
938
939 while (va < end_va) {
940 unsigned *pte;
941
942 pte = (unsigned *)vtopte(va);
943 *pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
984263bc 944 cpu_invlpg((void *)va);
984263bc
MD
945 va += PAGE_SIZE;
946 m++;
947 }
948#ifdef SMP
0f7a3396 949 smp_invltlb(); /* XXX */
984263bc
MD
950#endif
951}
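/*
 * Typical use (illustrative sketch): map the vm_page_t array backing an
 * I/O buffer into a contiguous KVA window,
 *
 *	pmap_qenter(kva, pages, npages);
 *
 * and tear the window down again with pmap_qremove(kva, npages) when the
 * buffer is released.
 */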
952
953/*
4107b0c0 954 * Remove pages from KVA, fully SMP synchronized.
7155fc7d 955 *
4107b0c0 956 * No requirements, non blocking.
984263bc
MD
957 */
958void
840de426 959pmap_qremove(vm_offset_t va, int count)
984263bc
MD
960{
961 vm_offset_t end_va;
962
963 end_va = va + count*PAGE_SIZE;
964
965 while (va < end_va) {
966 unsigned *pte;
967
968 pte = (unsigned *)vtopte(va);
969 *pte = 0;
984263bc 970 cpu_invlpg((void *)va);
984263bc
MD
971 va += PAGE_SIZE;
972 }
973#ifdef SMP
974 smp_invltlb();
975#endif
976}
977
06ecca5a
MD
978/*
979 * This routine works like vm_page_lookup() but also blocks as long as the
980 * page is busy. This routine does not busy the page it returns.
981 *
b12defdc 982 * The caller must hold the object.
06ecca5a 983 */
984263bc 984static vm_page_t
840de426 985pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
984263bc
MD
986{
987 vm_page_t m;
06ecca5a 988
b12defdc
MD
989 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
990 m = vm_page_lookup_busy_wait(object, pindex, FALSE, "pplookp");
17cde63e 991
06ecca5a 992 return(m);
984263bc
MD
993}
994
995/*
263e4574 996 * Create a new thread and optionally associate it with a (new) process.
6ef943a3 997 * NOTE! the new thread's cpu may not equal the current cpu.
263e4574 998 */
7d0bac62
MD
999void
1000pmap_init_thread(thread_t td)
263e4574 1001{
f470d0c8 1002 /* enforce pcb placement */
f470d0c8 1003 td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
65d6ce10 1004 td->td_savefpu = &td->td_pcb->pcb_save;
7d0bac62 1005 td->td_sp = (char *)td->td_pcb - 16;
263e4574
MD
1006}
1007
1008/*
984263bc
MD
1009 * This routine directly affects the fork perf for a process.
1010 */
1011void
13d13d89 1012pmap_init_proc(struct proc *p)
984263bc 1013{
984263bc
MD
1014}
1015
984263bc
MD
1016/***************************************************
1017 * Page table page management routines.....
1018 ***************************************************/
1019
1020/*
90244566
MD
1021 * This routine unwires page table pages, removing and freeing the page
1022 * table page when the wire count drops to 0.
4107b0c0
MD
1023 *
1024 * The caller must hold vm_token.
1025 * This function can block.
984263bc
MD
1026 */
1027static int
90244566 1028_pmap_unwire_pte(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
840de426 1029{
17cde63e
MD
1030 /*
1031 * Wait until we can busy the page ourselves. We cannot have
1032 * any active flushes if we block.
1033 */
b12defdc 1034 vm_page_busy_wait(m, FALSE, "pmuwpt");
eec2b734 1035 KASSERT(m->queue == PQ_NONE,
90244566 1036 ("_pmap_unwire_pte: %p->queue != PQ_NONE", m));
984263bc 1037
90244566 1038 if (m->wire_count == 1) {
984263bc 1039 /*
be3aecf7
MD
1040 * Unmap the page table page.
1041 *
1042 * NOTE: We must clear pm_cached for all cpus, including
1043 * the current one, when clearing a page directory
1044 * entry.
984263bc 1045 */
c2fb025d 1046 pmap_inval_interlock(info, pmap, -1);
2247fe02 1047 KKASSERT(pmap->pm_pdir[m->pindex]);
984263bc 1048 pmap->pm_pdir[m->pindex] = 0;
be3aecf7 1049 pmap->pm_cached = 0;
c2fb025d 1050 pmap_inval_deinterlock(info, pmap);
eec2b734
MD
1051
1052 KKASSERT(pmap->pm_stats.resident_count > 0);
984263bc 1053 --pmap->pm_stats.resident_count;
984263bc
MD
1054
1055 if (pmap->pm_ptphint == m)
1056 pmap->pm_ptphint = NULL;
1057
1058 /*
eec2b734
MD
1059 * This was our last hold, the page had better be unwired
1060 * after we decrement wire_count.
1061 *
1062 * FUTURE NOTE: shared page directory page could result in
1063 * multiple wire counts.
984263bc 1064 */
90244566 1065 vm_page_unwire(m, 0);
17cde63e 1066 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
eec2b734
MD
1067 vm_page_flash(m);
1068 vm_page_free_zero(m);
984263bc 1069 return 1;
17cde63e 1070 } else {
90244566
MD
1071 KKASSERT(m->wire_count > 1);
1072 if (vm_page_unwire_quick(m))
1073 panic("pmap_unwire_pte: Insufficient wire_count");
b12defdc 1074 vm_page_wakeup(m);
17cde63e 1075 return 0;
984263bc 1076 }
984263bc
MD
1077}
1078
4107b0c0
MD
1079/*
1080 * The caller must hold vm_token.
1081 * This function can block.
1082 */
984263bc 1083static PMAP_INLINE int
90244566 1084pmap_unwire_pte(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
984263bc 1085{
90244566
MD
1086 KKASSERT(m->wire_count > 0);
1087 if (m->wire_count > 1) {
1088 if (vm_page_unwire_quick(m))
1089 panic("pmap_unwire_pte: Insufficient wire_count");
984263bc 1090 return 0;
eec2b734 1091 } else {
90244566 1092 return _pmap_unwire_pte(pmap, m, info);
eec2b734 1093 }
984263bc
MD
1094}
1095
1096/*
4107b0c0 1097 * After removing a (user) page table entry, this routine is used to
984263bc 1098 * conditionally free the page, and manage the hold/wire counts.
5926987a 1099 *
4107b0c0
MD
1100 * The caller must hold vm_token.
1101 * This function can block regardless.
984263bc
MD
1102 */
1103static int
0f7a3396 1104pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
4107b0c0 1105 pmap_inval_info_t info)
984263bc
MD
1106{
1107 unsigned ptepindex;
4107b0c0 1108
b12defdc
MD
1109 ASSERT_LWKT_TOKEN_HELD(vm_object_token(pmap->pm_pteobj));
1110
984263bc
MD
1111 if (va >= UPT_MIN_ADDRESS)
1112 return 0;
1113
1114 if (mpte == NULL) {
1115 ptepindex = (va >> PDRSHIFT);
1116 if (pmap->pm_ptphint &&
1117 (pmap->pm_ptphint->pindex == ptepindex)) {
1118 mpte = pmap->pm_ptphint;
1119 } else {
b12defdc 1120 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
984263bc 1121 pmap->pm_ptphint = mpte;
b12defdc 1122 vm_page_wakeup(mpte);
984263bc
MD
1123 }
1124 }
1125
90244566 1126 return pmap_unwire_pte(pmap, mpte, info);
984263bc
MD
1127}
1128
54a764e8 1129/*
fbbaeba3
MD
1130 * Initialize pmap0/vmspace0. This pmap is not added to pmap_list because
1131 * it and IdlePTD represent the template used to update all other pmaps.
1132 *
1133 * On architectures where the kernel pmap is not integrated into the user
1134 * process pmap, this pmap represents the process pmap, not the kernel pmap.
1135 * kernel_pmap should be used to directly access the kernel_pmap.
4107b0c0
MD
1136 *
1137 * No requirements.
54a764e8 1138 */
984263bc 1139void
840de426 1140pmap_pinit0(struct pmap *pmap)
984263bc
MD
1141{
1142 pmap->pm_pdir =
e4846942 1143 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
24712b90 1144 pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
984263bc
MD
1145 pmap->pm_count = 1;
1146 pmap->pm_active = 0;
be3aecf7 1147 pmap->pm_cached = 0;
984263bc
MD
1148 pmap->pm_ptphint = NULL;
1149 TAILQ_INIT(&pmap->pm_pvlist);
b12defdc
MD
1150 TAILQ_INIT(&pmap->pm_pvlist_free);
1151 spin_init(&pmap->pm_spin);
1152 lwkt_token_init(&pmap->pm_token, "pmap_tok");
984263bc
MD
1153 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1154}
1155
1156/*
1157 * Initialize a preallocated and zeroed pmap structure,
1158 * such as one in a vmspace structure.
4107b0c0
MD
1159 *
1160 * No requirements.
984263bc
MD
1161 */
1162void
840de426 1163pmap_pinit(struct pmap *pmap)
984263bc
MD
1164{
1165 vm_page_t ptdpg;
1166
1167 /*
1168 * No need to allocate page table space yet but we do need a valid
1169 * page directory table.
1170 */
b5b32410 1171 if (pmap->pm_pdir == NULL) {
984263bc 1172 pmap->pm_pdir =
e4846942 1173 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
b5b32410 1174 }
984263bc
MD
1175
1176 /*
c3834cb2 1177 * Allocate an object for the ptes
984263bc
MD
1178 */
1179 if (pmap->pm_pteobj == NULL)
c3834cb2 1180 pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
984263bc
MD
1181
1182 /*
c3834cb2
MD
1183 * Allocate the page directory page, unless we already have
1184 * one cached. If we used the cached page the wire_count will
1185 * already be set appropriately.
984263bc 1186 */
c3834cb2
MD
1187 if ((ptdpg = pmap->pm_pdirm) == NULL) {
1188 ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
d2d8515b
MD
1189 VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
1190 VM_ALLOC_ZERO);
c3834cb2 1191 pmap->pm_pdirm = ptdpg;
b12defdc
MD
1192 vm_page_flag_clear(ptdpg, PG_MAPPED);
1193 vm_page_wire(ptdpg);
d2d8515b 1194 KKASSERT(ptdpg->valid == VM_PAGE_BITS_ALL);
c3834cb2 1195 pmap_kenter((vm_offset_t)pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
b12defdc 1196 vm_page_wakeup(ptdpg);
c3834cb2 1197 }
984263bc 1198 pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
984263bc
MD
1199
1200 /* install self-referential address mapping entry */
1201 *(unsigned *) (pmap->pm_pdir + PTDPTDI) =
1202 VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
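	/*
	 * With this recursive entry installed the page directory doubles as
	 * a page table: the window at PTmap exposes the ptes of the address
	 * space and PTD/PTDpde alias the page directory itself, which is
	 * what get_ptbase() and pmap_pte_quick() above depend on.
	 */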
1203
1204 pmap->pm_count = 1;
1205 pmap->pm_active = 0;
be3aecf7 1206 pmap->pm_cached = 0;
984263bc
MD
1207 pmap->pm_ptphint = NULL;
1208 TAILQ_INIT(&pmap->pm_pvlist);
b12defdc
MD
1209 TAILQ_INIT(&pmap->pm_pvlist_free);
1210 spin_init(&pmap->pm_spin);
1211 lwkt_token_init(&pmap->pm_token, "pmap_tok");
984263bc 1212 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
eec2b734 1213 pmap->pm_stats.resident_count = 1;
984263bc
MD
1214}
1215
1216/*
c3834cb2
MD
1217 * Clean up a pmap structure so it can be physically freed. This routine
1218 * is called by the vmspace dtor function. A great deal of pmap data is
1219 * left passively mapped to improve vmspace management so we have a bit
1220 * of cleanup work to do here.
4107b0c0
MD
1221 *
1222 * No requirements.
e3161323
MD
1223 */
1224void
1225pmap_puninit(pmap_t pmap)
1226{
c3834cb2
MD
1227 vm_page_t p;
1228
e3161323 1229 KKASSERT(pmap->pm_active == 0);
c3834cb2
MD
1230 if ((p = pmap->pm_pdirm) != NULL) {
1231 KKASSERT(pmap->pm_pdir != NULL);
1232 pmap_kremove((vm_offset_t)pmap->pm_pdir);
b12defdc 1233 vm_page_busy_wait(p, FALSE, "pgpun");
90244566 1234 vm_page_unwire(p, 0);
c3834cb2
MD
1235 vm_page_free_zero(p);
1236 pmap->pm_pdirm = NULL;
1237 }
e3161323
MD
1238 if (pmap->pm_pdir) {
1239 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pdir, PAGE_SIZE);
1240 pmap->pm_pdir = NULL;
1241 }
1242 if (pmap->pm_pteobj) {
1243 vm_object_deallocate(pmap->pm_pteobj);
1244 pmap->pm_pteobj = NULL;
1245 }
1246}
1247
1248/*
984263bc
MD
1249 * Wire in kernel global address entries. To avoid a race condition
1250 * between pmap initialization and pmap_growkernel, this procedure
54a764e8
MD
1251 * adds the pmap to the master list (which growkernel scans to update),
1252 * then copies the template.
4107b0c0
MD
1253 *
1254 * No requirements.
984263bc
MD
1255 */
1256void
840de426 1257pmap_pinit2(struct pmap *pmap)
984263bc 1258{
b12defdc
MD
1259 /*
1260 * XXX copies current process, does not fill in MPPTDI
1261 */
1262 spin_lock(&pmap_spin);
54a764e8 1263 TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
984263bc 1264 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
b12defdc 1265 spin_unlock(&pmap_spin);
984263bc
MD
1266}
1267
344ad853 1268/*
eec2b734 1269 * Attempt to release and free a vm_page in a pmap. Returns 1 on success,
344ad853 1270 * 0 on failure (if the procedure had to sleep).
c3834cb2
MD
1271 *
1272 * When asked to remove the page directory page itself, we actually just
1273 * leave it cached so we do not have to incur the SMP inval overhead of
1274 * removing the kernel mapping. pmap_puninit() will take care of it.
4107b0c0
MD
1275 *
1276 * The caller must hold vm_token.
1277 * This function can block regardless.
344ad853 1278 */
984263bc 1279static int
840de426 1280pmap_release_free_page(struct pmap *pmap, vm_page_t p)
984263bc
MD
1281{
1282 unsigned *pde = (unsigned *) pmap->pm_pdir;
4107b0c0 1283
984263bc
MD
1284 /*
1285 * This code optimizes the case of freeing non-busy
1286 * page-table pages. Those pages are zero now, and
1287 * might as well be placed directly into the zero queue.
1288 */
b12defdc
MD
1289 if (vm_page_busy_try(p, FALSE)) {
1290 vm_page_sleep_busy(p, FALSE, "pmaprl");
984263bc 1291 return 0;
b12defdc 1292 }
984263bc
MD
1293
1294 /*
1295 * Remove the page table page from the processes address space.
1296 */
eec2b734 1297 KKASSERT(pmap->pm_stats.resident_count > 0);
2247fe02
MD
1298 KKASSERT(pde[p->pindex]);
1299 pde[p->pindex] = 0;
eec2b734 1300 --pmap->pm_stats.resident_count;
a93980ab 1301 pmap->pm_cached = 0;
984263bc 1302
90244566
MD
1303 if (p->wire_count != 1) {
1304 panic("pmap_release: freeing wired page table page");
984263bc 1305 }
c3834cb2
MD
1306 if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
1307 pmap->pm_ptphint = NULL;
1308
984263bc 1309 /*
c3834cb2
MD
1310 * We leave the page directory page cached, wired, and mapped in
1311 * the pmap until the dtor function (pmap_puninit()) gets called.
1312 * However, still clean it up so we can set PG_ZERO.
c1692ddf
MD
1313 *
1314 * The pmap has already been removed from the pmap_list in the
1315 * PTDPTDI case.
984263bc
MD
1316 */
1317 if (p->pindex == PTDPTDI) {
1318 bzero(pde + KPTDI, nkpt * PTESIZE);
9388fcaa 1319 bzero(pde + MPPTDI, (NPDEPG - MPPTDI) * PTESIZE);
c3834cb2
MD
1320 vm_page_flag_set(p, PG_ZERO);
1321 vm_page_wakeup(p);
1322 } else {
90244566
MD
1323 panic("pmap_release: page should already be gone %p", p);
1324 /*vm_page_flag_clear(p, PG_MAPPED); should already be clear */
1325 vm_page_unwire(p, 0);
c3834cb2 1326 vm_page_free_zero(p);
984263bc 1327 }
984263bc
MD
1328 return 1;
1329}
1330
1331/*
4107b0c0
MD
1332 * This routine is called if the page table page is not mapped correctly.
1333 *
1334 * The caller must hold vm_token.
984263bc
MD
1335 */
1336static vm_page_t
840de426 1337_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
984263bc 1338{
480c83b6 1339 vm_offset_t ptepa;
984263bc
MD
1340 vm_page_t m;
1341
1342 /*
d2d8515b
MD
1343 * Find or fabricate a new pagetable page. Setting VM_ALLOC_ZERO
1344 * will zero any new page and mark it valid.
984263bc
MD
1345 */
1346 m = vm_page_grab(pmap->pm_pteobj, ptepindex,
d2d8515b 1347 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
984263bc
MD
1348
1349 KASSERT(m->queue == PQ_NONE,
1350 ("_pmap_allocpte: %p->queue != PQ_NONE", m));
1351
eec2b734 1352 /*
90244566 1353 * Increment the wire count for the page we will be returning to
eec2b734
MD
1354 * the caller.
1355 */
90244566 1356 vm_page_wire(m);
eec2b734
MD
1357
1358 /*
1359 * It is possible that someone else got in and mapped by the page
1360 * directory page while we were blocked, if so just unbusy and
90244566 1361 * return the wired page.
eec2b734
MD
1362 */
1363 if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
1364 KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
1365 vm_page_wakeup(m);
1366 return(m);
1367 }
1368
984263bc
MD
1369 /*
1370 * Map the pagetable page into the process address space, if
1371 * it isn't already there.
be3aecf7
MD
1372 *
1373 * NOTE: For safety clear pm_cached for all cpus including the
1374 * current one when adding a PDE to the map.
984263bc 1375 */
eec2b734 1376 ++pmap->pm_stats.resident_count;
984263bc
MD
1377
1378 ptepa = VM_PAGE_TO_PHYS(m);
1379 pmap->pm_pdir[ptepindex] =
1380 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
be3aecf7 1381 pmap->pm_cached = 0;
984263bc
MD
1382
1383 /*
1384 * Set the page table hint
1385 */
1386 pmap->pm_ptphint = m;
984263bc
MD
1387 vm_page_flag_set(m, PG_MAPPED);
1388 vm_page_wakeup(m);
1389
1390 return m;
1391}
1392
4107b0c0
MD
1393/*
1394 * Allocate a page table entry for a va.
1395 *
1396 * The caller must hold vm_token.
1397 */
984263bc 1398static vm_page_t
840de426 1399pmap_allocpte(pmap_t pmap, vm_offset_t va)
984263bc
MD
1400{
1401 unsigned ptepindex;
1402 vm_offset_t ptepa;
1403 vm_page_t m;
1404
b12defdc
MD
1405 ASSERT_LWKT_TOKEN_HELD(vm_object_token(pmap->pm_pteobj));
1406
984263bc
MD
1407 /*
1408 * Calculate pagetable page index
1409 */
1410 ptepindex = va >> PDRSHIFT;
1411
1412 /*
1413 * Get the page directory entry
1414 */
1415 ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
1416
1417 /*
1418 * This supports switching from a 4MB page to a
1419 * normal 4K page.
1420 */
1421 if (ptepa & PG_PS) {
1422 pmap->pm_pdir[ptepindex] = 0;
1423 ptepa = 0;
0f7a3396 1424 smp_invltlb();
54341a3b 1425 cpu_invltlb();
984263bc
MD
1426 }
1427
1428 /*
1429 * If the page table page is mapped, we just increment the
90244566 1430 * wire count, and activate it.
984263bc
MD
1431 */
1432 if (ptepa) {
1433 /*
1434 * In order to get the page table page, try the
1435 * hint first.
1436 */
1437 if (pmap->pm_ptphint &&
1438 (pmap->pm_ptphint->pindex == ptepindex)) {
1439 m = pmap->pm_ptphint;
1440 } else {
b12defdc 1441 m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
984263bc 1442 pmap->pm_ptphint = m;
b12defdc 1443 vm_page_wakeup(m);
984263bc 1444 }
90244566 1445 vm_page_wire_quick(m);
984263bc
MD
1446 return m;
1447 }
1448 /*
1449 * Here if the pte page isn't mapped, or if it has been deallocated.
1450 */
1451 return _pmap_allocpte(pmap, ptepindex);
1452}
1453
1454
1455/***************************************************
1f804340 1456 * Pmap allocation/deallocation routines.
984263bc
MD
1457 ***************************************************/
1458
1459/*
1460 * Release any resources held by the given physical map.
1461 * Called when a pmap initialized by pmap_pinit is being released.
1462 * Should only be called if the map contains no valid mappings.
4107b0c0 1463 *
b12defdc 1464 * Caller must hold pmap->pm_token
984263bc 1465 */
1f804340
MD
1466static int pmap_release_callback(struct vm_page *p, void *data);
1467
984263bc 1468void
840de426 1469pmap_release(struct pmap *pmap)
984263bc 1470{
984263bc 1471 vm_object_t object = pmap->pm_pteobj;
1f804340 1472 struct rb_vm_page_scan_info info;
984263bc 1473
4107b0c0
MD
1474 KASSERT(pmap->pm_active == 0,
1475 ("pmap still active! %08x", pmap->pm_active));
984263bc
MD
1476#if defined(DIAGNOSTIC)
1477 if (object->ref_count != 1)
1478 panic("pmap_release: pteobj reference count != 1");
1479#endif
1480
1f804340
MD
1481 info.pmap = pmap;
1482 info.object = object;
b12defdc
MD
1483
1484 spin_lock(&pmap_spin);
54a764e8 1485 TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
b12defdc 1486 spin_unlock(&pmap_spin);
1f804340 1487
b12defdc 1488 vm_object_hold(object);
1f804340 1489 do {
1f804340
MD
1490 info.error = 0;
1491 info.mpte = NULL;
1492 info.limit = object->generation;
1493
1494 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1495 pmap_release_callback, &info);
1496 if (info.error == 0 && info.mpte) {
1497 if (!pmap_release_free_page(pmap, info.mpte))
1498 info.error = 1;
984263bc 1499 }
1f804340 1500 } while (info.error);
2f2d9e58 1501 vm_object_drop(object);
b12defdc
MD
1502
1503 pmap->pm_cached = 0;
1f804340
MD
1504}
1505
4107b0c0
MD
1506/*
1507 * The caller must hold vm_token.
1508 */
1f804340
MD
1509static int
1510pmap_release_callback(struct vm_page *p, void *data)
1511{
1512 struct rb_vm_page_scan_info *info = data;
1513
1514 if (p->pindex == PTDPTDI) {
1515 info->mpte = p;
1516 return(0);
344ad853 1517 }
1f804340
MD
1518 if (!pmap_release_free_page(info->pmap, p)) {
1519 info->error = 1;
1520 return(-1);
1521 }
1522 if (info->object->generation != info->limit) {
1523 info->error = 1;
1524 return(-1);
1525 }
1526 return(0);
984263bc 1527}
984263bc
MD
1528
1529/*
0e5797fe 1530 * Grow the number of kernel page table entries, if needed.
4107b0c0
MD
1531 *
1532 * No requirements.
984263bc
MD
1533 */
1534void
a8cf2878 1535pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
984263bc 1536{
a8cf2878 1537 vm_offset_t addr = kend;
54a764e8 1538 struct pmap *pmap;
984263bc
MD
1539 vm_offset_t ptppaddr;
1540 vm_page_t nkpg;
1541 pd_entry_t newpdir;
1542
b12defdc 1543 vm_object_hold(kptobj);
984263bc
MD
1544 if (kernel_vm_end == 0) {
1545 kernel_vm_end = KERNBASE;
1546 nkpt = 0;
1547 while (pdir_pde(PTD, kernel_vm_end)) {
4107b0c0
MD
1548 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1549 ~(PAGE_SIZE * NPTEPG - 1);
984263bc
MD
1550 nkpt++;
1551 }
1552 }
1553 addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1554 while (kernel_vm_end < addr) {
1555 if (pdir_pde(PTD, kernel_vm_end)) {
4107b0c0
MD
1556 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1557 ~(PAGE_SIZE * NPTEPG - 1);
984263bc
MD
1558 continue;
1559 }
1560
1561 /*
1562 * This index is bogus, but out of the way
1563 */
4107b0c0
MD
1564 nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_NORMAL |
1565 VM_ALLOC_SYSTEM |
1566 VM_ALLOC_INTERRUPT);
dc1fd4b3 1567 if (nkpg == NULL)
984263bc
MD
1568 panic("pmap_growkernel: no memory to grow kernel");
1569
984263bc
MD
1570 vm_page_wire(nkpg);
1571 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1572 pmap_zero_page(ptppaddr);
1573 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1574 pdir_pde(PTD, kernel_vm_end) = newpdir;
fbbaeba3 1575 *pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;
0e5797fe
MD
1576 nkpt++;
1577
1578 /*
54a764e8 1579 * This update must be interlocked with pmap_pinit2.
0e5797fe 1580 */
b12defdc 1581 spin_lock(&pmap_spin);
54a764e8
MD
1582 TAILQ_FOREACH(pmap, &pmap_list, pm_pmnode) {
1583 *pmap_pde(pmap, kernel_vm_end) = newpdir;
1584 }
b12defdc 1585 spin_unlock(&pmap_spin);
54a764e8
MD
1586 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1587 ~(PAGE_SIZE * NPTEPG - 1);
984263bc 1588 }
b12defdc 1589 vm_object_drop(kptobj);
984263bc
MD
1590}
1591
1592/*
4107b0c0
MD
1593 * Retire the given physical map from service.
1594 *
1595 * Should only be called if the map contains no valid mappings.
1596 *
1597 * No requirements.
984263bc
MD
1598 */
1599void
840de426 1600pmap_destroy(pmap_t pmap)
984263bc 1601{
984263bc
MD
1602 if (pmap == NULL)
1603 return;
1604
4107b0c0
MD
1605 lwkt_gettoken(&vm_token);
1606 if (--pmap->pm_count == 0) {
984263bc
MD
1607 pmap_release(pmap);
1608 panic("destroying a pmap is not yet implemented");
1609 }
4107b0c0 1610 lwkt_reltoken(&vm_token);
984263bc
MD
1611}
1612
1613/*
4107b0c0
MD
1614 * Add a reference to the specified pmap.
1615 *
1616 * No requirements.
984263bc
MD
1617 */
1618void
840de426 1619pmap_reference(pmap_t pmap)
984263bc 1620{
4107b0c0
MD
1621 if (pmap) {
1622 lwkt_gettoken(&vm_token);
1623 ++pmap->pm_count;
1624 lwkt_reltoken(&vm_token);
984263bc
MD
1625 }
1626}
1627
1628/***************************************************
4107b0c0 1629 * page management routines.
984263bc
MD
1630 ***************************************************/
1631
1632/*
8a8d5d85
MD
1633 * free the pv_entry back to the free list. This function may be
1634 * called from an interrupt.
4107b0c0
MD
1635 *
1636 * The caller must hold vm_token.
984263bc
MD
1637 */
1638static PMAP_INLINE void
840de426 1639free_pv_entry(pv_entry_t pv)
984263bc 1640{
5926987a
MD
1641#ifdef PMAP_DEBUG
1642 KKASSERT(pv->pv_m != NULL);
1643 pv->pv_m = NULL;
1644#endif
984263bc 1645 pv_entry_count--;
8a8d5d85 1646 zfree(pvzone, pv);
984263bc
MD
1647}
1648
1649/*
1650 * get a new pv_entry, allocating a block from the system
8a8d5d85 1651 * when needed. This function may be called from an interrupt.
4107b0c0
MD
1652 *
1653 * The caller must hold vm_token.
984263bc
MD
1654 */
1655static pv_entry_t
1656get_pv_entry(void)
1657{
1658 pv_entry_count++;
1659 if (pv_entry_high_water &&
20479584
MD
1660 (pv_entry_count > pv_entry_high_water) &&
1661 (pmap_pagedaemon_waken == 0)) {
984263bc
MD
1662 pmap_pagedaemon_waken = 1;
1663 wakeup (&vm_pages_needed);
1664 }
8a8d5d85 1665 return zalloc(pvzone);
984263bc
MD
1666}
1667
1668/*
1669 * This routine is very drastic, but can save the system
1670 * in a pinch.
4107b0c0
MD
1671 *
1672 * No requirements.
984263bc
MD
1673 */
1674void
840de426 1675pmap_collect(void)
984263bc
MD
1676{
1677 int i;
1678 vm_page_t m;
1679 static int warningdone=0;
1680
1681 if (pmap_pagedaemon_waken == 0)
1682 return;
4107b0c0 1683 lwkt_gettoken(&vm_token);
20479584 1684 pmap_pagedaemon_waken = 0;
984263bc
MD
1685
1686 if (warningdone < 5) {
948209ce
MD
1687 kprintf("pmap_collect: collecting pv entries -- "
1688 "suggest increasing PMAP_SHPGPERPROC\n");
984263bc
MD
1689 warningdone++;
1690 }
1691
b12defdc 1692 for (i = 0; i < vm_page_array_size; i++) {
984263bc 1693 m = &vm_page_array[i];
b12defdc 1694 if (m->wire_count || m->hold_count)
984263bc 1695 continue;
b12defdc
MD
1696 if (vm_page_busy_try(m, TRUE) == 0) {
1697 if (m->wire_count == 0 && m->hold_count == 0) {
1698 pmap_remove_all(m);
1699 }
1700 vm_page_wakeup(m);
4107b0c0 1701 }
984263bc 1702 }
4107b0c0 1703 lwkt_reltoken(&vm_token);
984263bc
MD
1704}
1705
1706
1707/*
1708 * If it is the first entry on the list, it is actually
1709 * in the header and we must copy the following entry up
1710 * to the header. Otherwise we must search the list for
1711 * the entry. In either case we free the now unused entry.
4107b0c0
MD
1712 *
1713 * The caller must hold vm_token.
984263bc 1714 */
984263bc 1715static int
0f7a3396 1716pmap_remove_entry(struct pmap *pmap, vm_page_t m,
4107b0c0 1717 vm_offset_t va, pmap_inval_info_t info)
984263bc
MD
1718{
1719 pv_entry_t pv;
1720 int rtval;
984263bc 1721
4107b0c0 1722 ASSERT_LWKT_TOKEN_HELD(&vm_token);
984263bc
MD
1723 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1724 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1725 if (pmap == pv->pv_pmap && va == pv->pv_va)
1726 break;
1727 }
1728 } else {
1729 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
5926987a
MD
1730#ifdef PMAP_DEBUG
1731 KKASSERT(pv->pv_pmap == pmap);
1732#endif
1733 if (va == pv->pv_va)
984263bc
MD
1734 break;
1735 }
1736 }
5926987a 1737 KKASSERT(pv);
984263bc
MD
1738
1739 rtval = 0;
5926987a
MD
1740 test_m_maps_pv(m, pv);
1741 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1742 m->md.pv_list_count--;
cef01e15
MD
1743 if (m->object)
1744 atomic_add_int(&m->object->agg_pv_list_count, -1);
5926987a
MD
1745 if (TAILQ_EMPTY(&m->md.pv_list))
1746 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1747 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1748 ++pmap->pm_generation;
b12defdc 1749 vm_object_hold(pmap->pm_pteobj);
5926987a 1750 rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
b12defdc 1751 vm_object_drop(pmap->pm_pteobj);
5926987a 1752 free_pv_entry(pv);
b12defdc 1753
984263bc
MD
1754 return rtval;
1755}
1756
1757/*
4107b0c0
MD
1758 * Create a pv entry for page at pa for (pmap, va).
1759 *
1760 * The caller must hold vm_token.
984263bc
MD
1761 */
1762static void
840de426 1763pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
984263bc 1764{
984263bc
MD
1765 pv_entry_t pv;
1766
984263bc 1767 pv = get_pv_entry();
5926987a
MD
1768#ifdef PMAP_DEBUG
1769 KKASSERT(pv->pv_m == NULL);
1770 pv->pv_m = m;
1771#endif
984263bc
MD
1772 pv->pv_va = va;
1773 pv->pv_pmap = pmap;
1774 pv->pv_ptem = mpte;
1775
1776 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1777 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
5926987a 1778 ++pmap->pm_generation;
984263bc 1779 m->md.pv_list_count++;
cef01e15
MD
1780 if (m->object)
1781 atomic_add_int(&m->object->agg_pv_list_count, 1);
984263bc
MD
1782}
1783
1784/*
5926987a
MD
1785 * pmap_remove_pte: unmap a single page and tear down its pv tracking.
1786 *
4107b0c0
MD
1787 * The caller must hold vm_token.
1788 *
1789 * WARNING! As with most other pmap functions this one can block, so
1790 * callers using temporary page table mappings must reload
1791 * them.
984263bc
MD
1792 */
1793static int
0f7a3396 1794pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
5926987a 1795 pmap_inval_info_t info)
984263bc
MD
1796{
1797 unsigned oldpte;
1798 vm_page_t m;
1799
5926987a 1800 ptbase_assert(pmap);
c2fb025d 1801 pmap_inval_interlock(info, pmap, va);
5926987a 1802 ptbase_assert(pmap);
984263bc
MD
1803 oldpte = loadandclear(ptq);
1804 if (oldpte & PG_W)
1805 pmap->pm_stats.wired_count -= 1;
c2fb025d 1806 pmap_inval_deinterlock(info, pmap);
90244566 1807 KKASSERT(oldpte & PG_V);
984263bc
MD
1808 /*
1809 * Machines that don't support invlpg also don't support
0f7a3396
MD
1810 * PG_G. XXX PG_G is disabled for SMP so don't worry about
1811 * the SMP case.
984263bc
MD
1812 */
1813 if (oldpte & PG_G)
41a01a4d 1814 cpu_invlpg((void *)va);
eec2b734
MD
1815 KKASSERT(pmap->pm_stats.resident_count > 0);
1816 --pmap->pm_stats.resident_count;
984263bc
MD
1817 if (oldpte & PG_MANAGED) {
1818 m = PHYS_TO_VM_PAGE(oldpte);
1819 if (oldpte & PG_M) {
1820#if defined(PMAP_DIAGNOSTIC)
1821 if (pmap_nw_modified((pt_entry_t) oldpte)) {
d557216f
MD
1822 kprintf("pmap_remove: modified page not "
1823 "writable: va: %p, pte: 0x%lx\n",
1824 (void *)va, (long)oldpte);
984263bc
MD
1825 }
1826#endif
1827 if (pmap_track_modified(va))
1828 vm_page_dirty(m);
1829 }
1830 if (oldpte & PG_A)
1831 vm_page_flag_set(m, PG_REFERENCED);
0f7a3396 1832 return pmap_remove_entry(pmap, m, va, info);
984263bc 1833 } else {
0f7a3396 1834 return pmap_unuse_pt(pmap, va, NULL, info);
984263bc
MD
1835 }
1836
1837 return 0;
1838}
1839
1840/*
5926987a 1841 * Remove a single page from a process address space.
e0e69b7d 1842 *
4107b0c0 1843 * The caller must hold vm_token.
984263bc
MD
1844 */
1845static void
0f7a3396 1846pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
984263bc 1847{
840de426 1848 unsigned *ptq;
984263bc
MD
1849
1850 /*
90244566 1851 * If there is no pte for this address, just skip it. Otherwise
e0e69b7d 1852 * get a local va for this pmap's page table mappings and remove the entry.
984263bc 1853 */
e0e69b7d
MD
1854 if (*pmap_pde(pmap, va) != 0) {
1855 ptq = get_ptbase(pmap) + i386_btop(va);
1856 if (*ptq) {
0f7a3396 1857 pmap_remove_pte(pmap, ptq, va, info);
5926987a 1858 /* ptq invalid */
e0e69b7d 1859 }
984263bc 1860 }
984263bc
MD
1861}
1862
1863/*
4107b0c0 1864 * Remove the given range of addresses from the specified map.
984263bc 1865 *
4107b0c0
MD
1866 * It is assumed that the start and end are properly rounded to the page
1867 * size.
e0e69b7d 1868 *
4107b0c0 1869 * No requirements.
984263bc
MD
1870 */
1871void
840de426 1872pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
984263bc 1873{
840de426 1874 unsigned *ptbase;
984263bc
MD
1875 vm_offset_t pdnxt;
1876 vm_offset_t ptpaddr;
1877 vm_offset_t sindex, eindex;
0f7a3396 1878 struct pmap_inval_info info;
984263bc
MD
1879
1880 if (pmap == NULL)
1881 return;
1882
b12defdc 1883 vm_object_hold(pmap->pm_pteobj);
4107b0c0
MD
1884 lwkt_gettoken(&vm_token);
1885 if (pmap->pm_stats.resident_count == 0) {
1886 lwkt_reltoken(&vm_token);
b12defdc 1887 vm_object_drop(pmap->pm_pteobj);
984263bc 1888 return;
4107b0c0 1889 }
984263bc 1890
0f7a3396
MD
1891 pmap_inval_init(&info);
1892
984263bc
MD
1893 /*
1894 * Special handling for removing a single page: it is a very
1895 * common operation and lets us short-circuit the full page
1896 * directory scan below.
1897 */
1898 if (((sva + PAGE_SIZE) == eva) &&
1899 (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
0f7a3396 1900 pmap_remove_page(pmap, sva, &info);
c2fb025d 1901 pmap_inval_done(&info);
4107b0c0 1902 lwkt_reltoken(&vm_token);
b12defdc 1903 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
1904 return;
1905 }
1906
984263bc
MD
1907 /*
1908 * Get a local virtual address for the mappings that are being
1909 * worked with.
1910 */
984263bc
MD
1911 sindex = i386_btop(sva);
1912 eindex = i386_btop(eva);
1913
1914 for (; sindex < eindex; sindex = pdnxt) {
1915 unsigned pdirindex;
1916
1917 /*
1918 * Calculate index for next page table.
1919 */
1920 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1921 if (pmap->pm_stats.resident_count == 0)
1922 break;
1923
1924 pdirindex = sindex / NPDEPG;
1925 if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
c2fb025d 1926 pmap_inval_interlock(&info, pmap, -1);
984263bc
MD
1927 pmap->pm_pdir[pdirindex] = 0;
1928 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
be3aecf7 1929 pmap->pm_cached = 0;
c2fb025d 1930 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
1931 continue;
1932 }
1933
1934 /*
1935 * Weed out invalid mappings. Note: we assume that the page
1936 * directory table is always allocated, and in kernel virtual.
1937 */
1938 if (ptpaddr == 0)
1939 continue;
1940
1941 /*
1942 * Limit our scan to either the end of the va represented
1943 * by the current page table page, or to the end of the
1944 * range being removed.
1945 */
1946 if (pdnxt > eindex) {
1947 pdnxt = eindex;
1948 }
1949
8790d7d8 1950 /*
5926987a
MD
1951 * NOTE: pmap_remove_pte() can block and wipe the temporary
1952 * ptbase.
8790d7d8 1953 */
0f7a3396 1954 for (; sindex != pdnxt; sindex++) {
984263bc 1955 vm_offset_t va;
8790d7d8
MD
1956
1957 ptbase = get_ptbase(pmap);
0f7a3396 1958 if (ptbase[sindex] == 0)
984263bc 1959 continue;
984263bc 1960 va = i386_ptob(sindex);
0f7a3396 1961 if (pmap_remove_pte(pmap, ptbase + sindex, va, &info))
984263bc
MD
1962 break;
1963 }
1964 }
c2fb025d 1965 pmap_inval_done(&info);
4107b0c0 1966 lwkt_reltoken(&vm_token);
b12defdc 1967 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
1968}
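/*
 * Editor's note: the following is an illustrative sketch only, not part of
 * the original file.  It shows how a hypothetical caller would satisfy the
 * page-rounding assumption documented for pmap_remove() above; the name
 * example_remove_range() and its arguments are invented for illustration.
 */
static __inline void
example_remove_range(pmap_t pmap, vm_offset_t start, vm_size_t len)
{
        vm_offset_t sva = trunc_page(start);
        vm_offset_t eva = round_page(start + len);

        /* pmap_remove() expects page-aligned start and end addresses */
        pmap_remove(pmap, sva, eva);
}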
1969
1970/*
4107b0c0
MD
1971 * Removes this physical page from all physical maps in which it resides.
1972 * Reflects back modify bits to the pager.
984263bc 1973 *
4107b0c0 1974 * No requirements.
984263bc 1975 */
984263bc 1976static void
840de426 1977pmap_remove_all(vm_page_t m)
984263bc 1978{
0f7a3396 1979 struct pmap_inval_info info;
840de426 1980 unsigned *pte, tpte;
0f7a3396 1981 pv_entry_t pv;
984263bc 1982
bee81bdd
SS
1983 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
1984 return;
984263bc 1985
0f7a3396 1986 pmap_inval_init(&info);
984263bc 1987 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
eec2b734
MD
1988 KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
1989 --pv->pv_pmap->pm_stats.resident_count;
984263bc
MD
1990
1991 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
c2fb025d 1992 pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
984263bc
MD
1993 tpte = loadandclear(pte);
1994 if (tpte & PG_W)
1995 pv->pv_pmap->pm_stats.wired_count--;
c2fb025d 1996 pmap_inval_deinterlock(&info, pv->pv_pmap);
984263bc
MD
1997 if (tpte & PG_A)
1998 vm_page_flag_set(m, PG_REFERENCED);
c2fb025d
MD
1999#ifdef PMAP_DEBUG
2000 KKASSERT(PHYS_TO_VM_PAGE(tpte) == m);
2001#endif
984263bc
MD
2002
2003 /*
2004 * Update the vm_page_t clean and reference bits.
2005 */
2006 if (tpte & PG_M) {
2007#if defined(PMAP_DIAGNOSTIC)
2008 if (pmap_nw_modified((pt_entry_t) tpte)) {
d557216f
MD
2009 kprintf("pmap_remove_all: modified page "
2010 "not writable: va: %p, pte: 0x%lx\n",
2011 (void *)pv->pv_va, (long)tpte);
984263bc
MD
2012 }
2013#endif
2014 if (pmap_track_modified(pv->pv_va))
2015 vm_page_dirty(m);
2016 }
5926987a
MD
2017#ifdef PMAP_DEBUG
2018 KKASSERT(pv->pv_m == m);
2019#endif
984263bc 2020 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
8790d7d8
MD
2021 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2022 ++pv->pv_pmap->pm_generation;
984263bc 2023 m->md.pv_list_count--;
cef01e15
MD
2024 if (m->object)
2025 atomic_add_int(&m->object->agg_pv_list_count, -1);
17cde63e
MD
2026 if (TAILQ_EMPTY(&m->md.pv_list))
2027 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
b12defdc 2028 vm_object_hold(pv->pv_pmap->pm_pteobj);
0f7a3396 2029 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
b12defdc 2030 vm_object_drop(pv->pv_pmap->pm_pteobj);
984263bc
MD
2031 free_pv_entry(pv);
2032 }
17cde63e 2033 KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
c2fb025d 2034 pmap_inval_done(&info);
984263bc
MD
2035}
2036
2037/*
4107b0c0
MD
2038 * Set the physical protection on the specified range of this map
2039 * as requested.
e0e69b7d 2040 *
4107b0c0 2041 * No requirements.
984263bc
MD
2042 */
2043void
2044pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2045{
840de426 2046 unsigned *ptbase;
984263bc
MD
2047 vm_offset_t pdnxt, ptpaddr;
2048 vm_pindex_t sindex, eindex;
0f7a3396 2049 pmap_inval_info info;
984263bc
MD
2050
2051 if (pmap == NULL)
2052 return;
2053
2054 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2055 pmap_remove(pmap, sva, eva);
2056 return;
2057 }
2058
2059 if (prot & VM_PROT_WRITE)
2060 return;
2061
4107b0c0 2062 lwkt_gettoken(&vm_token);
0f7a3396 2063 pmap_inval_init(&info);
984263bc
MD
2064
2065 ptbase = get_ptbase(pmap);
2066
2067 sindex = i386_btop(sva);
2068 eindex = i386_btop(eva);
2069
2070 for (; sindex < eindex; sindex = pdnxt) {
984263bc
MD
2071 unsigned pdirindex;
2072
2073 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
2074
2075 pdirindex = sindex / NPDEPG;
2076 if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
c2fb025d 2077 pmap_inval_interlock(&info, pmap, -1);
55f2596a 2078 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
984263bc 2079 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
c2fb025d 2080 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2081 continue;
2082 }
2083
2084 /*
2085 * Weed out invalid mappings. Note: we assume that the page
2086 * directory table is always allocated, and in kernel virtual.
2087 */
2088 if (ptpaddr == 0)
2089 continue;
2090
2091 if (pdnxt > eindex) {
2092 pdnxt = eindex;
2093 }
2094
2095 for (; sindex != pdnxt; sindex++) {
984263bc 2096 unsigned pbits;
c2fb025d 2097 unsigned cbits;
984263bc
MD
2098 vm_page_t m;
2099
17cde63e 2100 /*
d5b2d319 2101 * XXX non-optimal.
17cde63e 2102 */
c2fb025d
MD
2103 pmap_inval_interlock(&info, pmap, i386_ptob(sindex));
2104again:
984263bc 2105 pbits = ptbase[sindex];
c2fb025d 2106 cbits = pbits;
984263bc
MD
2107
2108 if (pbits & PG_MANAGED) {
2109 m = NULL;
2110 if (pbits & PG_A) {
2111 m = PHYS_TO_VM_PAGE(pbits);
2112 vm_page_flag_set(m, PG_REFERENCED);
c2fb025d 2113 cbits &= ~PG_A;
984263bc
MD
2114 }
2115 if (pbits & PG_M) {
2116 if (pmap_track_modified(i386_ptob(sindex))) {
2117 if (m == NULL)
2118 m = PHYS_TO_VM_PAGE(pbits);
2119 vm_page_dirty(m);
c2fb025d 2120 cbits &= ~PG_M;
984263bc
MD
2121 }
2122 }
2123 }
c2fb025d
MD
2124 cbits &= ~PG_RW;
2125 if (pbits != cbits &&
2126 !atomic_cmpset_int(ptbase + sindex, pbits, cbits)) {
2127 goto again;
984263bc 2128 }
c2fb025d 2129 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2130 }
2131 }
c2fb025d 2132 pmap_inval_done(&info);
4107b0c0 2133 lwkt_reltoken(&vm_token);
984263bc
MD
2134}
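/*
 * Editor's note: an illustrative sketch only, not part of the original
 * file.  It summarizes the protection cases handled by pmap_protect()
 * above; example_write_protect() is a hypothetical caller.
 */
static __inline void
example_write_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
        /* Strips write access; read/execute mappings remain intact. */
        pmap_protect(pmap, sva, eva, VM_PROT_READ);

        /*
         * Requesting VM_PROT_NONE would instead remove the mappings
         * entirely, and requesting VM_PROT_WRITE is a no-op.
         */
}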
2135
2136/*
4107b0c0
MD
2137 * Insert the given physical page (p) at the specified virtual address (v)
2138 * in the target physical map with the protection requested.
984263bc 2139 *
4107b0c0
MD
2140 * If specified, the page will be wired down, meaning that the related pte
2141 * cannot be reclaimed.
984263bc 2142 *
4107b0c0 2143 * No requirements.
984263bc
MD
2144 */
2145void
2146pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2147 boolean_t wired)
2148{
6ef943a3 2149 vm_paddr_t pa;
840de426 2150 unsigned *pte;
6ef943a3 2151 vm_paddr_t opa;
984263bc
MD
2152 vm_offset_t origpte, newpte;
2153 vm_page_t mpte;
0f7a3396 2154 pmap_inval_info info;
984263bc
MD
2155
2156 if (pmap == NULL)
2157 return;
2158
2159 va &= PG_FRAME;
2160#ifdef PMAP_DIAGNOSTIC
c439ad8f 2161 if (va >= KvaEnd)
984263bc 2162 panic("pmap_enter: toobig");
d557216f
MD
2163 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) {
2164 panic("pmap_enter: invalid to pmap_enter page "
2165 "table pages (va: %p)", (void *)va);
2166 }
984263bc 2167#endif
fbbaeba3
MD
2168 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2169 kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
7ce2998e 2170 print_backtrace(-1);
fbbaeba3
MD
2171 }
2172 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2173 kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
7ce2998e 2174 print_backtrace(-1);
fbbaeba3 2175 }
984263bc 2176
b12defdc 2177 vm_object_hold(pmap->pm_pteobj);
4107b0c0
MD
2178 lwkt_gettoken(&vm_token);
2179
984263bc
MD
2180 /*
2181 * In the case that a page table page is not
2182 * resident, we are creating it here.
2183 */
17cde63e 2184 if (va < UPT_MIN_ADDRESS)
984263bc 2185 mpte = pmap_allocpte(pmap, va);
17cde63e
MD
2186 else
2187 mpte = NULL;
984263bc 2188
b12defdc
MD
2189 if ((prot & VM_PROT_NOSYNC) == 0)
2190 pmap_inval_init(&info);
984263bc
MD
2191 pte = pmap_pte(pmap, va);
2192
2193 /*
2194 * Page Directory table entry not valid, we need a new PT page
2195 */
2196 if (pte == NULL) {
d557216f
MD
2197 panic("pmap_enter: invalid page directory pdir=0x%lx, va=%p\n",
2198 (long)pmap->pm_pdir[PTDPTDI], (void *)va);
984263bc
MD
2199 }
2200
2201 pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
2202 origpte = *(vm_offset_t *)pte;
2203 opa = origpte & PG_FRAME;
2204
2205 if (origpte & PG_PS)
2206 panic("pmap_enter: attempted pmap_enter on 4MB page");
2207
2208 /*
2209 * Mapping has not changed, must be protection or wiring change.
2210 */
2211 if (origpte && (opa == pa)) {
2212 /*
2213 * Wiring change, just update stats. We don't worry about
2214 * wiring PT pages as they remain resident as long as there
2215 * are valid mappings in them. Hence, if a user page is wired,
2216 * the PT page will be also.
2217 */
2218 if (wired && ((origpte & PG_W) == 0))
2219 pmap->pm_stats.wired_count++;
2220 else if (!wired && (origpte & PG_W))
2221 pmap->pm_stats.wired_count--;
2222
2223#if defined(PMAP_DIAGNOSTIC)
2224 if (pmap_nw_modified((pt_entry_t) origpte)) {
d557216f
MD
2225 kprintf("pmap_enter: modified page not "
2226 "writable: va: %p, pte: 0x%lx\n",
2227 (void *)va, (long )origpte);
984263bc
MD
2228 }
2229#endif
2230
2231 /*
639a9b43
MD
2232 * Remove the extra pte reference. Note that we cannot
2233 * optimize the RO->RW case because we have adjusted the
2234 * wiring count above and may need to adjust the wiring
2235 * bits below.
984263bc 2236 */
90244566
MD
2237 if (mpte) {
2238 if (vm_page_unwire_quick(mpte))
2239 panic("pmap_enter: Insufficient wire_count");
2240 }
984263bc 2241
984263bc
MD
2242 /*
2243 * We might be turning off write access to the page,
2244 * so we go ahead and sense modify status.
2245 */
2246 if (origpte & PG_MANAGED) {
2247 if ((origpte & PG_M) && pmap_track_modified(va)) {
2248 vm_page_t om;
2249 om = PHYS_TO_VM_PAGE(opa);
2250 vm_page_dirty(om);
2251 }
2252 pa |= PG_MANAGED;
17cde63e 2253 KKASSERT(m->flags & PG_MAPPED);
984263bc
MD
2254 }
2255 goto validate;
2256 }
2257 /*
2258 * Mapping has changed, invalidate old range and fall through to
2259 * handle validating new mapping.
5926987a
MD
2260 *
2261 * Since we have a ref on the page directory page pmap_pte()
2262 * will always return non-NULL.
2263 *
2264 * NOTE: pmap_remove_pte() can block and cause the temporary ptbase
2265 * to get wiped. reload the ptbase. I'm not sure if it is
2266 * also possible to race another pmap_enter() but check for
2267 * that case too.
984263bc 2268 */
5926987a 2269 while (opa) {
984263bc 2270 int err;
5926987a
MD
2271
2272 KKASSERT((origpte & PG_FRAME) ==
2273 (*(vm_offset_t *)pte & PG_FRAME));
0f7a3396 2274 err = pmap_remove_pte(pmap, pte, va, &info);
984263bc 2275 if (err)
d557216f 2276 panic("pmap_enter: pte vanished, va: %p", (void *)va);
5926987a
MD
2277 pte = pmap_pte(pmap, va);
2278 origpte = *(vm_offset_t *)pte;
2279 opa = origpte & PG_FRAME;
2280 if (opa) {
2281 kprintf("pmap_enter: Warning, raced pmap %p va %p\n",
2282 pmap, (void *)va);
2283 }
984263bc
MD
2284 }
2285
2286 /*
2287 * Enter on the PV list if part of our managed memory. Note that we
2288 * raise IPL while manipulating pv_table since pmap_enter can be
2289 * called at interrupt time.
2290 */
2291 if (pmap_initialized &&
2292 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2293 pmap_insert_entry(pmap, va, mpte, m);
5926987a 2294 ptbase_assert(pmap);
984263bc 2295 pa |= PG_MANAGED;
17cde63e 2296 vm_page_flag_set(m, PG_MAPPED);
984263bc
MD
2297 }
2298
2299 /*
2300 * Increment counters
2301 */
eec2b734 2302 ++pmap->pm_stats.resident_count;
984263bc
MD
2303 if (wired)
2304 pmap->pm_stats.wired_count++;
5926987a 2305 KKASSERT(*pte == 0);
984263bc
MD
2306
2307validate:
2308 /*
2309 * Now validate mapping with desired protection/wiring.
2310 */
5926987a 2311 ptbase_assert(pmap);
984263bc
MD
2312 newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
2313
2314 if (wired)
2315 newpte |= PG_W;
2316 if (va < UPT_MIN_ADDRESS)
2317 newpte |= PG_U;
fbbaeba3 2318 if (pmap == &kernel_pmap)
984263bc
MD
2319 newpte |= pgeflag;
2320
2321 /*
2322 * if the mapping or permission bits are different, we need
2323 * to update the pte.
2324 */
2325 if ((origpte & ~(PG_M|PG_A)) != newpte) {
b12defdc
MD
2326 if (prot & VM_PROT_NOSYNC)
2327 cpu_invlpg((void *)va);
2328 else
2329 pmap_inval_interlock(&info, pmap, va);
5926987a
MD
2330 ptbase_assert(pmap);
2331 KKASSERT(*pte == 0 ||
2332 (*pte & PG_FRAME) == (newpte & PG_FRAME));
984263bc 2333 *pte = newpte | PG_A;
b12defdc
MD
2334 if ((prot & VM_PROT_NOSYNC) == 0)
2335 pmap_inval_deinterlock(&info, pmap);
17cde63e
MD
2336 if (newpte & PG_RW)
2337 vm_page_flag_set(m, PG_WRITEABLE);
984263bc 2338 }
c695044a 2339 KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
b12defdc
MD
2340 if ((prot & VM_PROT_NOSYNC) == 0)
2341 pmap_inval_done(&info);
4107b0c0 2342 lwkt_reltoken(&vm_token);
b12defdc 2343 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2344}
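/*
 * Editor's note: hypothetical usage sketch, not part of the original file.
 * It assumes the caller already owns a valid, busied vm_page_t and a
 * page-aligned user va within the current vmspace; example_enter_wired()
 * is an invented name.
 */
static __inline void
example_enter_wired(vm_offset_t va, vm_page_t m)
{
        pmap_t pmap = vmspace_pmap(curthread->td_lwp->lwp_vmspace);

        /* wired == TRUE keeps the pte (and its page table page) resident */
        pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE, TRUE);
}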
2345
2346/*
17cde63e
MD
2347 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
2348 * This code also assumes that the pmap has no pre-existing entry for this
2349 * VA.
2350 *
2351 * This code currently may only be used on user pmaps, not kernel_pmap.
4107b0c0
MD
2352 *
2353 * No requirements.
984263bc 2354 */
1b9d3514 2355void
17cde63e 2356pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
984263bc
MD
2357{
2358 unsigned *pte;
6ef943a3 2359 vm_paddr_t pa;
17cde63e
MD
2360 vm_page_t mpte;
2361 unsigned ptepindex;
2362 vm_offset_t ptepa;
0f7a3396
MD
2363 pmap_inval_info info;
2364
b12defdc 2365 vm_object_hold(pmap->pm_pteobj);
4107b0c0 2366 lwkt_gettoken(&vm_token);
0f7a3396 2367 pmap_inval_init(&info);
984263bc 2368
fbbaeba3
MD
2369 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2370 kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
7ce2998e 2371 print_backtrace(-1);
fbbaeba3
MD
2372 }
2373 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2374 kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
7ce2998e 2375 print_backtrace(-1);
fbbaeba3
MD
2376 }
2377
17cde63e
MD
2378 KKASSERT(va < UPT_MIN_ADDRESS); /* assert used on user pmaps only */
2379
984263bc 2380 /*
17cde63e
MD
2381 * Calculate the page table page (mpte), allocating it if necessary.
2382 *
2383 * A held page table page (mpte), or NULL, is passed onto the
2384 * section following.
984263bc
MD
2385 */
2386 if (va < UPT_MIN_ADDRESS) {
984263bc
MD
2387 /*
2388 * Calculate pagetable page index
2389 */
2390 ptepindex = va >> PDRSHIFT;
17cde63e
MD
2391
2392 do {
984263bc
MD
2393 /*
2394 * Get the page directory entry
2395 */
2396 ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
2397
2398 /*
2399 * If the page table page is mapped, we just increment
90244566 2400 * the wire count, and activate it.
984263bc
MD
2401 */
2402 if (ptepa) {
2403 if (ptepa & PG_PS)
2404 panic("pmap_enter_quick: unexpected mapping into 4MB page");
2405 if (pmap->pm_ptphint &&
17cde63e 2406 (pmap->pm_ptphint->pindex == ptepindex)) {
984263bc
MD
2407 mpte = pmap->pm_ptphint;
2408 } else {
b12defdc 2409 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
984263bc 2410 pmap->pm_ptphint = mpte;
b12defdc 2411 vm_page_wakeup(mpte);
984263bc 2412 }
17cde63e 2413 if (mpte)
90244566 2414 vm_page_wire_quick(mpte);
984263bc
MD
2415 } else {
2416 mpte = _pmap_allocpte(pmap, ptepindex);
2417 }
17cde63e 2418 } while (mpte == NULL);
984263bc
MD
2419 } else {
2420 mpte = NULL;
17cde63e 2421 /* this code path is not yet used */
984263bc
MD
2422 }
2423
2424 /*
17cde63e
MD
2425 * With a valid (and held) page directory page, we can just use
2426 * vtopte() to get to the pte. If the pte is already present
2427 * we do not disturb it.
984263bc
MD
2428 */
2429 pte = (unsigned *)vtopte(va);
17cde63e 2430 if (*pte & PG_V) {
984263bc 2431 if (mpte)
90244566 2432 pmap_unwire_pte(pmap, mpte, &info);
17cde63e
MD
2433 pa = VM_PAGE_TO_PHYS(m);
2434 KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
c2fb025d 2435 pmap_inval_done(&info);
4107b0c0 2436 lwkt_reltoken(&vm_token);
b12defdc 2437 vm_object_drop(pmap->pm_pteobj);
17cde63e 2438 return;
984263bc
MD
2439 }
2440
2441 /*
17cde63e 2442 * Enter on the PV list if part of our managed memory
984263bc 2443 */
17cde63e 2444 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
984263bc 2445 pmap_insert_entry(pmap, va, mpte, m);
17cde63e
MD
2446 vm_page_flag_set(m, PG_MAPPED);
2447 }
984263bc
MD
2448
2449 /*
2450 * Increment counters
2451 */
eec2b734 2452 ++pmap->pm_stats.resident_count;
984263bc
MD
2453
2454 pa = VM_PAGE_TO_PHYS(m);
2455
2456 /*
2457 * Now validate mapping with RO protection
2458 */
2459 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2460 *pte = pa | PG_V | PG_U;
2461 else
2462 *pte = pa | PG_V | PG_U | PG_MANAGED;
17cde63e 2463/* pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
c2fb025d 2464 pmap_inval_done(&info);
4107b0c0 2465 lwkt_reltoken(&vm_token);
b12defdc 2466 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2467}
2468
2469/*
2470 * Make a temporary mapping for a physical address. This is only intended
2471 * to be used for panic dumps.
4107b0c0 2472 *
fb8345e6
MD
2473 * The caller is responsible for calling smp_invltlb().
2474 *
4107b0c0 2475 * No requirements.
984263bc
MD
2476 */
2477void *
8e5ea5f7 2478pmap_kenter_temporary(vm_paddr_t pa, long i)
984263bc 2479{
fb8345e6 2480 pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
984263bc
MD
2481 return ((void *)crashdumpmap);
2482}
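/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows the panic-dump pattern described above; 'dump_pa' is a
 * hypothetical physical address and the smp_invltlb() call reflects the
 * caller responsibility noted in the comment (SMP kernels only).
 */
static __inline void *
example_dump_map(vm_paddr_t dump_pa)
{
        void *va = pmap_kenter_temporary(dump_pa, 0);

#ifdef SMP
        smp_invltlb();          /* caller must invalidate TLBs, per above */
#endif
        return (va);
}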
2483
2484#define MAX_INIT_PT (96)
06ecca5a 2485
984263bc 2486/*
06ecca5a
MD
2487 * This routine preloads the ptes for a given object into the specified pmap.
2488 * This eliminates the blast of soft faults on process startup and
2489 * immediately after an mmap.
4107b0c0
MD
2490 *
2491 * No requirements.
984263bc 2492 */
1f804340
MD
2493static int pmap_object_init_pt_callback(vm_page_t p, void *data);
2494
984263bc 2495void
083a7402
MD
2496pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
2497 vm_object_t object, vm_pindex_t pindex,
2498 vm_size_t size, int limit)
984263bc 2499{
1f804340 2500 struct rb_vm_page_scan_info info;
287ebb09 2501 struct lwp *lp;
984263bc 2502 int psize;
984263bc 2503
54a764e8
MD
2504 /*
2505 * We can't preinit if read access isn't set or there is no pmap
2506 * or object.
2507 */
083a7402 2508 if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
984263bc
MD
2509 return;
2510
54a764e8
MD
2511 /*
2512 * We can't preinit if the pmap is not the current pmap
2513 */
287ebb09
MD
2514 lp = curthread->td_lwp;
2515 if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
54a764e8
MD
2516 return;
2517
984263bc
MD
2518 psize = i386_btop(size);
2519
2520 if ((object->type != OBJT_VNODE) ||
2521 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
2522 (object->resident_page_count > MAX_INIT_PT))) {
2523 return;
2524 }
2525
2526 if (psize + pindex > object->size) {
2527 if (object->size < pindex)
2528 return;
2529 psize = object->size - pindex;
2530 }
2531
1f804340
MD
2532 if (psize == 0)
2533 return;
06ecca5a 2534
984263bc 2535 /*
1f804340
MD
2536 * Use a red-black scan to traverse the requested range and load
2537 * any valid pages found into the pmap.
06ecca5a 2538 *
9acd5bbb
MD
2539 * The object is held across the scan so that pages cannot be
2540 * removed from it while we walk the tree.
984263bc 2541 */
1f804340
MD
2542 info.start_pindex = pindex;
2543 info.end_pindex = pindex + psize - 1;
2544 info.limit = limit;
2545 info.mpte = NULL;
2546 info.addr = addr;
2547 info.pmap = pmap;
2548
2f2d9e58 2549 vm_object_hold(object);
1f804340
MD
2550 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2551 pmap_object_init_pt_callback, &info);
2f2d9e58 2552 vm_object_drop(object);
1f804340 2553}
06ecca5a 2554
4107b0c0
MD
2555/*
2556 * The caller must hold vm_token.
2557 */
1f804340
MD
2558static
2559int
2560pmap_object_init_pt_callback(vm_page_t p, void *data)
2561{
2562 struct rb_vm_page_scan_info *info = data;
2563 vm_pindex_t rel_index;
2564 /*
2565 * Don't let a prefault triggered by madvise() eat into the free
2566 * page reserve just to allocate pv entries.
2567 */
2568 if ((info->limit & MAP_PREFAULT_MADVISE) &&
2569 vmstats.v_free_count < vmstats.v_free_reserved) {
2570 return(-1);
984263bc 2571 }
0d987a03
MD
2572
2573 /*
2574 * Ignore list markers and ignore pages we cannot instantly
2575 * busy (while holding the object token).
2576 */
2577 if (p->flags & PG_MARKER)
2578 return 0;
b12defdc
MD
2579 if (vm_page_busy_try(p, TRUE))
2580 return 0;
1f804340 2581 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
b12defdc 2582 (p->flags & PG_FICTITIOUS) == 0) {
1f804340
MD
2583 if ((p->queue - p->pc) == PQ_CACHE)
2584 vm_page_deactivate(p);
1f804340 2585 rel_index = p->pindex - info->start_pindex;
17cde63e
MD
2586 pmap_enter_quick(info->pmap,
2587 info->addr + i386_ptob(rel_index), p);
1f804340 2588 }
b12defdc 2589 vm_page_wakeup(p);
1f804340 2590 return(0);
984263bc
MD
2591}
2592
2593/*
1b9d3514
MD
2594 * Return TRUE if the pmap is in shape to trivially
2595 * pre-fault the specified address.
2596 *
2597 * Returns FALSE if it would be non-trivial or if a
2598 * pte is already loaded into the slot.
4107b0c0
MD
2599 *
2600 * No requirements.
984263bc 2601 */
1b9d3514
MD
2602int
2603pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
984263bc 2604{
1b9d3514 2605 unsigned *pte;
4107b0c0 2606 int ret;
984263bc 2607
4107b0c0
MD
2608 lwkt_gettoken(&vm_token);
2609 if ((*pmap_pde(pmap, addr)) == 0) {
2610 ret = 0;
2611 } else {
2612 pte = (unsigned *) vtopte(addr);
2613 ret = (*pte) ? 0 : 1;
2614 }
2615 lwkt_reltoken(&vm_token);
2616 return(ret);
984263bc
MD
2617}
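/*
 * Editor's note: a hypothetical sketch, not part of the original file,
 * showing the check-then-prefault pattern pmap_prefault_ok() and
 * pmap_enter_quick() above are intended for.
 */
static __inline void
example_prefault(pmap_t pmap, vm_offset_t addr, vm_page_t m)
{
        /* Only take the cheap path when no pte is already loaded. */
        if (pmap_prefault_ok(pmap, addr))
                pmap_enter_quick(pmap, addr, m);
}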
2618
2619/*
4107b0c0
MD
2620 * Change the wiring attribute for a map/virtual-address pair. The mapping
2621 * must already exist.
2622 *
2623 * No requirements.
984263bc
MD
2624 */
2625void
840de426 2626pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
984263bc 2627{
840de426 2628 unsigned *pte;
984263bc
MD
2629
2630 if (pmap == NULL)
2631 return;
2632
4107b0c0 2633 lwkt_gettoken(&vm_token);
984263bc
MD
2634 pte = pmap_pte(pmap, va);
2635
2636 if (wired && !pmap_pte_w(pte))
2637 pmap->pm_stats.wired_count++;
2638 else if (!wired && pmap_pte_w(pte))
2639 pmap->pm_stats.wired_count--;
2640
2641 /*
2642 * Wiring is not a hardware characteristic so there is no need to
0f7a3396
MD
2643 * invalidate TLB. However, in an SMP environment we must use
2644 * a locked bus cycle to update the pte (if we are not using
2645 * the pmap_inval_*() API that is)... it's ok to do this for simple
2646 * wiring changes.
984263bc 2647 */
0f7a3396
MD
2648#ifdef SMP
2649 if (wired)
2650 atomic_set_int(pte, PG_W);
2651 else
2652 atomic_clear_int(pte, PG_W);
2653#else
2654 if (wired)
2655 atomic_set_int_nonlocked(pte, PG_W);
2656 else
2657 atomic_clear_int_nonlocked(pte, PG_W);
2658#endif
4107b0c0 2659 lwkt_reltoken(&vm_token);
984263bc
MD
2660}
2661
984263bc 2662/*
4107b0c0
MD
2663 * Copy the range specified by src_addr/len from the source map to the
2664 * range dst_addr/len in the destination map.
2665 *
2666 * This routine is only advisory and need not do anything.
984263bc 2667 *
4107b0c0 2668 * No requirements.
984263bc 2669 */
984263bc 2670void
840de426 2671pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
4107b0c0 2672 vm_size_t len, vm_offset_t src_addr)
984263bc 2673{
4107b0c0 2674 /* does nothing */
984263bc
MD
2675}
2676
2677/*
4107b0c0
MD
2678 * Zero the specified PA by mapping the page into KVM and clearing its
2679 * contents.
e0e69b7d 2680 *
4107b0c0 2681 * No requirements.
984263bc
MD
2682 */
2683void
6ef943a3 2684pmap_zero_page(vm_paddr_t phys)
984263bc 2685{
85100692 2686 struct mdglobaldata *gd = mdcpu;
17a9f566 2687
e0e69b7d 2688 crit_enter();
85100692
MD
2689 if (*(int *)gd->gd_CMAP3)
2690 panic("pmap_zero_page: CMAP3 busy");
85100692 2691 *(int *)gd->gd_CMAP3 =
17a9f566 2692 PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
85100692 2693 cpu_invlpg(gd->gd_CADDR3);
984263bc
MD
2694
2695#if defined(I686_CPU)
2696 if (cpu_class == CPUCLASS_686)
85100692 2697 i686_pagezero(gd->gd_CADDR3);
984263bc
MD
2698 else
2699#endif
85100692 2700 bzero(gd->gd_CADDR3, PAGE_SIZE);
85100692 2701 *(int *) gd->gd_CMAP3 = 0;
e0e69b7d 2702 crit_exit();
8100156a
MD
2703}
2704
2705/*
4107b0c0 2706 * Assert that a page is empty, panic if it isn't.
8100156a 2707 *
4107b0c0 2708 * No requirements.
8100156a
MD
2709 */
2710void
2711pmap_page_assertzero(vm_paddr_t phys)
2712{
2713 struct mdglobaldata *gd = mdcpu;
2714 int i;
2715
2716 crit_enter();
2717 if (*(int *)gd->gd_CMAP3)
2718 panic("pmap_page_assertzero: CMAP3 busy");
2719 *(int *)gd->gd_CMAP3 =
2720 PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2721 cpu_invlpg(gd->gd_CADDR3);
2722 for (i = 0; i < PAGE_SIZE; i += 4) {
2723 if (*(int *)((char *)gd->gd_CADDR3 + i) != 0) {
2724 panic("pmap_page_assertzero() @ %p not zero!\n",
2725 (void *)gd->gd_CADDR3);
2726 }
2727 }
2728 *(int *) gd->gd_CMAP3 = 0;
2729 crit_exit();
984263bc
MD
2730}
2731
2732/*
4107b0c0
MD
2733 * Zero part of a physical page by mapping it into memory and clearing
2734 * its contents with bzero.
e0e69b7d 2735 *
4107b0c0 2736 * off and size may not cover an area beyond a single hardware page.
984263bc 2737 *
4107b0c0 2738 * No requirements.
984263bc
MD
2739 */
2740void
6ef943a3 2741pmap_zero_page_area(vm_paddr_t phys, int off, int size)
984263bc 2742{
85100692 2743 struct mdglobaldata *gd = mdcpu;
17a9f566 2744
e0e69b7d 2745 crit_enter();
85100692
MD
2746 if (*(int *) gd->gd_CMAP3)
2747 panic("pmap_zero_page: CMAP3 busy");
85100692
MD
2748 *(int *) gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2749 cpu_invlpg(gd->gd_CADDR3);
984263bc
MD
2750
2751#if defined(I686_CPU)
2752 if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
85100692 2753 i686_pagezero(gd->gd_CADDR3);
984263bc
MD
2754 else
2755#endif
85100692 2756 bzero((char *)gd->gd_CADDR3 + off, size);
85100692 2757 *(int *) gd->gd_CMAP3 = 0;
e0e69b7d 2758 crit_exit();
984263bc
MD
2759}
2760
2761/*
4107b0c0
MD
2762 * Copy the physical page from the source PA to the target PA.
2763 * This function may be called from an interrupt. No locking
2764 * is required.
e0e69b7d 2765 *
4107b0c0 2766 * No requirements.
984263bc
MD
2767 */
2768void
6ef943a3 2769pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
984263bc 2770{
85100692 2771 struct mdglobaldata *gd = mdcpu;
17a9f566 2772
e0e69b7d 2773 crit_enter();
85100692
MD
2774 if (*(int *) gd->gd_CMAP1)
2775 panic("pmap_copy_page: CMAP1 busy");
2776 if (*(int *) gd->gd_CMAP2)
2777 panic("pmap_copy_page: CMAP2 busy");
984263bc 2778
85100692
MD
2779 *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2780 *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
984263bc 2781
85100692
MD
2782 cpu_invlpg(gd->gd_CADDR1);
2783 cpu_invlpg(gd->gd_CADDR2);
984263bc 2784
85100692 2785 bcopy(gd->gd_CADDR1, gd->gd_CADDR2, PAGE_SIZE);
984263bc 2786
85100692
MD
2787 *(int *) gd->gd_CMAP1 = 0;
2788 *(int *) gd->gd_CMAP2 = 0;
e0e69b7d 2789 crit_exit();
984263bc
MD
2790}
2791
f6bf3af1 2792/*
4107b0c0
MD
2793 * Copy a sub-page fragment of 'bytes' bytes from the source PA to the
2794 * target PA. This function may be called from an interrupt. No locking
2795 * is required.
f6bf3af1 2796 *
4107b0c0 2797 * No requirements.
f6bf3af1
MD
2798 */
2799void
2800pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
2801{
2802 struct mdglobaldata *gd = mdcpu;
2803
2804 crit_enter();
2805 if (*(int *) gd->gd_CMAP1)
2806 panic("pmap_copy_page_frag: CMAP1 busy");
2807 if (*(int *) gd->gd_CMAP2)
2808 panic("pmap_copy_page_frag: CMAP2 busy");
2809
2810 *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2811 *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
2812
2813 cpu_invlpg(gd->gd_CADDR1);
2814 cpu_invlpg(gd->gd_CADDR2);
2815
2816 bcopy((char *)gd->gd_CADDR1 + (src & PAGE_MASK),
2817 (char *)gd->gd_CADDR2 + (dst & PAGE_MASK),
2818 bytes);
2819
2820 *(int *) gd->gd_CMAP1 = 0;
2821 *(int *) gd->gd_CMAP2 = 0;
2822 crit_exit();
2823}
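/*
 * Editor's note: hypothetical sketch, not part of the original file.  The
 * low PAGE_MASK bits of src/dst select the intra-page offsets copied by
 * pmap_copy_page_frag(); by analogy with pmap_zero_page_area(), the
 * fragment presumably must not cross a hardware page boundary, which the
 * assertions below make explicit.
 */
static __inline void
example_copy_frag(vm_paddr_t src_pa, vm_paddr_t dst_pa, size_t bytes)
{
        KKASSERT((src_pa & PAGE_MASK) + bytes <= PAGE_SIZE);
        KKASSERT((dst_pa & PAGE_MASK) + bytes <= PAGE_SIZE);
        pmap_copy_page_frag(src_pa, dst_pa, bytes);
}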
2824
984263bc
MD
2825/*
2826 * Returns true if the pmap's pv is one of the first
2827 * 16 pvs linked to from this page. This count may
2828 * be changed upwards or downwards in the future; it
2829 * is only necessary that true be returned for a small
2830 * subset of pmaps for proper page aging.
4107b0c0
MD
2831 *
2832 * No requirements.
984263bc
MD
2833 */
2834boolean_t
840de426 2835pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
984263bc
MD
2836{
2837 pv_entry_t pv;
2838 int loops = 0;
984263bc
MD
2839
2840 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2841 return FALSE;
2842
4107b0c0 2843 lwkt_gettoken(&vm_token);
984263bc
MD
2844 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2845 if (pv->pv_pmap == pmap) {
11502947 2846 lwkt_reltoken(&vm_token);
984263bc
MD
2847 return TRUE;
2848 }
2849 loops++;
2850 if (loops >= 16)
2851 break;
2852 }
4107b0c0 2853 lwkt_reltoken(&vm_token);
984263bc
MD
2854 return (FALSE);
2855}
2856
984263bc
MD
2857/*
2858 * Remove all pages from the specified address space; this aids
2859 * process exit speed. Also, this code is special-cased for the
2860 * current process only, but can have the more generic (and
2861 * slightly slower) mode enabled. This is much faster than
2862 * pmap_remove in the case of running down an entire address
2863 * space.
4107b0c0
MD
2864 *
2865 * No requirements.
984263bc
MD
2866 */
2867void
840de426 2868pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
984263bc 2869{
287ebb09 2870 struct lwp *lp;
984263bc
MD
2871 unsigned *pte, tpte;
2872 pv_entry_t pv, npv;
984263bc 2873 vm_page_t m;
0f7a3396 2874 pmap_inval_info info;
4a22e893 2875 int iscurrentpmap;
8790d7d8 2876 int32_t save_generation;
984263bc 2877
287ebb09
MD
2878 lp = curthread->td_lwp;
2879 if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
4a22e893
MD
2880 iscurrentpmap = 1;
2881 else
2882 iscurrentpmap = 0;
984263bc 2883
b12defdc
MD
2884 if (pmap->pm_pteobj)
2885 vm_object_hold(pmap->pm_pteobj);
4107b0c0 2886 lwkt_gettoken(&vm_token);
0f7a3396 2887 pmap_inval_init(&info);
b12defdc 2888
4a22e893 2889 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
984263bc
MD
2890 if (pv->pv_va >= eva || pv->pv_va < sva) {
2891 npv = TAILQ_NEXT(pv, pv_plist);
2892 continue;
2893 }
2894
8790d7d8
MD
2895 KKASSERT(pmap == pv->pv_pmap);
2896
4a22e893
MD
2897 if (iscurrentpmap)
2898 pte = (unsigned *)vtopte(pv->pv_va);
2899 else
8790d7d8 2900 pte = pmap_pte_quick(pmap, pv->pv_va);
5926987a 2901 KKASSERT(*pte);
c2fb025d 2902 pmap_inval_interlock(&info, pmap, pv->pv_va);
984263bc 2903
4a22e893
MD
2904 /*
2905 * We cannot remove wired pages from a process' mapping
2906 * at this time
2907 */
17cde63e 2908 if (*pte & PG_W) {
c2fb025d 2909 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2910 npv = TAILQ_NEXT(pv, pv_plist);
2911 continue;
2912 }
2247fe02 2913 KKASSERT(*pte);
17cde63e 2914 tpte = loadandclear(pte);
c2fb025d 2915 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2916
2917 m = PHYS_TO_VM_PAGE(tpte);
5926987a 2918 test_m_maps_pv(m, pv);
984263bc
MD
2919
2920 KASSERT(m < &vm_page_array[vm_page_array_size],
2921 ("pmap_remove_pages: bad tpte %x", tpte));
2922
eec2b734
MD
2923 KKASSERT(pmap->pm_stats.resident_count > 0);
2924 --pmap->pm_stats.resident_count;
984263bc
MD
2925
2926 /*
2927 * Update the vm_page_t clean and reference bits.
2928 */
2929 if (tpte & PG_M) {
2930 vm_page_dirty(m);
2931 }
2932
984263bc 2933 npv = TAILQ_NEXT(pv, pv_plist);
5926987a
MD
2934#ifdef PMAP_DEBUG
2935 KKASSERT(pv->pv_m == m);
2936 KKASSERT(pv->pv_pmap == pmap);
2937#endif
8790d7d8
MD
2938 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2939 save_generation = ++pmap->pm_generation;
984263bc
MD
2940
2941 m->md.pv_list_count--;
cef01e15
MD
2942 if (m->object)
2943 atomic_add_int(&m->object->agg_pv_list_count, -1);
984263bc 2944 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
17cde63e 2945 if (TAILQ_EMPTY(&m->md.pv_list))
984263bc 2946 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
984263bc 2947
8790d7d8 2948 pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
984263bc 2949 free_pv_entry(pv);
8790d7d8
MD
2950
2951 /*
2952 * Restart the scan if we blocked during the unuse or free
2953 * calls and other removals were made.
2954 */
2955 if (save_generation != pmap->pm_generation) {
2956 kprintf("Warning: pmap_remove_pages race-A avoided\n");
5926987a 2957 npv = TAILQ_FIRST(&pmap->pm_pvlist);
8790d7d8 2958 }
984263bc 2959 }
c2fb025d 2960 pmap_inval_done(&info);
4107b0c0 2961 lwkt_reltoken(&vm_token);
b12defdc
MD
2962 if (pmap->pm_pteobj)
2963 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2964}
2965
2966/*
2967 * pmap_testbit tests bits in ptes. Note that the testbit/clearbit
5e8d0349 2968 * routines are inline, so much of the work is evaluated at
984263bc 2969 * compile time.
4107b0c0
MD
2970 *
2971 * The caller must hold vm_token.
984263bc
MD
2972 */
2973static boolean_t
840de426 2974pmap_testbit(vm_page_t m, int bit)
984263bc
MD
2975{
2976 pv_entry_t pv;
2977 unsigned *pte;
984263bc
MD
2978
2979 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2980 return FALSE;
2981
2982 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
2983 return FALSE;
2984
984263bc
MD
2985 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2986 /*
2987 * If the bit being tested is the modified or accessed bit, skip
2988 * untracked vas; ptes in the clean submap are treated as never
2989 * modified or referenced.
2990 */
2991 if (bit & (PG_A|PG_M)) {
2992 if (!pmap_track_modified(pv->pv_va))
2993 continue;
2994 }
2995
2996#if defined(PMAP_DIAGNOSTIC)
2997 if (!pv->pv_pmap) {
d557216f
MD
2998 kprintf("Null pmap (tb) at va: %p\n",
2999 (void *)pv->pv_va);
984263bc
MD
3000 continue;
3001 }
3002#endif
3003 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
74b9d1ec 3004 if (*pte & bit) {
984263bc 3005 return TRUE;
74b9d1ec 3006 }
984263bc 3007 }
984263bc
MD
3008 return (FALSE);
3009}
3010
3011/*
4107b0c0
MD
3012 * This routine is used to modify bits in ptes
3013 *
3014 * The caller must hold vm_token.
984263bc
MD
3015 */
3016static __inline void
5e8d0349 3017pmap_clearbit(vm_page_t m, int bit)
984263bc 3018{
0f7a3396 3019 struct pmap_inval_info info;
840de426
MD
3020 pv_entry_t pv;
3021 unsigned *pte;
5e8d0349 3022 unsigned pbits;
984263bc
MD
3023
3024 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3025 return;
3026
0f7a3396 3027 pmap_inval_init(&info);
984263bc
MD
3028
3029 /*
3030 * Loop over all current mappings, setting or clearing as appropriate.
3031 * If setting RO, do we need to clear the VAC?
3032 */
3033 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3034 /*
3035 * don't write protect pager mappings
3036 */
5e8d0349 3037 if (bit == PG_RW) {
984263bc
MD
3038 if (!pmap_track_modified(pv->pv_va))
3039 continue;
3040 }
3041
3042#if defined(PMAP_DIAGNOSTIC)
3043 if (!pv->pv_pmap) {
d557216f
MD
3044 kprintf("Null pmap (cb) at va: %p\n",
3045 (void *)pv->pv_va);
984263bc
MD
3046 continue;
3047 }
3048#endif
3049
0f7a3396
MD
3050 /*
3051 * Careful here. We can use a locked bus instruction to
3052 * clear PG_A or PG_M safely but we need to synchronize
3053 * with the target cpus when we mess with PG_RW.
70fc5283
MD
3054 *
3055 * We do not have to force synchronization when clearing
3056 * PG_M even for PTEs generated via virtual memory maps,
3057 * because the virtual kernel will invalidate the pmap
3058 * entry when/if it needs to resynchronize the Modify bit.
0f7a3396 3059 */
70fc5283 3060 if (bit & PG_RW)
c2fb025d 3061 pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
17cde63e
MD
3062 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3063again:
5e8d0349
MD
3064 pbits = *pte;
3065 if (pbits & bit) {
3066 if (bit == PG_RW) {
17cde63e 3067 if (pbits & PG_M) {
5e8d0349 3068 vm_page_dirty(m);
17cde63e
MD
3069 atomic_clear_int(pte, PG_M|PG_RW);
3070 } else {
3071 /*
3072 * The cpu may be trying to set PG_M
3073 * simultaneously with our clearing
3074 * of PG_RW.
3075 */
3076 if (!atomic_cmpset_int(pte, pbits,
3077 pbits & ~PG_RW))
3078 goto again;
3079 }
5e8d0349
MD
3080 } else if (bit == PG_M) {
3081 /*
70fc5283
MD
3082 * We could also clear PG_RW here to force
3083 * a fault on write to redetect PG_M for
3084 * virtual kernels, but it isn't necessary
3085 * since virtual kernels invalidate the pte
3086 * when they clear the VPTE_M bit in their
3087 * virtual page tables.
5e8d0349 3088 */
70fc5283 3089 atomic_clear_int(pte, PG_M);
5e8d0349
MD
3090 } else {
3091 atomic_clear_int(pte, bit);
984263bc
MD
3092 }
3093 }
c2fb025d
MD
3094 if (bit & PG_RW)
3095 pmap_inval_deinterlock(&info, pv->pv_pmap);
984263bc 3096 }
c2fb025d 3097 pmap_inval_done(&info);
984263bc
MD
3098}
3099
3100/*
4107b0c0 3101 * Lower the permission for all mappings to a given page.
984263bc 3102 *
4107b0c0 3103 * No requirements.
984263bc
MD
3104 */
3105void
3106pmap_page_protect(vm_page_t m, vm_prot_t prot)
3107{
3108 if ((prot & VM_PROT_WRITE) == 0) {
4107b0c0 3109 lwkt_gettoken(&vm_token);
984263bc 3110 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
5e8d0349 3111 pmap_clearbit(m, PG_RW);
17cde63e 3112 vm_page_flag_clear(m, PG_WRITEABLE);
984263bc
MD
3113 } else {
3114 pmap_remove_all(m);
3115 }
4107b0c0 3116 lwkt_reltoken(&vm_token);
984263bc
MD
3117 }
3118}
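/*
 * Editor's note: illustrative sketch only, not part of the original file,
 * summarizing the two cases handled by pmap_page_protect() above;
 * example_page_protect() is an invented name.
 */
static __inline void
example_page_protect(vm_page_t m)
{
        /* Strip write access from every existing mapping of the page. */
        pmap_page_protect(m, VM_PROT_READ);

        /*
         * Passing VM_PROT_NONE would instead remove every mapping of the
         * page via pmap_remove_all().
         */
}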
3119
4107b0c0
MD
3120/*
3121 * Return the physical address given a physical page index.
3122 *
3123 * No requirements.
3124 */
6ef943a3 3125vm_paddr_t
cfd17028 3126pmap_phys_address(vm_pindex_t ppn)
984263bc
MD
3127{
3128 return (i386_ptob(ppn));
3129}
3130
3131/*
4107b0c0
MD
3132 * Return a count of reference bits for a page, clearing those bits.
3133 * It is not necessary for every reference bit to be cleared, but it
3134 * is necessary that 0 only be returned when there are truly no
3135 * reference bits set.
984263bc 3136 *
4107b0c0 3137 * No requirements.
984263bc
MD
3138 */
3139int
3140pmap_ts_referenced(vm_page_t m)
3141{
840de426 3142 pv_entry_t pv, pvf, pvn;
984263bc 3143 unsigned *pte;
984263bc
MD
3144 int rtval = 0;
3145
3146 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3147 return (rtval);
3148
4107b0c0 3149 lwkt_gettoken(&vm_token);
984263bc
MD
3150
3151 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3152
3153 pvf = pv;
3154
3155 do {
3156 pvn = TAILQ_NEXT(pv, pv_list);
3157
3158 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
984263bc
MD
3159 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
3160
3161 if (!pmap_track_modified(pv->pv_va))
3162 continue;
3163
3164 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3165
3166 if (pte && (*pte & PG_A)) {
0f7a3396
MD
3167#ifdef SMP
3168 atomic_clear_int(pte, PG_A);
3169#else
3170 atomic_clear_int_nonlocked(pte, PG_A);
3171#endif
984263bc
MD
3172 rtval++;
3173 if (rtval > 4) {
3174 break;
3175 }
3176 }
3177 } while ((pv = pvn) != NULL && pv != pvf);
3178 }
4107b0c0
MD
3179
3180 lwkt_reltoken(&vm_token);
984263bc
MD
3181
3182 return (rtval);
3183}
3184
3185/*