kernel - Attempt to fix i386 wire_count panic (98)
[dragonfly.git] / sys / platform / pc32 / i386 / pmap.c
CommitLineData
984263bc 1/*
4107b0c0
MD
2 * (MPSAFE)
3 *
984263bc
MD
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
44 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
45 */
46
47/*
4107b0c0 48 * Manages physical address maps.
984263bc 49 *
b12defdc 50 * In most cases we hold page table pages busy in order to manipulate them.
984263bc 51 */
5926987a
MD
52/*
53 * PMAP_DEBUG - see platform/pc32/include/pmap.h
54 */
984263bc
MD
55
56#include "opt_disable_pse.h"
57#include "opt_pmap.h"
58#include "opt_msgbuf.h"
984263bc
MD
59
60#include <sys/param.h>
61#include <sys/systm.h>
62#include <sys/kernel.h>
63#include <sys/proc.h>
64#include <sys/msgbuf.h>
65#include <sys/vmmeter.h>
66#include <sys/mman.h>
b12defdc 67#include <sys/thread.h>
984263bc
MD
68
69#include <vm/vm.h>
70#include <vm/vm_param.h>
71#include <sys/sysctl.h>
72#include <sys/lock.h>
73#include <vm/vm_kern.h>
74#include <vm/vm_page.h>
75#include <vm/vm_map.h>
76#include <vm/vm_object.h>
77#include <vm/vm_extern.h>
78#include <vm/vm_pageout.h>
79#include <vm/vm_pager.h>
80#include <vm/vm_zone.h>
81
82#include <sys/user.h>
e0e69b7d 83#include <sys/thread2.h>
e3161323 84#include <sys/sysref2.h>
b12defdc 85#include <sys/spinlock2.h>
90244566 86#include <vm/vm_page2.h>
984263bc
MD
87
88#include <machine/cputypes.h>
89#include <machine/md_var.h>
90#include <machine/specialreg.h>
984263bc 91#include <machine/smp.h>
a9295349 92#include <machine_base/apic/apicreg.h>
85100692 93#include <machine/globaldata.h>
0f7a3396
MD
94#include <machine/pmap.h>
95#include <machine/pmap_inval.h>
984263bc
MD
96
97#define PMAP_KEEP_PDIRS
98#ifndef PMAP_SHPGPERPROC
99#define PMAP_SHPGPERPROC 200
948209ce 100#define PMAP_PVLIMIT 1400000 /* i386 kvm problems */
984263bc
MD
101#endif
102
103#if defined(DIAGNOSTIC)
104#define PMAP_DIAGNOSTIC
105#endif
106
107#define MINPV 2048
108
109#if !defined(PMAP_DIAGNOSTIC)
110#define PMAP_INLINE __inline
111#else
112#define PMAP_INLINE
113#endif
114
115/*
116 * Get PDEs and PTEs for user/kernel address space
117 */
118#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
119#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
120
121#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
122#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
123#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
124#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
125#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
126
984263bc
MD
127/*
128 * Given a map and a machine independent protection code,
129 * convert to a vax protection code.
130 */
639a9b43
MD
131#define pte_prot(m, p) \
132 (protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
984263bc
MD
133static int protection_codes[8];
134
fbbaeba3 135struct pmap kernel_pmap;
54a764e8
MD
136static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
137
e880033d 138vm_paddr_t avail_start; /* PA of first available physical page */
6ef943a3 139vm_paddr_t avail_end; /* PA of last available physical page */
e880033d 140vm_offset_t virtual_start; /* VA of first avail page (after kernel bss) */
984263bc 141vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
791c6551
MD
142vm_offset_t virtual2_start;
143vm_offset_t virtual2_end;
c439ad8f
MD
144vm_offset_t KvaStart; /* VA start of KVA space */
145vm_offset_t KvaEnd; /* VA end of KVA space (non-inclusive) */
146vm_offset_t KvaSize; /* max size of kernel virtual address space */
984263bc
MD
147static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
148static int pgeflag; /* PG_G or-in */
149static int pseflag; /* PG_PS or-in */
150
151static vm_object_t kptobj;
152
153static int nkpt;
154vm_offset_t kernel_vm_end;
155
156/*
157 * Data for the pv entry allocation mechanism
158 */
159static vm_zone_t pvzone;
160static struct vm_zone pvzone_store;
161static struct vm_object pvzone_obj;
162static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
163static int pmap_pagedaemon_waken = 0;
164static struct pv_entry *pvinit;
165
166/*
a93980ab
MD
167 * Considering all the issues I'm having with pmap caching, if breakage
168 * continues to occur, and for debugging, I've added a sysctl that will
169 * just do an unconditional invltlb.
170 */
171static int dreadful_invltlb;
172
173SYSCTL_INT(_vm, OID_AUTO, dreadful_invltlb,
9733f757 174 CTLFLAG_RW, &dreadful_invltlb, 0, "Debugging sysctl to force invltlb on pmap operations");
a93980ab
MD
175
176/*
984263bc
MD
177 * All those kernel PT submaps that BSD is so fond of
178 */
4090d6ff 179pt_entry_t *CMAP1 = NULL, *ptmmap;
984263bc 180caddr_t CADDR1 = 0, ptvmmap = 0;
984263bc 181static pt_entry_t *msgbufmap;
4090d6ff 182struct msgbuf *msgbufp=NULL;
984263bc
MD
183
184/*
185 * Crashdump maps.
186 */
187static pt_entry_t *pt_crashdumpmap;
188static caddr_t crashdumpmap;
189
984263bc 190extern pt_entry_t *SMPpt;
984263bc 191
3ae0cd58
RG
192static PMAP_INLINE void free_pv_entry (pv_entry_t pv);
193static unsigned * get_ptbase (pmap_t pmap);
194static pv_entry_t get_pv_entry (void);
195static void i386_protection_init (void);
5e8d0349 196static __inline void pmap_clearbit (vm_page_t m, int bit);
3ae0cd58
RG
197
198static void pmap_remove_all (vm_page_t m);
0f7a3396
MD
199static int pmap_remove_pte (struct pmap *pmap, unsigned *ptq,
200 vm_offset_t sva, pmap_inval_info_t info);
201static void pmap_remove_page (struct pmap *pmap,
202 vm_offset_t va, pmap_inval_info_t info);
3ae0cd58 203static int pmap_remove_entry (struct pmap *pmap, vm_page_t m,
0f7a3396 204 vm_offset_t va, pmap_inval_info_t info);
3ae0cd58 205static boolean_t pmap_testbit (vm_page_t m, int bit);
2bb9cc6f
MD
206static void pmap_insert_entry (pmap_t pmap, pv_entry_t pv,
207 vm_offset_t va, vm_page_t mpte, vm_page_t m);
3ae0cd58
RG
208
209static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);
210
211static int pmap_release_free_page (pmap_t pmap, vm_page_t p);
212static vm_page_t _pmap_allocpte (pmap_t pmap, unsigned ptepindex);
213static unsigned * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
214static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
0f7a3396 215static int pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
984263bc
MD
216static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
217
218static unsigned pdir4mb;
219
220/*
840de426
MD
221 * Move the kernel virtual free pointer to the next
222 * 4MB. This is used to help improve performance
223 * by using a large (4MB) page for much of the kernel
224 * (.text, .data, .bss)
225 */
4107b0c0
MD
226static
227vm_offset_t
840de426
MD
228pmap_kmem_choose(vm_offset_t addr)
229{
230 vm_offset_t newaddr = addr;
231#ifndef DISABLE_PSE
232 if (cpu_feature & CPUID_PSE) {
233 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
234 }
235#endif
236 return newaddr;
237}
238
239/*
4107b0c0
MD
240 * This function returns a pointer to the pte entry in the pmap and has
241 * the side effect of potentially retaining a cached mapping of the pmap.
e0e69b7d 242 *
4107b0c0
MD
243 * The caller must hold vm_token and the returned value is only valid
244 * until the caller blocks or releases the token.
984263bc 245 */
4107b0c0
MD
246static
247unsigned *
840de426 248pmap_pte(pmap_t pmap, vm_offset_t va)
984263bc
MD
249{
250 unsigned *pdeaddr;
251
4107b0c0 252 ASSERT_LWKT_TOKEN_HELD(&vm_token);
984263bc
MD
253 if (pmap) {
254 pdeaddr = (unsigned *) pmap_pde(pmap, va);
255 if (*pdeaddr & PG_PS)
256 return pdeaddr;
4107b0c0 257 if (*pdeaddr)
984263bc 258 return get_ptbase(pmap) + i386_btop(va);
984263bc
MD
259 }
260 return (0);
261}
262
263/*
4107b0c0
MD
264 * pmap_pte using the kernel_pmap
265 *
266 * Used for debugging, no requirements.
267 */
268unsigned *
269pmap_kernel_pte(vm_offset_t va)
270{
271 unsigned *pdeaddr;
272
273 pdeaddr = (unsigned *) pmap_pde(&kernel_pmap, va);
274 if (*pdeaddr & PG_PS)
275 return pdeaddr;
276 if (*pdeaddr)
277 return (unsigned *)vtopte(va);
278 return(0);
279}
280
281/*
e0e69b7d
MD
282 * pmap_pte_quick:
283 *
c1692ddf
MD
284 * Super fast pmap_pte routine best used when scanning the pv lists.
285 * This eliminates many course-grained invltlb calls. Note that many of
286 * the pv list scans are across different pmaps and it is very wasteful
287 * to do an entire invltlb when checking a single mapping.
e0e69b7d 288 *
c1692ddf
MD
289 * Should only be called while in a critical section.
290 *
4107b0c0
MD
291 * The caller must hold vm_token and the returned value is only valid
292 * until the caller blocks or releases the token.
984263bc 293 */
4107b0c0
MD
294static
295unsigned *
840de426 296pmap_pte_quick(pmap_t pmap, vm_offset_t va)
984263bc 297{
840de426
MD
298 struct mdglobaldata *gd = mdcpu;
299 unsigned pde, newpf;
300
4107b0c0 301 ASSERT_LWKT_TOKEN_HELD(&vm_token);
840de426
MD
302 if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
303 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
304 unsigned index = i386_btop(va);
305 /* are we current address space or kernel? */
fbbaeba3 306 if ((pmap == &kernel_pmap) ||
840de426
MD
307 (frame == (((unsigned) PTDpde) & PG_FRAME))) {
308 return (unsigned *) PTmap + index;
309 }
310 newpf = pde & PG_FRAME;
4107b0c0
MD
311 if (((*(unsigned *)gd->gd_PMAP1) & PG_FRAME) != newpf) {
312 *(unsigned *)gd->gd_PMAP1 = newpf | PG_RW | PG_V;
840de426
MD
313 cpu_invlpg(gd->gd_PADDR1);
314 }
06bb314f 315 return gd->gd_PADDR1 + (index & (NPTEPG - 1));
984263bc 316 }
840de426 317 return (0);
984263bc
MD
318}
319
840de426 320
984263bc 321/*
4107b0c0 322 * Bootstrap the system enough to run with virtual memory.
984263bc 323 *
4107b0c0
MD
324 * On the i386 this is called after mapping has already been enabled
325 * and just syncs the pmap module with what has already been done.
326 * [We can't call it easily with mapping off since the kernel is not
327 * mapped with PA == VA, hence we would have to relocate every address
328 * from the linked base (virtual) address "KERNBASE" to the actual
329 * (physical) address starting relative to 0]
984263bc
MD
330 */
331void
f123d5a1 332pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
984263bc
MD
333{
334 vm_offset_t va;
335 pt_entry_t *pte;
85100692 336 struct mdglobaldata *gd;
984263bc 337 int i;
81c04d07 338 int pg;
984263bc 339
c439ad8f
MD
340 KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
341 KvaSize = (vm_offset_t)VADDR(APTDPTDI, 0) - KvaStart;
342 KvaEnd = KvaStart + KvaSize;
343
984263bc
MD
344 avail_start = firstaddr;
345
346 /*
e880033d
MD
347 * XXX The calculation of virtual_start is wrong. It's NKPT*PAGE_SIZE
348 * too large. It should instead be correctly calculated in locore.s and
984263bc
MD
349 * not based on 'first' (which is a physical address, not a virtual
350 * address, for the start of unused physical memory). The kernel
351 * page tables are NOT double mapped and thus should not be included
352 * in this calculation.
353 */
e880033d
MD
354 virtual_start = (vm_offset_t) KERNBASE + firstaddr;
355 virtual_start = pmap_kmem_choose(virtual_start);
c439ad8f 356 virtual_end = VADDR(KPTDI+NKPDE-1, NPTEPG-1);
984263bc
MD
357
358 /*
359 * Initialize protection array.
360 */
361 i386_protection_init();
362
363 /*
364 * The kernel's pmap is statically allocated so we don't have to use
365 * pmap_create, which is unlikely to work correctly at this part of
366 * the boot sequence (XXX and which no longer exists).
b12defdc
MD
367 *
368 * The kernel_pmap's pm_pteobj is used only for locking and not
369 * for mmu pages.
984263bc 370 */
fbbaeba3
MD
371 kernel_pmap.pm_pdir = (pd_entry_t *)(KERNBASE + (u_int)IdlePTD);
372 kernel_pmap.pm_count = 1;
c2fb025d 373 kernel_pmap.pm_active = (cpumask_t)-1 & ~CPUMASK_LOCK;
b12defdc 374 kernel_pmap.pm_pteobj = &kernel_object;
fbbaeba3 375 TAILQ_INIT(&kernel_pmap.pm_pvlist);
b12defdc
MD
376 TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
377 spin_init(&kernel_pmap.pm_spin);
378 lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
984263bc
MD
379 nkpt = NKPT;
380
381 /*
382 * Reserve some special page table entries/VA space for temporary
383 * mapping of pages.
384 */
385#define SYSMAP(c, p, v, n) \
386 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
387
e880033d 388 va = virtual_start;
4107b0c0 389 pte = (pt_entry_t *) pmap_kernel_pte(va);
984263bc
MD
390
391 /*
392 * CMAP1/CMAP2 are used for zeroing and copying pages.
393 */
394 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
984263bc
MD
395
396 /*
397 * Crashdump maps.
398 */
399 SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
400
401 /*
e731d345
MD
402 * ptvmmap is used for reading arbitrary physical pages via
403 * /dev/mem.
404 */
405 SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
406
407 /*
984263bc
MD
408 * msgbufp is used to map the system message buffer.
409 * XXX msgbufmap is not used.
410 */
411 SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
412 atop(round_page(MSGBUF_SIZE)))
413
e880033d 414 virtual_start = va;
984263bc 415
17a9f566 416 *(int *) CMAP1 = 0;
984263bc
MD
417 for (i = 0; i < NKPT; i++)
418 PTD[i] = 0;
419
a2a5ad0d
MD
420 /*
421 * PG_G is terribly broken on SMP because we IPI invltlb's in some
422 * cases rather then invl1pg. Actually, I don't even know why it
423 * works under UP because self-referential page table mappings
424 */
425#ifdef SMP
426 pgeflag = 0;
427#else
428 if (cpu_feature & CPUID_PGE)
984263bc 429 pgeflag = PG_G;
a2a5ad0d 430#endif
984263bc
MD
431
432/*
433 * Initialize the 4MB page size flag
434 */
435 pseflag = 0;
436/*
437 * The 4MB page version of the initial
438 * kernel page mapping.
439 */
440 pdir4mb = 0;
441
442#if !defined(DISABLE_PSE)
443 if (cpu_feature & CPUID_PSE) {
444 unsigned ptditmp;
445 /*
446 * Note that we have enabled PSE mode
447 */
448 pseflag = PG_PS;
449 ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
450 ptditmp &= ~(NBPDR - 1);
451 ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
452 pdir4mb = ptditmp;
453
8a8d5d85
MD
454#ifndef SMP
455 /*
456 * Enable the PSE mode. If we are SMP we can't do this
457 * now because the APs will not be able to use it when
458 * they boot up.
459 */
460 load_cr4(rcr4() | CR4_PSE);
984263bc 461
8a8d5d85
MD
462 /*
463 * We can do the mapping here for the single processor
464 * case. We simply ignore the old page table page from
465 * now on.
466 */
467 /*
468 * For SMP, we still need 4K pages to bootstrap APs,
469 * PSE will be enabled as soon as all APs are up.
470 */
b5b32410 471 PTD[KPTDI] = (pd_entry_t)ptditmp;
fbbaeba3 472 kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
0f7a3396 473 cpu_invltlb();
8a8d5d85 474#endif
984263bc
MD
475 }
476#endif
984263bc 477
81c04d07
MD
478 /*
479 * We need to finish setting up the globaldata page for the BSP.
480 * locore has already populated the page table for the mdglobaldata
481 * portion.
482 */
483 pg = MDGLOBALDATA_BASEALLOC_PAGES;
85100692 484 gd = &CPU_prvspace[0].mdglobaldata;
81c04d07
MD
485 gd->gd_CMAP1 = &SMPpt[pg + 0];
486 gd->gd_CMAP2 = &SMPpt[pg + 1];
487 gd->gd_CMAP3 = &SMPpt[pg + 2];
488 gd->gd_PMAP1 = &SMPpt[pg + 3];
9388fcaa 489 gd->gd_GDMAP1 = &PTD[APTDPTDI];
85100692
MD
490 gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
491 gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
492 gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
493 gd->gd_PADDR1 = (unsigned *)CPU_prvspace[0].PPAGE1;
9388fcaa 494 gd->gd_GDADDR1= (unsigned *)VADDR(APTDPTDI, 0);
984263bc 495
0f7a3396 496 cpu_invltlb();
984263bc
MD
497}
498
499#ifdef SMP
500/*
501 * Set 4mb pdir for mp startup
502 */
503void
504pmap_set_opt(void)
505{
506 if (pseflag && (cpu_feature & CPUID_PSE)) {
507 load_cr4(rcr4() | CR4_PSE);
72740893 508 if (pdir4mb && mycpu->gd_cpuid == 0) { /* only on BSP */
fbbaeba3 509 kernel_pmap.pm_pdir[KPTDI] =
984263bc
MD
510 PTD[KPTDI] = (pd_entry_t)pdir4mb;
511 cpu_invltlb();
512 }
513 }
514}
515#endif
516
517/*
4107b0c0
MD
518 * Initialize the pmap module, called by vm_init()
519 *
520 * Called from the low level boot code only.
984263bc
MD
521 */
522void
e7252eda 523pmap_init(void)
984263bc
MD
524{
525 int i;
526 int initial_pvs;
527
528 /*
529 * object for kernel page table pages
530 */
531 kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);
532
533 /*
534 * Allocate memory for random pmap data structures. Includes the
535 * pv_head_table.
536 */
537
538 for(i = 0; i < vm_page_array_size; i++) {
539 vm_page_t m;
540
541 m = &vm_page_array[i];
542 TAILQ_INIT(&m->md.pv_list);
543 m->md.pv_list_count = 0;
544 }
545
546 /*
547 * init the pv free list
548 */
549 initial_pvs = vm_page_array_size;
550 if (initial_pvs < MINPV)
551 initial_pvs = MINPV;
552 pvzone = &pvzone_store;
948209ce
MD
553 pvinit = (void *)kmem_alloc(&kernel_map,
554 initial_pvs * sizeof (struct pv_entry));
555 zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
556 pvinit, initial_pvs);
984263bc
MD
557
558 /*
559 * Now it is safe to enable pv_table recording.
560 */
561 pmap_initialized = TRUE;
562}
563
564/*
565 * Initialize the address space (zone) for the pv_entries. Set a
566 * high water mark so that the system can recover from excessive
567 * numbers of pv entries.
4107b0c0
MD
568 *
569 * Called from the low level boot code only.
984263bc
MD
570 */
571void
f123d5a1 572pmap_init2(void)
984263bc
MD
573{
574 int shpgperproc = PMAP_SHPGPERPROC;
948209ce 575 int entry_max;
984263bc
MD
576
577 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
578 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
948209ce
MD
579
580#ifdef PMAP_PVLIMIT
581 /*
582 * Horrible hack for systems with a lot of memory running i386.
583 * the calculated pv_entry_max can wind up eating a ton of KVM
584 * so put a cap on the number of entries if the user did not
585 * change any of the values. This saves about 44MB of KVM on
586 * boxes with 3+GB of ram.
587 *
588 * On the flip side, this makes it more likely that some setups
589 * will run out of pv entries. Those sysads will have to bump
590 * the limit up with vm.pamp.pv_entries or vm.pmap.shpgperproc.
591 */
592 if (shpgperproc == PMAP_SHPGPERPROC) {
593 if (pv_entry_max > PMAP_PVLIMIT)
594 pv_entry_max = PMAP_PVLIMIT;
595 }
596#endif
984263bc
MD
597 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
598 pv_entry_high_water = 9 * (pv_entry_max / 10);
948209ce
MD
599
600 /*
601 * Subtract out pages already installed in the zone (hack)
602 */
603 entry_max = pv_entry_max - vm_page_array_size;
604 if (entry_max <= 0)
605 entry_max = 1;
606
607 zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
984263bc
MD
608}
609
610
611/***************************************************
612 * Low level helper routines.....
613 ***************************************************/
614
5926987a
MD
615#ifdef PMAP_DEBUG
616
617static void
618test_m_maps_pv(vm_page_t m, pv_entry_t pv)
619{
620 pv_entry_t spv;
621
74b9d1ec 622 crit_enter();
5926987a
MD
623#ifdef PMAP_DEBUG
624 KKASSERT(pv->pv_m == m);
625#endif
626 TAILQ_FOREACH(spv, &m->md.pv_list, pv_list) {
74b9d1ec
MD
627 if (pv == spv) {
628 crit_exit();
5926987a 629 return;
74b9d1ec 630 }
5926987a 631 }
74b9d1ec 632 crit_exit();
ed20d0e3 633 panic("test_m_maps_pv: failed m %p pv %p", m, pv);
5926987a
MD
634}
635
636static void
637ptbase_assert(struct pmap *pmap)
638{
639 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
640
641 /* are we current address space or kernel? */
4107b0c0 642 if (pmap == &kernel_pmap || frame == (((unsigned)PTDpde) & PG_FRAME))
5926987a 643 return;
3558dcda 644 KKASSERT(frame == (*mdcpu->gd_GDMAP1 & PG_FRAME));
5926987a
MD
645}
646
647#else
648
649#define test_m_maps_pv(m, pv)
650#define ptbase_assert(pmap)
651
652#endif
653
984263bc
MD
654#if defined(PMAP_DIAGNOSTIC)
655
656/*
657 * This code checks for non-writeable/modified pages.
658 * This should be an invalid condition.
659 */
660static int
661pmap_nw_modified(pt_entry_t ptea)
662{
663 int pte;
664
665 pte = (int) ptea;
666
667 if ((pte & (PG_M|PG_RW)) == PG_M)
668 return 1;
669 else
670 return 0;
671}
672#endif
673
674
675/*
4107b0c0
MD
676 * This routine defines the region(s) of memory that should not be tested
677 * for the modified bit.
678 *
679 * No requirements.
984263bc
MD
680 */
681static PMAP_INLINE int
682pmap_track_modified(vm_offset_t va)
683{
684 if ((va < clean_sva) || (va >= clean_eva))
685 return 1;
686 else
687 return 0;
688}
689
c1692ddf
MD
690/*
691 * Retrieve the mapped page table base for a particular pmap. Use our self
692 * mapping for the kernel_pmap or our current pmap.
693 *
694 * For foreign pmaps we use the per-cpu page table map. Since this involves
695 * installing a ptd it's actually (per-process x per-cpu). However, we
696 * still cannot depend on our mapping to survive thread switches because
697 * the process might be threaded and switching to another thread for the
698 * same process on the same cpu will allow that other thread to make its
699 * own mapping.
700 *
701 * This could be a bit confusing but the jist is for something like the
702 * vkernel which uses foreign pmaps all the time this represents a pretty
703 * good cache that avoids unnecessary invltlb()s.
4107b0c0
MD
704 *
705 * The caller must hold vm_token and the returned value is only valid
706 * until the caller blocks or releases the token.
c1692ddf 707 */
984263bc 708static unsigned *
e0e69b7d 709get_ptbase(pmap_t pmap)
984263bc
MD
710{
711 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
c1692ddf 712 struct mdglobaldata *gd = mdcpu;
984263bc 713
4107b0c0
MD
714 ASSERT_LWKT_TOKEN_HELD(&vm_token);
715
5926987a
MD
716 /*
717 * We can use PTmap if the pmap is our current address space or
718 * the kernel address space.
719 */
fbbaeba3 720 if (pmap == &kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
984263bc
MD
721 return (unsigned *) PTmap;
722 }
e0e69b7d 723
5926987a 724 /*
c1692ddf
MD
725 * Otherwise we use the per-cpu alternative page table map. Each
726 * cpu gets its own map. Because of this we cannot use this map
727 * from interrupts or threads which can preempt.
be3aecf7
MD
728 *
729 * Even if we already have the map cached we may still have to
730 * invalidate the TLB if another cpu modified a PDE in the map.
5926987a 731 */
c1692ddf
MD
732 KKASSERT(gd->mi.gd_intr_nesting_level == 0 &&
733 (gd->mi.gd_curthread->td_flags & TDF_INTTHREAD) == 0);
e0e69b7d 734
c1692ddf
MD
735 if ((*gd->gd_GDMAP1 & PG_FRAME) != frame) {
736 *gd->gd_GDMAP1 = frame | PG_RW | PG_V;
be3aecf7
MD
737 pmap->pm_cached |= gd->mi.gd_cpumask;
738 cpu_invltlb();
739 } else if ((pmap->pm_cached & gd->mi.gd_cpumask) == 0) {
740 pmap->pm_cached |= gd->mi.gd_cpumask;
984263bc 741 cpu_invltlb();
a93980ab
MD
742 } else if (dreadful_invltlb) {
743 cpu_invltlb();
984263bc 744 }
c1692ddf 745 return ((unsigned *)gd->gd_GDADDR1);
984263bc
MD
746}
747
748/*
e0e69b7d
MD
749 * pmap_extract:
750 *
4107b0c0 751 * Extract the physical page address associated with the map/VA pair.
e0e69b7d 752 *
4107b0c0 753 * The caller may hold vm_token if it desires non-blocking operation.
984263bc 754 */
6ef943a3 755vm_paddr_t
840de426 756pmap_extract(pmap_t pmap, vm_offset_t va)
984263bc
MD
757{
758 vm_offset_t rtval;
759 vm_offset_t pdirindex;
840de426 760
4107b0c0 761 lwkt_gettoken(&vm_token);
984263bc
MD
762 pdirindex = va >> PDRSHIFT;
763 if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
764 unsigned *pte;
765 if ((rtval & PG_PS) != 0) {
766 rtval &= ~(NBPDR - 1);
767 rtval |= va & (NBPDR - 1);
4107b0c0
MD
768 } else {
769 pte = get_ptbase(pmap) + i386_btop(va);
770 rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
984263bc 771 }
4107b0c0
MD
772 } else {
773 rtval = 0;
984263bc 774 }
4107b0c0
MD
775 lwkt_reltoken(&vm_token);
776 return rtval;
f6bf3af1
MD
777}
778
984263bc
MD
779/***************************************************
780 * Low level mapping routines.....
781 ***************************************************/
782
783/*
4107b0c0
MD
784 * Map a wired VM page to a KVA, fully SMP synchronized.
785 *
786 * No requirements, non blocking.
984263bc 787 */
24712b90 788void
6ef943a3 789pmap_kenter(vm_offset_t va, vm_paddr_t pa)
984263bc 790{
840de426 791 unsigned *pte;
0f7a3396
MD
792 unsigned npte;
793 pmap_inval_info info;
984263bc 794
0f7a3396 795 pmap_inval_init(&info);
984263bc
MD
796 npte = pa | PG_RW | PG_V | pgeflag;
797 pte = (unsigned *)vtopte(va);
c2fb025d 798 pmap_inval_interlock(&info, &kernel_pmap, va);
984263bc 799 *pte = npte;
c2fb025d
MD
800 pmap_inval_deinterlock(&info, &kernel_pmap);
801 pmap_inval_done(&info);
984263bc
MD
802}
803
6d1ec6fa 804/*
4107b0c0
MD
805 * Map a wired VM page to a KVA, synchronized on current cpu only.
806 *
807 * No requirements, non blocking.
6d1ec6fa 808 */
24712b90
MD
809void
810pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
811{
812 unsigned *pte;
813 unsigned npte;
814
815 npte = pa | PG_RW | PG_V | pgeflag;
816 pte = (unsigned *)vtopte(va);
817 *pte = npte;
818 cpu_invlpg((void *)va);
819}
820
4107b0c0
MD
821/*
822 * Synchronize a previously entered VA on all cpus.
823 *
824 * No requirements, non blocking.
825 */
24712b90
MD
826void
827pmap_kenter_sync(vm_offset_t va)
828{
829 pmap_inval_info info;
830
831 pmap_inval_init(&info);
c2fb025d
MD
832 pmap_inval_interlock(&info, &kernel_pmap, va);
833 pmap_inval_deinterlock(&info, &kernel_pmap);
834 pmap_inval_done(&info);
24712b90
MD
835}
836
4107b0c0
MD
837/*
838 * Synchronize a previously entered VA on the current cpu only.
839 *
840 * No requirements, non blocking.
841 */
24712b90
MD
842void
843pmap_kenter_sync_quick(vm_offset_t va)
844{
845 cpu_invlpg((void *)va);
846}
847
984263bc 848/*
4107b0c0
MD
849 * Remove a page from the kernel pagetables, fully SMP synchronized.
850 *
851 * No requirements, non blocking.
984263bc 852 */
24712b90 853void
840de426 854pmap_kremove(vm_offset_t va)
984263bc 855{
840de426 856 unsigned *pte;
0f7a3396 857 pmap_inval_info info;
984263bc 858
0f7a3396 859 pmap_inval_init(&info);
984263bc 860 pte = (unsigned *)vtopte(va);
c2fb025d 861 pmap_inval_interlock(&info, &kernel_pmap, va);
984263bc 862 *pte = 0;
c2fb025d
MD
863 pmap_inval_deinterlock(&info, &kernel_pmap);
864 pmap_inval_done(&info);
984263bc
MD
865}
866
4107b0c0
MD
867/*
868 * Remove a page from the kernel pagetables, synchronized on current cpu only.
869 *
870 * No requirements, non blocking.
871 */
24712b90
MD
872void
873pmap_kremove_quick(vm_offset_t va)
874{
875 unsigned *pte;
876 pte = (unsigned *)vtopte(va);
877 *pte = 0;
878 cpu_invlpg((void *)va);
879}
880
984263bc 881/*
4107b0c0
MD
882 * Adjust the permissions of a page in the kernel page table,
883 * synchronized on the current cpu only.
884 *
885 * No requirements, non blocking.
9ad680a3
MD
886 */
887void
888pmap_kmodify_rw(vm_offset_t va)
889{
4107b0c0 890 atomic_set_int(vtopte(va), PG_RW);
9ad680a3
MD
891 cpu_invlpg((void *)va);
892}
893
4107b0c0
MD
894/*
895 * Adjust the permissions of a page in the kernel page table,
896 * synchronized on the current cpu only.
897 *
898 * No requirements, non blocking.
899 */
9ad680a3
MD
900void
901pmap_kmodify_nc(vm_offset_t va)
902{
4107b0c0 903 atomic_set_int(vtopte(va), PG_N);
9ad680a3
MD
904 cpu_invlpg((void *)va);
905}
906
907/*
4107b0c0 908 * Map a range of physical addresses into kernel virtual address space.
984263bc 909 *
4107b0c0 910 * No requirements, non blocking.
984263bc
MD
911 */
912vm_offset_t
8e5e6f1b 913pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
984263bc 914{
8e5e6f1b
AH
915 vm_offset_t sva, virt;
916
917 sva = virt = *virtp;
984263bc
MD
918 while (start < end) {
919 pmap_kenter(virt, start);
920 virt += PAGE_SIZE;
921 start += PAGE_SIZE;
922 }
8e5e6f1b
AH
923 *virtp = virt;
924 return (sva);
984263bc
MD
925}
926
984263bc 927/*
4107b0c0
MD
928 * Add a list of wired pages to the kva, fully SMP synchronized.
929 *
930 * No requirements, non blocking.
984263bc
MD
931 */
932void
840de426 933pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
984263bc
MD
934{
935 vm_offset_t end_va;
936
937 end_va = va + count * PAGE_SIZE;
938
939 while (va < end_va) {
940 unsigned *pte;
941
942 pte = (unsigned *)vtopte(va);
943 *pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
984263bc 944 cpu_invlpg((void *)va);
984263bc
MD
945 va += PAGE_SIZE;
946 m++;
947 }
948#ifdef SMP
0f7a3396 949 smp_invltlb(); /* XXX */
984263bc
MD
950#endif
951}
952
953/*
4107b0c0 954 * Remove pages from KVA, fully SMP synchronized.
7155fc7d 955 *
4107b0c0 956 * No requirements, non blocking.
984263bc
MD
957 */
958void
840de426 959pmap_qremove(vm_offset_t va, int count)
984263bc
MD
960{
961 vm_offset_t end_va;
962
963 end_va = va + count*PAGE_SIZE;
964
965 while (va < end_va) {
966 unsigned *pte;
967
968 pte = (unsigned *)vtopte(va);
969 *pte = 0;
984263bc 970 cpu_invlpg((void *)va);
984263bc
MD
971 va += PAGE_SIZE;
972 }
973#ifdef SMP
974 smp_invltlb();
975#endif
976}
977
06ecca5a
MD
978/*
979 * This routine works like vm_page_lookup() but also blocks as long as the
980 * page is busy. This routine does not busy the page it returns.
981 *
b12defdc 982 * The caller must hold the object.
06ecca5a 983 */
984263bc 984static vm_page_t
840de426 985pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
984263bc
MD
986{
987 vm_page_t m;
06ecca5a 988
b12defdc
MD
989 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
990 m = vm_page_lookup_busy_wait(object, pindex, FALSE, "pplookp");
17cde63e 991
06ecca5a 992 return(m);
984263bc
MD
993}
994
995/*
263e4574 996 * Create a new thread and optionally associate it with a (new) process.
6ef943a3 997 * NOTE! the new thread's cpu may not equal the current cpu.
263e4574 998 */
7d0bac62
MD
999void
1000pmap_init_thread(thread_t td)
263e4574 1001{
f470d0c8 1002 /* enforce pcb placement */
f470d0c8 1003 td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
65d6ce10 1004 td->td_savefpu = &td->td_pcb->pcb_save;
7d0bac62 1005 td->td_sp = (char *)td->td_pcb - 16;
263e4574
MD
1006}
1007
1008/*
984263bc
MD
1009 * This routine directly affects the fork perf for a process.
1010 */
1011void
13d13d89 1012pmap_init_proc(struct proc *p)
984263bc 1013{
984263bc
MD
1014}
1015
984263bc
MD
1016/***************************************************
1017 * Page table page management routines.....
1018 ***************************************************/
1019
1020/*
90244566
MD
1021 * This routine unwires page table pages, removing and freeing the page
1022 * tale page when the wire count drops to 0.
4107b0c0
MD
1023 *
1024 * The caller must hold vm_token.
1025 * This function can block.
984263bc
MD
1026 */
1027static int
90244566 1028_pmap_unwire_pte(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
840de426 1029{
17cde63e
MD
1030 /*
1031 * Wait until we can busy the page ourselves. We cannot have
1032 * any active flushes if we block.
1033 */
b12defdc 1034 vm_page_busy_wait(m, FALSE, "pmuwpt");
eec2b734 1035 KASSERT(m->queue == PQ_NONE,
90244566 1036 ("_pmap_unwire_pte: %p->queue != PQ_NONE", m));
984263bc 1037
90244566 1038 if (m->wire_count == 1) {
984263bc 1039 /*
be3aecf7
MD
1040 * Unmap the page table page.
1041 *
1042 * NOTE: We must clear pm_cached for all cpus, including
1043 * the current one, when clearing a page directory
1044 * entry.
984263bc 1045 */
c2fb025d 1046 pmap_inval_interlock(info, pmap, -1);
2247fe02 1047 KKASSERT(pmap->pm_pdir[m->pindex]);
984263bc 1048 pmap->pm_pdir[m->pindex] = 0;
be3aecf7 1049 pmap->pm_cached = 0;
c2fb025d 1050 pmap_inval_deinterlock(info, pmap);
eec2b734
MD
1051
1052 KKASSERT(pmap->pm_stats.resident_count > 0);
984263bc 1053 --pmap->pm_stats.resident_count;
984263bc
MD
1054
1055 if (pmap->pm_ptphint == m)
1056 pmap->pm_ptphint = NULL;
1057
1058 /*
eec2b734
MD
1059 * This was our last hold, the page had better be unwired
1060 * after we decrement wire_count.
1061 *
1062 * FUTURE NOTE: shared page directory page could result in
1063 * multiple wire counts.
984263bc 1064 */
90244566 1065 vm_page_unwire(m, 0);
17cde63e 1066 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
eec2b734
MD
1067 vm_page_flash(m);
1068 vm_page_free_zero(m);
984263bc 1069 return 1;
17cde63e 1070 } else {
90244566
MD
1071 KKASSERT(m->wire_count > 1);
1072 if (vm_page_unwire_quick(m))
1073 panic("pmap_unwire_pte: Insufficient wire_count");
b12defdc 1074 vm_page_wakeup(m);
17cde63e 1075 return 0;
984263bc 1076 }
984263bc
MD
1077}
1078
4107b0c0
MD
1079/*
1080 * The caller must hold vm_token.
92ba8d28 1081 *
4107b0c0 1082 * This function can block.
92ba8d28
MD
1083 *
1084 * This function can race the wire_count 2->1 case because the page
1085 * is not busied during the unwire_quick operation. An eventual
1086 * pmap_release() will catch the case.
4107b0c0 1087 */
984263bc 1088static PMAP_INLINE int
90244566 1089pmap_unwire_pte(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
984263bc 1090{
90244566
MD
1091 KKASSERT(m->wire_count > 0);
1092 if (m->wire_count > 1) {
1093 if (vm_page_unwire_quick(m))
1094 panic("pmap_unwire_pte: Insufficient wire_count");
984263bc 1095 return 0;
eec2b734 1096 } else {
90244566 1097 return _pmap_unwire_pte(pmap, m, info);
eec2b734 1098 }
984263bc
MD
1099}
1100
1101/*
4107b0c0 1102 * After removing a (user) page table entry, this routine is used to
984263bc 1103 * conditionally free the page, and manage the hold/wire counts.
5926987a 1104 *
4107b0c0
MD
1105 * The caller must hold vm_token.
1106 * This function can block regardless.
984263bc
MD
1107 */
1108static int
0f7a3396 1109pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
4107b0c0 1110 pmap_inval_info_t info)
984263bc
MD
1111{
1112 unsigned ptepindex;
4107b0c0 1113
b12defdc
MD
1114 ASSERT_LWKT_TOKEN_HELD(vm_object_token(pmap->pm_pteobj));
1115
984263bc
MD
1116 if (va >= UPT_MIN_ADDRESS)
1117 return 0;
1118
1119 if (mpte == NULL) {
1120 ptepindex = (va >> PDRSHIFT);
b1482674
MD
1121 if ((mpte = pmap->pm_ptphint) != NULL &&
1122 mpte->pindex == ptepindex &&
1123 (mpte->flags & PG_BUSY) == 0) {
1124 ; /* use mpte */
984263bc 1125 } else {
b12defdc 1126 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
984263bc 1127 pmap->pm_ptphint = mpte;
b12defdc 1128 vm_page_wakeup(mpte);
984263bc
MD
1129 }
1130 }
1131
90244566 1132 return pmap_unwire_pte(pmap, mpte, info);
984263bc
MD
1133}
1134
54a764e8 1135/*
fbbaeba3
MD
1136 * Initialize pmap0/vmspace0. This pmap is not added to pmap_list because
1137 * it, and IdlePTD, represents the template used to update all other pmaps.
1138 *
1139 * On architectures where the kernel pmap is not integrated into the user
1140 * process pmap, this pmap represents the process pmap, not the kernel pmap.
1141 * kernel_pmap should be used to directly access the kernel_pmap.
4107b0c0
MD
1142 *
1143 * No requirements.
54a764e8 1144 */
984263bc 1145void
840de426 1146pmap_pinit0(struct pmap *pmap)
984263bc
MD
1147{
1148 pmap->pm_pdir =
e4846942 1149 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
24712b90 1150 pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
984263bc
MD
1151 pmap->pm_count = 1;
1152 pmap->pm_active = 0;
be3aecf7 1153 pmap->pm_cached = 0;
984263bc
MD
1154 pmap->pm_ptphint = NULL;
1155 TAILQ_INIT(&pmap->pm_pvlist);
b12defdc
MD
1156 TAILQ_INIT(&pmap->pm_pvlist_free);
1157 spin_init(&pmap->pm_spin);
1158 lwkt_token_init(&pmap->pm_token, "pmap_tok");
984263bc
MD
1159 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1160}
1161
1162/*
1163 * Initialize a preallocated and zeroed pmap structure,
1164 * such as one in a vmspace structure.
4107b0c0
MD
1165 *
1166 * No requirements.
984263bc
MD
1167 */
1168void
840de426 1169pmap_pinit(struct pmap *pmap)
984263bc
MD
1170{
1171 vm_page_t ptdpg;
1172
1173 /*
1174 * No need to allocate page table space yet but we do need a valid
1175 * page directory table.
1176 */
b5b32410 1177 if (pmap->pm_pdir == NULL) {
984263bc 1178 pmap->pm_pdir =
e4846942 1179 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
b5b32410 1180 }
984263bc
MD
1181
1182 /*
c3834cb2 1183 * Allocate an object for the ptes
984263bc
MD
1184 */
1185 if (pmap->pm_pteobj == NULL)
c3834cb2 1186 pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
984263bc
MD
1187
1188 /*
c3834cb2
MD
1189 * Allocate the page directory page, unless we already have
1190 * one cached. If we used the cached page the wire_count will
1191 * already be set appropriately.
984263bc 1192 */
c3834cb2
MD
1193 if ((ptdpg = pmap->pm_pdirm) == NULL) {
1194 ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
d2d8515b
MD
1195 VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
1196 VM_ALLOC_ZERO);
c3834cb2 1197 pmap->pm_pdirm = ptdpg;
b12defdc
MD
1198 vm_page_flag_clear(ptdpg, PG_MAPPED);
1199 vm_page_wire(ptdpg);
d2d8515b 1200 KKASSERT(ptdpg->valid == VM_PAGE_BITS_ALL);
c3834cb2 1201 pmap_kenter((vm_offset_t)pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
b12defdc 1202 vm_page_wakeup(ptdpg);
c3834cb2 1203 }
984263bc 1204 pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
984263bc
MD
1205
1206 /* install self-referential address mapping entry */
1207 *(unsigned *) (pmap->pm_pdir + PTDPTDI) =
1208 VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
1209
1210 pmap->pm_count = 1;
1211 pmap->pm_active = 0;
be3aecf7 1212 pmap->pm_cached = 0;
984263bc
MD
1213 pmap->pm_ptphint = NULL;
1214 TAILQ_INIT(&pmap->pm_pvlist);
b12defdc
MD
1215 TAILQ_INIT(&pmap->pm_pvlist_free);
1216 spin_init(&pmap->pm_spin);
1217 lwkt_token_init(&pmap->pm_token, "pmap_tok");
984263bc 1218 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
eec2b734 1219 pmap->pm_stats.resident_count = 1;
984263bc
MD
1220}
1221
1222/*
c3834cb2
MD
1223 * Clean up a pmap structure so it can be physically freed. This routine
1224 * is called by the vmspace dtor function. A great deal of pmap data is
1225 * left passively mapped to improve vmspace management so we have a bit
1226 * of cleanup work to do here.
4107b0c0
MD
1227 *
1228 * No requirements.
e3161323
MD
1229 */
1230void
1231pmap_puninit(pmap_t pmap)
1232{
c3834cb2
MD
1233 vm_page_t p;
1234
e3161323 1235 KKASSERT(pmap->pm_active == 0);
c3834cb2
MD
1236 if ((p = pmap->pm_pdirm) != NULL) {
1237 KKASSERT(pmap->pm_pdir != NULL);
1238 pmap_kremove((vm_offset_t)pmap->pm_pdir);
b12defdc 1239 vm_page_busy_wait(p, FALSE, "pgpun");
90244566 1240 vm_page_unwire(p, 0);
c3834cb2
MD
1241 vm_page_free_zero(p);
1242 pmap->pm_pdirm = NULL;
1243 }
e3161323
MD
1244 if (pmap->pm_pdir) {
1245 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pdir, PAGE_SIZE);
1246 pmap->pm_pdir = NULL;
1247 }
1248 if (pmap->pm_pteobj) {
1249 vm_object_deallocate(pmap->pm_pteobj);
1250 pmap->pm_pteobj = NULL;
1251 }
1252}
1253
1254/*
984263bc
MD
1255 * Wire in kernel global address entries. To avoid a race condition
1256 * between pmap initialization and pmap_growkernel, this procedure
54a764e8
MD
1257 * adds the pmap to the master list (which growkernel scans to update),
1258 * then copies the template.
4107b0c0
MD
1259 *
1260 * No requirements.
984263bc
MD
1261 */
1262void
840de426 1263pmap_pinit2(struct pmap *pmap)
984263bc 1264{
b12defdc
MD
1265 /*
1266 * XXX copies current process, does not fill in MPPTDI
1267 */
1268 spin_lock(&pmap_spin);
54a764e8 1269 TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
984263bc 1270 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
b12defdc 1271 spin_unlock(&pmap_spin);
984263bc
MD
1272}
1273
344ad853 1274/*
eec2b734 1275 * Attempt to release and free a vm_page in a pmap. Returns 1 on success,
344ad853 1276 * 0 on failure (if the procedure had to sleep).
c3834cb2
MD
1277 *
1278 * When asked to remove the page directory page itself, we actually just
1279 * leave it cached so we do not have to incur the SMP inval overhead of
1280 * removing the kernel mapping. pmap_puninit() will take care of it.
4107b0c0
MD
1281 *
1282 * The caller must hold vm_token.
1283 * This function can block regardless.
344ad853 1284 */
984263bc 1285static int
840de426 1286pmap_release_free_page(struct pmap *pmap, vm_page_t p)
984263bc
MD
1287{
1288 unsigned *pde = (unsigned *) pmap->pm_pdir;
4107b0c0 1289
984263bc
MD
1290 /*
1291 * This code optimizes the case of freeing non-busy
1292 * page-table pages. Those pages are zero now, and
1293 * might as well be placed directly into the zero queue.
1294 */
b12defdc
MD
1295 if (vm_page_busy_try(p, FALSE)) {
1296 vm_page_sleep_busy(p, FALSE, "pmaprl");
984263bc 1297 return 0;
b12defdc 1298 }
984263bc 1299
eec2b734 1300 KKASSERT(pmap->pm_stats.resident_count > 0);
2247fe02 1301 KKASSERT(pde[p->pindex]);
984263bc 1302
b1482674
MD
1303 /*
1304 * page table page's wire_count must be 1. Caller is the pmap
1305 * termination code which holds the pm_pteobj, there is a race
1306 * if someone else is trying to hold the VM object in order to
1307 * clean up a wire_count.
1308 */
90244566 1309 if (p->wire_count != 1) {
b1482674
MD
1310 if (pmap->pm_pteobj->hold_count <= 1)
1311 panic("pmap_release: freeing wired page table page");
1312 kprintf("pmap_release_free_page: unwire race detected\n");
1313 vm_page_wakeup(p);
1314 tsleep(p, 0, "pmapx", 1);
1315 return 0;
984263bc 1316 }
b1482674
MD
1317
1318 /*
1319 * Remove the page table page from the processes address space.
1320 */
1321 pmap->pm_cached = 0;
1322 pde[p->pindex] = 0;
1323 --pmap->pm_stats.resident_count;
c3834cb2
MD
1324 if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
1325 pmap->pm_ptphint = NULL;
1326
984263bc 1327 /*
c3834cb2
MD
1328 * We leave the page directory page cached, wired, and mapped in
1329 * the pmap until the dtor function (pmap_puninit()) gets called.
1330 * However, still clean it up so we can set PG_ZERO.
c1692ddf
MD
1331 *
1332 * The pmap has already been removed from the pmap_list in the
1333 * PTDPTDI case.
984263bc
MD
1334 */
1335 if (p->pindex == PTDPTDI) {
1336 bzero(pde + KPTDI, nkpt * PTESIZE);
9388fcaa 1337 bzero(pde + MPPTDI, (NPDEPG - MPPTDI) * PTESIZE);
c3834cb2
MD
1338 vm_page_flag_set(p, PG_ZERO);
1339 vm_page_wakeup(p);
1340 } else {
92ba8d28
MD
1341 /*
1342 * This case can occur if a pmap_unwire_pte() loses a race
1343 * while the page is unbusied.
1344 */
1345 /*panic("pmap_release: page should already be gone %p", p);*/
1346 vm_page_flag_clear(p, PG_MAPPED);
90244566 1347 vm_page_unwire(p, 0);
c3834cb2 1348 vm_page_free_zero(p);
984263bc 1349 }
984263bc
MD
1350 return 1;
1351}
1352
1353/*
4107b0c0
MD
1354 * This routine is called if the page table page is not mapped correctly.
1355 *
1356 * The caller must hold vm_token.
984263bc
MD
1357 */
1358static vm_page_t
840de426 1359_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
984263bc 1360{
480c83b6 1361 vm_offset_t ptepa;
984263bc
MD
1362 vm_page_t m;
1363
1364 /*
d2d8515b
MD
1365 * Find or fabricate a new pagetable page. Setting VM_ALLOC_ZERO
1366 * will zero any new page and mark it valid.
984263bc
MD
1367 */
1368 m = vm_page_grab(pmap->pm_pteobj, ptepindex,
d2d8515b 1369 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
984263bc
MD
1370
1371 KASSERT(m->queue == PQ_NONE,
1372 ("_pmap_allocpte: %p->queue != PQ_NONE", m));
1373
eec2b734 1374 /*
90244566 1375 * Increment the wire count for the page we will be returning to
eec2b734
MD
1376 * the caller.
1377 */
90244566 1378 vm_page_wire(m);
eec2b734
MD
1379
1380 /*
1381 * It is possible that someone else got in and mapped by the page
1382 * directory page while we were blocked, if so just unbusy and
90244566 1383 * return the wired page.
eec2b734
MD
1384 */
1385 if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
1386 KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
1387 vm_page_wakeup(m);
1388 return(m);
1389 }
1390
984263bc
MD
1391 /*
1392 * Map the pagetable page into the process address space, if
1393 * it isn't already there.
be3aecf7
MD
1394 *
1395 * NOTE: For safety clear pm_cached for all cpus including the
1396 * current one when adding a PDE to the map.
984263bc 1397 */
eec2b734 1398 ++pmap->pm_stats.resident_count;
984263bc
MD
1399
1400 ptepa = VM_PAGE_TO_PHYS(m);
1401 pmap->pm_pdir[ptepindex] =
1402 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
be3aecf7 1403 pmap->pm_cached = 0;
984263bc
MD
1404
1405 /*
1406 * Set the page table hint
1407 */
1408 pmap->pm_ptphint = m;
984263bc
MD
1409 vm_page_flag_set(m, PG_MAPPED);
1410 vm_page_wakeup(m);
1411
1412 return m;
1413}
1414
4107b0c0
MD
1415/*
1416 * Allocate a page table entry for a va.
1417 *
1418 * The caller must hold vm_token.
1419 */
984263bc 1420static vm_page_t
840de426 1421pmap_allocpte(pmap_t pmap, vm_offset_t va)
984263bc
MD
1422{
1423 unsigned ptepindex;
1424 vm_offset_t ptepa;
b1482674 1425 vm_page_t mpte;
984263bc 1426
b12defdc
MD
1427 ASSERT_LWKT_TOKEN_HELD(vm_object_token(pmap->pm_pteobj));
1428
984263bc
MD
1429 /*
1430 * Calculate pagetable page index
1431 */
1432 ptepindex = va >> PDRSHIFT;
1433
1434 /*
1435 * Get the page directory entry
1436 */
1437 ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
1438
1439 /*
1440 * This supports switching from a 4MB page to a
1441 * normal 4K page.
1442 */
1443 if (ptepa & PG_PS) {
1444 pmap->pm_pdir[ptepindex] = 0;
1445 ptepa = 0;
0f7a3396 1446 smp_invltlb();
54341a3b 1447 cpu_invltlb();
984263bc
MD
1448 }
1449
1450 /*
1451 * If the page table page is mapped, we just increment the
90244566 1452 * wire count, and activate it.
984263bc
MD
1453 */
1454 if (ptepa) {
1455 /*
1456 * In order to get the page table page, try the
1457 * hint first.
1458 */
b1482674
MD
1459 if ((mpte = pmap->pm_ptphint) != NULL &&
1460 (mpte->pindex == ptepindex) &&
1461 (mpte->flags & PG_BUSY) == 0) {
1462 vm_page_wire_quick(mpte);
984263bc 1463 } else {
b1482674
MD
1464 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
1465 pmap->pm_ptphint = mpte;
1466 vm_page_wire_quick(mpte);
1467 vm_page_wakeup(mpte);
984263bc 1468 }
b1482674 1469 return mpte;
984263bc
MD
1470 }
1471 /*
1472 * Here if the pte page isn't mapped, or if it has been deallocated.
1473 */
1474 return _pmap_allocpte(pmap, ptepindex);
1475}
1476
1477
1478/***************************************************
1f804340 1479 * Pmap allocation/deallocation routines.
984263bc
MD
1480 ***************************************************/
1481
1482/*
1483 * Release any resources held by the given physical map.
1484 * Called when a pmap initialized by pmap_pinit is being released.
1485 * Should only be called if the map contains no valid mappings.
4107b0c0 1486 *
b12defdc 1487 * Caller must hold pmap->pm_token
984263bc 1488 */
1f804340
MD
1489static int pmap_release_callback(struct vm_page *p, void *data);
1490
984263bc 1491void
840de426 1492pmap_release(struct pmap *pmap)
984263bc 1493{
984263bc 1494 vm_object_t object = pmap->pm_pteobj;
1f804340 1495 struct rb_vm_page_scan_info info;
984263bc 1496
4107b0c0
MD
1497 KASSERT(pmap->pm_active == 0,
1498 ("pmap still active! %08x", pmap->pm_active));
984263bc
MD
1499#if defined(DIAGNOSTIC)
1500 if (object->ref_count != 1)
1501 panic("pmap_release: pteobj reference count != 1");
1502#endif
1503
1f804340
MD
1504 info.pmap = pmap;
1505 info.object = object;
b12defdc
MD
1506
1507 spin_lock(&pmap_spin);
54a764e8 1508 TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
b12defdc 1509 spin_unlock(&pmap_spin);
1f804340 1510
b12defdc 1511 vm_object_hold(object);
1f804340 1512 do {
1f804340
MD
1513 info.error = 0;
1514 info.mpte = NULL;
1515 info.limit = object->generation;
1516
1517 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1518 pmap_release_callback, &info);
1519 if (info.error == 0 && info.mpte) {
1520 if (!pmap_release_free_page(pmap, info.mpte))
1521 info.error = 1;
984263bc 1522 }
1f804340 1523 } while (info.error);
2f2d9e58 1524 vm_object_drop(object);
b12defdc
MD
1525
1526 pmap->pm_cached = 0;
1f804340
MD
1527}
1528
4107b0c0
MD
1529/*
1530 * The caller must hold vm_token.
1531 */
1f804340
MD
1532static int
1533pmap_release_callback(struct vm_page *p, void *data)
1534{
1535 struct rb_vm_page_scan_info *info = data;
1536
1537 if (p->pindex == PTDPTDI) {
1538 info->mpte = p;
1539 return(0);
344ad853 1540 }
1f804340
MD
1541 if (!pmap_release_free_page(info->pmap, p)) {
1542 info->error = 1;
1543 return(-1);
1544 }
1545 if (info->object->generation != info->limit) {
1546 info->error = 1;
1547 return(-1);
1548 }
1549 return(0);
984263bc 1550}
984263bc
MD
1551
1552/*
0e5797fe 1553 * Grow the number of kernel page table entries, if needed.
4107b0c0
MD
1554 *
1555 * No requirements.
984263bc
MD
1556 */
1557void
a8cf2878 1558pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
984263bc 1559{
a8cf2878 1560 vm_offset_t addr = kend;
54a764e8 1561 struct pmap *pmap;
984263bc
MD
1562 vm_offset_t ptppaddr;
1563 vm_page_t nkpg;
1564 pd_entry_t newpdir;
1565
b12defdc 1566 vm_object_hold(kptobj);
984263bc
MD
1567 if (kernel_vm_end == 0) {
1568 kernel_vm_end = KERNBASE;
1569 nkpt = 0;
1570 while (pdir_pde(PTD, kernel_vm_end)) {
4107b0c0
MD
1571 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1572 ~(PAGE_SIZE * NPTEPG - 1);
984263bc
MD
1573 nkpt++;
1574 }
1575 }
1576 addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1577 while (kernel_vm_end < addr) {
1578 if (pdir_pde(PTD, kernel_vm_end)) {
4107b0c0
MD
1579 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1580 ~(PAGE_SIZE * NPTEPG - 1);
984263bc
MD
1581 continue;
1582 }
1583
1584 /*
1585 * This index is bogus, but out of the way
1586 */
4107b0c0
MD
1587 nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_NORMAL |
1588 VM_ALLOC_SYSTEM |
1589 VM_ALLOC_INTERRUPT);
dc1fd4b3 1590 if (nkpg == NULL)
984263bc
MD
1591 panic("pmap_growkernel: no memory to grow kernel");
1592
984263bc
MD
1593 vm_page_wire(nkpg);
1594 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1595 pmap_zero_page(ptppaddr);
1596 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1597 pdir_pde(PTD, kernel_vm_end) = newpdir;
fbbaeba3 1598 *pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;
0e5797fe
MD
1599 nkpt++;
1600
1601 /*
54a764e8 1602 * This update must be interlocked with pmap_pinit2.
0e5797fe 1603 */
b12defdc 1604 spin_lock(&pmap_spin);
54a764e8
MD
1605 TAILQ_FOREACH(pmap, &pmap_list, pm_pmnode) {
1606 *pmap_pde(pmap, kernel_vm_end) = newpdir;
1607 }
b12defdc 1608 spin_unlock(&pmap_spin);
54a764e8
MD
1609 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1610 ~(PAGE_SIZE * NPTEPG - 1);
984263bc 1611 }
b12defdc 1612 vm_object_drop(kptobj);
984263bc
MD
1613}
1614
1615/*
4107b0c0
MD
1616 * Retire the given physical map from service.
1617 *
1618 * Should only be called if the map contains no valid mappings.
1619 *
1620 * No requirements.
984263bc
MD
1621 */
1622void
840de426 1623pmap_destroy(pmap_t pmap)
984263bc 1624{
984263bc
MD
1625 if (pmap == NULL)
1626 return;
1627
4107b0c0
MD
1628 lwkt_gettoken(&vm_token);
1629 if (--pmap->pm_count == 0) {
984263bc
MD
1630 pmap_release(pmap);
1631 panic("destroying a pmap is not yet implemented");
1632 }
4107b0c0 1633 lwkt_reltoken(&vm_token);
984263bc
MD
1634}
1635
1636/*
4107b0c0
MD
1637 * Add a reference to the specified pmap.
1638 *
1639 * No requirements.
984263bc
MD
1640 */
1641void
840de426 1642pmap_reference(pmap_t pmap)
984263bc 1643{
4107b0c0
MD
1644 if (pmap) {
1645 lwkt_gettoken(&vm_token);
1646 ++pmap->pm_count;
1647 lwkt_reltoken(&vm_token);
984263bc
MD
1648 }
1649}
1650
1651/***************************************************
4107b0c0 1652 * page management routines.
984263bc
MD
1653 ***************************************************/
1654
1655/*
8a8d5d85
MD
1656 * free the pv_entry back to the free list. This function may be
1657 * called from an interrupt.
4107b0c0
MD
1658 *
1659 * The caller must hold vm_token.
984263bc
MD
1660 */
1661static PMAP_INLINE void
840de426 1662free_pv_entry(pv_entry_t pv)
984263bc 1663{
2bb9cc6f
MD
1664 struct mdglobaldata *gd;
1665
5926987a
MD
1666#ifdef PMAP_DEBUG
1667 KKASSERT(pv->pv_m != NULL);
1668 pv->pv_m = NULL;
1669#endif
2bb9cc6f 1670 gd = mdcpu;
984263bc 1671 pv_entry_count--;
2bb9cc6f
MD
1672 if (gd->gd_freepv == NULL)
1673 gd->gd_freepv = pv;
1674 else
1675 zfree(pvzone, pv);
984263bc
MD
1676}
1677
1678/*
1679 * get a new pv_entry, allocating a block from the system
2bb9cc6f
MD
1680 * when needed. This function may be called from an interrupt thread.
1681 *
1682 * THIS FUNCTION CAN BLOCK ON THE ZALLOC TOKEN, serialization of other
1683 * tokens (aka vm_token) to be temporarily lost.
4107b0c0
MD
1684 *
1685 * The caller must hold vm_token.
984263bc
MD
1686 */
1687static pv_entry_t
1688get_pv_entry(void)
1689{
2bb9cc6f
MD
1690 struct mdglobaldata *gd;
1691 pv_entry_t pv;
1692
984263bc
MD
1693 pv_entry_count++;
1694 if (pv_entry_high_water &&
20479584
MD
1695 (pv_entry_count > pv_entry_high_water) &&
1696 (pmap_pagedaemon_waken == 0)) {
984263bc
MD
1697 pmap_pagedaemon_waken = 1;
1698 wakeup (&vm_pages_needed);
1699 }
2bb9cc6f
MD
1700 gd = mdcpu;
1701 if ((pv = gd->gd_freepv) != NULL)
1702 gd->gd_freepv = NULL;
1703 else
1704 pv = zalloc(pvzone);
1705 return pv;
984263bc
MD
1706}
1707
1708/*
1709 * This routine is very drastic, but can save the system
1710 * in a pinch.
4107b0c0
MD
1711 *
1712 * No requirements.
984263bc
MD
1713 */
1714void
840de426 1715pmap_collect(void)
984263bc
MD
1716{
1717 int i;
1718 vm_page_t m;
1719 static int warningdone=0;
1720
1721 if (pmap_pagedaemon_waken == 0)
1722 return;
4107b0c0 1723 lwkt_gettoken(&vm_token);
20479584 1724 pmap_pagedaemon_waken = 0;
984263bc
MD
1725
1726 if (warningdone < 5) {
948209ce
MD
1727 kprintf("pmap_collect: collecting pv entries -- "
1728 "suggest increasing PMAP_SHPGPERPROC\n");
984263bc
MD
1729 warningdone++;
1730 }
1731
b12defdc 1732 for (i = 0; i < vm_page_array_size; i++) {
984263bc 1733 m = &vm_page_array[i];
b12defdc 1734 if (m->wire_count || m->hold_count)
984263bc 1735 continue;
b12defdc
MD
1736 if (vm_page_busy_try(m, TRUE) == 0) {
1737 if (m->wire_count == 0 && m->hold_count == 0) {
1738 pmap_remove_all(m);
1739 }
1740 vm_page_wakeup(m);
4107b0c0 1741 }
984263bc 1742 }
4107b0c0 1743 lwkt_reltoken(&vm_token);
984263bc
MD
1744}
1745
1746
1747/*
b1482674
MD
1748 * Remove the pv entry and unwire the page table page related to the
1749 * pte the caller has cleared from the page table.
4107b0c0
MD
1750 *
1751 * The caller must hold vm_token.
984263bc 1752 */
984263bc 1753static int
0f7a3396 1754pmap_remove_entry(struct pmap *pmap, vm_page_t m,
4107b0c0 1755 vm_offset_t va, pmap_inval_info_t info)
984263bc
MD
1756{
1757 pv_entry_t pv;
1758 int rtval;
984263bc 1759
b1482674
MD
1760 /*
1761 * Cannot block
1762 */
4107b0c0 1763 ASSERT_LWKT_TOKEN_HELD(&vm_token);
984263bc
MD
1764 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1765 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1766 if (pmap == pv->pv_pmap && va == pv->pv_va)
1767 break;
1768 }
1769 } else {
1770 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
5926987a
MD
1771#ifdef PMAP_DEBUG
1772 KKASSERT(pv->pv_pmap == pmap);
1773#endif
1774 if (va == pv->pv_va)
984263bc
MD
1775 break;
1776 }
1777 }
5926987a 1778 KKASSERT(pv);
984263bc 1779
b1482674
MD
1780 /*
1781 * Cannot block
1782 */
984263bc 1783 rtval = 0;
5926987a
MD
1784 test_m_maps_pv(m, pv);
1785 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1786 m->md.pv_list_count--;
cef01e15
MD
1787 if (m->object)
1788 atomic_add_int(&m->object->agg_pv_list_count, -1);
5926987a
MD
1789 if (TAILQ_EMPTY(&m->md.pv_list))
1790 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1791 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1792 ++pmap->pm_generation;
b1482674
MD
1793
1794 /*
1795 * This can block.
1796 */
b12defdc 1797 vm_object_hold(pmap->pm_pteobj);
5926987a 1798 rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
b12defdc 1799 vm_object_drop(pmap->pm_pteobj);
5926987a 1800 free_pv_entry(pv);
b12defdc 1801
984263bc
MD
1802 return rtval;
1803}
1804
1805/*
4107b0c0
MD
1806 * Create a pv entry for page at pa for (pmap, va).
1807 *
1808 * The caller must hold vm_token.
984263bc
MD
1809 */
1810static void
2bb9cc6f
MD
1811pmap_insert_entry(pmap_t pmap, pv_entry_t pv, vm_offset_t va,
1812 vm_page_t mpte, vm_page_t m)
984263bc 1813{
5926987a
MD
1814#ifdef PMAP_DEBUG
1815 KKASSERT(pv->pv_m == NULL);
1816 pv->pv_m = m;
1817#endif
984263bc
MD
1818 pv->pv_va = va;
1819 pv->pv_pmap = pmap;
1820 pv->pv_ptem = mpte;
1821
1822 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1823 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
5926987a 1824 ++pmap->pm_generation;
984263bc 1825 m->md.pv_list_count++;
cef01e15
MD
1826 if (m->object)
1827 atomic_add_int(&m->object->agg_pv_list_count, 1);
984263bc
MD
1828}
1829
1830/*
5926987a
MD
1831 * pmap_remove_pte: do the things to unmap a page in a process.
1832 *
4107b0c0
MD
1833 * The caller must hold vm_token.
1834 *
1835 * WARNING! As with most other pmap functions this one can block, so
1836 * callers using temporary page table mappings must reload
1837 * them.
984263bc
MD
1838 */
1839static int
0f7a3396 1840pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
5926987a 1841 pmap_inval_info_t info)
984263bc
MD
1842{
1843 unsigned oldpte;
1844 vm_page_t m;
1845
5926987a 1846 ptbase_assert(pmap);
c2fb025d 1847 pmap_inval_interlock(info, pmap, va);
5926987a 1848 ptbase_assert(pmap);
984263bc
MD
1849 oldpte = loadandclear(ptq);
1850 if (oldpte & PG_W)
1851 pmap->pm_stats.wired_count -= 1;
c2fb025d 1852 pmap_inval_deinterlock(info, pmap);
90244566 1853 KKASSERT(oldpte & PG_V);
984263bc
MD
1854 /*
1855 * Machines that don't support invlpg, also don't support
0f7a3396
MD
1856 * PG_G. XXX PG_G is disabled for SMP so don't worry about
1857 * the SMP case.
984263bc
MD
1858 */
1859 if (oldpte & PG_G)
41a01a4d 1860 cpu_invlpg((void *)va);
eec2b734
MD
1861 KKASSERT(pmap->pm_stats.resident_count > 0);
1862 --pmap->pm_stats.resident_count;
984263bc
MD
1863 if (oldpte & PG_MANAGED) {
1864 m = PHYS_TO_VM_PAGE(oldpte);
1865 if (oldpte & PG_M) {
1866#if defined(PMAP_DIAGNOSTIC)
1867 if (pmap_nw_modified((pt_entry_t) oldpte)) {
d557216f
MD
1868 kprintf("pmap_remove: modified page not "
1869 "writable: va: %p, pte: 0x%lx\n",
1870 (void *)va, (long)oldpte);
984263bc
MD
1871 }
1872#endif
1873 if (pmap_track_modified(va))
1874 vm_page_dirty(m);
1875 }
1876 if (oldpte & PG_A)
1877 vm_page_flag_set(m, PG_REFERENCED);
0f7a3396 1878 return pmap_remove_entry(pmap, m, va, info);
984263bc 1879 } else {
0f7a3396 1880 return pmap_unuse_pt(pmap, va, NULL, info);
984263bc
MD
1881 }
1882
1883 return 0;
1884}
1885
1886/*
5926987a 1887 * Remove a single page from a process address space.
e0e69b7d 1888 *
4107b0c0 1889 * The caller must hold vm_token.
984263bc
MD
1890 */
1891static void
0f7a3396 1892pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
984263bc 1893{
840de426 1894 unsigned *ptq;
984263bc
MD
1895
1896 /*
90244566 1897 * If there is no pte for this address, just skip it!!! Otherwise
e0e69b7d 1898 * get a local va for mappings for this pmap and remove the entry.
984263bc 1899 */
e0e69b7d
MD
1900 if (*pmap_pde(pmap, va) != 0) {
1901 ptq = get_ptbase(pmap) + i386_btop(va);
1902 if (*ptq) {
0f7a3396 1903 pmap_remove_pte(pmap, ptq, va, info);
5926987a 1904 /* ptq invalid */
e0e69b7d 1905 }
984263bc 1906 }
984263bc
MD
1907}
1908
1909/*
4107b0c0 1910 * Remove the given range of addresses from the specified map.
984263bc 1911 *
4107b0c0
MD
1912 * It is assumed that the start and end are properly rounded to the page
1913 * size.
e0e69b7d 1914 *
4107b0c0 1915 * No requirements.
984263bc
MD
1916 */
1917void
840de426 1918pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
984263bc 1919{
840de426 1920 unsigned *ptbase;
984263bc
MD
1921 vm_offset_t pdnxt;
1922 vm_offset_t ptpaddr;
1923 vm_offset_t sindex, eindex;
0f7a3396 1924 struct pmap_inval_info info;
984263bc
MD
1925
1926 if (pmap == NULL)
1927 return;
1928
b12defdc 1929 vm_object_hold(pmap->pm_pteobj);
4107b0c0
MD
1930 lwkt_gettoken(&vm_token);
1931 if (pmap->pm_stats.resident_count == 0) {
1932 lwkt_reltoken(&vm_token);
b12defdc 1933 vm_object_drop(pmap->pm_pteobj);
984263bc 1934 return;
4107b0c0 1935 }
984263bc 1936
0f7a3396
MD
1937 pmap_inval_init(&info);
1938
984263bc
MD
1939 /*
1940 * special handling of removing one page. a very
1941 * common operation and easy to short circuit some
1942 * code.
1943 */
1944 if (((sva + PAGE_SIZE) == eva) &&
1945 (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
0f7a3396 1946 pmap_remove_page(pmap, sva, &info);
c2fb025d 1947 pmap_inval_done(&info);
4107b0c0 1948 lwkt_reltoken(&vm_token);
b12defdc 1949 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
1950 return;
1951 }
1952
984263bc
MD
1953 /*
1954 * Get a local virtual address for the mappings that are being
1955 * worked with.
1956 */
984263bc
MD
1957 sindex = i386_btop(sva);
1958 eindex = i386_btop(eva);
1959
1960 for (; sindex < eindex; sindex = pdnxt) {
1961 unsigned pdirindex;
1962
1963 /*
1964 * Calculate index for next page table.
1965 */
1966 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1967 if (pmap->pm_stats.resident_count == 0)
1968 break;
1969
1970 pdirindex = sindex / NPDEPG;
1971 if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
c2fb025d 1972 pmap_inval_interlock(&info, pmap, -1);
984263bc
MD
1973 pmap->pm_pdir[pdirindex] = 0;
1974 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
be3aecf7 1975 pmap->pm_cached = 0;
c2fb025d 1976 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
1977 continue;
1978 }
1979
1980 /*
1981 * Weed out invalid mappings. Note: we assume that the page
1982 * directory table is always allocated, and in kernel virtual.
1983 */
1984 if (ptpaddr == 0)
1985 continue;
1986
1987 /*
1988 * Limit our scan to either the end of the va represented
1989 * by the current page table page, or to the end of the
1990 * range being removed.
1991 */
1992 if (pdnxt > eindex) {
1993 pdnxt = eindex;
1994 }
1995
8790d7d8 1996 /*
5926987a
MD
1997 * NOTE: pmap_remove_pte() can block and wipe the temporary
1998 * ptbase.
8790d7d8 1999 */
0f7a3396 2000 for (; sindex != pdnxt; sindex++) {
984263bc 2001 vm_offset_t va;
8790d7d8
MD
2002
2003 ptbase = get_ptbase(pmap);
0f7a3396 2004 if (ptbase[sindex] == 0)
984263bc 2005 continue;
984263bc 2006 va = i386_ptob(sindex);
0f7a3396 2007 if (pmap_remove_pte(pmap, ptbase + sindex, va, &info))
984263bc
MD
2008 break;
2009 }
2010 }
c2fb025d 2011 pmap_inval_done(&info);
4107b0c0 2012 lwkt_reltoken(&vm_token);
b12defdc 2013 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2014}
2015
2016/*
4107b0c0
MD
2017 * Removes this physical page from all physical maps in which it resides.
2018 * Reflects back modify bits to the pager.
984263bc 2019 *
4107b0c0 2020 * No requirements.
984263bc 2021 */
984263bc 2022static void
840de426 2023pmap_remove_all(vm_page_t m)
984263bc 2024{
0f7a3396 2025 struct pmap_inval_info info;
840de426 2026 unsigned *pte, tpte;
0f7a3396 2027 pv_entry_t pv;
984263bc 2028
bee81bdd
SS
2029 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2030 return;
984263bc 2031
0f7a3396 2032 pmap_inval_init(&info);
984263bc 2033 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
eec2b734
MD
2034 KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
2035 --pv->pv_pmap->pm_stats.resident_count;
984263bc
MD
2036
2037 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
c2fb025d 2038 pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
984263bc
MD
2039 tpte = loadandclear(pte);
2040 if (tpte & PG_W)
2041 pv->pv_pmap->pm_stats.wired_count--;
c2fb025d 2042 pmap_inval_deinterlock(&info, pv->pv_pmap);
984263bc
MD
2043 if (tpte & PG_A)
2044 vm_page_flag_set(m, PG_REFERENCED);
c2fb025d
MD
2045#ifdef PMAP_DEBUG
2046 KKASSERT(PHYS_TO_VM_PAGE(tpte) == m);
2047#endif
984263bc
MD
2048
2049 /*
2050 * Update the vm_page_t clean and reference bits.
2051 */
2052 if (tpte & PG_M) {
2053#if defined(PMAP_DIAGNOSTIC)
2054 if (pmap_nw_modified((pt_entry_t) tpte)) {
d557216f
MD
2055 kprintf("pmap_remove_all: modified page "
2056 "not writable: va: %p, pte: 0x%lx\n",
2057 (void *)pv->pv_va, (long)tpte);
984263bc
MD
2058 }
2059#endif
2060 if (pmap_track_modified(pv->pv_va))
2061 vm_page_dirty(m);
2062 }
5926987a
MD
2063#ifdef PMAP_DEBUG
2064 KKASSERT(pv->pv_m == m);
2065#endif
2bb9cc6f 2066 KKASSERT(pv == TAILQ_FIRST(&m->md.pv_list));
984263bc 2067 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
8790d7d8
MD
2068 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2069 ++pv->pv_pmap->pm_generation;
984263bc 2070 m->md.pv_list_count--;
cef01e15
MD
2071 if (m->object)
2072 atomic_add_int(&m->object->agg_pv_list_count, -1);
17cde63e
MD
2073 if (TAILQ_EMPTY(&m->md.pv_list))
2074 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
b12defdc 2075 vm_object_hold(pv->pv_pmap->pm_pteobj);
0f7a3396 2076 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
b12defdc 2077 vm_object_drop(pv->pv_pmap->pm_pteobj);
984263bc
MD
2078 free_pv_entry(pv);
2079 }
17cde63e 2080 KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
c2fb025d 2081 pmap_inval_done(&info);
984263bc
MD
2082}
2083
2084/*
4107b0c0
MD
2085 * Set the physical protection on the specified range of this map
2086 * as requested.
e0e69b7d 2087 *
4107b0c0 2088 * No requirements.
984263bc
MD
2089 */
2090void
2091pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2092{
840de426 2093 unsigned *ptbase;
984263bc
MD
2094 vm_offset_t pdnxt, ptpaddr;
2095 vm_pindex_t sindex, eindex;
0f7a3396 2096 pmap_inval_info info;
984263bc
MD
2097
2098 if (pmap == NULL)
2099 return;
2100
2101 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2102 pmap_remove(pmap, sva, eva);
2103 return;
2104 }
2105
2106 if (prot & VM_PROT_WRITE)
2107 return;
2108
4107b0c0 2109 lwkt_gettoken(&vm_token);
0f7a3396 2110 pmap_inval_init(&info);
984263bc
MD
2111
2112 ptbase = get_ptbase(pmap);
2113
2114 sindex = i386_btop(sva);
2115 eindex = i386_btop(eva);
2116
2117 for (; sindex < eindex; sindex = pdnxt) {
984263bc
MD
2118 unsigned pdirindex;
2119
2120 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
2121
2122 pdirindex = sindex / NPDEPG;
2123 if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
c2fb025d 2124 pmap_inval_interlock(&info, pmap, -1);
55f2596a 2125 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
984263bc 2126 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
c2fb025d 2127 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2128 continue;
2129 }
2130
2131 /*
2132 * Weed out invalid mappings. Note: we assume that the page
2133 * directory table is always allocated, and in kernel virtual.
2134 */
2135 if (ptpaddr == 0)
2136 continue;
2137
2138 if (pdnxt > eindex) {
2139 pdnxt = eindex;
2140 }
2141
2142 for (; sindex != pdnxt; sindex++) {
984263bc 2143 unsigned pbits;
c2fb025d 2144 unsigned cbits;
984263bc
MD
2145 vm_page_t m;
2146
17cde63e 2147 /*
d5b2d319 2148 * XXX non-optimal.
17cde63e 2149 */
c2fb025d
MD
2150 pmap_inval_interlock(&info, pmap, i386_ptob(sindex));
2151again:
984263bc 2152 pbits = ptbase[sindex];
c2fb025d 2153 cbits = pbits;
984263bc
MD
2154
2155 if (pbits & PG_MANAGED) {
2156 m = NULL;
2157 if (pbits & PG_A) {
2158 m = PHYS_TO_VM_PAGE(pbits);
2159 vm_page_flag_set(m, PG_REFERENCED);
c2fb025d 2160 cbits &= ~PG_A;
984263bc
MD
2161 }
2162 if (pbits & PG_M) {
2163 if (pmap_track_modified(i386_ptob(sindex))) {
2164 if (m == NULL)
2165 m = PHYS_TO_VM_PAGE(pbits);
2166 vm_page_dirty(m);
c2fb025d 2167 cbits &= ~PG_M;
984263bc
MD
2168 }
2169 }
2170 }
c2fb025d
MD
2171 cbits &= ~PG_RW;
2172 if (pbits != cbits &&
2173 !atomic_cmpset_int(ptbase + sindex, pbits, cbits)) {
2174 goto again;
984263bc 2175 }
c2fb025d 2176 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2177 }
2178 }
c2fb025d 2179 pmap_inval_done(&info);
4107b0c0 2180 lwkt_reltoken(&vm_token);
984263bc
MD
2181}
2182
2183/*
4107b0c0
MD
2184 * Insert the given physical page (p) at the specified virtual address (v)
2185 * in the target physical map with the protection requested.
984263bc 2186 *
4107b0c0
MD
2187 * If specified, the page will be wired down, meaning that the related pte
2188 * cannot be reclaimed.
984263bc 2189 *
4107b0c0 2190 * No requirements.
984263bc
MD
2191 */
2192void
2193pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2194 boolean_t wired)
2195{
6ef943a3 2196 vm_paddr_t pa;
840de426 2197 unsigned *pte;
6ef943a3 2198 vm_paddr_t opa;
984263bc
MD
2199 vm_offset_t origpte, newpte;
2200 vm_page_t mpte;
0f7a3396 2201 pmap_inval_info info;
2bb9cc6f 2202 pv_entry_t pv;
984263bc
MD
2203
2204 if (pmap == NULL)
2205 return;
2206
2207 va &= PG_FRAME;
2208#ifdef PMAP_DIAGNOSTIC
c439ad8f 2209 if (va >= KvaEnd)
984263bc 2210 panic("pmap_enter: toobig");
d557216f
MD
2211 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) {
2212 panic("pmap_enter: invalid to pmap_enter page "
2213 "table pages (va: %p)", (void *)va);
2214 }
984263bc 2215#endif
fbbaeba3
MD
2216 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2217 kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
7ce2998e 2218 print_backtrace(-1);
fbbaeba3
MD
2219 }
2220 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2221 kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
7ce2998e 2222 print_backtrace(-1);
fbbaeba3 2223 }
984263bc 2224
b12defdc 2225 vm_object_hold(pmap->pm_pteobj);
4107b0c0
MD
2226 lwkt_gettoken(&vm_token);
2227
984263bc 2228 /*
2bb9cc6f
MD
2229 * This can block, get it before we do anything important.
2230 */
2231 if (pmap_initialized &&
2232 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2233 pv = get_pv_entry();
2234 } else {
2235 pv = NULL;
2236 }
2237
2238 /*
984263bc
MD
2239 * In the case that a page table page is not
2240 * resident, we are creating it here.
2241 */
17cde63e 2242 if (va < UPT_MIN_ADDRESS)
984263bc 2243 mpte = pmap_allocpte(pmap, va);
17cde63e
MD
2244 else
2245 mpte = NULL;
984263bc 2246
b12defdc
MD
2247 if ((prot & VM_PROT_NOSYNC) == 0)
2248 pmap_inval_init(&info);
984263bc
MD
2249 pte = pmap_pte(pmap, va);
2250
2251 /*
2252 * Page Directory table entry not valid, we need a new PT page
2253 */
2254 if (pte == NULL) {
ed20d0e3 2255 panic("pmap_enter: invalid page directory pdir=0x%lx, va=%p",
d557216f 2256 (long)pmap->pm_pdir[PTDPTDI], (void *)va);
984263bc
MD
2257 }
2258
2259 pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
2260 origpte = *(vm_offset_t *)pte;
2261 opa = origpte & PG_FRAME;
2262
2263 if (origpte & PG_PS)
2264 panic("pmap_enter: attempted pmap_enter on 4MB page");
2265
2266 /*
2267 * Mapping has not changed, must be protection or wiring change.
2268 */
2269 if (origpte && (opa == pa)) {
2270 /*
2271 * Wiring change, just update stats. We don't worry about
2272 * wiring PT pages as they remain resident as long as there
2273 * are valid mappings in them. Hence, if a user page is wired,
2274 * the PT page will be also.
2275 */
2276 if (wired && ((origpte & PG_W) == 0))
2277 pmap->pm_stats.wired_count++;
2278 else if (!wired && (origpte & PG_W))
2279 pmap->pm_stats.wired_count--;
2280
2281#if defined(PMAP_DIAGNOSTIC)
2282 if (pmap_nw_modified((pt_entry_t) origpte)) {
d557216f
MD
2283 kprintf("pmap_enter: modified page not "
2284 "writable: va: %p, pte: 0x%lx\n",
2285 (void *)va, (long )origpte);
984263bc
MD
2286 }
2287#endif
2288
2289 /*
984263bc
MD
2290 * We might be turning off write access to the page,
2291 * so we go ahead and sense modify status.
2292 */
2293 if (origpte & PG_MANAGED) {
2294 if ((origpte & PG_M) && pmap_track_modified(va)) {
2295 vm_page_t om;
2296 om = PHYS_TO_VM_PAGE(opa);
2297 vm_page_dirty(om);
2298 }
2299 pa |= PG_MANAGED;
17cde63e 2300 KKASSERT(m->flags & PG_MAPPED);
984263bc
MD
2301 }
2302 goto validate;
2303 }
2304 /*
2305 * Mapping has changed, invalidate old range and fall through to
2306 * handle validating new mapping.
5926987a
MD
2307 *
2308 * Since we have a ref on the page directory page pmap_pte()
2309 * will always return non-NULL.
2310 *
2311 * NOTE: pmap_remove_pte() can block and cause the temporary ptbase
2312 * to get wiped. reload the ptbase. I'm not sure if it is
2313 * also possible to race another pmap_enter() but check for
2314 * that case too.
984263bc 2315 */
5926987a 2316 while (opa) {
984263bc 2317 int err;
5926987a
MD
2318
2319 KKASSERT((origpte & PG_FRAME) ==
2320 (*(vm_offset_t *)pte & PG_FRAME));
0f7a3396 2321 err = pmap_remove_pte(pmap, pte, va, &info);
984263bc 2322 if (err)
d557216f 2323 panic("pmap_enter: pte vanished, va: %p", (void *)va);
5926987a
MD
2324 pte = pmap_pte(pmap, va);
2325 origpte = *(vm_offset_t *)pte;
2326 opa = origpte & PG_FRAME;
2327 if (opa) {
2328 kprintf("pmap_enter: Warning, raced pmap %p va %p\n",
2329 pmap, (void *)va);
2330 }
984263bc
MD
2331 }
2332
2333 /*
2334 * Enter on the PV list if part of our managed memory. Note that we
2335 * raise IPL while manipulating pv_table since pmap_enter can be
2336 * called at interrupt time.
2337 */
2338 if (pmap_initialized &&
2339 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2bb9cc6f
MD
2340 pmap_insert_entry(pmap, pv, va, mpte, m);
2341 pv = NULL;
5926987a 2342 ptbase_assert(pmap);
984263bc 2343 pa |= PG_MANAGED;
17cde63e 2344 vm_page_flag_set(m, PG_MAPPED);
984263bc
MD
2345 }
2346
2347 /*
2348 * Increment counters
2349 */
eec2b734 2350 ++pmap->pm_stats.resident_count;
984263bc
MD
2351 if (wired)
2352 pmap->pm_stats.wired_count++;
5926987a 2353 KKASSERT(*pte == 0);
984263bc
MD
2354
2355validate:
2356 /*
2357 * Now validate mapping with desired protection/wiring.
2358 */
5926987a 2359 ptbase_assert(pmap);
984263bc
MD
2360 newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
2361
2362 if (wired)
2363 newpte |= PG_W;
2364 if (va < UPT_MIN_ADDRESS)
2365 newpte |= PG_U;
fbbaeba3 2366 if (pmap == &kernel_pmap)
984263bc
MD
2367 newpte |= pgeflag;
2368
2369 /*
2bb9cc6f
MD
2370 * If the mapping or permission bits are different, we need
2371 * to update the pte. If the pte is already present we have
2372 * to get rid of the extra wire-count on mpte we had obtained
2373 * above.
b1482674
MD
2374 *
2375 * mpte has a new wire_count, which also serves to prevent the
2376 * page table page from getting ripped out while we work. If we
2377 * are modifying an existing pte instead of installing a new one
2378 * we have to drop it.
984263bc
MD
2379 */
2380 if ((origpte & ~(PG_M|PG_A)) != newpte) {
b12defdc
MD
2381 if (prot & VM_PROT_NOSYNC)
2382 cpu_invlpg((void *)va);
2383 else
2384 pmap_inval_interlock(&info, pmap, va);
5926987a 2385 ptbase_assert(pmap);
2bb9cc6f
MD
2386
2387 if (*pte) {
2388 KKASSERT((*pte & PG_FRAME) == (newpte & PG_FRAME));
2389 if (vm_page_unwire_quick(mpte))
2390 panic("pmap_enter: Insufficient wire_count");
2391 }
2392
984263bc 2393 *pte = newpte | PG_A;
b12defdc
MD
2394 if ((prot & VM_PROT_NOSYNC) == 0)
2395 pmap_inval_deinterlock(&info, pmap);
17cde63e
MD
2396 if (newpte & PG_RW)
2397 vm_page_flag_set(m, PG_WRITEABLE);
b1482674
MD
2398 } else {
2399 if (*pte) {
2400 KKASSERT((*pte & PG_FRAME) == (newpte & PG_FRAME));
2401 if (vm_page_unwire_quick(mpte))
2402 panic("pmap_enter: Insufficient wire_count");
2403 }
984263bc 2404 }
b1482674
MD
2405
2406 /*
2407 * NOTE: mpte invalid after this point if we block.
2408 */
c695044a 2409 KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
b12defdc
MD
2410 if ((prot & VM_PROT_NOSYNC) == 0)
2411 pmap_inval_done(&info);
2bb9cc6f
MD
2412 if (pv)
2413 free_pv_entry(pv);
4107b0c0 2414 lwkt_reltoken(&vm_token);
b12defdc 2415 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2416}
2417
2418/*
17cde63e
MD
2419 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
2420 * This code also assumes that the pmap has no pre-existing entry for this
2421 * VA.
2422 *
2423 * This code currently may only be used on user pmaps, not kernel_pmap.
4107b0c0
MD
2424 *
2425 * No requirements.
984263bc 2426 */
1b9d3514 2427void
17cde63e 2428pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
984263bc
MD
2429{
2430 unsigned *pte;
6ef943a3 2431 vm_paddr_t pa;
17cde63e
MD
2432 vm_page_t mpte;
2433 unsigned ptepindex;
2434 vm_offset_t ptepa;
0f7a3396 2435 pmap_inval_info info;
2bb9cc6f 2436 pv_entry_t pv;
0f7a3396 2437
b12defdc 2438 vm_object_hold(pmap->pm_pteobj);
4107b0c0 2439 lwkt_gettoken(&vm_token);
2bb9cc6f
MD
2440
2441 /*
2442 * This can block, get it before we do anything important.
2443 */
2444 if (pmap_initialized &&
2445 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2446 pv = get_pv_entry();
2447 } else {
2448 pv = NULL;
2449 }
2450
0f7a3396 2451 pmap_inval_init(&info);
984263bc 2452
fbbaeba3
MD
2453 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2454 kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
7ce2998e 2455 print_backtrace(-1);
fbbaeba3
MD
2456 }
2457 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2458 kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
7ce2998e 2459 print_backtrace(-1);
fbbaeba3
MD
2460 }
2461
17cde63e
MD
2462 KKASSERT(va < UPT_MIN_ADDRESS); /* assert used on user pmaps only */
2463
984263bc 2464 /*
17cde63e
MD
2465 * Calculate the page table page (mpte), allocating it if necessary.
2466 *
2467 * A held page table page (mpte), or NULL, is passed onto the
2468 * section following.
984263bc
MD
2469 */
2470 if (va < UPT_MIN_ADDRESS) {
984263bc
MD
2471 /*
2472 * Calculate pagetable page index
2473 */
2474 ptepindex = va >> PDRSHIFT;
17cde63e
MD
2475
2476 do {
984263bc
MD
2477 /*
2478 * Get the page directory entry
2479 */
2480 ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
2481
2482 /*
2483 * If the page table page is mapped, we just increment
90244566 2484 * the wire count, and activate it.
984263bc
MD
2485 */
2486 if (ptepa) {
2487 if (ptepa & PG_PS)
2488 panic("pmap_enter_quick: unexpected mapping into 4MB page");
b1482674
MD
2489 if ((mpte = pmap->pm_ptphint) != NULL &&
2490 (mpte->pindex == ptepindex) &&
2491 (mpte->flags & PG_BUSY) == 0) {
2bb9cc6f 2492 vm_page_wire_quick(mpte);
984263bc 2493 } else {
2bb9cc6f
MD
2494 mpte = pmap_page_lookup(pmap->pm_pteobj,
2495 ptepindex);
984263bc 2496 pmap->pm_ptphint = mpte;
2bb9cc6f 2497 vm_page_wire_quick(mpte);
b12defdc 2498 vm_page_wakeup(mpte);
984263bc 2499 }
984263bc
MD
2500 } else {
2501 mpte = _pmap_allocpte(pmap, ptepindex);
2502 }
17cde63e 2503 } while (mpte == NULL);
984263bc
MD
2504 } else {
2505 mpte = NULL;
17cde63e 2506 /* this code path is not yet used */
984263bc
MD
2507 }
2508
2509 /*
17cde63e
MD
2510 * With a valid (and held) page directory page, we can just use
2511 * vtopte() to get to the pte. If the pte is already present
2512 * we do not disturb it.
984263bc
MD
2513 */
2514 pte = (unsigned *)vtopte(va);
17cde63e 2515 if (*pte & PG_V) {
17cde63e
MD
2516 pa = VM_PAGE_TO_PHYS(m);
2517 KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
c2fb025d 2518 pmap_inval_done(&info);
b1482674
MD
2519 if (mpte)
2520 pmap_unwire_pte(pmap, mpte, &info);
4107b0c0 2521 lwkt_reltoken(&vm_token);
b12defdc 2522 vm_object_drop(pmap->pm_pteobj);
b1482674 2523 if (pv) {
2bb9cc6f 2524 free_pv_entry(pv);
b1482674
MD
2525 /* pv = NULL; */
2526 }
17cde63e 2527 return;
984263bc
MD
2528 }
2529
2530 /*
17cde63e 2531 * Enter on the PV list if part of our managed memory
984263bc 2532 */
2bb9cc6f
MD
2533 if (pmap_initialized &&
2534 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2535 pmap_insert_entry(pmap, pv, va, mpte, m);
2536 pv = NULL;
17cde63e
MD
2537 vm_page_flag_set(m, PG_MAPPED);
2538 }
984263bc
MD
2539
2540 /*
2541 * Increment counters
2542 */
eec2b734 2543 ++pmap->pm_stats.resident_count;
984263bc
MD
2544
2545 pa = VM_PAGE_TO_PHYS(m);
2546
2547 /*
2548 * Now validate mapping with RO protection
2549 */
2550 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2551 *pte = pa | PG_V | PG_U;
2552 else
2553 *pte = pa | PG_V | PG_U | PG_MANAGED;
17cde63e 2554/* pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
c2fb025d 2555 pmap_inval_done(&info);
b1482674 2556 if (pv) {
2bb9cc6f 2557 free_pv_entry(pv);
b1482674
MD
2558 /* pv = NULL; */
2559 }
4107b0c0 2560 lwkt_reltoken(&vm_token);
b12defdc 2561 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2562}
2563
2564/*
2565 * Make a temporary mapping for a physical address. This is only intended
2566 * to be used for panic dumps.
4107b0c0 2567 *
fb8345e6
MD
2568 * The caller is responsible for calling smp_invltlb().
2569 *
4107b0c0 2570 * No requirements.
984263bc
MD
2571 */
2572void *
8e5ea5f7 2573pmap_kenter_temporary(vm_paddr_t pa, long i)
984263bc 2574{
fb8345e6 2575 pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
984263bc
MD
2576 return ((void *)crashdumpmap);
2577}
2578
2579#define MAX_INIT_PT (96)
06ecca5a 2580
984263bc 2581/*
06ecca5a
MD
2582 * This routine preloads the ptes for a given object into the specified pmap.
2583 * This eliminates the blast of soft faults on process startup and
2584 * immediately after an mmap.
4107b0c0
MD
2585 *
2586 * No requirements.
984263bc 2587 */
1f804340
MD
2588static int pmap_object_init_pt_callback(vm_page_t p, void *data);
2589
984263bc 2590void
083a7402
MD
2591pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
2592 vm_object_t object, vm_pindex_t pindex,
2593 vm_size_t size, int limit)
984263bc 2594{
1f804340 2595 struct rb_vm_page_scan_info info;
287ebb09 2596 struct lwp *lp;
984263bc 2597 int psize;
984263bc 2598
54a764e8
MD
2599 /*
2600 * We can't preinit if read access isn't set or there is no pmap
2601 * or object.
2602 */
083a7402 2603 if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
984263bc
MD
2604 return;
2605
54a764e8
MD
2606 /*
2607 * We can't preinit if the pmap is not the current pmap
2608 */
287ebb09
MD
2609 lp = curthread->td_lwp;
2610 if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
54a764e8
MD
2611 return;
2612
984263bc
MD
2613 psize = i386_btop(size);
2614
2615 if ((object->type != OBJT_VNODE) ||
2616 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
2617 (object->resident_page_count > MAX_INIT_PT))) {
2618 return;
2619 }
2620
2621 if (psize + pindex > object->size) {
2622 if (object->size < pindex)
2623 return;
2624 psize = object->size - pindex;
2625 }
2626
1f804340
MD
2627 if (psize == 0)
2628 return;
06ecca5a 2629
984263bc 2630 /*
1f804340
MD
2631 * Use a red-black scan to traverse the requested range and load
2632 * any valid pages found into the pmap.
06ecca5a 2633 *
9acd5bbb
MD
2634 * We cannot safely scan the object's memq unless we are in a
2635 * critical section since interrupts can remove pages from objects.
984263bc 2636 */
1f804340
MD
2637 info.start_pindex = pindex;
2638 info.end_pindex = pindex + psize - 1;
2639 info.limit = limit;
2640 info.mpte = NULL;
2641 info.addr = addr;
2642 info.pmap = pmap;
2643
2f2d9e58 2644 vm_object_hold(object);
1f804340
MD
2645 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2646 pmap_object_init_pt_callback, &info);
2f2d9e58 2647 vm_object_drop(object);
1f804340 2648}
06ecca5a 2649
4107b0c0
MD
2650/*
2651 * The caller must hold vm_token.
2652 */
1f804340
MD
2653static
2654int
2655pmap_object_init_pt_callback(vm_page_t p, void *data)
2656{
2657 struct rb_vm_page_scan_info *info = data;
2658 vm_pindex_t rel_index;
2659 /*
2660 * don't allow an madvise to blow away our really
2661 * free pages allocating pv entries.
2662 */
2663 if ((info->limit & MAP_PREFAULT_MADVISE) &&
2664 vmstats.v_free_count < vmstats.v_free_reserved) {
2665 return(-1);
984263bc 2666 }
0d987a03
MD
2667
2668 /*
2669 * Ignore list markers and ignore pages we cannot instantly
2670 * busy (while holding the object token).
2671 */
2672 if (p->flags & PG_MARKER)
2673 return 0;
b12defdc
MD
2674 if (vm_page_busy_try(p, TRUE))
2675 return 0;
1f804340 2676 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
b12defdc 2677 (p->flags & PG_FICTITIOUS) == 0) {
1f804340
MD
2678 if ((p->queue - p->pc) == PQ_CACHE)
2679 vm_page_deactivate(p);
1f804340 2680 rel_index = p->pindex - info->start_pindex;
17cde63e
MD
2681 pmap_enter_quick(info->pmap,
2682 info->addr + i386_ptob(rel_index), p);
1f804340 2683 }
b12defdc 2684 vm_page_wakeup(p);
1f804340 2685 return(0);
984263bc
MD
2686}
2687
2688/*
1b9d3514
MD
2689 * Return TRUE if the pmap is in shape to trivially
2690 * pre-fault the specified address.
2691 *
2692 * Returns FALSE if it would be non-trivial or if a
2693 * pte is already loaded into the slot.
4107b0c0
MD
2694 *
2695 * No requirements.
984263bc 2696 */
1b9d3514
MD
2697int
2698pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
984263bc 2699{
1b9d3514 2700 unsigned *pte;
4107b0c0 2701 int ret;
984263bc 2702
4107b0c0
MD
2703 lwkt_gettoken(&vm_token);
2704 if ((*pmap_pde(pmap, addr)) == 0) {
2705 ret = 0;
2706 } else {
2707 pte = (unsigned *) vtopte(addr);
2708 ret = (*pte) ? 0 : 1;
2709 }
2710 lwkt_reltoken(&vm_token);
2711 return(ret);
984263bc
MD
2712}
2713
2714/*
4107b0c0
MD
2715 * Change the wiring attribute for a map/virtual-adderss pair. The mapping
2716 * must already exist.
2717 *
2718 * No requirements.
984263bc
MD
2719 */
2720void
840de426 2721pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
984263bc 2722{
840de426 2723 unsigned *pte;
984263bc
MD
2724
2725 if (pmap == NULL)
2726 return;
2727
4107b0c0 2728 lwkt_gettoken(&vm_token);
984263bc
MD
2729 pte = pmap_pte(pmap, va);
2730
2731 if (wired && !pmap_pte_w(pte))
2732 pmap->pm_stats.wired_count++;
2733 else if (!wired && pmap_pte_w(pte))
2734 pmap->pm_stats.wired_count--;
2735
2736 /*
2737 * Wiring is not a hardware characteristic so there is no need to
0f7a3396
MD
2738 * invalidate TLB. However, in an SMP environment we must use
2739 * a locked bus cycle to update the pte (if we are not using
2740 * the pmap_inval_*() API that is)... it's ok to do this for simple
2741 * wiring changes.
984263bc 2742 */
0f7a3396
MD
2743#ifdef SMP
2744 if (wired)
2745 atomic_set_int(pte, PG_W);
2746 else
2747 atomic_clear_int(pte, PG_W);
2748#else
2749 if (wired)
2750 atomic_set_int_nonlocked(pte, PG_W);
2751 else
2752 atomic_clear_int_nonlocked(pte, PG_W);
2753#endif
4107b0c0 2754 lwkt_reltoken(&vm_token);
984263bc
MD
2755}
2756
984263bc 2757/*
4107b0c0
MD
2758 * Copy the range specified by src_addr/len from the source map to the
2759 * range dst_addr/len in the destination map.
2760 *
2761 * This routine is only advisory and need not do anything.
984263bc 2762 *
4107b0c0 2763 * No requirements.
984263bc 2764 */
984263bc 2765void
840de426 2766pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
4107b0c0 2767 vm_size_t len, vm_offset_t src_addr)
984263bc 2768{
4107b0c0 2769 /* does nothing */
984263bc
MD
2770}
2771
2772/*
4107b0c0
MD
2773 * Zero the specified PA by mapping the page into KVM and clearing its
2774 * contents.
e0e69b7d 2775 *
4107b0c0 2776 * No requirements.
984263bc
MD
2777 */
2778void
6ef943a3 2779pmap_zero_page(vm_paddr_t phys)
984263bc 2780{
85100692 2781 struct mdglobaldata *gd = mdcpu;
17a9f566 2782
e0e69b7d 2783 crit_enter();
85100692
MD
2784 if (*(int *)gd->gd_CMAP3)
2785 panic("pmap_zero_page: CMAP3 busy");
85100692 2786 *(int *)gd->gd_CMAP3 =
17a9f566 2787 PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
85100692 2788 cpu_invlpg(gd->gd_CADDR3);
1fa15583 2789 bzero(gd->gd_CADDR3, PAGE_SIZE);
85100692 2790 *(int *) gd->gd_CMAP3 = 0;
e0e69b7d 2791 crit_exit();
8100156a
MD
2792}
2793
2794/*
4107b0c0 2795 * Assert that a page is empty, panic if it isn't.
8100156a 2796 *
4107b0c0 2797 * No requirements.
8100156a
MD
2798 */
2799void
2800pmap_page_assertzero(vm_paddr_t phys)
2801{
2802 struct mdglobaldata *gd = mdcpu;
2803 int i;
2804
2805 crit_enter();
2806 if (*(int *)gd->gd_CMAP3)
2807 panic("pmap_zero_page: CMAP3 busy");
2808 *(int *)gd->gd_CMAP3 =
2809 PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2810 cpu_invlpg(gd->gd_CADDR3);
2811 for (i = 0; i < PAGE_SIZE; i += 4) {
2812 if (*(int *)((char *)gd->gd_CADDR3 + i) != 0) {
ed20d0e3 2813 panic("pmap_page_assertzero() @ %p not zero!",
8100156a
MD
2814 (void *)gd->gd_CADDR3);
2815 }
2816 }
2817 *(int *) gd->gd_CMAP3 = 0;
2818 crit_exit();
984263bc
MD
2819}
2820
2821/*
4107b0c0
MD
2822 * Zero part of a physical page by mapping it into memory and clearing
2823 * its contents with bzero.
e0e69b7d 2824 *
4107b0c0 2825 * off and size may not cover an area beyond a single hardware page.
984263bc 2826 *
4107b0c0 2827 * No requirements.
984263bc
MD
2828 */
2829void
6ef943a3 2830pmap_zero_page_area(vm_paddr_t phys, int off, int size)
984263bc 2831{
85100692 2832 struct mdglobaldata *gd = mdcpu;
17a9f566 2833
e0e69b7d 2834 crit_enter();
85100692
MD
2835 if (*(int *) gd->gd_CMAP3)
2836 panic("pmap_zero_page: CMAP3 busy");
85100692
MD
2837 *(int *) gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2838 cpu_invlpg(gd->gd_CADDR3);
1fa15583 2839 bzero((char *)gd->gd_CADDR3 + off, size);
85100692 2840 *(int *) gd->gd_CMAP3 = 0;
e0e69b7d 2841 crit_exit();
984263bc
MD
2842}
2843
2844/*
4107b0c0
MD
2845 * Copy the physical page from the source PA to the target PA.
2846 * This function may be called from an interrupt. No locking
2847 * is required.
e0e69b7d 2848 *
4107b0c0 2849 * No requirements.
984263bc
MD
2850 */
2851void
6ef943a3 2852pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
984263bc 2853{
85100692 2854 struct mdglobaldata *gd = mdcpu;
17a9f566 2855
e0e69b7d 2856 crit_enter();
85100692
MD
2857 if (*(int *) gd->gd_CMAP1)
2858 panic("pmap_copy_page: CMAP1 busy");
2859 if (*(int *) gd->gd_CMAP2)
2860 panic("pmap_copy_page: CMAP2 busy");
984263bc 2861
85100692
MD
2862 *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2863 *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
984263bc 2864
85100692
MD
2865 cpu_invlpg(gd->gd_CADDR1);
2866 cpu_invlpg(gd->gd_CADDR2);
984263bc 2867
85100692 2868 bcopy(gd->gd_CADDR1, gd->gd_CADDR2, PAGE_SIZE);
984263bc 2869
85100692
MD
2870 *(int *) gd->gd_CMAP1 = 0;
2871 *(int *) gd->gd_CMAP2 = 0;
e0e69b7d 2872 crit_exit();
984263bc
MD
2873}
2874
f6bf3af1 2875/*
4107b0c0
MD
2876 * Copy the physical page from the source PA to the target PA.
2877 * This function may be called from an interrupt. No locking
2878 * is required.
f6bf3af1 2879 *
4107b0c0 2880 * No requirements.
f6bf3af1
MD
2881 */
2882void
2883pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
2884{
2885 struct mdglobaldata *gd = mdcpu;
2886
2887 crit_enter();
2888 if (*(int *) gd->gd_CMAP1)
2889 panic("pmap_copy_page: CMAP1 busy");
2890 if (*(int *) gd->gd_CMAP2)
2891 panic("pmap_copy_page: CMAP2 busy");
2892
2893 *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2894 *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
2895
2896 cpu_invlpg(gd->gd_CADDR1);
2897 cpu_invlpg(gd->gd_CADDR2);
2898
2899 bcopy((char *)gd->gd_CADDR1 + (src & PAGE_MASK),
2900 (char *)gd->gd_CADDR2 + (dst & PAGE_MASK),
2901 bytes);
2902
2903 *(int *) gd->gd_CMAP1 = 0;
2904 *(int *) gd->gd_CMAP2 = 0;
2905 crit_exit();
2906}
2907
984263bc
MD
2908/*
2909 * Returns true if the pmap's pv is one of the first
2910 * 16 pvs linked to from this page. This count may
2911 * be changed upwards or downwards in the future; it
2912 * is only necessary that true be returned for a small
2913 * subset of pmaps for proper page aging.
4107b0c0
MD
2914 *
2915 * No requirements.
984263bc
MD
2916 */
2917boolean_t
840de426 2918pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
984263bc
MD
2919{
2920 pv_entry_t pv;
2921 int loops = 0;
984263bc
MD
2922
2923 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2924 return FALSE;
2925
4107b0c0 2926 lwkt_gettoken(&vm_token);
984263bc
MD
2927 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2928 if (pv->pv_pmap == pmap) {
11502947 2929 lwkt_reltoken(&vm_token);
984263bc
MD
2930 return TRUE;
2931 }
2932 loops++;
2933 if (loops >= 16)
2934 break;
2935 }
4107b0c0 2936 lwkt_reltoken(&vm_token);
984263bc
MD
2937 return (FALSE);
2938}
2939
984263bc
MD
2940/*
2941 * Remove all pages from specified address space
2942 * this aids process exit speeds. Also, this code
2943 * is special cased for current process only, but
2944 * can have the more generic (and slightly slower)
2945 * mode enabled. This is much faster than pmap_remove
2946 * in the case of running down an entire address space.
4107b0c0
MD
2947 *
2948 * No requirements.
984263bc
MD
2949 */
2950void
840de426 2951pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
984263bc 2952{
287ebb09 2953 struct lwp *lp;
984263bc
MD
2954 unsigned *pte, tpte;
2955 pv_entry_t pv, npv;
984263bc 2956 vm_page_t m;
0f7a3396 2957 pmap_inval_info info;
4a22e893 2958 int iscurrentpmap;
8790d7d8 2959 int32_t save_generation;
984263bc 2960
287ebb09
MD
2961 lp = curthread->td_lwp;
2962 if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
4a22e893
MD
2963 iscurrentpmap = 1;
2964 else
2965 iscurrentpmap = 0;
984263bc 2966
b12defdc
MD
2967 if (pmap->pm_pteobj)
2968 vm_object_hold(pmap->pm_pteobj);
4107b0c0 2969 lwkt_gettoken(&vm_token);
0f7a3396 2970 pmap_inval_init(&info);
b12defdc 2971
4a22e893 2972 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
984263bc
MD
2973 if (pv->pv_va >= eva || pv->pv_va < sva) {
2974 npv = TAILQ_NEXT(pv, pv_plist);
2975 continue;
2976 }
2977
8790d7d8
MD
2978 KKASSERT(pmap == pv->pv_pmap);
2979
4a22e893
MD
2980 if (iscurrentpmap)
2981 pte = (unsigned *)vtopte(pv->pv_va);
2982 else
8790d7d8 2983 pte = pmap_pte_quick(pmap, pv->pv_va);
5926987a 2984 KKASSERT(*pte);
c2fb025d 2985 pmap_inval_interlock(&info, pmap, pv->pv_va);
984263bc 2986
4a22e893
MD
2987 /*
2988 * We cannot remove wired pages from a process' mapping
2989 * at this time
2990 */
17cde63e 2991 if (*pte & PG_W) {
c2fb025d 2992 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2993 npv = TAILQ_NEXT(pv, pv_plist);
2994 continue;
2995 }
2247fe02 2996 KKASSERT(*pte);
17cde63e 2997 tpte = loadandclear(pte);
c2fb025d 2998 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2999
3000 m = PHYS_TO_VM_PAGE(tpte);
5926987a 3001 test_m_maps_pv(m, pv);
984263bc
MD
3002
3003 KASSERT(m < &vm_page_array[vm_page_array_size],
3004 ("pmap_remove_pages: bad tpte %x", tpte));
3005
eec2b734
MD
3006 KKASSERT(pmap->pm_stats.resident_count > 0);
3007 --pmap->pm_stats.resident_count;
984263bc
MD
3008
3009 /*
3010 * Update the vm_page_t clean and reference bits.
3011 */
3012 if (tpte & PG_M) {
3013 vm_page_dirty(m);
3014 }
3015
984263bc 3016 npv = TAILQ_NEXT(pv, pv_plist);
5926987a
MD
3017#ifdef PMAP_DEBUG
3018 KKASSERT(pv->pv_m == m);
3019 KKASSERT(pv->pv_pmap == pmap);
3020#endif
8790d7d8
MD
3021 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
3022 save_generation = ++pmap->pm_generation;
984263bc
MD
3023
3024 m->md.pv_list_count--;
cef01e15
MD
3025 if (m->object)
3026 atomic_add_int(&m->object->agg_pv_list_count, -1);
984263bc 3027 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
17cde63e 3028 if (TAILQ_EMPTY(&m->md.pv_list))
984263bc 3029 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
984263bc 3030
8790d7d8 3031 pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
984263bc 3032 free_pv_entry(pv);
8790d7d8
MD
3033
3034 /*
3035 * Restart the scan if we blocked during the unuse or free
3036 * calls and other removals were made.
3037 */
3038 if (save_generation != pmap->pm_generation) {
3039 kprintf("Warning: pmap_remove_pages race-A avoided\n");
5926987a 3040 npv = TAILQ_FIRST(&pmap->pm_pvlist);
8790d7d8 3041 }
984263bc 3042 }
c2fb025d 3043 pmap_inval_done(&info);
4107b0c0 3044 lwkt_reltoken(&vm_token);
b12defdc
MD
3045 if (pmap->pm_pteobj)
3046 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
3047}
3048
3049/*
3050 * pmap_testbit tests bits in pte's
5e8d0349 3051 * note that the testbit/clearbit routines are inline,
984263bc 3052 * and a lot of things compile-time evaluate.
4107b0c0
MD
3053 *
3054 * The caller must hold vm_token.
984263bc
MD
3055 */
3056static boolean_t
840de426 3057pmap_testbit(vm_page_t m, int bit)
984263bc
MD
3058{
3059 pv_entry_t pv;
3060 unsigned *pte;
984263bc
MD
3061
3062 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3063 return FALSE;
3064
3065 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
3066 return FALSE;
3067
984263bc
MD
3068 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3069 /*
3070 * if the bit being tested is the modified bit, then
3071 * mark clean_map and ptes as never
3072 * modified.
3073 */
3074 if (bit & (PG_A|PG_M)) {
3075 if (!pmap_track_modified(pv->pv_va))
3076 continue;
3077 }
3078
3079#if defined(PMAP_DIAGNOSTIC)
3080 if (!pv->pv_pmap) {
d557216f
MD
3081 kprintf("Null pmap (tb) at va: %p\n",
3082 (void *)pv->pv_va);
984263bc
MD
3083 continue;
3084 }
3085#endif
3086 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
74b9d1ec 3087 if (*pte & bit) {
984263bc 3088 return TRUE;
74b9d1ec 3089 }
984263bc 3090 }
984263bc
MD
3091 return (FALSE);
3092}
3093
3094/*
4107b0c0
MD
3095 * This routine is used to modify bits in ptes
3096 *
3097 * The caller must hold vm_token.
984263bc
MD
3098 */
3099static __inline void
5e8d0349 3100pmap_clearbit(vm_page_t m, int bit)
984263bc 3101{
0f7a3396 3102 struct pmap_inval_info info;
840de426
MD
3103 pv_entry_t pv;
3104 unsigned *pte;
5e8d0349 3105 unsigned pbits;
984263bc
MD
3106
3107 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3108 return;
3109
0f7a3396 3110 pmap_inval_init(&info);
984263bc
MD
3111
3112 /*
3113 * Loop over all current mappings setting/clearing as appropos If
3114 * setting RO do we need to clear the VAC?
3115 */
3116 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3117 /*
3118 * don't write protect pager mappings
3119 */
5e8d0349 3120 if (bit == PG_RW) {
984263bc
MD
3121 if (!pmap_track_modified(pv->pv_va))
3122 continue;
3123 }
3124
3125#if defined(PMAP_DIAGNOSTIC)
3126 if (!pv->pv_pmap) {
d557216f
MD
3127 kprintf("Null pmap (cb) at va: %p\n",
3128 (void *)pv->pv_va);
984263bc
MD
3129 continue;
3130 }
3131#endif
3132
0f7a3396
MD
3133 /*
3134 * Careful here. We can use a locked bus instruction to
3135 * clear PG_A or PG_M safely but we need to synchronize
3136 * with the target cpus when we mess with PG_RW.
70fc5283
MD
3137 *
3138 * We do not have to force synchronization when clearing
3139 * PG_M even for PTEs generated via virtual memory maps,
3140 * because the virtual kernel will invalidate the pmap
3141 * entry when/if it needs to resynchronize the Modify bit.
0f7a3396 3142 */
70fc5283 3143 if (bit & PG_RW)
c2fb025d 3144 pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
17cde63e
MD
3145 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3146again:
5e8d0349
MD
3147 pbits = *pte;
3148 if (pbits & bit) {
3149 if (bit == PG_RW) {
17cde63e 3150 if (pbits & PG_M) {
5e8d0349 3151 vm_page_dirty(m);
17cde63e
MD
3152 atomic_clear_int(pte, PG_M|PG_RW);
3153 } else {
3154 /*
3155 * The cpu may be trying to set PG_M
3156 * simultaniously with our clearing
3157 * of PG_RW.
3158 */
3159 if (!atomic_cmpset_int(pte, pbits,
3160 pbits & ~PG_RW))
3161 goto again;
3162 }
5e8d0349
MD
3163 } else if (bit == PG_M) {
3164 /*
70fc5283
MD
3165 * We could also clear PG_RW here to force
3166 * a fault on write to redetect PG_M for
3167 * virtual kernels, but it isn't necessary
3168 * since virtual kernels invalidate the pte
3169 * when they clear the VPTE_M bit in their
3170 * virtual page tables.
5e8d0349 3171 */
70fc5283 3172 atomic_clear_int(pte, PG_M);
5e8d0349
MD
3173 } else {
3174 atomic_clear_int(pte, bit);
984263bc
MD
3175 }
3176 }
c2fb025d
MD
3177 if (bit & PG_RW)
3178 pmap_inval_deinterlock(&info, pv->pv_pmap);
984263bc 3179 }
c2fb025d 3180 pmap_inval_done(&info);
984263bc
MD
3181}
3182
3183/*
4107b0c0 3184 * Lower the permission for all mappings to a given page.
984263bc 3185 *