kernel - Fix pmap_remove() issue.
[dragonfly.git] / sys / platform / pc32 / i386 / pmap.c
 1/*
2 * (MPSAFE)
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
44 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
45 */
46
47/*
48 * Manages physical address maps.
49 *
50 * In most cases we hold page table pages busy in order to manipulate them.
51 */
52/*
53 * PMAP_DEBUG - see platform/pc32/include/pmap.h
54 */
55
56#include "opt_disable_pse.h"
57#include "opt_pmap.h"
58#include "opt_msgbuf.h"
59
60#include <sys/param.h>
61#include <sys/systm.h>
62#include <sys/kernel.h>
63#include <sys/proc.h>
64#include <sys/msgbuf.h>
65#include <sys/vmmeter.h>
66#include <sys/mman.h>
67#include <sys/thread.h>
68
69#include <vm/vm.h>
70#include <vm/vm_param.h>
71#include <sys/sysctl.h>
72#include <sys/lock.h>
73#include <vm/vm_kern.h>
74#include <vm/vm_page.h>
75#include <vm/vm_map.h>
76#include <vm/vm_object.h>
77#include <vm/vm_extern.h>
78#include <vm/vm_pageout.h>
79#include <vm/vm_pager.h>
80#include <vm/vm_zone.h>
81
82#include <sys/user.h>
83#include <sys/thread2.h>
84#include <sys/sysref2.h>
85#include <sys/spinlock2.h>
86#include <vm/vm_page2.h>
87
88#include <machine/cputypes.h>
89#include <machine/md_var.h>
90#include <machine/specialreg.h>
91#include <machine/smp.h>
92#include <machine_base/apic/apicreg.h>
93#include <machine/globaldata.h>
94#include <machine/pmap.h>
95#include <machine/pmap_inval.h>
96
97#define PMAP_KEEP_PDIRS
98#ifndef PMAP_SHPGPERPROC
99#define PMAP_SHPGPERPROC 200
100#define PMAP_PVLIMIT 1400000 /* i386 kvm problems */
101#endif
102
103#if defined(DIAGNOSTIC)
104#define PMAP_DIAGNOSTIC
105#endif
106
107#define MINPV 2048
108
109#if !defined(PMAP_DIAGNOSTIC)
110#define PMAP_INLINE __inline
111#else
112#define PMAP_INLINE
113#endif
114
115/*
116 * Get PDEs and PTEs for user/kernel address space
117 */
118#define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
119#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
120
121#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
122#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
123#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
124#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
125#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
126
127/*
128 * Given a map and a machine independent protection code,
129 * convert to a vax protection code.
130 */
131#define pte_prot(m, p) \
132 (protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
133static int protection_codes[8];
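/*
 * Illustrative sketch (added; not part of the original source): pte_prot()
 * simply indexes protection_codes[] with the low VM_PROT_* bits, so the
 * bits stored by i386_protection_init() can be or'd into a new pte, e.g.:
 *
 *	int bits = pte_prot(pmap, VM_PROT_READ | VM_PROT_WRITE);
 *	npte = pa | bits | PG_V;
 */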
134
135struct pmap kernel_pmap;
136static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
137
138vm_paddr_t avail_start; /* PA of first available physical page */
139vm_paddr_t avail_end; /* PA of last available physical page */
140vm_offset_t virtual_start; /* VA of first avail page (after kernel bss) */
141vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
142vm_offset_t virtual2_start;
143vm_offset_t virtual2_end;
144vm_offset_t KvaStart; /* VA start of KVA space */
145vm_offset_t KvaEnd; /* VA end of KVA space (non-inclusive) */
146vm_offset_t KvaSize; /* max size of kernel virtual address space */
147static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
148static int pgeflag; /* PG_G or-in */
149static int pseflag; /* PG_PS or-in */
150
151static vm_object_t kptobj;
152
153static int nkpt;
154vm_offset_t kernel_vm_end;
155
156/*
157 * Data for the pv entry allocation mechanism
158 */
159static vm_zone_t pvzone;
160static struct vm_zone pvzone_store;
161static struct vm_object pvzone_obj;
162static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
163static int pmap_pagedaemon_waken = 0;
164static struct pv_entry *pvinit;
165
166/*
167 * Considering all the issues I'm having with pmap caching, if breakage
168 * continues to occur, and for debugging, I've added a sysctl that will
169 * just do an unconditional invltlb.
170 */
171static int dreadful_invltlb;
172
173SYSCTL_INT(_vm, OID_AUTO, dreadful_invltlb,
 174 CTLFLAG_RW, &dreadful_invltlb, 0, "Debugging sysctl to force invltlb on pmap operations");
175
176/*
177 * All those kernel PT submaps that BSD is so fond of
178 */
179pt_entry_t *CMAP1 = NULL, *ptmmap;
180caddr_t CADDR1 = 0, ptvmmap = 0;
181static pt_entry_t *msgbufmap;
182struct msgbuf *msgbufp=NULL;
183
184/*
185 * Crashdump maps.
186 */
187static pt_entry_t *pt_crashdumpmap;
188static caddr_t crashdumpmap;
189
190extern pt_entry_t *SMPpt;
191
192static PMAP_INLINE void free_pv_entry (pv_entry_t pv);
193static unsigned * get_ptbase (pmap_t pmap);
194static pv_entry_t get_pv_entry (void);
195static void i386_protection_init (void);
196static __inline void pmap_clearbit (vm_page_t m, int bit);
197
198static void pmap_remove_all (vm_page_t m);
199static void pmap_remove_pte (struct pmap *pmap, unsigned *ptq,
200 vm_offset_t sva, pmap_inval_info_t info);
201static void pmap_remove_page (struct pmap *pmap,
202 vm_offset_t va, pmap_inval_info_t info);
203static void pmap_remove_entry (struct pmap *pmap, vm_page_t m,
204 vm_offset_t va, pmap_inval_info_t info);
205static boolean_t pmap_testbit (vm_page_t m, int bit);
206static void pmap_insert_entry (pmap_t pmap, pv_entry_t pv,
207 vm_offset_t va, vm_page_t mpte, vm_page_t m);
208
209static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);
210
211static int pmap_release_free_page (pmap_t pmap, vm_page_t p);
212static vm_page_t _pmap_allocpte (pmap_t pmap, unsigned ptepindex);
213static unsigned * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
214static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
215static void pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
216static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
217
218static unsigned pdir4mb;
219
220/*
221 * Move the kernel virtual free pointer to the next
222 * 4MB. This is used to help improve performance
223 * by using a large (4MB) page for much of the kernel
224 * (.text, .data, .bss)
225 */
226static
227vm_offset_t
228pmap_kmem_choose(vm_offset_t addr)
229{
230 vm_offset_t newaddr = addr;
231#ifndef DISABLE_PSE
232 if (cpu_feature & CPUID_PSE) {
233 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
234 }
235#endif
236 return newaddr;
237}
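/*
 * Illustrative note (added; not from the original source): with 4MB pages
 * NBPDR is 4MB, so the round-up above behaves like:
 *
 *	pmap_kmem_choose(0x00401000) -> 0x00800000	(next 4MB boundary)
 *	pmap_kmem_choose(0x00800000) -> 0x00800000	(already aligned)
 */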
238
239/*
240 * This function returns a pointer to the pte entry in the pmap and has
241 * the side effect of potentially retaining a cached mapping of the pmap.
242 *
243 * The caller must hold vm_token and the returned value is only valid
244 * until the caller blocks or releases the token.
245 */
246static
247unsigned *
248pmap_pte(pmap_t pmap, vm_offset_t va)
249{
250 unsigned *pdeaddr;
251
252 ASSERT_LWKT_TOKEN_HELD(&vm_token);
253 if (pmap) {
254 pdeaddr = (unsigned *) pmap_pde(pmap, va);
255 if (*pdeaddr & PG_PS)
256 return pdeaddr;
257 if (*pdeaddr)
258 return get_ptbase(pmap) + i386_btop(va);
259 }
260 return (0);
261}
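/*
 * Usage sketch (added for illustration; not from the original source):
 * callers must hold vm_token and must not block while using the result:
 *
 *	lwkt_gettoken(&vm_token);
 *	pte = pmap_pte(pmap, va);
 *	if (pte && (*pte & PG_V))
 *		pa = *pte & PG_FRAME;
 *	lwkt_reltoken(&vm_token);
 */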
262
263/*
264 * pmap_pte using the kernel_pmap
265 *
266 * Used for debugging, no requirements.
267 */
268unsigned *
269pmap_kernel_pte(vm_offset_t va)
270{
271 unsigned *pdeaddr;
272
273 pdeaddr = (unsigned *) pmap_pde(&kernel_pmap, va);
274 if (*pdeaddr & PG_PS)
275 return pdeaddr;
276 if (*pdeaddr)
277 return (unsigned *)vtopte(va);
278 return(0);
279}
280
281/*
282 * pmap_pte_quick:
283 *
284 * Super fast pmap_pte routine best used when scanning the pv lists.
285 * This eliminates many coarse-grained invltlb calls. Note that many of
286 * the pv list scans are across different pmaps and it is very wasteful
287 * to do an entire invltlb when checking a single mapping.
e0e69b7d 288 *
289 * Should only be called while in a critical section.
290 *
291 * The caller must hold vm_token and the returned value is only valid
292 * until the caller blocks or releases the token.
984263bc 293 */
294static
295unsigned *
296pmap_pte_quick(pmap_t pmap, vm_offset_t va)
297{
298 struct mdglobaldata *gd = mdcpu;
299 unsigned pde, newpf;
300
301 ASSERT_LWKT_TOKEN_HELD(&vm_token);
302 if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
303 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
304 unsigned index = i386_btop(va);
305 /* are we current address space or kernel? */
306 if ((pmap == &kernel_pmap) ||
307 (frame == (((unsigned) PTDpde) & PG_FRAME))) {
308 return (unsigned *) PTmap + index;
309 }
310 newpf = pde & PG_FRAME;
311 if (((*(unsigned *)gd->gd_PMAP1) & PG_FRAME) != newpf) {
312 *(unsigned *)gd->gd_PMAP1 = newpf | PG_RW | PG_V;
313 cpu_invlpg(gd->gd_PADDR1);
314 }
315 return gd->gd_PADDR1 + (index & (NPTEPG - 1));
316 }
317 return (0);
318}
319
840de426 320
984263bc 321/*
322 * Bootstrap the system enough to run with virtual memory.
323 *
324 * On the i386 this is called after mapping has already been enabled
325 * and just syncs the pmap module with what has already been done.
326 * [We can't call it easily with mapping off since the kernel is not
327 * mapped with PA == VA, hence we would have to relocate every address
328 * from the linked base (virtual) address "KERNBASE" to the actual
329 * (physical) address starting relative to 0]
330 */
331void
332pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
333{
334 vm_offset_t va;
335 pt_entry_t *pte;
336 struct mdglobaldata *gd;
337 int i;
338 int pg;
339
340 KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
341 KvaSize = (vm_offset_t)VADDR(APTDPTDI, 0) - KvaStart;
342 KvaEnd = KvaStart + KvaSize;
343
344 avail_start = firstaddr;
345
346 /*
347 * XXX The calculation of virtual_start is wrong. It's NKPT*PAGE_SIZE
348 * too large. It should instead be correctly calculated in locore.s and
349 * not based on 'first' (which is a physical address, not a virtual
350 * address, for the start of unused physical memory). The kernel
351 * page tables are NOT double mapped and thus should not be included
352 * in this calculation.
353 */
354 virtual_start = (vm_offset_t) KERNBASE + firstaddr;
355 virtual_start = pmap_kmem_choose(virtual_start);
356 virtual_end = VADDR(KPTDI+NKPDE-1, NPTEPG-1);
357
358 /*
359 * Initialize protection array.
360 */
361 i386_protection_init();
362
363 /*
364 * The kernel's pmap is statically allocated so we don't have to use
365 * pmap_create, which is unlikely to work correctly at this part of
366 * the boot sequence (XXX and which no longer exists).
367 *
368 * The kernel_pmap's pm_pteobj is used only for locking and not
369 * for mmu pages.
984263bc 370 */
371 kernel_pmap.pm_pdir = (pd_entry_t *)(KERNBASE + (u_int)IdlePTD);
372 kernel_pmap.pm_count = 1;
373 kernel_pmap.pm_active = (cpumask_t)-1 & ~CPUMASK_LOCK;
374 kernel_pmap.pm_pteobj = &kernel_object;
375 TAILQ_INIT(&kernel_pmap.pm_pvlist);
376 TAILQ_INIT(&kernel_pmap.pm_pvlist_free);
377 spin_init(&kernel_pmap.pm_spin);
378 lwkt_token_init(&kernel_pmap.pm_token, "kpmap_tok");
379 nkpt = NKPT;
380
381 /*
382 * Reserve some special page table entries/VA space for temporary
383 * mapping of pages.
384 */
385#define SYSMAP(c, p, v, n) \
386 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
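/*
 * Illustrative expansion (added note; not from the original source): each
 * SYSMAP() invocation below carves a fixed VA window out of the boot-time
 * KVA cursor and remembers the matching kernel pte, e.g.
 *
 *	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
 *
 * expands roughly to:
 *
 *	CADDR1 = (caddr_t)va; va += 1 * PAGE_SIZE; CMAP1 = pte; pte += 1;
 */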
387
388 va = virtual_start;
389 pte = (pt_entry_t *) pmap_kernel_pte(va);
390
391 /*
392 * CMAP1/CMAP2 are used for zeroing and copying pages.
393 */
394 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
395
396 /*
397 * Crashdump maps.
398 */
399 SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
400
401 /*
402 * ptvmmap is used for reading arbitrary physical pages via
403 * /dev/mem.
404 */
405 SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
406
407 /*
408 * msgbufp is used to map the system message buffer.
409 * XXX msgbufmap is not used.
410 */
411 SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
412 atop(round_page(MSGBUF_SIZE)))
413
414 virtual_start = va;
415
416 *(int *) CMAP1 = 0;
417 for (i = 0; i < NKPT; i++)
418 PTD[i] = 0;
419
420 /*
421 * PG_G is terribly broken on SMP because we IPI invltlb's in some
422 * cases rather than invlpg. Actually, I don't even know why it
423 * works under UP because of the self-referential page table mappings.
424 */
425#ifdef SMP
426 pgeflag = 0;
427#else
428 if (cpu_feature & CPUID_PGE)
429 pgeflag = PG_G;
430#endif
431
432/*
433 * Initialize the 4MB page size flag
434 */
435 pseflag = 0;
436/*
437 * The 4MB page version of the initial
438 * kernel page mapping.
439 */
440 pdir4mb = 0;
441
442#if !defined(DISABLE_PSE)
443 if (cpu_feature & CPUID_PSE) {
444 unsigned ptditmp;
445 /*
446 * Note that we have enabled PSE mode
447 */
448 pseflag = PG_PS;
449 ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
450 ptditmp &= ~(NBPDR - 1);
451 ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
452 pdir4mb = ptditmp;
453
8a8d5d85
MD
454#ifndef SMP
455 /*
456 * Enable the PSE mode. If we are SMP we can't do this
457 * now because the APs will not be able to use it when
458 * they boot up.
459 */
460 load_cr4(rcr4() | CR4_PSE);
984263bc 461
8a8d5d85
MD
462 /*
463 * We can do the mapping here for the single processor
464 * case. We simply ignore the old page table page from
465 * now on.
466 */
467 /*
468 * For SMP, we still need 4K pages to bootstrap APs,
469 * PSE will be enabled as soon as all APs are up.
470 */
b5b32410 471 PTD[KPTDI] = (pd_entry_t)ptditmp;
fbbaeba3 472 kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
0f7a3396 473 cpu_invltlb();
8a8d5d85 474#endif
984263bc
MD
475 }
476#endif
984263bc 477
81c04d07
MD
478 /*
479 * We need to finish setting up the globaldata page for the BSP.
480 * locore has already populated the page table for the mdglobaldata
481 * portion.
482 */
483 pg = MDGLOBALDATA_BASEALLOC_PAGES;
85100692 484 gd = &CPU_prvspace[0].mdglobaldata;
81c04d07
MD
485 gd->gd_CMAP1 = &SMPpt[pg + 0];
486 gd->gd_CMAP2 = &SMPpt[pg + 1];
487 gd->gd_CMAP3 = &SMPpt[pg + 2];
488 gd->gd_PMAP1 = &SMPpt[pg + 3];
9388fcaa 489 gd->gd_GDMAP1 = &PTD[APTDPTDI];
85100692
MD
490 gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
491 gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
492 gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
493 gd->gd_PADDR1 = (unsigned *)CPU_prvspace[0].PPAGE1;
9388fcaa 494 gd->gd_GDADDR1= (unsigned *)VADDR(APTDPTDI, 0);
984263bc 495
0f7a3396 496 cpu_invltlb();
984263bc
MD
497}
498
499#ifdef SMP
500/*
501 * Set 4mb pdir for mp startup
502 */
503void
504pmap_set_opt(void)
505{
506 if (pseflag && (cpu_feature & CPUID_PSE)) {
507 load_cr4(rcr4() | CR4_PSE);
72740893 508 if (pdir4mb && mycpu->gd_cpuid == 0) { /* only on BSP */
fbbaeba3 509 kernel_pmap.pm_pdir[KPTDI] =
984263bc
MD
510 PTD[KPTDI] = (pd_entry_t)pdir4mb;
511 cpu_invltlb();
512 }
513 }
514}
515#endif
516
517/*
4107b0c0
MD
518 * Initialize the pmap module, called by vm_init()
519 *
520 * Called from the low level boot code only.
984263bc
MD
521 */
522void
e7252eda 523pmap_init(void)
984263bc
MD
524{
525 int i;
526 int initial_pvs;
527
528 /*
529 * object for kernel page table pages
530 */
531 kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);
532
533 /*
534 * Allocate memory for random pmap data structures. Includes the
535 * pv_head_table.
536 */
537
538 for(i = 0; i < vm_page_array_size; i++) {
539 vm_page_t m;
540
541 m = &vm_page_array[i];
542 TAILQ_INIT(&m->md.pv_list);
543 m->md.pv_list_count = 0;
544 }
545
546 /*
547 * init the pv free list
548 */
549 initial_pvs = vm_page_array_size;
550 if (initial_pvs < MINPV)
551 initial_pvs = MINPV;
552 pvzone = &pvzone_store;
948209ce
MD
553 pvinit = (void *)kmem_alloc(&kernel_map,
554 initial_pvs * sizeof (struct pv_entry));
555 zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
556 pvinit, initial_pvs);
984263bc
MD
557
558 /*
559 * Now it is safe to enable pv_table recording.
560 */
561 pmap_initialized = TRUE;
562}
563
564/*
565 * Initialize the address space (zone) for the pv_entries. Set a
566 * high water mark so that the system can recover from excessive
567 * numbers of pv entries.
4107b0c0
MD
568 *
569 * Called from the low level boot code only.
984263bc
MD
570 */
571void
f123d5a1 572pmap_init2(void)
984263bc
MD
573{
574 int shpgperproc = PMAP_SHPGPERPROC;
948209ce 575 int entry_max;
984263bc
MD
576
577 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
578 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
948209ce
MD
579
580#ifdef PMAP_PVLIMIT
581 /*
582 * Horrible hack for systems with a lot of memory running i386.
583 * the calculated pv_entry_max can wind up eating a ton of KVM
584 * so put a cap on the number of entries if the user did not
585 * change any of the values. This saves about 44MB of KVM on
586 * boxes with 3+GB of ram.
587 *
588 * On the flip side, this makes it more likely that some setups
589 * will run out of pv entries. Those sysads will have to bump
590 * the limit up with vm.pmap.pv_entries or vm.pmap.shpgperproc.
591 */
592 if (shpgperproc == PMAP_SHPGPERPROC) {
593 if (pv_entry_max > PMAP_PVLIMIT)
594 pv_entry_max = PMAP_PVLIMIT;
595 }
596#endif
984263bc
MD
597 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
598 pv_entry_high_water = 9 * (pv_entry_max / 10);
948209ce
MD
599
600 /*
601 * Subtract out pages already installed in the zone (hack)
602 */
603 entry_max = pv_entry_max - vm_page_array_size;
604 if (entry_max <= 0)
605 entry_max = 1;
606
607 zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
984263bc
MD
608}
609
610
611/***************************************************
612 * Low level helper routines.....
613 ***************************************************/
614
5926987a
MD
615#ifdef PMAP_DEBUG
616
617static void
618test_m_maps_pv(vm_page_t m, pv_entry_t pv)
619{
620 pv_entry_t spv;
621
74b9d1ec 622 crit_enter();
5926987a
MD
623#ifdef PMAP_DEBUG
624 KKASSERT(pv->pv_m == m);
625#endif
626 TAILQ_FOREACH(spv, &m->md.pv_list, pv_list) {
74b9d1ec
MD
627 if (pv == spv) {
628 crit_exit();
5926987a 629 return;
74b9d1ec 630 }
5926987a 631 }
74b9d1ec 632 crit_exit();
ed20d0e3 633 panic("test_m_maps_pv: failed m %p pv %p", m, pv);
5926987a
MD
634}
635
636static void
637ptbase_assert(struct pmap *pmap)
638{
639 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
640
641 /* are we current address space or kernel? */
4107b0c0 642 if (pmap == &kernel_pmap || frame == (((unsigned)PTDpde) & PG_FRAME))
5926987a 643 return;
3558dcda 644 KKASSERT(frame == (*mdcpu->gd_GDMAP1 & PG_FRAME));
5926987a
MD
645}
646
647#else
648
649#define test_m_maps_pv(m, pv)
650#define ptbase_assert(pmap)
651
652#endif
653
984263bc
MD
654#if defined(PMAP_DIAGNOSTIC)
655
656/*
657 * This code checks for non-writeable/modified pages.
658 * This should be an invalid condition.
659 */
660static int
661pmap_nw_modified(pt_entry_t ptea)
662{
663 int pte;
664
665 pte = (int) ptea;
666
667 if ((pte & (PG_M|PG_RW)) == PG_M)
668 return 1;
669 else
670 return 0;
671}
672#endif
673
674
675/*
4107b0c0
MD
676 * This routine defines the region(s) of memory that should not be tested
677 * for the modified bit.
678 *
679 * No requirements.
984263bc
MD
680 */
681static PMAP_INLINE int
682pmap_track_modified(vm_offset_t va)
683{
684 if ((va < clean_sva) || (va >= clean_eva))
685 return 1;
686 else
687 return 0;
688}
689
c1692ddf
MD
690/*
691 * Retrieve the mapped page table base for a particular pmap. Use our self
692 * mapping for the kernel_pmap or our current pmap.
693 *
694 * For foreign pmaps we use the per-cpu page table map. Since this involves
695 * installing a ptd it's actually (per-process x per-cpu). However, we
696 * still cannot depend on our mapping to survive thread switches because
697 * the process might be threaded and switching to another thread for the
698 * same process on the same cpu will allow that other thread to make its
699 * own mapping.
700 *
701 * This could be a bit confusing, but the gist is that for something like
702 * the vkernel, which uses foreign pmaps all the time, this represents a
703 * pretty good cache that avoids unnecessary invltlb()s.
4107b0c0
MD
704 *
705 * The caller must hold vm_token and the returned value is only valid
706 * until the caller blocks or releases the token.
c1692ddf 707 */
984263bc 708static unsigned *
e0e69b7d 709get_ptbase(pmap_t pmap)
984263bc
MD
710{
711 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
c1692ddf 712 struct mdglobaldata *gd = mdcpu;
984263bc 713
4107b0c0
MD
714 ASSERT_LWKT_TOKEN_HELD(&vm_token);
715
5926987a
MD
716 /*
717 * We can use PTmap if the pmap is our current address space or
718 * the kernel address space.
719 */
fbbaeba3 720 if (pmap == &kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
984263bc
MD
721 return (unsigned *) PTmap;
722 }
e0e69b7d 723
5926987a 724 /*
c1692ddf
MD
725 * Otherwise we use the per-cpu alternative page table map. Each
726 * cpu gets its own map. Because of this we cannot use this map
727 * from interrupts or threads which can preempt.
be3aecf7
MD
728 *
729 * Even if we already have the map cached we may still have to
730 * invalidate the TLB if another cpu modified a PDE in the map.
5926987a 731 */
c1692ddf
MD
732 KKASSERT(gd->mi.gd_intr_nesting_level == 0 &&
733 (gd->mi.gd_curthread->td_flags & TDF_INTTHREAD) == 0);
e0e69b7d 734
c1692ddf
MD
735 if ((*gd->gd_GDMAP1 & PG_FRAME) != frame) {
736 *gd->gd_GDMAP1 = frame | PG_RW | PG_V;
be3aecf7
MD
737 pmap->pm_cached |= gd->mi.gd_cpumask;
738 cpu_invltlb();
739 } else if ((pmap->pm_cached & gd->mi.gd_cpumask) == 0) {
740 pmap->pm_cached |= gd->mi.gd_cpumask;
741 cpu_invltlb();
742 } else if (dreadful_invltlb) {
743 cpu_invltlb();
744 }
745 return ((unsigned *)gd->gd_GDADDR1);
746}
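/*
 * Usage sketch (added for illustration; not from the original source):
 *
 *	ptbase = get_ptbase(pmap);
 *	pte = ptbase + i386_btop(va);
 *
 * The returned base is only good until the caller blocks or releases
 * vm_token, as noted above.
 */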
747
748/*
e0e69b7d
MD
749 * pmap_extract:
750 *
4107b0c0 751 * Extract the physical page address associated with the map/VA pair.
e0e69b7d 752 *
4107b0c0 753 * The caller may hold vm_token if it desires non-blocking operation.
984263bc 754 */
6ef943a3 755vm_paddr_t
840de426 756pmap_extract(pmap_t pmap, vm_offset_t va)
984263bc
MD
757{
758 vm_offset_t rtval;
759 vm_offset_t pdirindex;
840de426 760
4107b0c0 761 lwkt_gettoken(&vm_token);
984263bc
MD
762 pdirindex = va >> PDRSHIFT;
763 if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
764 unsigned *pte;
765 if ((rtval & PG_PS) != 0) {
766 rtval &= ~(NBPDR - 1);
767 rtval |= va & (NBPDR - 1);
768 } else {
769 pte = get_ptbase(pmap) + i386_btop(va);
770 rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
771 }
772 } else {
773 rtval = 0;
774 }
775 lwkt_reltoken(&vm_token);
776 return rtval;
777}
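/*
 * Example (added for illustration; not from the original source):
 * translating a kernel virtual address to its physical address:
 *
 *	vm_paddr_t pa = pmap_extract(&kernel_pmap, (vm_offset_t)ptr);
 */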
778
984263bc
MD
779/***************************************************
780 * Low level mapping routines.....
781 ***************************************************/
782
783/*
4107b0c0
MD
784 * Map a wired VM page to a KVA, fully SMP synchronized.
785 *
786 * No requirements, non blocking.
984263bc 787 */
788void
789pmap_kenter(vm_offset_t va, vm_paddr_t pa)
790{
791 unsigned *pte;
792 unsigned npte;
793 pmap_inval_info info;
794
795 pmap_inval_init(&info);
796 npte = pa | PG_RW | PG_V | pgeflag;
797 pte = (unsigned *)vtopte(va);
798 pmap_inval_interlock(&info, &kernel_pmap, va);
799 *pte = npte;
800 pmap_inval_deinterlock(&info, &kernel_pmap);
801 pmap_inval_done(&info);
802}
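/*
 * Usage sketch (added for illustration; not from the original source):
 * temporarily wiring a physical page into KVA and tearing it down again:
 *
 *	pmap_kenter(va, VM_PAGE_TO_PHYS(m));
 *	... access the page through va ...
 *	pmap_kremove(va);
 */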
803
6d1ec6fa 804/*
4107b0c0
MD
805 * Map a wired VM page to a KVA, synchronized on current cpu only.
806 *
807 * No requirements, non blocking.
6d1ec6fa 808 */
24712b90
MD
809void
810pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
811{
812 unsigned *pte;
813 unsigned npte;
814
815 npte = pa | PG_RW | PG_V | pgeflag;
816 pte = (unsigned *)vtopte(va);
817 *pte = npte;
818 cpu_invlpg((void *)va);
819}
820
4107b0c0
MD
821/*
822 * Synchronize a previously entered VA on all cpus.
823 *
824 * No requirements, non blocking.
825 */
24712b90
MD
826void
827pmap_kenter_sync(vm_offset_t va)
828{
829 pmap_inval_info info;
830
831 pmap_inval_init(&info);
c2fb025d
MD
832 pmap_inval_interlock(&info, &kernel_pmap, va);
833 pmap_inval_deinterlock(&info, &kernel_pmap);
834 pmap_inval_done(&info);
24712b90
MD
835}
836
4107b0c0
MD
837/*
838 * Synchronize a previously entered VA on the current cpu only.
839 *
840 * No requirements, non blocking.
841 */
24712b90
MD
842void
843pmap_kenter_sync_quick(vm_offset_t va)
844{
845 cpu_invlpg((void *)va);
846}
847
984263bc 848/*
4107b0c0
MD
849 * Remove a page from the kernel pagetables, fully SMP synchronized.
850 *
851 * No requirements, non blocking.
984263bc 852 */
24712b90 853void
840de426 854pmap_kremove(vm_offset_t va)
984263bc 855{
840de426 856 unsigned *pte;
0f7a3396 857 pmap_inval_info info;
984263bc 858
0f7a3396 859 pmap_inval_init(&info);
984263bc 860 pte = (unsigned *)vtopte(va);
c2fb025d 861 pmap_inval_interlock(&info, &kernel_pmap, va);
984263bc 862 *pte = 0;
c2fb025d
MD
863 pmap_inval_deinterlock(&info, &kernel_pmap);
864 pmap_inval_done(&info);
984263bc
MD
865}
866
4107b0c0
MD
867/*
868 * Remove a page from the kernel pagetables, synchronized on current cpu only.
869 *
870 * No requirements, non blocking.
871 */
24712b90
MD
872void
873pmap_kremove_quick(vm_offset_t va)
874{
875 unsigned *pte;
876 pte = (unsigned *)vtopte(va);
877 *pte = 0;
878 cpu_invlpg((void *)va);
879}
880
984263bc 881/*
4107b0c0
MD
882 * Adjust the permissions of a page in the kernel page table,
883 * synchronized on the current cpu only.
884 *
885 * No requirements, non blocking.
9ad680a3
MD
886 */
887void
888pmap_kmodify_rw(vm_offset_t va)
889{
4107b0c0 890 atomic_set_int(vtopte(va), PG_RW);
9ad680a3
MD
891 cpu_invlpg((void *)va);
892}
893
4107b0c0
MD
894/*
895 * Adjust the permissions of a page in the kernel page table,
896 * synchronized on the current cpu only.
897 *
898 * No requirements, non blocking.
899 */
9ad680a3
MD
900void
901pmap_kmodify_nc(vm_offset_t va)
902{
4107b0c0 903 atomic_set_int(vtopte(va), PG_N);
9ad680a3
MD
904 cpu_invlpg((void *)va);
905}
906
907/*
4107b0c0 908 * Map a range of physical addresses into kernel virtual address space.
984263bc 909 *
4107b0c0 910 * No requirements, non blocking.
984263bc
MD
911 */
912vm_offset_t
913pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
914{
915 vm_offset_t sva, virt;
916
917 sva = virt = *virtp;
918 while (start < end) {
919 pmap_kenter(virt, start);
920 virt += PAGE_SIZE;
921 start += PAGE_SIZE;
922 }
923 *virtp = virt;
924 return (sva);
925}
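/*
 * Usage sketch (added for illustration; not from the original source):
 * mapping a physically contiguous range during early boot:
 *
 *	vm_offset_t va = virtual_start;
 *	vm_offset_t sva = pmap_map(&va, pa_start, pa_end,
 *				   VM_PROT_READ | VM_PROT_WRITE);
 */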
926
984263bc 927/*
4107b0c0
MD
928 * Add a list of wired pages to the kva, fully SMP synchronized.
929 *
930 * No requirements, non blocking.
984263bc
MD
931 */
932void
840de426 933pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
984263bc
MD
934{
935 vm_offset_t end_va;
936
937 end_va = va + count * PAGE_SIZE;
938
939 while (va < end_va) {
940 unsigned *pte;
941
942 pte = (unsigned *)vtopte(va);
943 *pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
944 cpu_invlpg((void *)va);
945 va += PAGE_SIZE;
946 m++;
947 }
948#ifdef SMP
949 smp_invltlb(); /* XXX */
950#endif
951}
952
953/*
954 * Remove pages from KVA, fully SMP synchronized.
955 *
956 * No requirements, non blocking.
957 */
958void
959pmap_qremove(vm_offset_t va, int count)
960{
961 vm_offset_t end_va;
962
963 end_va = va + count*PAGE_SIZE;
964
965 while (va < end_va) {
966 unsigned *pte;
967
968 pte = (unsigned *)vtopte(va);
969 *pte = 0;
970 cpu_invlpg((void *)va);
971 va += PAGE_SIZE;
972 }
973#ifdef SMP
974 smp_invltlb();
975#endif
976}
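/*
 * Usage sketch (added for illustration; not from the original source):
 * mapping an array of vm_page_t's into a contiguous KVA window and
 * unmapping it when done:
 *
 *	pmap_qenter(kva, mpages, npages);
 *	... use the buffer at kva ...
 *	pmap_qremove(kva, npages);
 */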
977
06ecca5a
MD
978/*
979 * This routine works like vm_page_lookup() but also blocks as long as the
980 * page is busy. This routine does not busy the page it returns.
981 *
b12defdc 982 * The caller must hold the object.
06ecca5a 983 */
984263bc 984static vm_page_t
840de426 985pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
984263bc
MD
986{
987 vm_page_t m;
06ecca5a 988
b12defdc
MD
989 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
990 m = vm_page_lookup_busy_wait(object, pindex, FALSE, "pplookp");
17cde63e 991
06ecca5a 992 return(m);
984263bc
MD
993}
994
995/*
263e4574 996 * Create a new thread and optionally associate it with a (new) process.
6ef943a3 997 * NOTE! the new thread's cpu may not equal the current cpu.
263e4574 998 */
7d0bac62
MD
999void
1000pmap_init_thread(thread_t td)
263e4574 1001{
f470d0c8 1002 /* enforce pcb placement */
f470d0c8 1003 td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
65d6ce10 1004 td->td_savefpu = &td->td_pcb->pcb_save;
7d0bac62 1005 td->td_sp = (char *)td->td_pcb - 16;
263e4574
MD
1006}
1007
1008/*
984263bc
MD
1009 * This routine directly affects the fork perf for a process.
1010 */
1011void
13d13d89 1012pmap_init_proc(struct proc *p)
984263bc 1013{
984263bc
MD
1014}
1015
984263bc
MD
1016/***************************************************
1017 * Page table page management routines.....
1018 ***************************************************/
1019
1020/*
90244566
MD
1021 * This routine unwires page table pages, removing and freeing the page
1022 * table page when the wire count drops to 0.
4107b0c0
MD
1023 *
1024 * The caller must hold vm_token.
1025 * This function can block.
984263bc
MD
1026 */
1027static int
90244566 1028_pmap_unwire_pte(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
840de426 1029{
17cde63e
MD
1030 /*
1031 * Wait until we can busy the page ourselves. We cannot have
1032 * any active flushes if we block.
1033 */
b12defdc 1034 vm_page_busy_wait(m, FALSE, "pmuwpt");
eec2b734 1035 KASSERT(m->queue == PQ_NONE,
90244566 1036 ("_pmap_unwire_pte: %p->queue != PQ_NONE", m));
984263bc 1037
90244566 1038 if (m->wire_count == 1) {
984263bc 1039 /*
be3aecf7
MD
1040 * Unmap the page table page.
1041 *
1042 * NOTE: We must clear pm_cached for all cpus, including
1043 * the current one, when clearing a page directory
1044 * entry.
984263bc 1045 */
c2fb025d 1046 pmap_inval_interlock(info, pmap, -1);
2247fe02 1047 KKASSERT(pmap->pm_pdir[m->pindex]);
984263bc 1048 pmap->pm_pdir[m->pindex] = 0;
be3aecf7 1049 pmap->pm_cached = 0;
c2fb025d 1050 pmap_inval_deinterlock(info, pmap);
eec2b734
MD
1051
1052 KKASSERT(pmap->pm_stats.resident_count > 0);
984263bc 1053 --pmap->pm_stats.resident_count;
984263bc
MD
1054
1055 if (pmap->pm_ptphint == m)
1056 pmap->pm_ptphint = NULL;
1057
1058 /*
eec2b734
MD
1059 * This was our last hold, the page had better be unwired
1060 * after we decrement wire_count.
1061 *
1062 * FUTURE NOTE: shared page directory page could result in
1063 * multiple wire counts.
984263bc 1064 */
90244566 1065 vm_page_unwire(m, 0);
17cde63e 1066 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
eec2b734
MD
1067 vm_page_flash(m);
1068 vm_page_free_zero(m);
984263bc 1069 return 1;
17cde63e 1070 } else {
90244566
MD
1071 KKASSERT(m->wire_count > 1);
1072 if (vm_page_unwire_quick(m))
1073 panic("pmap_unwire_pte: Insufficient wire_count");
b12defdc 1074 vm_page_wakeup(m);
17cde63e 1075 return 0;
984263bc 1076 }
984263bc
MD
1077}
1078
4107b0c0
MD
1079/*
1080 * The caller must hold vm_token.
92ba8d28 1081 *
4107b0c0 1082 * This function can block.
92ba8d28
MD
1083 *
1084 * This function can race the wire_count 2->1 case because the page
1085 * is not busied during the unwire_quick operation. An eventual
1086 * pmap_release() will catch the case.
4107b0c0 1087 */
984263bc 1088static PMAP_INLINE int
90244566 1089pmap_unwire_pte(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
984263bc 1090{
90244566
MD
1091 KKASSERT(m->wire_count > 0);
1092 if (m->wire_count > 1) {
1093 if (vm_page_unwire_quick(m))
1094 panic("pmap_unwire_pte: Insufficient wire_count");
984263bc 1095 return 0;
eec2b734 1096 } else {
90244566 1097 return _pmap_unwire_pte(pmap, m, info);
eec2b734 1098 }
984263bc
MD
1099}
1100
1101/*
4107b0c0 1102 * After removing a (user) page table entry, this routine is used to
984263bc 1103 * conditionally free the page, and manage the hold/wire counts.
5926987a 1104 *
4107b0c0
MD
1105 * The caller must hold vm_token.
1106 * This function can block regardless.
984263bc 1107 */
554cf9ac 1108static void
0f7a3396 1109pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
4107b0c0 1110 pmap_inval_info_t info)
984263bc
MD
1111{
1112 unsigned ptepindex;
4107b0c0 1113
b12defdc
MD
1114 ASSERT_LWKT_TOKEN_HELD(vm_object_token(pmap->pm_pteobj));
1115
984263bc 1116 if (va >= UPT_MIN_ADDRESS)
554cf9ac 1117 return;
984263bc
MD
1118
1119 if (mpte == NULL) {
1120 ptepindex = (va >> PDRSHIFT);
b1482674
MD
1121 if ((mpte = pmap->pm_ptphint) != NULL &&
1122 mpte->pindex == ptepindex &&
1123 (mpte->flags & PG_BUSY) == 0) {
1124 ; /* use mpte */
984263bc 1125 } else {
b12defdc 1126 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
984263bc 1127 pmap->pm_ptphint = mpte;
b12defdc 1128 vm_page_wakeup(mpte);
984263bc
MD
1129 }
1130 }
554cf9ac 1131 pmap_unwire_pte(pmap, mpte, info);
984263bc
MD
1132}
1133
54a764e8 1134/*
fbbaeba3
MD
1135 * Initialize pmap0/vmspace0. This pmap is not added to pmap_list because
1136 * it, and IdlePTD, represents the template used to update all other pmaps.
1137 *
1138 * On architectures where the kernel pmap is not integrated into the user
1139 * process pmap, this pmap represents the process pmap, not the kernel pmap.
1140 * kernel_pmap should be used to directly access the kernel_pmap.
4107b0c0
MD
1141 *
1142 * No requirements.
54a764e8 1143 */
984263bc 1144void
840de426 1145pmap_pinit0(struct pmap *pmap)
984263bc
MD
1146{
1147 pmap->pm_pdir =
e4846942 1148 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
24712b90 1149 pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
984263bc
MD
1150 pmap->pm_count = 1;
1151 pmap->pm_active = 0;
be3aecf7 1152 pmap->pm_cached = 0;
984263bc
MD
1153 pmap->pm_ptphint = NULL;
1154 TAILQ_INIT(&pmap->pm_pvlist);
b12defdc
MD
1155 TAILQ_INIT(&pmap->pm_pvlist_free);
1156 spin_init(&pmap->pm_spin);
1157 lwkt_token_init(&pmap->pm_token, "pmap_tok");
984263bc
MD
1158 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1159}
1160
1161/*
1162 * Initialize a preallocated and zeroed pmap structure,
1163 * such as one in a vmspace structure.
4107b0c0
MD
1164 *
1165 * No requirements.
984263bc
MD
1166 */
1167void
840de426 1168pmap_pinit(struct pmap *pmap)
984263bc
MD
1169{
1170 vm_page_t ptdpg;
1171
1172 /*
1173 * No need to allocate page table space yet but we do need a valid
1174 * page directory table.
1175 */
b5b32410 1176 if (pmap->pm_pdir == NULL) {
984263bc 1177 pmap->pm_pdir =
e4846942 1178 (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
b5b32410 1179 }
984263bc
MD
1180
1181 /*
c3834cb2 1182 * Allocate an object for the ptes
984263bc
MD
1183 */
1184 if (pmap->pm_pteobj == NULL)
c3834cb2 1185 pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
984263bc
MD
1186
1187 /*
c3834cb2
MD
1188 * Allocate the page directory page, unless we already have
1189 * one cached. If we used the cached page the wire_count will
1190 * already be set appropriately.
984263bc 1191 */
c3834cb2
MD
1192 if ((ptdpg = pmap->pm_pdirm) == NULL) {
1193 ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
d2d8515b
MD
1194 VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
1195 VM_ALLOC_ZERO);
c3834cb2 1196 pmap->pm_pdirm = ptdpg;
b12defdc
MD
1197 vm_page_flag_clear(ptdpg, PG_MAPPED);
1198 vm_page_wire(ptdpg);
d2d8515b 1199 KKASSERT(ptdpg->valid == VM_PAGE_BITS_ALL);
c3834cb2 1200 pmap_kenter((vm_offset_t)pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
b12defdc 1201 vm_page_wakeup(ptdpg);
c3834cb2 1202 }
984263bc 1203 pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
984263bc
MD
1204
1205 /* install self-referential address mapping entry */
1206 *(unsigned *) (pmap->pm_pdir + PTDPTDI) =
1207 VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
1208
1209 pmap->pm_count = 1;
1210 pmap->pm_active = 0;
be3aecf7 1211 pmap->pm_cached = 0;
984263bc
MD
1212 pmap->pm_ptphint = NULL;
1213 TAILQ_INIT(&pmap->pm_pvlist);
b12defdc
MD
1214 TAILQ_INIT(&pmap->pm_pvlist_free);
1215 spin_init(&pmap->pm_spin);
1216 lwkt_token_init(&pmap->pm_token, "pmap_tok");
984263bc 1217 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
eec2b734 1218 pmap->pm_stats.resident_count = 1;
984263bc
MD
1219}
1220
1221/*
c3834cb2
MD
1222 * Clean up a pmap structure so it can be physically freed. This routine
1223 * is called by the vmspace dtor function. A great deal of pmap data is
1224 * left passively mapped to improve vmspace management so we have a bit
1225 * of cleanup work to do here.
4107b0c0
MD
1226 *
1227 * No requirements.
e3161323
MD
1228 */
1229void
1230pmap_puninit(pmap_t pmap)
1231{
c3834cb2
MD
1232 vm_page_t p;
1233
e3161323 1234 KKASSERT(pmap->pm_active == 0);
c3834cb2
MD
1235 if ((p = pmap->pm_pdirm) != NULL) {
1236 KKASSERT(pmap->pm_pdir != NULL);
1237 pmap_kremove((vm_offset_t)pmap->pm_pdir);
b12defdc 1238 vm_page_busy_wait(p, FALSE, "pgpun");
90244566 1239 vm_page_unwire(p, 0);
c3834cb2
MD
1240 vm_page_free_zero(p);
1241 pmap->pm_pdirm = NULL;
1242 }
e3161323
MD
1243 if (pmap->pm_pdir) {
1244 kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pdir, PAGE_SIZE);
1245 pmap->pm_pdir = NULL;
1246 }
1247 if (pmap->pm_pteobj) {
1248 vm_object_deallocate(pmap->pm_pteobj);
1249 pmap->pm_pteobj = NULL;
1250 }
1251}
1252
1253/*
984263bc
MD
1254 * Wire in kernel global address entries. To avoid a race condition
1255 * between pmap initialization and pmap_growkernel, this procedure
54a764e8
MD
1256 * adds the pmap to the master list (which growkernel scans to update),
1257 * then copies the template.
4107b0c0
MD
1258 *
1259 * No requirements.
984263bc
MD
1260 */
1261void
840de426 1262pmap_pinit2(struct pmap *pmap)
984263bc 1263{
b12defdc
MD
1264 /*
1265 * XXX copies current process, does not fill in MPPTDI
1266 */
1267 spin_lock(&pmap_spin);
54a764e8 1268 TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
984263bc 1269 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
b12defdc 1270 spin_unlock(&pmap_spin);
984263bc
MD
1271}
1272
344ad853 1273/*
eec2b734 1274 * Attempt to release and free a vm_page in a pmap. Returns 1 on success,
344ad853 1275 * 0 on failure (if the procedure had to sleep).
c3834cb2
MD
1276 *
1277 * When asked to remove the page directory page itself, we actually just
1278 * leave it cached so we do not have to incur the SMP inval overhead of
1279 * removing the kernel mapping. pmap_puninit() will take care of it.
4107b0c0
MD
1280 *
1281 * The caller must hold vm_token.
1282 * This function can block regardless.
344ad853 1283 */
984263bc 1284static int
840de426 1285pmap_release_free_page(struct pmap *pmap, vm_page_t p)
984263bc
MD
1286{
1287 unsigned *pde = (unsigned *) pmap->pm_pdir;
4107b0c0 1288
984263bc
MD
1289 /*
1290 * This code optimizes the case of freeing non-busy
1291 * page-table pages. Those pages are zero now, and
1292 * might as well be placed directly into the zero queue.
1293 */
b12defdc
MD
1294 if (vm_page_busy_try(p, FALSE)) {
1295 vm_page_sleep_busy(p, FALSE, "pmaprl");
984263bc 1296 return 0;
b12defdc 1297 }
984263bc 1298
eec2b734 1299 KKASSERT(pmap->pm_stats.resident_count > 0);
2247fe02 1300 KKASSERT(pde[p->pindex]);
984263bc 1301
b1482674
MD
1302 /*
1303 * page table page's wire_count must be 1. Caller is the pmap
1304 * termination code which holds the pm_pteobj, there is a race
1305 * if someone else is trying to hold the VM object in order to
1306 * clean up a wire_count.
1307 */
90244566 1308 if (p->wire_count != 1) {
b1482674
MD
1309 if (pmap->pm_pteobj->hold_count <= 1)
1310 panic("pmap_release: freeing wired page table page");
1311 kprintf("pmap_release_free_page: unwire race detected\n");
1312 vm_page_wakeup(p);
1313 tsleep(p, 0, "pmapx", 1);
1314 return 0;
984263bc 1315 }
b1482674
MD
1316
1317 /*
1318 * Remove the page table page from the processes address space.
1319 */
1320 pmap->pm_cached = 0;
1321 pde[p->pindex] = 0;
1322 --pmap->pm_stats.resident_count;
c3834cb2
MD
1323 if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
1324 pmap->pm_ptphint = NULL;
1325
984263bc 1326 /*
c3834cb2
MD
1327 * We leave the page directory page cached, wired, and mapped in
1328 * the pmap until the dtor function (pmap_puninit()) gets called.
1329 * However, still clean it up so we can set PG_ZERO.
c1692ddf
MD
1330 *
1331 * The pmap has already been removed from the pmap_list in the
1332 * PTDPTDI case.
984263bc
MD
1333 */
1334 if (p->pindex == PTDPTDI) {
1335 bzero(pde + KPTDI, nkpt * PTESIZE);
9388fcaa 1336 bzero(pde + MPPTDI, (NPDEPG - MPPTDI) * PTESIZE);
c3834cb2
MD
1337 vm_page_flag_set(p, PG_ZERO);
1338 vm_page_wakeup(p);
1339 } else {
92ba8d28
MD
1340 /*
1341 * This case can occur if a pmap_unwire_pte() loses a race
1342 * while the page is unbusied.
1343 */
1344 /*panic("pmap_release: page should already be gone %p", p);*/
1345 vm_page_flag_clear(p, PG_MAPPED);
90244566 1346 vm_page_unwire(p, 0);
c3834cb2 1347 vm_page_free_zero(p);
984263bc 1348 }
984263bc
MD
1349 return 1;
1350}
1351
1352/*
4107b0c0
MD
1353 * This routine is called if the page table page is not mapped correctly.
1354 *
1355 * The caller must hold vm_token.
984263bc
MD
1356 */
1357static vm_page_t
840de426 1358_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
984263bc 1359{
480c83b6 1360 vm_offset_t ptepa;
984263bc
MD
1361 vm_page_t m;
1362
1363 /*
d2d8515b
MD
1364 * Find or fabricate a new pagetable page. Setting VM_ALLOC_ZERO
1365 * will zero any new page and mark it valid.
984263bc
MD
1366 */
1367 m = vm_page_grab(pmap->pm_pteobj, ptepindex,
d2d8515b 1368 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
984263bc
MD
1369
1370 KASSERT(m->queue == PQ_NONE,
1371 ("_pmap_allocpte: %p->queue != PQ_NONE", m));
1372
eec2b734 1373 /*
90244566 1374 * Increment the wire count for the page we will be returning to
eec2b734
MD
1375 * the caller.
1376 */
90244566 1377 vm_page_wire(m);
eec2b734
MD
1378
1379 /*
1380 * It is possible that someone else got in and mapped by the page
1381 * directory page while we were blocked, if so just unbusy and
90244566 1382 * return the wired page.
eec2b734
MD
1383 */
1384 if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
1385 KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
1386 vm_page_wakeup(m);
1387 return(m);
1388 }
1389
984263bc
MD
1390 /*
1391 * Map the pagetable page into the process address space, if
1392 * it isn't already there.
be3aecf7
MD
1393 *
1394 * NOTE: For safety clear pm_cached for all cpus including the
1395 * current one when adding a PDE to the map.
984263bc 1396 */
eec2b734 1397 ++pmap->pm_stats.resident_count;
984263bc
MD
1398
1399 ptepa = VM_PAGE_TO_PHYS(m);
1400 pmap->pm_pdir[ptepindex] =
1401 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
be3aecf7 1402 pmap->pm_cached = 0;
984263bc
MD
1403
1404 /*
1405 * Set the page table hint
1406 */
1407 pmap->pm_ptphint = m;
984263bc
MD
1408 vm_page_flag_set(m, PG_MAPPED);
1409 vm_page_wakeup(m);
1410
1411 return m;
1412}
1413
4107b0c0
MD
1414/*
1415 * Allocate a page table entry for a va.
1416 *
1417 * The caller must hold vm_token.
1418 */
984263bc 1419static vm_page_t
840de426 1420pmap_allocpte(pmap_t pmap, vm_offset_t va)
984263bc
MD
1421{
1422 unsigned ptepindex;
1423 vm_offset_t ptepa;
b1482674 1424 vm_page_t mpte;
984263bc 1425
b12defdc
MD
1426 ASSERT_LWKT_TOKEN_HELD(vm_object_token(pmap->pm_pteobj));
1427
984263bc
MD
1428 /*
1429 * Calculate pagetable page index
1430 */
1431 ptepindex = va >> PDRSHIFT;
1432
1433 /*
1434 * Get the page directory entry
1435 */
1436 ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
1437
1438 /*
1439 * This supports switching from a 4MB page to a
1440 * normal 4K page.
1441 */
1442 if (ptepa & PG_PS) {
1443 pmap->pm_pdir[ptepindex] = 0;
1444 ptepa = 0;
0f7a3396 1445 smp_invltlb();
54341a3b 1446 cpu_invltlb();
984263bc
MD
1447 }
1448
1449 /*
1450 * If the page table page is mapped, we just increment the
90244566 1451 * wire count, and activate it.
984263bc
MD
1452 */
1453 if (ptepa) {
1454 /*
1455 * In order to get the page table page, try the
1456 * hint first.
1457 */
b1482674
MD
1458 if ((mpte = pmap->pm_ptphint) != NULL &&
1459 (mpte->pindex == ptepindex) &&
1460 (mpte->flags & PG_BUSY) == 0) {
1461 vm_page_wire_quick(mpte);
984263bc 1462 } else {
b1482674
MD
1463 mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
1464 pmap->pm_ptphint = mpte;
1465 vm_page_wire_quick(mpte);
1466 vm_page_wakeup(mpte);
984263bc 1467 }
b1482674 1468 return mpte;
984263bc
MD
1469 }
1470 /*
1471 * Here if the pte page isn't mapped, or if it has been deallocated.
1472 */
1473 return _pmap_allocpte(pmap, ptepindex);
1474}
1475
1476
1477/***************************************************
1f804340 1478 * Pmap allocation/deallocation routines.
984263bc
MD
1479 ***************************************************/
1480
1481/*
1482 * Release any resources held by the given physical map.
1483 * Called when a pmap initialized by pmap_pinit is being released.
1484 * Should only be called if the map contains no valid mappings.
4107b0c0 1485 *
b12defdc 1486 * Caller must hold pmap->pm_token
984263bc 1487 */
1f804340
MD
1488static int pmap_release_callback(struct vm_page *p, void *data);
1489
984263bc 1490void
840de426 1491pmap_release(struct pmap *pmap)
984263bc 1492{
984263bc 1493 vm_object_t object = pmap->pm_pteobj;
1f804340 1494 struct rb_vm_page_scan_info info;
984263bc 1495
4107b0c0
MD
1496 KASSERT(pmap->pm_active == 0,
1497 ("pmap still active! %08x", pmap->pm_active));
984263bc
MD
1498#if defined(DIAGNOSTIC)
1499 if (object->ref_count != 1)
1500 panic("pmap_release: pteobj reference count != 1");
1501#endif
1502
1f804340
MD
1503 info.pmap = pmap;
1504 info.object = object;
b12defdc
MD
1505
1506 spin_lock(&pmap_spin);
54a764e8 1507 TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
b12defdc 1508 spin_unlock(&pmap_spin);
1f804340 1509
b12defdc 1510 vm_object_hold(object);
1f804340 1511 do {
1f804340
MD
1512 info.error = 0;
1513 info.mpte = NULL;
1514 info.limit = object->generation;
1515
1516 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1517 pmap_release_callback, &info);
1518 if (info.error == 0 && info.mpte) {
1519 if (!pmap_release_free_page(pmap, info.mpte))
1520 info.error = 1;
984263bc 1521 }
1f804340 1522 } while (info.error);
2f2d9e58 1523 vm_object_drop(object);
b12defdc
MD
1524
1525 pmap->pm_cached = 0;
1f804340
MD
1526}
1527
4107b0c0
MD
1528/*
1529 * The caller must hold vm_token.
1530 */
1f804340
MD
1531static int
1532pmap_release_callback(struct vm_page *p, void *data)
1533{
1534 struct rb_vm_page_scan_info *info = data;
1535
1536 if (p->pindex == PTDPTDI) {
1537 info->mpte = p;
1538 return(0);
344ad853 1539 }
1f804340
MD
1540 if (!pmap_release_free_page(info->pmap, p)) {
1541 info->error = 1;
1542 return(-1);
1543 }
1544 if (info->object->generation != info->limit) {
1545 info->error = 1;
1546 return(-1);
1547 }
1548 return(0);
984263bc 1549}
984263bc
MD
1550
1551/*
0e5797fe 1552 * Grow the number of kernel page table entries, if needed.
4107b0c0
MD
1553 *
1554 * No requirements.
984263bc
MD
1555 */
1556void
a8cf2878 1557pmap_growkernel(vm_offset_t kstart, vm_offset_t kend)
984263bc 1558{
a8cf2878 1559 vm_offset_t addr = kend;
54a764e8 1560 struct pmap *pmap;
984263bc
MD
1561 vm_offset_t ptppaddr;
1562 vm_page_t nkpg;
1563 pd_entry_t newpdir;
1564
b12defdc 1565 vm_object_hold(kptobj);
984263bc
MD
1566 if (kernel_vm_end == 0) {
1567 kernel_vm_end = KERNBASE;
1568 nkpt = 0;
1569 while (pdir_pde(PTD, kernel_vm_end)) {
4107b0c0
MD
1570 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1571 ~(PAGE_SIZE * NPTEPG - 1);
984263bc
MD
1572 nkpt++;
1573 }
1574 }
1575 addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1576 while (kernel_vm_end < addr) {
1577 if (pdir_pde(PTD, kernel_vm_end)) {
4107b0c0
MD
1578 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1579 ~(PAGE_SIZE * NPTEPG - 1);
984263bc
MD
1580 continue;
1581 }
1582
1583 /*
1584 * This index is bogus, but out of the way
1585 */
4107b0c0
MD
1586 nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_NORMAL |
1587 VM_ALLOC_SYSTEM |
1588 VM_ALLOC_INTERRUPT);
dc1fd4b3 1589 if (nkpg == NULL)
984263bc
MD
1590 panic("pmap_growkernel: no memory to grow kernel");
1591
984263bc
MD
1592 vm_page_wire(nkpg);
1593 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1594 pmap_zero_page(ptppaddr);
1595 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1596 pdir_pde(PTD, kernel_vm_end) = newpdir;
fbbaeba3 1597 *pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;
0e5797fe
MD
1598 nkpt++;
1599
1600 /*
54a764e8 1601 * This update must be interlocked with pmap_pinit2.
0e5797fe 1602 */
b12defdc 1603 spin_lock(&pmap_spin);
54a764e8
MD
1604 TAILQ_FOREACH(pmap, &pmap_list, pm_pmnode) {
1605 *pmap_pde(pmap, kernel_vm_end) = newpdir;
1606 }
b12defdc 1607 spin_unlock(&pmap_spin);
54a764e8
MD
1608 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
1609 ~(PAGE_SIZE * NPTEPG - 1);
984263bc 1610 }
b12defdc 1611 vm_object_drop(kptobj);
984263bc
MD
1612}
1613
1614/*
4107b0c0
MD
1615 * Retire the given physical map from service.
1616 *
1617 * Should only be called if the map contains no valid mappings.
1618 *
1619 * No requirements.
984263bc
MD
1620 */
1621void
840de426 1622pmap_destroy(pmap_t pmap)
984263bc 1623{
984263bc
MD
1624 if (pmap == NULL)
1625 return;
1626
4107b0c0
MD
1627 lwkt_gettoken(&vm_token);
1628 if (--pmap->pm_count == 0) {
984263bc
MD
1629 pmap_release(pmap);
1630 panic("destroying a pmap is not yet implemented");
1631 }
4107b0c0 1632 lwkt_reltoken(&vm_token);
984263bc
MD
1633}
1634
1635/*
4107b0c0
MD
1636 * Add a reference to the specified pmap.
1637 *
1638 * No requirements.
984263bc
MD
1639 */
1640void
840de426 1641pmap_reference(pmap_t pmap)
984263bc 1642{
4107b0c0
MD
1643 if (pmap) {
1644 lwkt_gettoken(&vm_token);
1645 ++pmap->pm_count;
1646 lwkt_reltoken(&vm_token);
984263bc
MD
1647 }
1648}
1649
1650/***************************************************
4107b0c0 1651 * page management routines.
984263bc
MD
1652 ***************************************************/
1653
1654/*
8a8d5d85
MD
1655 * free the pv_entry back to the free list. This function may be
1656 * called from an interrupt.
4107b0c0
MD
1657 *
1658 * The caller must hold vm_token.
984263bc
MD
1659 */
1660static PMAP_INLINE void
840de426 1661free_pv_entry(pv_entry_t pv)
984263bc 1662{
2bb9cc6f
MD
1663 struct mdglobaldata *gd;
1664
5926987a
MD
1665#ifdef PMAP_DEBUG
1666 KKASSERT(pv->pv_m != NULL);
1667 pv->pv_m = NULL;
1668#endif
2bb9cc6f 1669 gd = mdcpu;
984263bc 1670 pv_entry_count--;
2bb9cc6f
MD
1671 if (gd->gd_freepv == NULL)
1672 gd->gd_freepv = pv;
1673 else
1674 zfree(pvzone, pv);
984263bc
MD
1675}
1676
1677/*
1678 * get a new pv_entry, allocating a block from the system
2bb9cc6f
MD
1679 * when needed. This function may be called from an interrupt thread.
1680 *
1681 * THIS FUNCTION CAN BLOCK ON THE ZALLOC TOKEN, serialization of other
1682 * tokens (aka vm_token) to be temporarily lost.
4107b0c0
MD
1683 *
1684 * The caller must hold vm_token.
984263bc
MD
1685 */
1686static pv_entry_t
1687get_pv_entry(void)
1688{
2bb9cc6f
MD
1689 struct mdglobaldata *gd;
1690 pv_entry_t pv;
1691
984263bc
MD
1692 pv_entry_count++;
1693 if (pv_entry_high_water &&
20479584
MD
1694 (pv_entry_count > pv_entry_high_water) &&
1695 (pmap_pagedaemon_waken == 0)) {
984263bc
MD
1696 pmap_pagedaemon_waken = 1;
1697 wakeup (&vm_pages_needed);
1698 }
2bb9cc6f
MD
1699 gd = mdcpu;
1700 if ((pv = gd->gd_freepv) != NULL)
1701 gd->gd_freepv = NULL;
1702 else
1703 pv = zalloc(pvzone);
1704 return pv;
984263bc
MD
1705}
1706
1707/*
1708 * This routine is very drastic, but can save the system
1709 * in a pinch.
4107b0c0
MD
1710 *
1711 * No requirements.
984263bc
MD
1712 */
1713void
840de426 1714pmap_collect(void)
984263bc
MD
1715{
1716 int i;
1717 vm_page_t m;
1718 static int warningdone=0;
1719
1720 if (pmap_pagedaemon_waken == 0)
1721 return;
4107b0c0 1722 lwkt_gettoken(&vm_token);
20479584 1723 pmap_pagedaemon_waken = 0;
984263bc
MD
1724
1725 if (warningdone < 5) {
948209ce
MD
1726 kprintf("pmap_collect: collecting pv entries -- "
1727 "suggest increasing PMAP_SHPGPERPROC\n");
984263bc
MD
1728 warningdone++;
1729 }
1730
b12defdc 1731 for (i = 0; i < vm_page_array_size; i++) {
984263bc 1732 m = &vm_page_array[i];
b12defdc 1733 if (m->wire_count || m->hold_count)
984263bc 1734 continue;
b12defdc
MD
1735 if (vm_page_busy_try(m, TRUE) == 0) {
1736 if (m->wire_count == 0 && m->hold_count == 0) {
1737 pmap_remove_all(m);
1738 }
1739 vm_page_wakeup(m);
4107b0c0 1740 }
984263bc 1741 }
4107b0c0 1742 lwkt_reltoken(&vm_token);
984263bc
MD
1743}
1744
1745
1746/*
b1482674
MD
1747 * Remove the pv entry and unwire the page table page related to the
1748 * pte the caller has cleared from the page table.
4107b0c0
MD
1749 *
1750 * The caller must hold vm_token.
984263bc 1751 */
554cf9ac 1752static void
0f7a3396 1753pmap_remove_entry(struct pmap *pmap, vm_page_t m,
4107b0c0 1754 vm_offset_t va, pmap_inval_info_t info)
984263bc
MD
1755{
1756 pv_entry_t pv;
984263bc 1757
b1482674
MD
1758 /*
1759 * Cannot block
1760 */
4107b0c0 1761 ASSERT_LWKT_TOKEN_HELD(&vm_token);
984263bc
MD
1762 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1763 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1764 if (pmap == pv->pv_pmap && va == pv->pv_va)
1765 break;
1766 }
1767 } else {
1768 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
5926987a
MD
1769#ifdef PMAP_DEBUG
1770 KKASSERT(pv->pv_pmap == pmap);
1771#endif
1772 if (va == pv->pv_va)
984263bc
MD
1773 break;
1774 }
1775 }
5926987a 1776 KKASSERT(pv);
984263bc 1777
b1482674
MD
1778 /*
1779 * Cannot block
1780 */
5926987a
MD
1781 test_m_maps_pv(m, pv);
1782 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1783 m->md.pv_list_count--;
cef01e15
MD
1784 if (m->object)
1785 atomic_add_int(&m->object->agg_pv_list_count, -1);
5926987a
MD
1786 if (TAILQ_EMPTY(&m->md.pv_list))
1787 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1788 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1789 ++pmap->pm_generation;
b1482674
MD
1790
1791 /*
1792 * This can block.
1793 */
b12defdc 1794 vm_object_hold(pmap->pm_pteobj);
554cf9ac 1795 pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
b12defdc 1796 vm_object_drop(pmap->pm_pteobj);
5926987a 1797 free_pv_entry(pv);
984263bc
MD
1798}
1799
1800/*
4107b0c0
MD
1801 * Create a pv entry for page at pa for (pmap, va).
1802 *
1803 * The caller must hold vm_token.
984263bc
MD
1804 */
1805static void
2bb9cc6f
MD
1806pmap_insert_entry(pmap_t pmap, pv_entry_t pv, vm_offset_t va,
1807 vm_page_t mpte, vm_page_t m)
984263bc 1808{
5926987a
MD
1809#ifdef PMAP_DEBUG
1810 KKASSERT(pv->pv_m == NULL);
1811 pv->pv_m = m;
1812#endif
984263bc
MD
1813 pv->pv_va = va;
1814 pv->pv_pmap = pmap;
1815 pv->pv_ptem = mpte;
1816
1817 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1818 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
5926987a 1819 ++pmap->pm_generation;
984263bc 1820 m->md.pv_list_count++;
cef01e15
MD
1821 if (m->object)
1822 atomic_add_int(&m->object->agg_pv_list_count, 1);
984263bc
MD
1823}
1824
1825/*
5926987a
MD
1826 * pmap_remove_pte: do the work needed to unmap a page in a process.
1827 *
4107b0c0
MD
1828 * The caller must hold vm_token.
1829 *
1830 * WARNING! As with most other pmap functions this one can block, so
1831 * callers using temporary page table mappings must reload
1832 * them.
984263bc 1833 */
554cf9ac 1834static void
0f7a3396 1835pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
5926987a 1836 pmap_inval_info_t info)
984263bc
MD
1837{
1838 unsigned oldpte;
1839 vm_page_t m;
1840
5926987a 1841 ptbase_assert(pmap);
c2fb025d 1842 pmap_inval_interlock(info, pmap, va);
5926987a 1843 ptbase_assert(pmap);
984263bc
MD
1844 oldpte = loadandclear(ptq);
1845 if (oldpte & PG_W)
1846 pmap->pm_stats.wired_count -= 1;
c2fb025d 1847 pmap_inval_deinterlock(info, pmap);
90244566 1848 KKASSERT(oldpte & PG_V);
984263bc
MD
1849 /*
1850 * Machines that don't support invlpg also don't support
0f7a3396
MD
1851 * PG_G. XXX PG_G is disabled for SMP so don't worry about
1852 * the SMP case.
984263bc
MD
1853 */
1854 if (oldpte & PG_G)
41a01a4d 1855 cpu_invlpg((void *)va);
eec2b734
MD
1856 KKASSERT(pmap->pm_stats.resident_count > 0);
1857 --pmap->pm_stats.resident_count;
984263bc
MD
1858 if (oldpte & PG_MANAGED) {
1859 m = PHYS_TO_VM_PAGE(oldpte);
1860 if (oldpte & PG_M) {
1861#if defined(PMAP_DIAGNOSTIC)
1862 if (pmap_nw_modified((pt_entry_t) oldpte)) {
d557216f
MD
1863 kprintf("pmap_remove: modified page not "
1864 "writable: va: %p, pte: 0x%lx\n",
1865 (void *)va, (long)oldpte);
984263bc
MD
1866 }
1867#endif
1868 if (pmap_track_modified(va))
1869 vm_page_dirty(m);
1870 }
1871 if (oldpte & PG_A)
1872 vm_page_flag_set(m, PG_REFERENCED);
554cf9ac 1873 pmap_remove_entry(pmap, m, va, info);
984263bc 1874 } else {
554cf9ac 1875 pmap_unuse_pt(pmap, va, NULL, info);
984263bc 1876 }
984263bc
MD
1877}
1878
1879/*
5926987a 1880 * Remove a single page from a process address space.
e0e69b7d 1881 *
4107b0c0 1882 * The caller must hold vm_token.
984263bc
MD
1883 */
1884static void
0f7a3396 1885pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
984263bc 1886{
840de426 1887 unsigned *ptq;
984263bc
MD
1888
1889 /*
90244566 1890 * If there is no pte for this address, just skip it!!! Otherwise
e0e69b7d 1891 * get a local va for mappings for this pmap and remove the entry.
984263bc 1892 */
e0e69b7d
MD
1893 if (*pmap_pde(pmap, va) != 0) {
1894 ptq = get_ptbase(pmap) + i386_btop(va);
1895 if (*ptq) {
0f7a3396 1896 pmap_remove_pte(pmap, ptq, va, info);
5926987a 1897 /* ptq invalid */
e0e69b7d 1898 }
984263bc 1899 }
984263bc
MD
1900}
1901
1902/*
4107b0c0 1903 * Remove the given range of addresses from the specified map.
984263bc 1904 *
4107b0c0
MD
1905 * It is assumed that the start and end are properly rounded to the page
1906 * size.
e0e69b7d 1907 *
4107b0c0 1908 * No requirements.
984263bc
MD
1909 */
1910void
840de426 1911pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
984263bc 1912{
840de426 1913 unsigned *ptbase;
984263bc
MD
1914 vm_offset_t pdnxt;
1915 vm_offset_t ptpaddr;
1916 vm_offset_t sindex, eindex;
0f7a3396 1917 struct pmap_inval_info info;
984263bc
MD
1918
1919 if (pmap == NULL)
1920 return;
1921
b12defdc 1922 vm_object_hold(pmap->pm_pteobj);
4107b0c0
MD
1923 lwkt_gettoken(&vm_token);
1924 if (pmap->pm_stats.resident_count == 0) {
1925 lwkt_reltoken(&vm_token);
b12defdc 1926 vm_object_drop(pmap->pm_pteobj);
984263bc 1927 return;
4107b0c0 1928 }
984263bc 1929
0f7a3396
MD
1930 pmap_inval_init(&info);
1931
984263bc
MD
1932 /*
1933 * Special handling for removing a single page, a very
1934 * common operation where we can short circuit some
1935 * code.
1936 */
1937 if (((sva + PAGE_SIZE) == eva) &&
1938 (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
0f7a3396 1939 pmap_remove_page(pmap, sva, &info);
c2fb025d 1940 pmap_inval_done(&info);
4107b0c0 1941 lwkt_reltoken(&vm_token);
b12defdc 1942 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
1943 return;
1944 }
1945
984263bc
MD
1946 /*
1947 * Get a local virtual address for the mappings that are being
1948 * worked with.
1949 */
984263bc
MD
1950 sindex = i386_btop(sva);
1951 eindex = i386_btop(eva);
1952
554cf9ac 1953 while (sindex < eindex) {
984263bc
MD
1954 unsigned pdirindex;
1955
1956 /*
554cf9ac 1957 * Stop scanning if no pages are left
984263bc 1958 */
984263bc
MD
1959 if (pmap->pm_stats.resident_count == 0)
1960 break;
1961
554cf9ac
MD
1962 /*
1963 * Calculate index for next page table, limited by eindex.
1964 */
1965 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1966 if (pdnxt > eindex)
1967 pdnxt = eindex;
1968
984263bc 1969 pdirindex = sindex / NPDEPG;
554cf9ac
MD
1970 ptpaddr = (unsigned)pmap->pm_pdir[pdirindex];
1971 if (ptpaddr & PG_PS) {
c2fb025d 1972 pmap_inval_interlock(&info, pmap, -1);
984263bc
MD
1973 pmap->pm_pdir[pdirindex] = 0;
1974 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
be3aecf7 1975 pmap->pm_cached = 0;
c2fb025d 1976 pmap_inval_deinterlock(&info, pmap);
554cf9ac 1977 sindex = pdnxt;
984263bc
MD
1978 continue;
1979 }
1980
1981 /*
1982 * Weed out invalid mappings. Note: we assume that the page
1983 * directory table is always allocated, and in kernel virtual.
1984 */
554cf9ac
MD
1985 if (ptpaddr == 0) {
1986 sindex = pdnxt;
984263bc 1987 continue;
984263bc
MD
1988 }
1989
8790d7d8 1990 /*
554cf9ac
MD
1991 * Sub-scan the page table page. pmap_remove_pte() can
1992 * block on us, invalidating ptbase, so we must reload
1993 * ptbase and we must also check whether the page directory
1994 * page is still present.
8790d7d8 1995 */
554cf9ac 1996 while (sindex < pdnxt) {
984263bc 1997 vm_offset_t va;
8790d7d8
MD
1998
1999 ptbase = get_ptbase(pmap);
554cf9ac
MD
2000 if (ptbase[sindex]) {
2001 va = i386_ptob(sindex);
2002 pmap_remove_pte(pmap, ptbase + sindex,
2003 va, &info);
2004 }
2005 if (pmap->pm_pdir[pdirindex] == 0 ||
2006 (pmap->pm_pdir[pdirindex] & PG_PS)) {
984263bc 2007 break;
554cf9ac
MD
2008 }
2009 ++sindex;
984263bc
MD
2010 }
2011 }
c2fb025d 2012 pmap_inval_done(&info);
4107b0c0 2013 lwkt_reltoken(&vm_token);
b12defdc 2014 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2015}
2016
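/*
 * Illustrative sketch (hypothetical caller, #if 0'd out, not part of this
 * file): pmap_remove() expects page-aligned bounds, and removing exactly
 * one page takes the single-page short circuit above.  trunc_page() is
 * the standard page-rounding macro; example_unmap_one() is an assumed
 * name used only for the example.
 */
#if 0
static void
example_unmap_one(struct pmap *pmap, vm_offset_t va)
{
	va = trunc_page(va);
	pmap_remove(pmap, va, va + PAGE_SIZE);	/* hits the 1-page fast path */
}
#endif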
2017/*
4107b0c0
MD
2018 * Removes this physical page from all physical maps in which it resides.
2019 * Reflects back modify bits to the pager.
984263bc 2020 *
4107b0c0 2021 * No requirements.
984263bc 2022 */
984263bc 2023static void
840de426 2024pmap_remove_all(vm_page_t m)
984263bc 2025{
0f7a3396 2026 struct pmap_inval_info info;
840de426 2027 unsigned *pte, tpte;
0f7a3396 2028 pv_entry_t pv;
984263bc 2029
bee81bdd
SS
2030 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2031 return;
984263bc 2032
0f7a3396 2033 pmap_inval_init(&info);
984263bc 2034 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
eec2b734
MD
2035 KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
2036 --pv->pv_pmap->pm_stats.resident_count;
984263bc
MD
2037
2038 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
c2fb025d 2039 pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
984263bc
MD
2040 tpte = loadandclear(pte);
2041 if (tpte & PG_W)
2042 pv->pv_pmap->pm_stats.wired_count--;
c2fb025d 2043 pmap_inval_deinterlock(&info, pv->pv_pmap);
984263bc
MD
2044 if (tpte & PG_A)
2045 vm_page_flag_set(m, PG_REFERENCED);
c2fb025d 2046 KKASSERT(PHYS_TO_VM_PAGE(tpte) == m);
984263bc
MD
2047
2048 /*
2049 * Update the vm_page_t clean and reference bits.
2050 */
2051 if (tpte & PG_M) {
2052#if defined(PMAP_DIAGNOSTIC)
2053 if (pmap_nw_modified((pt_entry_t) tpte)) {
d557216f
MD
2054 kprintf("pmap_remove_all: modified page "
2055 "not writable: va: %p, pte: 0x%lx\n",
2056 (void *)pv->pv_va, (long)tpte);
984263bc
MD
2057 }
2058#endif
2059 if (pmap_track_modified(pv->pv_va))
2060 vm_page_dirty(m);
2061 }
5926987a
MD
2062#ifdef PMAP_DEBUG
2063 KKASSERT(pv->pv_m == m);
2064#endif
2bb9cc6f 2065 KKASSERT(pv == TAILQ_FIRST(&m->md.pv_list));
984263bc 2066 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
8790d7d8
MD
2067 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2068 ++pv->pv_pmap->pm_generation;
984263bc 2069 m->md.pv_list_count--;
cef01e15
MD
2070 if (m->object)
2071 atomic_add_int(&m->object->agg_pv_list_count, -1);
17cde63e
MD
2072 if (TAILQ_EMPTY(&m->md.pv_list))
2073 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
b12defdc 2074 vm_object_hold(pv->pv_pmap->pm_pteobj);
0f7a3396 2075 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
b12defdc 2076 vm_object_drop(pv->pv_pmap->pm_pteobj);
984263bc
MD
2077 free_pv_entry(pv);
2078 }
17cde63e 2079 KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
c2fb025d 2080 pmap_inval_done(&info);
984263bc
MD
2081}
2082
2083/*
4107b0c0
MD
2084 * Set the physical protection on the specified range of this map
2085 * as requested.
e0e69b7d 2086 *
4107b0c0 2087 * No requirements.
984263bc
MD
2088 */
2089void
2090pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2091{
840de426 2092 unsigned *ptbase;
984263bc
MD
2093 vm_offset_t pdnxt, ptpaddr;
2094 vm_pindex_t sindex, eindex;
0f7a3396 2095 pmap_inval_info info;
984263bc
MD
2096
2097 if (pmap == NULL)
2098 return;
2099
2100 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2101 pmap_remove(pmap, sva, eva);
2102 return;
2103 }
2104
2105 if (prot & VM_PROT_WRITE)
2106 return;
2107
4107b0c0 2108 lwkt_gettoken(&vm_token);
0f7a3396 2109 pmap_inval_init(&info);
984263bc
MD
2110
2111 ptbase = get_ptbase(pmap);
2112
2113 sindex = i386_btop(sva);
2114 eindex = i386_btop(eva);
2115
2116 for (; sindex < eindex; sindex = pdnxt) {
984263bc
MD
2117 unsigned pdirindex;
2118
2119 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
2120
2121 pdirindex = sindex / NPDEPG;
2122 if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
c2fb025d 2123 pmap_inval_interlock(&info, pmap, -1);
55f2596a 2124 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
984263bc 2125 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
c2fb025d 2126 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2127 continue;
2128 }
2129
2130 /*
2131 * Weed out invalid mappings. Note: we assume that the page
2132 * directory table is always allocated, and in kernel virtual.
2133 */
2134 if (ptpaddr == 0)
2135 continue;
2136
2137 if (pdnxt > eindex) {
2138 pdnxt = eindex;
2139 }
2140
2141 for (; sindex != pdnxt; sindex++) {
984263bc 2142 unsigned pbits;
c2fb025d 2143 unsigned cbits;
984263bc
MD
2144 vm_page_t m;
2145
17cde63e 2146 /*
d5b2d319 2147 * XXX non-optimal.
17cde63e 2148 */
c2fb025d
MD
2149 pmap_inval_interlock(&info, pmap, i386_ptob(sindex));
2150again:
984263bc 2151 pbits = ptbase[sindex];
c2fb025d 2152 cbits = pbits;
984263bc
MD
2153
2154 if (pbits & PG_MANAGED) {
2155 m = NULL;
2156 if (pbits & PG_A) {
2157 m = PHYS_TO_VM_PAGE(pbits);
2158 vm_page_flag_set(m, PG_REFERENCED);
c2fb025d 2159 cbits &= ~PG_A;
984263bc
MD
2160 }
2161 if (pbits & PG_M) {
2162 if (pmap_track_modified(i386_ptob(sindex))) {
2163 if (m == NULL)
2164 m = PHYS_TO_VM_PAGE(pbits);
2165 vm_page_dirty(m);
c2fb025d 2166 cbits &= ~PG_M;
984263bc
MD
2167 }
2168 }
2169 }
c2fb025d
MD
2170 cbits &= ~PG_RW;
2171 if (pbits != cbits &&
2172 !atomic_cmpset_int(ptbase + sindex, pbits, cbits)) {
2173 goto again;
984263bc 2174 }
c2fb025d 2175 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2176 }
2177 }
c2fb025d 2178 pmap_inval_done(&info);
4107b0c0 2179 lwkt_reltoken(&vm_token);
984263bc
MD
2180}
2181
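/*
 * Illustrative sketch (#if 0'd out, hypothetical helper): the lock-free
 * retry loop pmap_protect() uses above when stripping PG_RW/PG_A/PG_M
 * from a live pte.  A blind store could lose a PG_M update made by
 * another cpu between the read and the write; atomic_cmpset_int()
 * detects the race and the loop retries.  The real code additionally
 * brackets the update with pmap_inval_interlock()/deinterlock().
 */
#if 0
static void
example_pte_clear_bits(unsigned *pte, unsigned bits)
{
	unsigned pbits, cbits;

	do {
		pbits = *pte;			/* snapshot current pte */
		cbits = pbits & ~bits;		/* desired value */
	} while (pbits != cbits && !atomic_cmpset_int(pte, pbits, cbits));
}
#endif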
2182/*
4107b0c0
MD
2183 * Insert the given physical page (p) at the specified virtual address (v)
2184 * in the target physical map with the protection requested.
984263bc 2185 *
4107b0c0
MD
2186 * If specified, the page will be wired down, meaning that the related pte
2187 * cannot be reclaimed.
984263bc 2188 *
4107b0c0 2189 * No requirements.
984263bc
MD
2190 */
2191void
2192pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2193 boolean_t wired)
2194{
6ef943a3 2195 vm_paddr_t pa;
840de426 2196 unsigned *pte;
6ef943a3 2197 vm_paddr_t opa;
984263bc
MD
2198 vm_offset_t origpte, newpte;
2199 vm_page_t mpte;
0f7a3396 2200 pmap_inval_info info;
2bb9cc6f 2201 pv_entry_t pv;
984263bc
MD
2202
2203 if (pmap == NULL)
2204 return;
2205
2206 va &= PG_FRAME;
2207#ifdef PMAP_DIAGNOSTIC
c439ad8f 2208 if (va >= KvaEnd)
984263bc 2209 panic("pmap_enter: toobig");
d557216f
MD
2210 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) {
2211 panic("pmap_enter: invalid to pmap_enter page "
2212 "table pages (va: %p)", (void *)va);
2213 }
984263bc 2214#endif
fbbaeba3
MD
2215 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2216 kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
7ce2998e 2217 print_backtrace(-1);
fbbaeba3
MD
2218 }
2219 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2220 kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
7ce2998e 2221 print_backtrace(-1);
fbbaeba3 2222 }
984263bc 2223
b12defdc 2224 vm_object_hold(pmap->pm_pteobj);
4107b0c0
MD
2225 lwkt_gettoken(&vm_token);
2226
984263bc 2227 /*
2bb9cc6f
MD
2228 * This can block; get it before we do anything important.
2229 */
2230 if (pmap_initialized &&
2231 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2232 pv = get_pv_entry();
2233 } else {
2234 pv = NULL;
2235 }
2236
2237 /*
984263bc
MD
2238 * In the case that a page table page is not
2239 * resident, we are creating it here.
2240 */
17cde63e 2241 if (va < UPT_MIN_ADDRESS)
984263bc 2242 mpte = pmap_allocpte(pmap, va);
17cde63e
MD
2243 else
2244 mpte = NULL;
984263bc 2245
b12defdc
MD
2246 if ((prot & VM_PROT_NOSYNC) == 0)
2247 pmap_inval_init(&info);
984263bc
MD
2248 pte = pmap_pte(pmap, va);
2249
2250 /*
2251 * Page Directory table entry not valid, we need a new PT page
2252 */
2253 if (pte == NULL) {
ed20d0e3 2254 panic("pmap_enter: invalid page directory pdir=0x%lx, va=%p",
d557216f 2255 (long)pmap->pm_pdir[PTDPTDI], (void *)va);
984263bc
MD
2256 }
2257
2258 pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
2259 origpte = *(vm_offset_t *)pte;
2260 opa = origpte & PG_FRAME;
2261
2262 if (origpte & PG_PS)
2263 panic("pmap_enter: attempted pmap_enter on 4MB page");
2264
2265 /*
2266 * Mapping has not changed, must be protection or wiring change.
2267 */
2268 if (origpte && (opa == pa)) {
2269 /*
2270 * Wiring change, just update stats. We don't worry about
2271 * wiring PT pages as they remain resident as long as there
2272 * are valid mappings in them. Hence, if a user page is wired,
2273 * the PT page will be also.
2274 */
2275 if (wired && ((origpte & PG_W) == 0))
2276 pmap->pm_stats.wired_count++;
2277 else if (!wired && (origpte & PG_W))
2278 pmap->pm_stats.wired_count--;
2279
2280#if defined(PMAP_DIAGNOSTIC)
2281 if (pmap_nw_modified((pt_entry_t) origpte)) {
d557216f
MD
2282 kprintf("pmap_enter: modified page not "
2283 "writable: va: %p, pte: 0x%lx\n",
2284 (void *)va, (long )origpte);
984263bc
MD
2285 }
2286#endif
2287
2288 /*
984263bc
MD
2289 * We might be turning off write access to the page,
2290 * so we go ahead and sense modify status.
2291 */
2292 if (origpte & PG_MANAGED) {
2293 if ((origpte & PG_M) && pmap_track_modified(va)) {
2294 vm_page_t om;
2295 om = PHYS_TO_VM_PAGE(opa);
2296 vm_page_dirty(om);
2297 }
2298 pa |= PG_MANAGED;
17cde63e 2299 KKASSERT(m->flags & PG_MAPPED);
984263bc
MD
2300 }
2301 goto validate;
2302 }
2303 /*
2304 * Mapping has changed, invalidate old range and fall through to
2305 * handle validating new mapping.
5926987a
MD
2306 *
2307 * Since we have a ref on the page directory page pmap_pte()
2308 * will always return non-NULL.
2309 *
2310 * NOTE: pmap_remove_pte() can block and cause the temporary ptbase
2311 * to get wiped. reload the ptbase. I'm not sure if it is
2312 * also possible to race another pmap_enter() but check for
2313 * that case too.
984263bc 2314 */
5926987a 2315 while (opa) {
5926987a
MD
2316 KKASSERT((origpte & PG_FRAME) ==
2317 (*(vm_offset_t *)pte & PG_FRAME));
554cf9ac 2318 pmap_remove_pte(pmap, pte, va, &info);
5926987a
MD
2319 pte = pmap_pte(pmap, va);
2320 origpte = *(vm_offset_t *)pte;
2321 opa = origpte & PG_FRAME;
2322 if (opa) {
2323 kprintf("pmap_enter: Warning, raced pmap %p va %p\n",
2324 pmap, (void *)va);
2325 }
984263bc
MD
2326 }
2327
2328 /*
2329 * Enter on the PV list if part of our managed memory. Note that we
2330 * raise IPL while manipulating pv_table since pmap_enter can be
2331 * called at interrupt time.
2332 */
2333 if (pmap_initialized &&
2334 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2bb9cc6f
MD
2335 pmap_insert_entry(pmap, pv, va, mpte, m);
2336 pv = NULL;
5926987a 2337 ptbase_assert(pmap);
984263bc 2338 pa |= PG_MANAGED;
17cde63e 2339 vm_page_flag_set(m, PG_MAPPED);
984263bc
MD
2340 }
2341
2342 /*
2343 * Increment counters
2344 */
eec2b734 2345 ++pmap->pm_stats.resident_count;
984263bc
MD
2346 if (wired)
2347 pmap->pm_stats.wired_count++;
5926987a 2348 KKASSERT(*pte == 0);
984263bc
MD
2349
2350validate:
2351 /*
2352 * Now validate mapping with desired protection/wiring.
2353 */
5926987a 2354 ptbase_assert(pmap);
984263bc
MD
2355 newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
2356
2357 if (wired)
2358 newpte |= PG_W;
2359 if (va < UPT_MIN_ADDRESS)
2360 newpte |= PG_U;
fbbaeba3 2361 if (pmap == &kernel_pmap)
984263bc
MD
2362 newpte |= pgeflag;
2363
2364 /*
2bb9cc6f
MD
2365 * If the mapping or permission bits are different, we need
2366 * to update the pte. If the pte is already present we have
2367 * to get rid of the extra wire-count on mpte we had obtained
2368 * above.
b1482674
MD
2369 *
2370 * mpte has a new wire_count, which also serves to prevent the
2371 * page table page from getting ripped out while we work. If we
2372 * are modifying an existing pte instead of installing a new one
2373 * we have to drop it.
984263bc
MD
2374 */
2375 if ((origpte & ~(PG_M|PG_A)) != newpte) {
b12defdc
MD
2376 if (prot & VM_PROT_NOSYNC)
2377 cpu_invlpg((void *)va);
2378 else
2379 pmap_inval_interlock(&info, pmap, va);
5926987a 2380 ptbase_assert(pmap);
2bb9cc6f
MD
2381
2382 if (*pte) {
2383 KKASSERT((*pte & PG_FRAME) == (newpte & PG_FRAME));
2384 if (vm_page_unwire_quick(mpte))
2385 panic("pmap_enter: Insufficient wire_count");
2386 }
2387
984263bc 2388 *pte = newpte | PG_A;
b12defdc
MD
2389 if ((prot & VM_PROT_NOSYNC) == 0)
2390 pmap_inval_deinterlock(&info, pmap);
17cde63e
MD
2391 if (newpte & PG_RW)
2392 vm_page_flag_set(m, PG_WRITEABLE);
b1482674
MD
2393 } else {
2394 if (*pte) {
2395 KKASSERT((*pte & PG_FRAME) == (newpte & PG_FRAME));
2396 if (vm_page_unwire_quick(mpte))
2397 panic("pmap_enter: Insufficient wire_count");
2398 }
984263bc 2399 }
b1482674
MD
2400
2401 /*
2402 * NOTE: mpte invalid after this point if we block.
2403 */
c695044a 2404 KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
b12defdc
MD
2405 if ((prot & VM_PROT_NOSYNC) == 0)
2406 pmap_inval_done(&info);
2bb9cc6f
MD
2407 if (pv)
2408 free_pv_entry(pv);
4107b0c0 2409 lwkt_reltoken(&vm_token);
b12defdc 2410 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2411}
2412
2413/*
17cde63e
MD
2414 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
2415 * This code also assumes that the pmap has no pre-existing entry for this
2416 * VA.
2417 *
2418 * This code currently may only be used on user pmaps, not kernel_pmap.
4107b0c0
MD
2419 *
2420 * No requirements.
984263bc 2421 */
1b9d3514 2422void
17cde63e 2423pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
984263bc
MD
2424{
2425 unsigned *pte;
6ef943a3 2426 vm_paddr_t pa;
17cde63e
MD
2427 vm_page_t mpte;
2428 unsigned ptepindex;
2429 vm_offset_t ptepa;
0f7a3396 2430 pmap_inval_info info;
2bb9cc6f 2431 pv_entry_t pv;
0f7a3396 2432
b12defdc 2433 vm_object_hold(pmap->pm_pteobj);
4107b0c0 2434 lwkt_gettoken(&vm_token);
2bb9cc6f
MD
2435
2436 /*
2437 * This can block; get it before we do anything important.
2438 */
2439 if (pmap_initialized &&
2440 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2441 pv = get_pv_entry();
2442 } else {
2443 pv = NULL;
2444 }
2445
0f7a3396 2446 pmap_inval_init(&info);
984263bc 2447
fbbaeba3
MD
2448 if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
2449 kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
7ce2998e 2450 print_backtrace(-1);
fbbaeba3
MD
2451 }
2452 if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
2453 kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
7ce2998e 2454 print_backtrace(-1);
fbbaeba3
MD
2455 }
2456
17cde63e
MD
2457 KKASSERT(va < UPT_MIN_ADDRESS); /* assert used on user pmaps only */
2458
984263bc 2459 /*
17cde63e
MD
2460 * Calculate the page table page (mpte), allocating it if necessary.
2461 *
2462 * A held page table page (mpte), or NULL, is passed onto the
2463 * section following.
984263bc
MD
2464 */
2465 if (va < UPT_MIN_ADDRESS) {
984263bc
MD
2466 /*
2467 * Calculate pagetable page index
2468 */
2469 ptepindex = va >> PDRSHIFT;
17cde63e
MD
2470
2471 do {
984263bc
MD
2472 /*
2473 * Get the page directory entry
2474 */
2475 ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
2476
2477 /*
2478 * If the page table page is mapped, we just increment
90244566 2479 * the wire count, and activate it.
984263bc
MD
2480 */
2481 if (ptepa) {
2482 if (ptepa & PG_PS)
2483 panic("pmap_enter_quick: unexpected mapping into 4MB page");
b1482674
MD
2484 if ((mpte = pmap->pm_ptphint) != NULL &&
2485 (mpte->pindex == ptepindex) &&
2486 (mpte->flags & PG_BUSY) == 0) {
2bb9cc6f 2487 vm_page_wire_quick(mpte);
984263bc 2488 } else {
2bb9cc6f
MD
2489 mpte = pmap_page_lookup(pmap->pm_pteobj,
2490 ptepindex);
984263bc 2491 pmap->pm_ptphint = mpte;
2bb9cc6f 2492 vm_page_wire_quick(mpte);
b12defdc 2493 vm_page_wakeup(mpte);
984263bc 2494 }
984263bc
MD
2495 } else {
2496 mpte = _pmap_allocpte(pmap, ptepindex);
2497 }
17cde63e 2498 } while (mpte == NULL);
984263bc
MD
2499 } else {
2500 mpte = NULL;
17cde63e 2501 /* this code path is not yet used */
984263bc
MD
2502 }
2503
2504 /*
17cde63e
MD
2505 * With a valid (and held) page directory page, we can just use
2506 * vtopte() to get to the pte. If the pte is already present
2507 * we do not disturb it.
984263bc
MD
2508 */
2509 pte = (unsigned *)vtopte(va);
554cf9ac
MD
2510 if (*pte) {
2511 KKASSERT(*pte & PG_V);
17cde63e
MD
2512 pa = VM_PAGE_TO_PHYS(m);
2513 KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
c2fb025d 2514 pmap_inval_done(&info);
b1482674
MD
2515 if (mpte)
2516 pmap_unwire_pte(pmap, mpte, &info);
b1482674 2517 if (pv) {
2bb9cc6f 2518 free_pv_entry(pv);
b1482674
MD
2519 /* pv = NULL; */
2520 }
554cf9ac
MD
2521 lwkt_reltoken(&vm_token);
2522 vm_object_drop(pmap->pm_pteobj);
17cde63e 2523 return;
984263bc
MD
2524 }
2525
2526 /*
17cde63e 2527 * Enter on the PV list if part of our managed memory
984263bc 2528 */
2bb9cc6f
MD
2529 if (pmap_initialized &&
2530 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2531 pmap_insert_entry(pmap, pv, va, mpte, m);
2532 pv = NULL;
17cde63e
MD
2533 vm_page_flag_set(m, PG_MAPPED);
2534 }
984263bc
MD
2535
2536 /*
2537 * Increment counters
2538 */
eec2b734 2539 ++pmap->pm_stats.resident_count;
984263bc
MD
2540
2541 pa = VM_PAGE_TO_PHYS(m);
2542
2543 /*
2544 * Now validate mapping with RO protection
2545 */
2546 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2547 *pte = pa | PG_V | PG_U;
2548 else
2549 *pte = pa | PG_V | PG_U | PG_MANAGED;
17cde63e 2550/* pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
c2fb025d 2551 pmap_inval_done(&info);
b1482674 2552 if (pv) {
2bb9cc6f 2553 free_pv_entry(pv);
b1482674
MD
2554 /* pv = NULL; */
2555 }
4107b0c0 2556 lwkt_reltoken(&vm_token);
b12defdc 2557 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
2558}
2559
2560/*
2561 * Make a temporary mapping for a physical address. This is only intended
2562 * to be used for panic dumps.
4107b0c0 2563 *
fb8345e6
MD
2564 * The caller is responsible for calling smp_invltlb().
2565 *
4107b0c0 2566 * No requirements.
984263bc
MD
2567 */
2568void *
8e5ea5f7 2569pmap_kenter_temporary(vm_paddr_t pa, long i)
984263bc 2570{
fb8345e6 2571 pmap_kenter_quick((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
984263bc
MD
2572 return ((void *)crashdumpmap);
2573}
2574
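/*
 * Illustrative sketch (#if 0'd out, hypothetical dump helper): the
 * routine above only installs the mapping into crashdumpmap; the caller
 * must invalidate the TLBs itself, e.g. with smp_invltlb(), before
 * touching the returned address.
 */
#if 0
static void
example_dump_page(vm_paddr_t pa)
{
	char *va;

	va = pmap_kenter_temporary(pa, 0);
	smp_invltlb();			/* caller's responsibility */
	/* ... hand the PAGE_SIZE bytes at va to the dump device ... */
}
#endif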
2575#define MAX_INIT_PT (96)
06ecca5a 2576
984263bc 2577/*
06ecca5a
MD
2578 * This routine preloads the ptes for a given object into the specified pmap.
2579 * This eliminates the blast of soft faults on process startup and
2580 * immediately after an mmap.
4107b0c0
MD
2581 *
2582 * No requirements.
984263bc 2583 */
1f804340
MD
2584static int pmap_object_init_pt_callback(vm_page_t p, void *data);
2585
984263bc 2586void
083a7402
MD
2587pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
2588 vm_object_t object, vm_pindex_t pindex,
2589 vm_size_t size, int limit)
984263bc 2590{
1f804340 2591 struct rb_vm_page_scan_info info;
287ebb09 2592 struct lwp *lp;
984263bc 2593 int psize;
984263bc 2594
54a764e8
MD
2595 /*
2596 * We can't preinit if read access isn't set or there is no pmap
2597 * or object.
2598 */
083a7402 2599 if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
984263bc
MD
2600 return;
2601
54a764e8
MD
2602 /*
2603 * We can't preinit if the pmap is not the current pmap
2604 */
287ebb09
MD
2605 lp = curthread->td_lwp;
2606 if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
54a764e8
MD
2607 return;
2608
984263bc
MD
2609 psize = i386_btop(size);
2610
2611 if ((object->type != OBJT_VNODE) ||
2612 ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
2613 (object->resident_page_count > MAX_INIT_PT))) {
2614 return;
2615 }
2616
2617 if (psize + pindex > object->size) {
2618 if (object->size < pindex)
2619 return;
2620 psize = object->size - pindex;
2621 }
2622
1f804340
MD
2623 if (psize == 0)
2624 return;
06ecca5a 2625
984263bc 2626 /*
1f804340
MD
2627 * Use a red-black scan to traverse the requested range and load
2628 * any valid pages found into the pmap.
06ecca5a 2629 *
9acd5bbb
MD
2630 * We cannot safely scan the object's memq unless we are in a
2631 * critical section since interrupts can remove pages from objects.
984263bc 2632 */
1f804340
MD
2633 info.start_pindex = pindex;
2634 info.end_pindex = pindex + psize - 1;
2635 info.limit = limit;
2636 info.mpte = NULL;
2637 info.addr = addr;
2638 info.pmap = pmap;
2639
2f2d9e58 2640 vm_object_hold(object);
1f804340
MD
2641 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2642 pmap_object_init_pt_callback, &info);
2f2d9e58 2643 vm_object_drop(object);
1f804340 2644}
06ecca5a 2645
4107b0c0
MD
2646/*
2647 * The caller must hold vm_token.
2648 */
1f804340
MD
2649static
2650int
2651pmap_object_init_pt_callback(vm_page_t p, void *data)
2652{
2653 struct rb_vm_page_scan_info *info = data;
2654 vm_pindex_t rel_index;
2655 /*
2656 * don't allow an madvise to blow away our really
2657 * free pages allocating pv entries.
2658 */
2659 if ((info->limit & MAP_PREFAULT_MADVISE) &&
2660 vmstats.v_free_count < vmstats.v_free_reserved) {
2661 return(-1);
984263bc 2662 }
0d987a03
MD
2663
2664 /*
2665 * Ignore list markers and ignore pages we cannot instantly
2666 * busy (while holding the object token).
2667 */
2668 if (p->flags & PG_MARKER)
2669 return 0;
b12defdc
MD
2670 if (vm_page_busy_try(p, TRUE))
2671 return 0;
1f804340 2672 if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
b12defdc 2673 (p->flags & PG_FICTITIOUS) == 0) {
1f804340
MD
2674 if ((p->queue - p->pc) == PQ_CACHE)
2675 vm_page_deactivate(p);
1f804340 2676 rel_index = p->pindex - info->start_pindex;
17cde63e
MD
2677 pmap_enter_quick(info->pmap,
2678 info->addr + i386_ptob(rel_index), p);
1f804340 2679 }
b12defdc 2680 vm_page_wakeup(p);
1f804340 2681 return(0);
984263bc
MD
2682}
2683
2684/*
1b9d3514
MD
2685 * Return TRUE if the pmap is in shape to trivially
2686 * pre-fault the specified address.
2687 *
2688 * Returns FALSE if it would be non-trivial or if a
2689 * pte is already loaded into the slot.
4107b0c0
MD
2690 *
2691 * No requirements.
984263bc 2692 */
1b9d3514
MD
2693int
2694pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
984263bc 2695{
1b9d3514 2696 unsigned *pte;
4107b0c0 2697 int ret;
984263bc 2698
4107b0c0
MD
2699 lwkt_gettoken(&vm_token);
2700 if ((*pmap_pde(pmap, addr)) == 0) {
2701 ret = 0;
2702 } else {
2703 pte = (unsigned *) vtopte(addr);
2704 ret = (*pte) ? 0 : 1;
2705 }
2706 lwkt_reltoken(&vm_token);
2707 return(ret);
984263bc
MD
2708}
2709
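/*
 * Illustrative sketch (#if 0'd out, hypothetical caller): how a prefault
 * path might combine pmap_prefault_ok() with pmap_enter_quick() -- only
 * enter a read-only, unwired mapping when the slot is trivially
 * available.  The example assumes a user va, since pmap_enter_quick()
 * asserts user pmap usage.
 */
#if 0
static void
example_prefault(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	if (pmap_prefault_ok(pmap, va))
		pmap_enter_quick(pmap, va, m);
}
#endif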
2710/*
4107b0c0
MD
2711 * Change the wiring attribute for a map/virtual-address pair. The mapping
2712 * must already exist.
2713 *
2714 * No requirements.
984263bc
MD
2715 */
2716void
840de426 2717pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
984263bc 2718{
840de426 2719 unsigned *pte;
984263bc
MD
2720
2721 if (pmap == NULL)
2722 return;
2723
4107b0c0 2724 lwkt_gettoken(&vm_token);
984263bc
MD
2725 pte = pmap_pte(pmap, va);
2726
2727 if (wired && !pmap_pte_w(pte))
2728 pmap->pm_stats.wired_count++;
2729 else if (!wired && pmap_pte_w(pte))
2730 pmap->pm_stats.wired_count--;
2731
2732 /*
2733 * Wiring is not a hardware characteristic so there is no need to
0f7a3396
MD
2734 * invalidate TLB. However, in an SMP environment we must use
2735 * a locked bus cycle to update the pte (if we are not using
2736 * the pmap_inval_*() API that is)... it's ok to do this for simple
2737 * wiring changes.
984263bc 2738 */
0f7a3396
MD
2739#ifdef SMP
2740 if (wired)
2741 atomic_set_int(pte, PG_W);
2742 else
2743 atomic_clear_int(pte, PG_W);
2744#else
2745 if (wired)
2746 atomic_set_int_nonlocked(pte, PG_W);
2747 else
2748 atomic_clear_int_nonlocked(pte, PG_W);
2749#endif
4107b0c0 2750 lwkt_reltoken(&vm_token);
984263bc
MD
2751}
2752
984263bc 2753/*
4107b0c0
MD
2754 * Copy the range specified by src_addr/len from the source map to the
2755 * range dst_addr/len in the destination map.
2756 *
2757 * This routine is only advisory and need not do anything.
984263bc 2758 *
4107b0c0 2759 * No requirements.
984263bc 2760 */
984263bc 2761void
840de426 2762pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
4107b0c0 2763 vm_size_t len, vm_offset_t src_addr)
984263bc 2764{
4107b0c0 2765 /* does nothing */
984263bc
MD
2766}
2767
2768/*
4107b0c0
MD
2769 * Zero the specified PA by mapping the page into KVM and clearing its
2770 * contents.
e0e69b7d 2771 *
4107b0c0 2772 * No requirements.
984263bc
MD
2773 */
2774void
6ef943a3 2775pmap_zero_page(vm_paddr_t phys)
984263bc 2776{
85100692 2777 struct mdglobaldata *gd = mdcpu;
17a9f566 2778
e0e69b7d 2779 crit_enter();
85100692
MD
2780 if (*(int *)gd->gd_CMAP3)
2781 panic("pmap_zero_page: CMAP3 busy");
85100692 2782 *(int *)gd->gd_CMAP3 =
17a9f566 2783 PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
85100692 2784 cpu_invlpg(gd->gd_CADDR3);
1fa15583 2785 bzero(gd->gd_CADDR3, PAGE_SIZE);
85100692 2786 *(int *) gd->gd_CMAP3 = 0;
e0e69b7d 2787 crit_exit();
8100156a
MD
2788}
2789
2790/*
4107b0c0 2791 * Assert that a page is empty, panic if it isn't.
8100156a 2792 *
4107b0c0 2793 * No requirements.
8100156a
MD
2794 */
2795void
2796pmap_page_assertzero(vm_paddr_t phys)
2797{
2798 struct mdglobaldata *gd = mdcpu;
2799 int i;
2800
2801 crit_enter();
2802 if (*(int *)gd->gd_CMAP3)
2803 panic("pmap_zero_page: CMAP3 busy");
2804 *(int *)gd->gd_CMAP3 =
2805 PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2806 cpu_invlpg(gd->gd_CADDR3);
2807 for (i = 0; i < PAGE_SIZE; i += 4) {
2808 if (*(int *)((char *)gd->gd_CADDR3 + i) != 0) {
ed20d0e3 2809 panic("pmap_page_assertzero() @ %p not zero!",
8100156a
MD
2810 (void *)gd->gd_CADDR3);
2811 }
2812 }
2813 *(int *) gd->gd_CMAP3 = 0;
2814 crit_exit();
984263bc
MD
2815}
2816
2817/*
4107b0c0
MD
2818 * Zero part of a physical page by mapping it into memory and clearing
2819 * its contents with bzero.
e0e69b7d 2820 *
4107b0c0 2821 * off and size may not cover an area beyond a single hardware page.
984263bc 2822 *
4107b0c0 2823 * No requirements.
984263bc
MD
2824 */
2825void
6ef943a3 2826pmap_zero_page_area(vm_paddr_t phys, int off, int size)
984263bc 2827{
85100692 2828 struct mdglobaldata *gd = mdcpu;
17a9f566 2829
e0e69b7d 2830 crit_enter();
85100692
MD
2831 if (*(int *) gd->gd_CMAP3)
2832 panic("pmap_zero_page: CMAP3 busy");
85100692
MD
2833 *(int *) gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2834 cpu_invlpg(gd->gd_CADDR3);
1fa15583 2835 bzero((char *)gd->gd_CADDR3 + off, size);
85100692 2836 *(int *) gd->gd_CMAP3 = 0;
e0e69b7d 2837 crit_exit();
984263bc
MD
2838}
2839
2840/*
4107b0c0
MD
2841 * Copy the physical page from the source PA to the target PA.
2842 * This function may be called from an interrupt. No locking
2843 * is required.
e0e69b7d 2844 *
4107b0c0 2845 * No requirements.
984263bc
MD
2846 */
2847void
6ef943a3 2848pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
984263bc 2849{
85100692 2850 struct mdglobaldata *gd = mdcpu;
17a9f566 2851
e0e69b7d 2852 crit_enter();
85100692
MD
2853 if (*(int *) gd->gd_CMAP1)
2854 panic("pmap_copy_page: CMAP1 busy");
2855 if (*(int *) gd->gd_CMAP2)
2856 panic("pmap_copy_page: CMAP2 busy");
984263bc 2857
85100692
MD
2858 *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2859 *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
984263bc 2860
85100692
MD
2861 cpu_invlpg(gd->gd_CADDR1);
2862 cpu_invlpg(gd->gd_CADDR2);
984263bc 2863
85100692 2864 bcopy(gd->gd_CADDR1, gd->gd_CADDR2, PAGE_SIZE);
984263bc 2865
85100692
MD
2866 *(int *) gd->gd_CMAP1 = 0;
2867 *(int *) gd->gd_CMAP2 = 0;
e0e69b7d 2868 crit_exit();
984263bc
MD
2869}
2870
f6bf3af1 2871/*
4107b0c0
MD
2872 * Copy the physical page from the source PA to the target PA.
2873 * This function may be called from an interrupt. No locking
2874 * is required.
f6bf3af1 2875 *
4107b0c0 2876 * No requirements.
f6bf3af1
MD
2877 */
2878void
2879pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
2880{
2881 struct mdglobaldata *gd = mdcpu;
2882
2883 crit_enter();
2884 if (*(int *) gd->gd_CMAP1)
2885 panic("pmap_copy_page: CMAP1 busy");
2886 if (*(int *) gd->gd_CMAP2)
2887 panic("pmap_copy_page: CMAP2 busy");
2888
2889 *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2890 *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
2891
2892 cpu_invlpg(gd->gd_CADDR1);
2893 cpu_invlpg(gd->gd_CADDR2);
2894
2895 bcopy((char *)gd->gd_CADDR1 + (src & PAGE_MASK),
2896 (char *)gd->gd_CADDR2 + (dst & PAGE_MASK),
2897 bytes);
2898
2899 *(int *) gd->gd_CMAP1 = 0;
2900 *(int *) gd->gd_CMAP2 = 0;
2901 crit_exit();
2902}
2903
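/*
 * Illustrative sketch (#if 0'd out, hypothetical caller): the low bits of
 * the physical addresses passed to pmap_copy_page_frag() select the
 * offsets within the source and destination pages, as the PAGE_MASK
 * arithmetic above shows.  The offsets plus the byte count are assumed
 * to stay within a single page.  srcpg/dstpg/soff/doff are hypothetical.
 */
#if 0
static void
example_copy_frag(vm_page_t srcpg, int soff, vm_page_t dstpg, int doff,
		  size_t bytes)
{
	pmap_copy_page_frag(VM_PAGE_TO_PHYS(srcpg) + soff,
			    VM_PAGE_TO_PHYS(dstpg) + doff, bytes);
}
#endif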
984263bc
MD
2904/*
2905 * Returns true if the pmap's pv is one of the first
2906 * 16 pvs linked to from this page. This count may
2907 * be changed upwards or downwards in the future; it
2908 * is only necessary that true be returned for a small
2909 * subset of pmaps for proper page aging.
4107b0c0
MD
2910 *
2911 * No requirements.
984263bc
MD
2912 */
2913boolean_t
840de426 2914pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
984263bc
MD
2915{
2916 pv_entry_t pv;
2917 int loops = 0;
984263bc
MD
2918
2919 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2920 return FALSE;
2921
4107b0c0 2922 lwkt_gettoken(&vm_token);
984263bc
MD
2923 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2924 if (pv->pv_pmap == pmap) {
11502947 2925 lwkt_reltoken(&vm_token);
984263bc
MD
2926 return TRUE;
2927 }
2928 loops++;
2929 if (loops >= 16)
2930 break;
2931 }
4107b0c0 2932 lwkt_reltoken(&vm_token);
984263bc
MD
2933 return (FALSE);
2934}
2935
984263bc
MD
2936/*
2937 * Remove all pages from the specified address space;
2938 * this aids process exit speeds. Also, this code
2939 * is special cased for current process only, but
2940 * can have the more generic (and slightly slower)
2941 * mode enabled. This is much faster than pmap_remove
2942 * in the case of running down an entire address space.
4107b0c0
MD
2943 *
2944 * No requirements.
984263bc
MD
2945 */
2946void
840de426 2947pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
984263bc 2948{
287ebb09 2949 struct lwp *lp;
984263bc
MD
2950 unsigned *pte, tpte;
2951 pv_entry_t pv, npv;
984263bc 2952 vm_page_t m;
0f7a3396 2953 pmap_inval_info info;
4a22e893 2954 int iscurrentpmap;
8790d7d8 2955 int32_t save_generation;
984263bc 2956
287ebb09
MD
2957 lp = curthread->td_lwp;
2958 if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
4a22e893
MD
2959 iscurrentpmap = 1;
2960 else
2961 iscurrentpmap = 0;
984263bc 2962
b12defdc
MD
2963 if (pmap->pm_pteobj)
2964 vm_object_hold(pmap->pm_pteobj);
4107b0c0 2965 lwkt_gettoken(&vm_token);
0f7a3396 2966 pmap_inval_init(&info);
b12defdc 2967
4a22e893 2968 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
984263bc
MD
2969 if (pv->pv_va >= eva || pv->pv_va < sva) {
2970 npv = TAILQ_NEXT(pv, pv_plist);
2971 continue;
2972 }
2973
8790d7d8
MD
2974 KKASSERT(pmap == pv->pv_pmap);
2975
4a22e893
MD
2976 if (iscurrentpmap)
2977 pte = (unsigned *)vtopte(pv->pv_va);
2978 else
8790d7d8 2979 pte = pmap_pte_quick(pmap, pv->pv_va);
5926987a 2980 KKASSERT(*pte);
c2fb025d 2981 pmap_inval_interlock(&info, pmap, pv->pv_va);
984263bc 2982
4a22e893
MD
2983 /*
2984 * We cannot remove wired pages from a process' mapping
2985 * at this time
2986 */
17cde63e 2987 if (*pte & PG_W) {
c2fb025d 2988 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2989 npv = TAILQ_NEXT(pv, pv_plist);
2990 continue;
2991 }
2247fe02 2992 KKASSERT(*pte);
17cde63e 2993 tpte = loadandclear(pte);
c2fb025d 2994 pmap_inval_deinterlock(&info, pmap);
984263bc
MD
2995
2996 m = PHYS_TO_VM_PAGE(tpte);
5926987a 2997 test_m_maps_pv(m, pv);
984263bc
MD
2998
2999 KASSERT(m < &vm_page_array[vm_page_array_size],
3000 ("pmap_remove_pages: bad tpte %x", tpte));
3001
eec2b734
MD
3002 KKASSERT(pmap->pm_stats.resident_count > 0);
3003 --pmap->pm_stats.resident_count;
984263bc
MD
3004
3005 /*
3006 * Update the vm_page_t clean and reference bits.
3007 */
3008 if (tpte & PG_M) {
3009 vm_page_dirty(m);
3010 }
3011
984263bc 3012 npv = TAILQ_NEXT(pv, pv_plist);
5926987a
MD
3013#ifdef PMAP_DEBUG
3014 KKASSERT(pv->pv_m == m);
3015 KKASSERT(pv->pv_pmap == pmap);
3016#endif
8790d7d8
MD
3017 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
3018 save_generation = ++pmap->pm_generation;
984263bc
MD
3019
3020 m->md.pv_list_count--;
cef01e15
MD
3021 if (m->object)
3022 atomic_add_int(&m->object->agg_pv_list_count, -1);
984263bc 3023 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
17cde63e 3024 if (TAILQ_EMPTY(&m->md.pv_list))
984263bc 3025 vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
984263bc 3026
8790d7d8 3027 pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
984263bc 3028 free_pv_entry(pv);
8790d7d8
MD
3029
3030 /*
3031 * Restart the scan if we blocked during the unuse or free
3032 * calls and other removals were made.
3033 */
3034 if (save_generation != pmap->pm_generation) {
3035 kprintf("Warning: pmap_remove_pages race-A avoided\n");
5926987a 3036 npv = TAILQ_FIRST(&pmap->pm_pvlist);
8790d7d8 3037 }
984263bc 3038 }
c2fb025d 3039 pmap_inval_done(&info);
4107b0c0 3040 lwkt_reltoken(&vm_token);
b12defdc
MD
3041 if (pmap->pm_pteobj)
3042 vm_object_drop(pmap->pm_pteobj);
984263bc
MD
3043}
3044
3045/*
3046 * pmap_testbit tests bits in ptes.
5e8d0349 3047 * Note that the testbit/clearbit routines are inline,
984263bc 3048 * so a lot of things compile-time evaluate.
4107b0c0
MD
3049 *
3050 * The caller must hold vm_token.
984263bc
MD
3051 */
3052static boolean_t
840de426 3053pmap_testbit(vm_page_t m, int bit)
984263bc
MD
3054{
3055 pv_entry_t pv;
3056 unsigned *pte;
984263bc
MD
3057
3058 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3059 return FALSE;
3060
3061 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
3062 return FALSE;
3063
984263bc
MD
3064 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3065 /*
3066 * if the bit being tested is the modified bit, then
3067 * mark clean_map and ptes as never
3068 * modified.
3069 */
3070 if (bit & (PG_A|PG_M)) {
3071 if (!pmap_track_modified(pv->pv_va))
3072 continue;
3073 }
3074
3075#if defined(PMAP_DIAGNOSTIC)
3076 if (!pv->pv_pmap) {
d557216f
MD
3077 kprintf("Null pmap (tb) at va: %p\n",
3078 (void *)pv->pv_va);
984263bc
MD
3079 continue;
3080 }
3081#endif
3082 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
74b9d1ec 3083 if (*pte & bit) {
984263bc 3084 return TRUE;
74b9d1ec 3085 }
984263bc 3086 }
984263bc
MD
3087 return (FALSE);
3088}
3089
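/*
 * Illustrative sketch (#if 0'd out, hypothetical wrapper): higher level
 * "is this page dirty / referenced" queries typically reduce to
 * pmap_testbit() with PG_M or PG_A.  The caller must hold vm_token, per
 * the comment above; example_page_is_dirty() is not an interface from
 * this file.
 */
#if 0
static boolean_t
example_page_is_dirty(vm_page_t m)
{
	/* vm_token must be held by the caller */
	return (pmap_testbit(m, PG_M));
}
#endif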
3090/*
4107b0c0
MD
3091 * This routine is used to modify bits in ptes
3092 *
3093 * The caller must hold vm_token.
984263bc
MD
3094 */
3095static __inline void
5e8d0349 3096pmap_clearbit(vm_page_t m, int bit)
984263bc 3097{
0f7a3396 3098 struct pmap_inval_info info;
840de426
MD
3099 pv_entry_t pv;
3100 unsigned *pte;
5e8d0349 3101 unsigned pbits;
984263bc
MD
3102
3103 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3104 return;
3105
0f7a3396 3106 pmap_inval_init(&info);
984263bc
MD
3107
3108 /*
3109 * Loop over all current mappings, setting/clearing as appropriate. If
3110 * setting RO do we need to clear the VAC?
3111 */
3112 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3113 /*
3114 * don't write protect pager mappings
3115 */
5e8d0349 3116 if (bit == PG_RW) {
984263bc
MD
3117 if (!pmap_track_modified(pv->pv_va))
3118 continue;
3119 }
3120
3121#if defined(PMAP_DIAGNOSTIC)
3122 if (!pv->pv_pmap) {
d557216f
MD
3123 kprintf("Null pmap (cb) at va: %p\n",
3124 (void *)pv->pv_va);
984263bc
MD
3125 continue;
3126 }
3127#endif
3128
0f7a3396
MD
3129 /*
3130 * Careful here. We can use a locked bus instruction to
3131 * clear PG_A or PG_M safely but we need to synchronize
3132 * with the target cpus when we mess with PG_RW.
70fc5283
MD
3133 *
3134 * We do not have to force synchronization when clearing
3135 * PG_M even for PTEs generated via virtual memory maps,
3136 * because the virtual kernel will invalidate the pmap
3137 * entry when/if it needs to resynchronize the Modify bit.
0f7a3396 3138 */
70fc5283 3139 if (bit & PG_RW)
c2fb025d 3140 pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
17cde63e
MD
3141 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3142again:
5e8d0349
MD
3143 pbits = *pte;
3144 if (pbits & bit) {
3145 if (bit == PG_RW) {
17cde63e 3146 if (pbits & PG_M) {
5e8d0349 3147 vm_page_dirty(m);
17cde63e
MD
3148 atomic_clear_int(pte, PG_M|PG_RW);
3149 } else {
3150 /*
3151 * The cpu may be trying to set PG_M
3152 * simultaniously with our clearing
3153 * of PG_RW.
3154 */
3155 if (!atomic_cmpset_int(pte, pbits,
3156 pbits & ~PG_RW))
3157 goto again;
3158 }
5e8d0349
MD
3159 } else if (bit == PG_M) {
3160 /*
70fc5283
MD
3161 * We could also clear PG_RW here to force
3162 * a fault on write to redetect PG_M for
3163 * virtual kernels, but it isn't necessary
3164 * since virtual kernels invalidate the pte
3165 * when they clear the VPTE_M bit in their
3166 * virtual page tables.
5e8d0349 3167 */
70fc5283 3168 atomic_clear_int(pte, PG_M);
5e8d0349
MD
3169 } else {
3170 atomic_clear_int(pte, bit);
984263bc
MD
3171 }
3172 }
c2fb025d
MD
3173 if (bit & PG_RW)
3174 pmap_inval_deinterlock(&info, pv->pv_pmap);
984263bc 3175 }
c2fb025d 3176 pmap_inval_done(&info);
984263bc
MD
3177}
3178
3179/*
4107b0c0