/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
 * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.87 2008/08/25 17:01:38 dillon Exp $
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "opt_disable_pse.h"
#include "opt_pmap.h"
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>

#define PMAP_KEEP_PDIRS
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#define MINPV 2048

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

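/*
 * Note: on i386 a 32 bit VA decomposes into a 10 bit page directory
 * index (va >> PDRSHIFT), a 10 bit page table index, and a 12 bit
 * page offset, so i386_btop(va) yields the global PTE index used
 * with the recursive PTmap mapping later in this file.
 */
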
/*
 * Given a map and a machine independent protection code,
 * convert to the machine dependent protection code.
 */
#define pte_prot(m, p)	\
	(protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
static int protection_codes[8];

struct pmap kernel_pmap;
static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);

vm_paddr_t avail_start;		/* PA of first available physical page */
vm_paddr_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_start;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t KvaStart;		/* VA start of KVA space */
vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
vm_offset_t KvaSize;		/* max size of kernel virtual address space */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static int pgeflag;		/* PG_G or-in */
static int pseflag;		/* PG_PS or-in */

static vm_object_t kptobj;

static int nkpt;
vm_offset_t kernel_vm_end;

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = 0, *ptmmap;
caddr_t CADDR1 = 0, ptvmmap = 0;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp = 0;

/*
 * Crashdump maps.
 */
static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;

extern pt_entry_t *SMPpt;

static PMAP_INLINE void	free_pv_entry (pv_entry_t pv);
static unsigned *get_ptbase (pmap_t pmap);
static pv_entry_t get_pv_entry (void);
static void	i386_protection_init (void);
static __inline void	pmap_clearbit (vm_page_t m, int bit);

static void	pmap_remove_all (vm_page_t m);
static void	pmap_enter_quick (pmap_t pmap, vm_offset_t va, vm_page_t m);
static int	pmap_remove_pte (struct pmap *pmap, unsigned *ptq,
				vm_offset_t sva, pmap_inval_info_t info);
static void	pmap_remove_page (struct pmap *pmap,
				vm_offset_t va, pmap_inval_info_t info);
static int	pmap_remove_entry (struct pmap *pmap, vm_page_t m,
				vm_offset_t va, pmap_inval_info_t info);
static boolean_t pmap_testbit (vm_page_t m, int bit);
static void	pmap_insert_entry (pmap_t pmap, vm_offset_t va,
				vm_page_t mpte, vm_page_t m);

static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);

static int pmap_release_free_page (pmap_t pmap, vm_page_t p);
static vm_page_t _pmap_allocpte (pmap_t pmap, unsigned ptepindex);
static unsigned *pmap_pte_quick (pmap_t pmap, vm_offset_t va);
static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
static int pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);

static vm_offset_t pmap_kmem_choose(vm_offset_t addr);

static unsigned pdir4mb;

/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
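/*
 * (Example: with NBPDR == 4MB, an addr of 0xc0501000 rounds up
 * to 0xc0800000.)
 */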
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;
#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE) {
		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	}
#endif
	return newaddr;
}

/*
 * pmap_pte:
 *
 *	Extract the page table entry associated with the given map/virtual
 *	pair.
 *
 *	This function may NOT be called from an interrupt.
 */
PMAP_INLINE unsigned *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	unsigned *pdeaddr;

	if (pmap) {
		pdeaddr = (unsigned *) pmap_pde(pmap, va);
		if (*pdeaddr & PG_PS)
			return pdeaddr;
		if (*pdeaddr) {
			return get_ptbase(pmap) + i386_btop(va);
		}
	}
	return (0);
}

/*
 * pmap_pte_quick:
 *
 *	Super fast pmap_pte routine best used when scanning the pv lists.
 *	This eliminates many coarse-grained invltlb calls.  Note that many
 *	of the pv list scans are across different pmaps and it is very
 *	wasteful to do an entire invltlb when checking a single mapping.
 *
 *	Should only be called while in a critical section.
 */
static unsigned *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	struct mdglobaldata *gd = mdcpu;
	unsigned pde, newpf;

	if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
		unsigned index = i386_btop(va);
		/* are we current address space or kernel? */
		if ((pmap == &kernel_pmap) ||
		    (frame == (((unsigned) PTDpde) & PG_FRAME))) {
			return (unsigned *) PTmap + index;
		}
		newpf = pde & PG_FRAME;
		if (((*(unsigned *) gd->gd_PMAP1) & PG_FRAME) != newpf) {
			*(unsigned *) gd->gd_PMAP1 = newpf | PG_RW | PG_V;
			cpu_invlpg(gd->gd_PADDR1);
		}
		return gd->gd_PADDR1 + ((unsigned) index & (NPTEPG - 1));
	}
	return (0);
}

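/*
 * Implementation note: gd_PMAP1/gd_PADDR1 above form a one-page per-cpu
 * mapping window.  The page table page of interest is mapped into the
 * window with only a single local invlpg, which is why the returned
 * pointer is stable only while the caller remains in its critical
 * section.
 */
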
/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On the i386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
{
	vm_offset_t va;
	pt_entry_t *pte;
	struct mdglobaldata *gd;
	int i;
	int pg;

	KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
	KvaSize = (vm_offset_t)VADDR(APTDPTDI, 0) - KvaStart;
	KvaEnd = KvaStart + KvaSize;

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_start is wrong.  It's NKPT*PAGE_SIZE
	 * too large.  It should instead be correctly calculated in locore.s
	 * and not based on 'first' (which is a physical address, not a
	 * virtual address, for the start of unused physical memory).  The
	 * kernel page tables are NOT double mapped and thus should not be
	 * included in this calculation.
	 */
	virtual_start = (vm_offset_t)KERNBASE + firstaddr;
	virtual_start = pmap_kmem_choose(virtual_start);
	virtual_end = VADDR(KPTDI+NKPDE-1, NPTEPG-1);

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap.pm_pdir = (pd_entry_t *)(KERNBASE + (u_int)IdlePTD);
	kernel_pmap.pm_count = 1;
	kernel_pmap.pm_active = (cpumask_t)-1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap.pm_pvlist);
	nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

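	/*
	 * SYSMAP carves 'n' pages of KVA out of 'va' for the pointer 'v'
	 * and records the first backing PTE in 'p'; 'va' and 'pte' are
	 * advanced in lockstep by each invocation below.
	 */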
	va = virtual_start;
	pte = (pt_entry_t *) pmap_pte(&kernel_pmap, va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);

	/*
	 * ptvmmap is used for reading arbitrary physical pages via
	 * /dev/mem.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 * XXX msgbufmap is not used.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
	       atop(round_page(MSGBUF_SIZE)))

	virtual_start = va;

	*(int *) CMAP1 = 0;
	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;

	/*
	 * PG_G is terribly broken on SMP because we IPI invltlb's in some
	 * cases rather than invlpg.  Actually, I don't even know why it
	 * works under UP, given the self-referential page table mappings.
	 */
#ifdef SMP
	pgeflag = 0;
#else
	if (cpu_feature & CPUID_PGE)
		pgeflag = PG_G;
#endif

	/*
	 * Initialize the 4MB page size flag
	 */
	pseflag = 0;
	/*
	 * The 4MB page version of the initial
	 * kernel page mapping.
	 */
	pdir4mb = 0;

#if !defined(DISABLE_PSE)
	if (cpu_feature & CPUID_PSE) {
		unsigned ptditmp;
		/*
		 * Note that we have enabled PSE mode
		 */
		pseflag = PG_PS;
		ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
		ptditmp &= ~(NBPDR - 1);
		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
		pdir4mb = ptditmp;

#ifndef SMP
		/*
		 * Enable the PSE mode.  If we are SMP we can't do this
		 * now because the APs will not be able to use it when
		 * they boot up.
		 */
		load_cr4(rcr4() | CR4_PSE);

		/*
		 * We can do the mapping here for the single processor
		 * case.  We simply ignore the old page table page from
		 * now on.
		 *
		 * For SMP, we still need 4K pages to bootstrap APs,
		 * PSE will be enabled as soon as all APs are up.
		 */
		PTD[KPTDI] = (pd_entry_t)ptditmp;
		kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
		cpu_invltlb();
#endif
	}
#endif
#ifdef SMP
	if (cpu_apic_address == 0)
		panic("pmap_bootstrap: no local apic!");

	/* local apic is mapped on last page */
	SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
	    (cpu_apic_address & PG_FRAME));
#endif

	/*
	 * We need to finish setting up the globaldata page for the BSP.
	 * locore has already populated the page table for the mdglobaldata
	 * portion.
	 */
	pg = MDGLOBALDATA_BASEALLOC_PAGES;
	gd = &CPU_prvspace[0].mdglobaldata;
	gd->gd_CMAP1 = &SMPpt[pg + 0];
	gd->gd_CMAP2 = &SMPpt[pg + 1];
	gd->gd_CMAP3 = &SMPpt[pg + 2];
	gd->gd_PMAP1 = &SMPpt[pg + 3];
	gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
	gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
	gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
	gd->gd_PADDR1 = (unsigned *)CPU_prvspace[0].PPAGE1;

	cpu_invltlb();
}

#ifdef SMP
/*
 * Set 4mb pdir for mp startup
 */
void
pmap_set_opt(void)
{
	if (pseflag && (cpu_feature & CPUID_PSE)) {
		load_cr4(rcr4() | CR4_PSE);
		if (pdir4mb && mycpu->gd_cpuid == 0) {	/* only on BSP */
			kernel_pmap.pm_pdir[KPTDI] =
			    PTD[KPTDI] = (pd_entry_t)pdir4mb;
			cpu_invltlb();
		}
	}
}
#endif

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support discontiguous physical
 *	memory in a fairly consistent way.
 */
void
pmap_init(void)
{
	int i;
	int initial_pvs;

	/*
	 * object for kernel page table pages
	 */
	kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	for (i = 0; i < vm_page_array_size; i++) {
		vm_page_t m;

		m = &vm_page_array[i];
		TAILQ_INIT(&m->md.pv_list);
		m->md.pv_list_count = 0;
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(&kernel_map,
		initial_pvs * sizeof (struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
		initial_pvs);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2(void)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}


/***************************************************
 * Low level helper routines.....
 ***************************************************/

#if defined(PMAP_DIAGNOSTIC)

/*
 * This code checks for non-writeable/modified pages.
 * This should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t ptea)
{
	int pte;

	pte = (int) ptea;

	if ((pte & (PG_M|PG_RW)) == PG_M)
		return 1;
	else
		return 0;
}
#endif


/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}

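/*
 * get_ptbase:
 *
 *	Return a base pointer to the page tables for the given pmap.  The
 *	kernel pmap and the currently installed pmap can be reached through
 *	the self-referential PTmap; any other pmap is temporarily hooked
 *	into the alternate self-map (APTDpde/APTmap) instead.
 */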
static unsigned *
get_ptbase(pmap_t pmap)
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
	struct globaldata *gd = mycpu;

	/* are we current address space or kernel? */
	if (pmap == &kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
		return (unsigned *) PTmap;
	}

	/* otherwise, we are alternate address space */
	KKASSERT(gd->gd_intr_nesting_level == 0 &&
		 (gd->gd_curthread->td_flags & TDF_INTTHREAD) == 0);

	if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t)(frame | PG_RW | PG_V);
		/* The page directory is not shared between CPUs */
		cpu_invltlb();
	}
	return (unsigned *) APTmap;
}

/*
 * pmap_extract:
 *
 *	Extract the physical page address associated with the map/VA pair.
 *
 *	This function may not be called from an interrupt if the pmap is
 *	not kernel_pmap.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_offset_t rtval;
	vm_offset_t pdirindex;

	pdirindex = va >> PDRSHIFT;
	if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
		unsigned *pte;
		if ((rtval & PG_PS) != 0) {
			rtval &= ~(NBPDR - 1);
			rtval |= va & (NBPDR - 1);
			return rtval;
		}
		pte = get_ptbase(pmap) + i386_btop(va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		return rtval;
	}
	return 0;
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Routine:	pmap_kenter
 * Function:
 *	Add a wired page to the KVA.
 *	NOTE: in order for the mapping to take effect you should do an
 *	invltlb after doing the pmap_kenter().
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	pmap_inval_add(&info, &kernel_pmap, va);
	*pte = npte;
	pmap_inval_flush(&info);
}

/*
 * Routine:	pmap_kenter_quick
 * Function:
 *	Similar to pmap_kenter(), except we only invalidate the
 *	mapping on the current CPU.
 */
void
pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;

	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	*pte = npte;
	cpu_invlpg((void *)va);
}

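/*
 * pmap_kenter_sync() issues a system-wide invalidation for a kernel VA
 * (e.g. one previously entered with pmap_kenter_quick() on another cpu);
 * pmap_kenter_sync_quick() invalidates it on the local cpu only.
 */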
void
pmap_kenter_sync(vm_offset_t va)
{
	pmap_inval_info info;

	pmap_inval_init(&info);
	pmap_inval_add(&info, &kernel_pmap, va);
	pmap_inval_flush(&info);
}

void
pmap_kenter_sync_quick(vm_offset_t va)
{
	cpu_invlpg((void *)va);
}

/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(vm_offset_t va)
{
	unsigned *pte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	pte = (unsigned *)vtopte(va);
	pmap_inval_add(&info, &kernel_pmap, va);
	*pte = 0;
	pmap_inval_flush(&info);
}

void
pmap_kremove_quick(vm_offset_t va)
{
	unsigned *pte;

	pte = (unsigned *)vtopte(va);
	*pte = 0;
	cpu_invlpg((void *)va);
}

/*
 * XXX these need to be recoded.  They are not used in any critical path.
 */
void
pmap_kmodify_rw(vm_offset_t va)
{
	*vtopte(va) |= PG_RW;
	cpu_invlpg((void *)va);
}

void
pmap_kmodify_nc(vm_offset_t va)
{
	*vtopte(va) |= PG_N;
	cpu_invlpg((void *)va);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	while (start < end) {
		pmap_kenter(virt, start);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}


/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	vm_offset_t end_va;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
		cpu_invlpg((void *)va);
		va += PAGE_SIZE;
		m++;
	}
#ifdef SMP
	smp_invltlb();	/* XXX */
#endif
}

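/*
 * pmap_qenter2() works like pmap_qenter() but uses *mask to remember
 * which cpus have already seen each mapping, allowing redundant
 * invalidations to be skipped (see the inline comment below).
 */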
void
pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
{
	vm_offset_t end_va;
	cpumask_t cmask = mycpu->gd_cpumask;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;
		unsigned pteval;

		/*
		 * Install the new PTE.  If the pte changed from the prior
		 * mapping we must reset the cpu mask and invalidate the page.
		 * If the pte is the same but we have not seen it on the
		 * current cpu, invlpg the existing mapping.  Otherwise the
		 * entry is optimal and no invalidation is required.
		 */
		pte = (unsigned *)vtopte(va);
		pteval = VM_PAGE_TO_PHYS(*m) | PG_A | PG_RW | PG_V | pgeflag;
		if (*pte != pteval) {
			*mask = 0;
			*pte = pteval;
			cpu_invlpg((void *)va);
		} else if ((*mask & cmask) == 0) {
			cpu_invlpg((void *)va);
		}
		va += PAGE_SIZE;
		m++;
	}
	*mask |= cmask;
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t end_va;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		cpu_invlpg((void *)va);
		va += PAGE_SIZE;
	}
#ifdef SMP
	smp_invltlb();
#endif
}

/*
 * This routine works like vm_page_lookup() but also blocks as long as the
 * page is busy.  This routine does not busy the page it returns.
 *
 * Unless the caller is managing objects whose pages are in a known state,
 * the call should be made with a critical section held so the page's object
 * association remains valid on return.
 */
static vm_page_t
pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	do {
		m = vm_page_lookup(object, pindex);
	} while (m && vm_page_sleep_busy(m, FALSE, "pplookp"));

	return(m);
}

/*
 * Create a new thread and optionally associate it with a (new) process.
 * NOTE! the new thread's cpu may not equal the current cpu.
 */
void
pmap_init_thread(thread_t td)
{
	/* enforce pcb placement */
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
	td->td_savefpu = &td->td_pcb->pcb_save;
	td->td_sp = (char *)td->td_pcb - 16;
}

/*
 * This routine directly affects the fork perf for a process.
 */
void
pmap_init_proc(struct proc *p)
{
}

/*
 * Dispose the UPAGES for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(struct proc *p)
{
	KASSERT(p->p_lock == 0, ("attempt to dispose referenced proc! %p", p));
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
{
	/*
	 * Wait until we can busy the page ourselves.  We cannot have
	 * any active flushes if we block.
	 */
	if (m->flags & PG_BUSY) {
		pmap_inval_flush(info);
		while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
			;
	}
	KASSERT(m->queue == PQ_NONE,
		("_pmap_unwire_pte_hold: %p->queue != PQ_NONE", m));

	if (m->hold_count == 1) {
		/*
		 * Unmap the page table page
		 */
		vm_page_busy(m);
		pmap_inval_add(info, pmap, -1);
		pmap->pm_pdir[m->pindex] = 0;

		KKASSERT(pmap->pm_stats.resident_count > 0);
		--pmap->pm_stats.resident_count;

		if (pmap->pm_ptphint == m)
			pmap->pm_ptphint = NULL;

		/*
		 * This was our last hold, the page had better be unwired
		 * after we decrement wire_count.
		 *
		 * FUTURE NOTE: shared page directory page could result in
		 * multiple wire counts.
		 */
		vm_page_unhold(m);
		--m->wire_count;
		KKASSERT(m->wire_count == 0);
		--vmstats.v_wire_count;
		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		vm_page_flash(m);
		vm_page_free_zero(m);
		return 1;
	} else {
		KKASSERT(m->hold_count > 1);
		vm_page_unhold(m);
		return 0;
	}
}

static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
{
	KKASSERT(m->hold_count > 0);
	if (m->hold_count > 1) {
		vm_page_unhold(m);
		return 0;
	} else {
		return _pmap_unwire_pte_hold(pmap, m, info);
	}
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
	      pmap_inval_info_t info)
{
	unsigned ptepindex;

	if (va >= UPT_MIN_ADDRESS)
		return 0;

	if (mpte == NULL) {
		ptepindex = (va >> PDRSHIFT);
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;
		} else {
			pmap_inval_flush(info);
			mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = mpte;
		}
	}

	return pmap_unwire_pte_hold(pmap, mpte, info);
}

/*
 * Initialize pmap0/vmspace0.  This pmap is not added to pmap_list because
 * it, and IdlePTD, represents the template used to update all other pmaps.
 *
 * On architectures where the kernel pmap is not integrated into the user
 * process pmap, this pmap represents the process pmap, not the kernel pmap.
 * kernel_pmap should be used to directly access the kernel_pmap.
 */
void
pmap_pinit0(struct pmap *pmap)
{
	pmap->pm_pdir =
		(pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
	pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(struct pmap *pmap)
{
	vm_page_t ptdpg;

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL) {
		pmap->pm_pdir =
		    (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
	}

	/*
	 * Allocate an object for the ptes
	 */
	if (pmap->pm_pteobj == NULL)
		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);

	/*
	 * Allocate the page directory page, unless we already have
	 * one cached.  If we used the cached page the wire_count will
	 * already be set appropriately.
	 */
	if ((ptdpg = pmap->pm_pdirm) == NULL) {
		ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
				     VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap->pm_pdirm = ptdpg;
		vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY);
		ptdpg->valid = VM_PAGE_BITS_ALL;
		ptdpg->wire_count = 1;
		++vmstats.v_wire_count;
		pmap_kenter((vm_offset_t)pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
	}
	if ((ptdpg->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdir, PAGE_SIZE);

	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];

	/* install self-referential address mapping entry */
	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;

	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	pmap->pm_stats.resident_count = 1;
}

/*
 * Clean up a pmap structure so it can be physically freed.  This routine
 * is called by the vmspace dtor function.  A great deal of pmap data is
 * left passively mapped to improve vmspace management so we have a bit
 * of cleanup work to do here.
 */
void
pmap_puninit(pmap_t pmap)
{
	vm_page_t p;

	KKASSERT(pmap->pm_active == 0);
	if ((p = pmap->pm_pdirm) != NULL) {
		KKASSERT(pmap->pm_pdir != NULL);
		pmap_kremove((vm_offset_t)pmap->pm_pdir);
		p->wire_count--;
		vmstats.v_wire_count--;
		KKASSERT((p->flags & PG_BUSY) == 0);
		vm_page_busy(p);
		vm_page_free_zero(p);
		pmap->pm_pdirm = NULL;
	}
	if (pmap->pm_pdir) {
		kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pdir, PAGE_SIZE);
		pmap->pm_pdir = NULL;
	}
	if (pmap->pm_pteobj) {
		vm_object_deallocate(pmap->pm_pteobj);
		pmap->pm_pteobj = NULL;
	}
}

/*
 * Wire in kernel global address entries.  To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * adds the pmap to the master list (which growkernel scans to update),
 * then copies the template.
 */
void
pmap_pinit2(struct pmap *pmap)
{
	crit_enter();
	TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
	/* XXX copies current process, does not fill in MPPTDI */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
	crit_exit();
}

/*
 * Attempt to release and free a vm_page in a pmap.  Returns 1 on success,
 * 0 on failure (if the procedure had to sleep).
 *
 * When asked to remove the page directory page itself, we actually just
 * leave it cached so we do not have to incur the SMP inval overhead of
 * removing the kernel mapping.  pmap_puninit() will take care of it.
 */
static int
pmap_release_free_page(struct pmap *pmap, vm_page_t p)
{
	unsigned *pde = (unsigned *) pmap->pm_pdir;

	/*
	 * This code optimizes the case of freeing non-busy
	 * page-table pages.  Those pages are zero now, and
	 * might as well be placed directly into the zero queue.
	 */
	if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
		return 0;

	vm_page_busy(p);

	/*
	 * Remove the page table page from the processes address space.
	 */
	pde[p->pindex] = 0;
	KKASSERT(pmap->pm_stats.resident_count > 0);
	--pmap->pm_stats.resident_count;

	if (p->hold_count) {
		panic("pmap_release: freeing held page table page");
	}
	if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
		pmap->pm_ptphint = NULL;

	/*
	 * We leave the page directory page cached, wired, and mapped in
	 * the pmap until the dtor function (pmap_puninit()) gets called.
	 * However, still clean it up so we can set PG_ZERO.
	 */
	if (p->pindex == PTDPTDI) {
		bzero(pde + KPTDI, nkpt * PTESIZE);
		pde[MPPTDI] = 0;
		pde[APTDPTDI] = 0;
		vm_page_flag_set(p, PG_ZERO);
		vm_page_wakeup(p);
	} else {
		p->wire_count--;
		vmstats.v_wire_count--;
		vm_page_free_zero(p);
	}
	return 1;
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
{
	vm_offset_t pteva, ptepa;
	vm_page_t m;

	/*
	 * Find or fabricate a new pagetable page
	 */
	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);

	KASSERT(m->queue == PQ_NONE,
		("_pmap_allocpte: %p->queue != PQ_NONE", m));

	/*
	 * Increment the hold count for the page we will be returning to
	 * the caller.
	 */
	m->hold_count++;

	/*
	 * It is possible that someone else got in and mapped the page
	 * directory page while we were blocked; if so just unbusy and
	 * return the held page.
	 */
	if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
		KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
		vm_page_wakeup(m);
		return(m);
	}

	if (m->wire_count == 0)
		vmstats.v_wire_count++;
	m->wire_count++;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */
	++pmap->pm_stats.resident_count;

	ptepa = VM_PAGE_TO_PHYS(m);
	pmap->pm_pdir[ptepindex] =
		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);

	/*
	 * Set the page table hint
	 */
	pmap->pm_ptphint = m;

	/*
	 * Try to use the new mapping, but if we cannot, then
	 * do it with the routine that maps the page explicitly.
	 */
	if ((m->flags & PG_ZERO) == 0) {
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
		    (((unsigned) PTDpde) & PG_FRAME)) {
			pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
			bzero((caddr_t) pteva, PAGE_SIZE);
		} else {
			pmap_zero_page(ptepa);
		}
	}

	m->valid = VM_PAGE_BITS_ALL;
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_flag_set(m, PG_MAPPED);
	vm_page_wakeup(m);

	return m;
}

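/*
 * pmap_allocpte:
 *
 *	Return the page table page mapping the given VA, allocating it via
 *	_pmap_allocpte() if it is not already resident.  The returned page's
 *	hold count is incremented for the caller.
 */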
static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va)
{
	unsigned ptepindex;
	vm_offset_t ptepa;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;

	/*
	 * Get the page directory entry
	 */
	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

	/*
	 * This supports switching from a 4MB page to a
	 * normal 4K page.
	 */
	if (ptepa & PG_PS) {
		pmap->pm_pdir[ptepindex] = 0;
		ptepa = 0;
		cpu_invltlb();
		smp_invltlb();
	}

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptepa) {
		/*
		 * In order to get the page table page, try the
		 * hint first.
		 */
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			m = pmap->pm_ptphint;
		} else {
			m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = m;
		}
		m->hold_count++;
		return m;
	}
	/*
	 * Here if the pte page isn't mapped, or if it has been deallocated.
	 */
	return _pmap_allocpte(pmap, ptepindex);
}


/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static int pmap_release_callback(struct vm_page *p, void *data);

void
pmap_release(struct pmap *pmap)
{
	vm_object_t object = pmap->pm_pteobj;
	struct rb_vm_page_scan_info info;

	KASSERT(pmap->pm_active == 0, ("pmap still active! %08x", pmap->pm_active));
#if defined(DIAGNOSTIC)
	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");
#endif

	info.pmap = pmap;
	info.object = object;
	crit_enter();
	TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
	crit_exit();

	do {
		crit_enter();
		info.error = 0;
		info.mpte = NULL;
		info.limit = object->generation;

		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
					pmap_release_callback, &info);
		if (info.error == 0 && info.mpte) {
			if (!pmap_release_free_page(pmap, info.mpte))
				info.error = 1;
		}
		crit_exit();
	} while (info.error);
}

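/*
 * Callback for the RB-tree scan in pmap_release(): the page directory
 * page (PTDPTDI) is deferred to the caller, all other pages are freed,
 * and the scan aborts if the object changed out from under us.
 */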
static int
pmap_release_callback(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (p->pindex == PTDPTDI) {
		info->mpte = p;
		return(0);
	}
	if (!pmap_release_free_page(info->pmap, p)) {
		info->error = 1;
		return(-1);
	}
	if (info->object->generation != info->limit) {
		info->error = 1;
		return(-1);
	}
	return(0);
}

/*
 * Grow the number of kernel page table entries, if needed.
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct pmap *pmap;
	vm_offset_t ptppaddr;
	vm_page_t nkpg;
	pd_entry_t newpdir;

	crit_enter();
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
					~(PAGE_SIZE * NPTEPG - 1);
			nkpt++;
		}
	}
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
					~(PAGE_SIZE * NPTEPG - 1);
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = vm_page_alloc(kptobj, nkpt,
				     VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
				     VM_ALLOC_INTERRUPT);
		if (nkpg == NULL)
			panic("pmap_growkernel: no memory to grow kernel");

		vm_page_wire(nkpg);
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		pmap_zero_page(ptppaddr);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
		pdir_pde(PTD, kernel_vm_end) = newpdir;
		*pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;
		nkpt++;

		/*
		 * This update must be interlocked with pmap_pinit2.
		 */
		TAILQ_FOREACH(pmap, &pmap_list, pm_pmnode) {
			*pmap_pde(pmap, kernel_vm_end) = newpdir;
		}
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
				~(PAGE_SIZE * NPTEPG - 1);
	}
	crit_exit();
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
		panic("destroying a pmap is not yet implemented");
	}
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list.  This function may be
 * called from an interrupt.
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.  This function may be called from an interrupt.
 */
static pv_entry_t
get_pv_entry(void)
{
	pv_entry_count++;
	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}
	return zalloc(pvzone);
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
void
pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone = 0;

	if (pmap_pagedaemon_waken == 0)
		return;
	pmap_pagedaemon_waken = 0;

	if (warningdone < 5) {
		kprintf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}

	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & PG_BUSY))
			continue;
		pmap_remove_all(m);
	}
}


/*
 * Remove the pv_entry for the given (pmap, va) pair from the page's
 * pv list and from the pmap's pv list, unwire the backing page table
 * page, and free the now unused pv_entry.  The shorter of the two
 * lists is searched to locate the entry.
 */
static int
pmap_remove_entry(struct pmap *pmap, vm_page_t m,
		  vm_offset_t va, pmap_inval_info_t info)
{
	pv_entry_t pv;
	int rtval;

	crit_enter();
	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			if (va == pv->pv_va)
				break;
		}
	}

	rtval = 0;
	if (pv) {
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count--;
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		++pmap->pm_generation;
		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
		free_pv_entry(pv);
	}
	crit_exit();
	return rtval;
}

/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
{
	pv_entry_t pv;

	crit_enter();
	pv = get_pv_entry();
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count++;

	crit_exit();
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
		pmap_inval_info_t info)
{
	unsigned oldpte;
	vm_page_t m;

	pmap_inval_add(info, pmap, va);
	oldpte = loadandclear(ptq);
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	/*
	 * Machines that don't support invlpg, also don't support
	 * PG_G.  XXX PG_G is disabled for SMP so don't worry about
	 * the SMP case.
	 */
	if (oldpte & PG_G)
		cpu_invlpg((void *)va);
	KKASSERT(pmap->pm_stats.resident_count > 0);
	--pmap->pm_stats.resident_count;
	if (oldpte & PG_MANAGED) {
		m = PHYS_TO_VM_PAGE(oldpte);
		if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) oldpte)) {
				kprintf(
	"pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    va, oldpte);
			}
#endif
			if (pmap_track_modified(va))
				vm_page_dirty(m);
		}
		if (oldpte & PG_A)
			vm_page_flag_set(m, PG_REFERENCED);
		return pmap_remove_entry(pmap, m, va, info);
	} else {
		return pmap_unuse_pt(pmap, va, NULL, info);
	}

	return 0;
}

/*
 * pmap_remove_page:
 *
 *	Remove a single page from a process address space.
 *
 *	This function may not be called from an interrupt if the pmap is
 *	not kernel_pmap.
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
{
	unsigned *ptq;

	/*
	 * if there is no pte for this address, just skip it!!!  Otherwise
	 * get a local va for mappings for this pmap and remove the entry.
	 */
	if (*pmap_pde(pmap, va) != 0) {
		ptq = get_ptbase(pmap) + i386_btop(va);
		if (*ptq) {
			pmap_remove_pte(pmap, ptq, va, info);
		}
	}
}

/*
 * pmap_remove:
 *
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 *
 *	This function may not be called from an interrupt if the pmap is
 *	not kernel_pmap.
 */
void
pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
	unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	struct pmap_inval_info info;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	pmap_inval_init(&info);

	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if (((sva + PAGE_SIZE) == eva) &&
	    (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
		pmap_remove_page(pmap, sva, &info);
		pmap_inval_flush(&info);
		return;
	}

	/*
	 * Get a local virtual address for the mappings that are being
	 * worked with.
	 */
	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {
		unsigned pdirindex;

		/*
		 * Calculate index for next page table.
		 */
		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
		if (pmap->pm_stats.resident_count == 0)
			break;

		pdirindex = sindex / NPDEPG;
		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
			pmap_inval_add(&info, pmap, -1);
			pmap->pm_pdir[pdirindex] = 0;
			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
			continue;
		}

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (pdnxt > eindex) {
			pdnxt = eindex;
		}

		/*
		 * NOTE: pmap_remove_pte() can block.
		 */
		for (; sindex != pdnxt; sindex++) {
			vm_offset_t va;

			ptbase = get_ptbase(pmap);
			if (ptbase[sindex] == 0)
				continue;
			va = i386_ptob(sindex);
			if (pmap_remove_pte(pmap, ptbase + sindex, va, &info))
				break;
		}
	}
	pmap_inval_flush(&info);
}

/*
 * pmap_remove_all:
 *
 *	Removes this physical page from all physical maps in which it resides.
 *	Reflects back modify bits to the pager.
 *
 *	This routine may not be called from an interrupt.
 */
static void
pmap_remove_all(vm_page_t m)
{
	struct pmap_inval_info info;
	unsigned *pte, tpte;
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	pmap_inval_init(&info);
	crit_enter();
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
		--pv->pv_pmap->pm_stats.resident_count;

		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
		tpte = loadandclear(pte);

		if (tpte & PG_W)
			pv->pv_pmap->pm_stats.wired_count--;

		if (tpte & PG_A)
			vm_page_flag_set(m, PG_REFERENCED);

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) tpte)) {
				kprintf(
	"pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    pv->pv_va, tpte);
			}
#endif
			if (pmap_track_modified(pv->pv_va))
				vm_page_dirty(m);
		}
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		++pv->pv_pmap->pm_generation;
		m->md.pv_list_count--;
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
		free_pv_entry(pv);
	}
	crit_exit();
	KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
	pmap_inval_flush(&info);
}

1829/*
e0e69b7d
MD
1830 * pmap_protect:
1831 *
1832 * Set the physical protection on the specified range of this map
1833 * as requested.
1834 *
1835 * This function may not be called from an interrupt if the map is
1836 * not the kernel_pmap.
984263bc
MD
1837 */
1838void
1839pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1840{
840de426 1841 unsigned *ptbase;
984263bc
MD
1842 vm_offset_t pdnxt, ptpaddr;
1843 vm_pindex_t sindex, eindex;
0f7a3396 1844 pmap_inval_info info;
984263bc
MD
1845
1846 if (pmap == NULL)
1847 return;
1848
1849 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1850 pmap_remove(pmap, sva, eva);
1851 return;
1852 }
1853
1854 if (prot & VM_PROT_WRITE)
1855 return;
1856
0f7a3396 1857 pmap_inval_init(&info);
984263bc
MD
1858
1859 ptbase = get_ptbase(pmap);
1860
1861 sindex = i386_btop(sva);
1862 eindex = i386_btop(eva);
1863
1864 for (; sindex < eindex; sindex = pdnxt) {
1865
1866 unsigned pdirindex;
1867
1868 pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1869
1870 pdirindex = sindex / NPDEPG;
1871 if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
0f7a3396 1872 pmap_inval_add(&info, pmap, -1);
55f2596a 1873 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
984263bc 1874 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
984263bc
MD
1875 continue;
1876 }
1877
1878 /*
1879 * Weed out invalid mappings. Note: we assume that the page
1880 * directory table is always allocated, and in kernel virtual.
1881 */
1882 if (ptpaddr == 0)
1883 continue;
1884
1885 if (pdnxt > eindex) {
1886 pdnxt = eindex;
1887 }
1888
1889 for (; sindex != pdnxt; sindex++) {
1890
1891 unsigned pbits;
1892 vm_page_t m;
1893
17cde63e
MD
1894 /*
1895 * XXX non-optimal. Note also that there can be
1896 * no pmap_inval_flush() calls until after we modify
1897 * ptbase[sindex] (or otherwise we have to do another
1898 * pmap_inval_add() call).
1899 */
0f7a3396 1900 pmap_inval_add(&info, pmap, i386_ptob(sindex));
984263bc
MD
1901 pbits = ptbase[sindex];
1902
1903 if (pbits & PG_MANAGED) {
1904 m = NULL;
1905 if (pbits & PG_A) {
1906 m = PHYS_TO_VM_PAGE(pbits);
1907 vm_page_flag_set(m, PG_REFERENCED);
1908 pbits &= ~PG_A;
1909 }
1910 if (pbits & PG_M) {
1911 if (pmap_track_modified(i386_ptob(sindex))) {
1912 if (m == NULL)
1913 m = PHYS_TO_VM_PAGE(pbits);
1914 vm_page_dirty(m);
1915 pbits &= ~PG_M;
1916 }
1917 }
1918 }
1919
1920 pbits &= ~PG_RW;
1921
1922 if (pbits != ptbase[sindex]) {
1923 ptbase[sindex] = pbits;
984263bc
MD
1924 }
1925 }
1926 }
0f7a3396 1927 pmap_inval_flush(&info);
984263bc
MD
1928}
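
/*
 * Illustrative sketch (hypothetical caller): revoking write access on a
 * page-aligned range.  Per the checks at the top of pmap_protect(), a
 * protection without VM_PROT_READ removes the mappings outright, and a
 * request that still includes VM_PROT_WRITE is a no-op.
 */
#if 0
static void
example_make_readonly(pmap_t pmap, vm_offset_t va, vm_size_t size)
{
	pmap_protect(pmap, trunc_page(va), round_page(va + size),
		     VM_PROT_READ | VM_PROT_EXECUTE);
}
#endif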

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	vm_paddr_t pa;
	unsigned *pte;
	vm_paddr_t opa;
	vm_offset_t origpte, newpte;
	vm_page_t mpte;
	pmap_inval_info info;

	if (pmap == NULL)
		return;

	va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
	if (va >= KvaEnd)
		panic("pmap_enter: toobig");
	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
#endif
	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
		kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
		print_backtrace();
	}
	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
		kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
		print_backtrace();
	}

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < UPT_MIN_ADDRESS)
		mpte = pmap_allocpte(pmap, va);
	else
		mpte = NULL;

	pmap_inval_init(&info);
	pte = pmap_pte(pmap, va);

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory pdir=%x, va=0x%x\n",
		      (unsigned) pmap->pm_pdir[PTDPTDI], va);
	}

	pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
	origpte = *(vm_offset_t *)pte;
	opa = origpte & PG_FRAME;

	if (origpte & PG_PS)
		panic("pmap_enter: attempted pmap_enter on 4MB page");

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

#if defined(PMAP_DIAGNOSTIC)
		if (pmap_nw_modified((pt_entry_t) origpte)) {
			kprintf("pmap_enter: modified page not "
				"writable: va: 0x%x, pte: 0x%x\n",
				va, origpte);
		}
#endif

		/*
		 * Remove the extra pte reference.  Note that we cannot
		 * optimize the RO->RW case because we have adjusted the
		 * wiring count above and may need to adjust the wiring
		 * bits below.
		 */
		if (mpte)
			mpte->hold_count--;

		/*
		 * We might be turning off write access to the page,
		 * so we go ahead and sense modify status.
		 */
		if (origpte & PG_MANAGED) {
			if ((origpte & PG_M) && pmap_track_modified(va)) {
				vm_page_t om;
				om = PHYS_TO_VM_PAGE(opa);
				vm_page_dirty(om);
			}
			pa |= PG_MANAGED;
			KKASSERT(m->flags & PG_MAPPED);
		}
		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		int err;
		err = pmap_remove_pte(pmap, pte, va, &info);
		if (err)
			panic("pmap_enter: pte vanished, va: 0x%x", va);
	}

	/*
	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_initialized &&
	    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
		pmap_insert_entry(pmap, va, mpte, m);
		pa |= PG_MANAGED;
		vm_page_flag_set(m, PG_MAPPED);
	}

	/*
	 * Increment counters
	 */
	++pmap->pm_stats.resident_count;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);

	if (wired)
		newpte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		newpte |= PG_U;
	if (pmap == &kernel_pmap)
		newpte |= pgeflag;

	/*
	 * if the mapping or permission bits are different, we need
	 * to update the pte.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		pmap_inval_add(&info, pmap, va);
		*pte = newpte | PG_A;
		if (newpte & PG_RW)
			vm_page_flag_set(m, PG_WRITEABLE);
	}
	KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
	pmap_inval_flush(&info);
}
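
/*
 * Illustrative sketch: the i386 pte assembled in pmap_enter() is just a
 * physical frame address OR'd with flag bits.  The hypothetical helper
 * below restates how a wired, writable kernel pte would be composed from
 * the macros used in this file; it is not part of the source.
 */
#if 0
static unsigned
example_build_kernel_pte(vm_paddr_t pa)
{
	/* frame | valid | writable | wired accounting bit | global flag */
	return ((pa & PG_FRAME) | PG_V | PG_RW | PG_W | pgeflag);
}
#endif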

/*
 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
 * This code also assumes that the pmap has no pre-existing entry for this
 * VA.
 *
 * This code currently may only be used on user pmaps, not kernel_pmap.
 */
static void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	unsigned *pte;
	vm_paddr_t pa;
	vm_page_t mpte;
	unsigned ptepindex;
	vm_offset_t ptepa;
	pmap_inval_info info;

	pmap_inval_init(&info);

	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
		kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
		print_backtrace();
	}
	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
		kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
		print_backtrace();
	}

	KKASSERT(va < UPT_MIN_ADDRESS);	/* assert used on user pmaps only */

	/*
	 * Calculate the page table page (mpte), allocating it if necessary.
	 *
	 * A held page table page (mpte), or NULL, is passed onto the
	 * section following.
	 */
	if (va < UPT_MIN_ADDRESS) {
		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> PDRSHIFT;

		do {
			/*
			 * Get the page directory entry
			 */
			ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.
			 */
			if (ptepa) {
				if (ptepa & PG_PS)
					panic("pmap_enter_quick: unexpected mapping into 4MB page");
				if (pmap->pm_ptphint &&
				    (pmap->pm_ptphint->pindex == ptepindex)) {
					mpte = pmap->pm_ptphint;
				} else {
					mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
					pmap->pm_ptphint = mpte;
				}
				if (mpte)
					mpte->hold_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex);
			}
		} while (mpte == NULL);
	} else {
		mpte = NULL;
		/* this code path is not yet used */
	}

	/*
	 * With a valid (and held) page directory page, we can just use
	 * vtopte() to get to the pte.  If the pte is already present
	 * we do not disturb it.
	 */
	pte = (unsigned *)vtopte(va);
	if (*pte & PG_V) {
		if (mpte)
			pmap_unwire_pte_hold(pmap, mpte, &info);
		pa = VM_PAGE_TO_PHYS(m);
		KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
		return;
	}

	/*
	 * Enter on the PV list if part of our managed memory
	 */
	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
		pmap_insert_entry(pmap, va, mpte, m);
		vm_page_flag_set(m, PG_MAPPED);
	}

	/*
	 * Increment counters
	 */
	++pmap->pm_stats.resident_count;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * Now validate mapping with RO protection
	 */
	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
		*pte = pa | PG_V | PG_U;
	else
		*pte = pa | PG_V | PG_U | PG_MANAGED;
/*	pmap_inval_add(&info, pmap, va); shouldn't be needed, inval->valid */
	pmap_inval_flush(&info);
}

/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
	return ((void *)crashdumpmap);
}
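
/*
 * Illustrative sketch (hypothetical dump loop): a panic-dump writer can
 * walk physical memory one page at a time through the crashdumpmap
 * window.  example_write_dump_block() is a made-up output routine.
 */
#if 0
static void
example_dump_physical_page(vm_paddr_t pa)
{
	void *va = pmap_kenter_temporary(pa, 0);

	example_write_dump_block(va, PAGE_SIZE);	/* hypothetical */
}
#endif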

#define MAX_INIT_PT (96)

/*
 * This routine preloads the ptes for a given object into the specified pmap.
 * This eliminates the blast of soft faults on process startup and
 * immediately after an mmap.
 */
static int pmap_object_init_pt_callback(vm_page_t p, void *data);

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
		    vm_object_t object, vm_pindex_t pindex,
		    vm_size_t size, int limit)
{
	struct rb_vm_page_scan_info info;
	struct lwp *lp;
	int psize;

	/*
	 * We can't preinit if read access isn't set or there is no pmap
	 * or object.
	 */
	if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
		return;

	/*
	 * We can't preinit if the pmap is not the current pmap
	 */
	lp = curthread->td_lwp;
	if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
		return;

	psize = i386_btop(size);

	if ((object->type != OBJT_VNODE) ||
	    ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
	     (object->resident_page_count > MAX_INIT_PT))) {
		return;
	}

	if (psize + pindex > object->size) {
		if (object->size < pindex)
			return;
		psize = object->size - pindex;
	}

	if (psize == 0)
		return;

	/*
	 * Use a red-black scan to traverse the requested range and load
	 * any valid pages found into the pmap.
	 *
	 * We cannot safely scan the object's memq unless we are in a
	 * critical section since interrupts can remove pages from objects.
	 */
	info.start_pindex = pindex;
	info.end_pindex = pindex + psize - 1;
	info.limit = limit;
	info.mpte = NULL;
	info.addr = addr;
	info.pmap = pmap;

	crit_enter();
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
				pmap_object_init_pt_callback, &info);
	crit_exit();
}

static
int
pmap_object_init_pt_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_pindex_t rel_index;

	/*
	 * don't allow an madvise to blow away our really
	 * free pages allocating pv entries.
	 */
	if ((info->limit & MAP_PREFAULT_MADVISE) &&
	    vmstats.v_free_count < vmstats.v_free_reserved) {
		return(-1);
	}
	if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
	    (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
		if ((p->queue - p->pc) == PQ_CACHE)
			vm_page_deactivate(p);
		vm_page_busy(p);
		rel_index = p->pindex - info->start_pindex;
		pmap_enter_quick(info->pmap,
				 info->addr + i386_ptob(rel_index), p);
		vm_page_wakeup(p);
	}
	return(0);
}
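
/*
 * Illustrative sketch: RB_SCAN callbacks follow the convention visible
 * above -- return 0 to continue the scan, return a negative value to
 * abort it (as the low-memory check does).  A minimal hypothetical
 * callback that just counts the pages visited:
 */
#if 0
static int
example_count_pages_callback(vm_page_t p, void *data)
{
	int *countp = data;

	++*countp;
	return(0);	/* keep scanning */
}
#endif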

/*
 * pmap_prefault provides a quick way of clustering pagefaults into a
 * process's address space.  It is a "cousin" of pmap_object_init_pt,
 * except it runs at page fault time instead of mmap time.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m;
	vm_object_t object;
	struct lwp *lp;

	/*
	 * We do not currently prefault mappings that use virtual page
	 * tables.  We do not prefault foreign pmaps.
	 */
	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
		return;
	lp = curthread->td_lwp;
	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start)
		starta = entry->start;
	else if (starta > addra)
		starta = 0;

	/*
	 * critical section protection is required to maintain the
	 * page/object association, interrupts can free pages and remove
	 * them from their objects.
	 */
	crit_enter();
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		unsigned *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if ((*pmap_pde(pmap, addr)) == 0)
			continue;

		pte = (unsigned *) vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;

		for (m = vm_page_lookup(lobject, pindex);
		    (!m && (lobject->type == OBJT_DEFAULT) &&
		     (lobject->backing_object));
		    lobject = lobject->backing_object
		) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			vm_page_busy(m);
			pmap_enter_quick(pmap, addr, m);
			vm_page_wakeup(m);
		}
	}
	crit_exit();
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	unsigned *pte;

	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.  However, in an SMP environment we must use
	 * a locked bus cycle to update the pte (if we are not using
	 * the pmap_inval_*() API that is)... it's ok to do this for simple
	 * wiring changes.
	 */
#ifdef SMP
	if (wired)
		atomic_set_int(pte, PG_W);
	else
		atomic_clear_int(pte, PG_W);
#else
	if (wired)
		atomic_set_int_nonlocked(pte, PG_W);
	else
		atomic_clear_int_nonlocked(pte, PG_W);
#endif
}


/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
	pmap_inval_info info;
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t pdnxt;
	unsigned src_frame, dst_frame;
	vm_page_t m;

	if (dst_addr != src_addr)
		return;
	/*
	 * XXX BUGGY.  Among other things srcmpte is assumed to remain
	 * valid through blocking calls, and that's just not going to
	 * be the case.
	 *
	 * FIXME!
	 */
	return;

	src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
		return;
	}

	dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
		/* The page directory is not shared between CPUs */
		cpu_invltlb();
	}
	pmap_inval_init(&info);
	pmap_inval_add(&info, dst_pmap, -1);
	pmap_inval_add(&info, src_pmap, -1);

	/*
	 * critical section protection is required to maintain the page/object
	 * association, interrupts can free pages and remove them from
	 * their objects.
	 */
	crit_enter();
	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
		unsigned *src_pte, *dst_pte;
		vm_page_t dstmpte, srcmpte;
		vm_offset_t srcptepaddr;
		unsigned ptepindex;

		if (addr >= UPT_MIN_ADDRESS)
			panic("pmap_copy: invalid to pmap_copy page tables\n");

		/*
		 * Don't let optional prefaulting of pages make us go
		 * way below the low water mark of free pages or way
		 * above high water mark of used pv entries.
		 */
		if (vmstats.v_free_count < vmstats.v_free_reserved ||
		    pv_entry_count > pv_entry_high_water)
			break;

		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
		ptepindex = addr >> PDRSHIFT;

		srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
		if (srcptepaddr == 0)
			continue;

		if (srcptepaddr & PG_PS) {
			if (dst_pmap->pm_pdir[ptepindex] == 0) {
				dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
				dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
			}
			continue;
		}

		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
		if ((srcmpte == NULL) || (srcmpte->hold_count == 0) ||
		    (srcmpte->flags & PG_BUSY)) {
			continue;
		}

		if (pdnxt > end_addr)
			pdnxt = end_addr;

		src_pte = (unsigned *) vtopte(addr);
		dst_pte = (unsigned *) avtopte(addr);
		while (addr < pdnxt) {
			unsigned ptetemp;

			ptetemp = *src_pte;
			/*
			 * we only virtual copy managed pages
			 */
			if ((ptetemp & PG_MANAGED) != 0) {
				/*
				 * We have to check after allocpte for the
				 * pte still being around...  allocpte can
				 * block.
				 *
				 * pmap_allocpte() can block.  If we lose
				 * our page directory mappings we stop.
				 */
				dstmpte = pmap_allocpte(dst_pmap, addr);

				if (src_frame != (((unsigned) PTDpde) & PG_FRAME) ||
				    dst_frame != (((unsigned) APTDpde) & PG_FRAME)
				) {
					kprintf("WARNING: pmap_copy: detected and corrected race\n");
					pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
					goto failed;
				} else if ((*dst_pte == 0) &&
					   (ptetemp = *src_pte) != 0 &&
					   (ptetemp & PG_MANAGED)) {
					/*
					 * Clear the modified and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					m = PHYS_TO_VM_PAGE(ptetemp);
					*dst_pte = ptetemp & ~(PG_M | PG_A);
					++dst_pmap->pm_stats.resident_count;
					pmap_insert_entry(dst_pmap, addr,
							  dstmpte, m);
					KKASSERT(m->flags & PG_MAPPED);
				} else {
					kprintf("WARNING: pmap_copy: dst_pte race detected and corrected\n");
					pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
					goto failed;
				}
				if (dstmpte->hold_count >= srcmpte->hold_count)
					break;
			}
			addr += PAGE_SIZE;
			src_pte++;
			dst_pte++;
		}
	}
failed:
	crit_exit();
	pmap_inval_flush(&info);
}

/*
 * pmap_zero_page:
 *
 *	Zero the specified PA by mapping the page into KVM and clearing its
 *	contents.
 *
 *	This function may be called from an interrupt and no locking is
 *	required.
 */
void
pmap_zero_page(vm_paddr_t phys)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *)gd->gd_CMAP3)
		panic("pmap_zero_page: CMAP3 busy");
	*(int *)gd->gd_CMAP3 =
		PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686)
		i686_pagezero(gd->gd_CADDR3);
	else
#endif
		bzero(gd->gd_CADDR3, PAGE_SIZE);
	*(int *) gd->gd_CMAP3 = 0;
	crit_exit();
}
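
/*
 * Illustrative sketch: gd_CMAP3/gd_CADDR3 above form a per-cpu pte
 * "window" reserved at boot.  Any physical page can be reached by
 * pointing the window pte at it and invalidating the stale TLB entry;
 * pmap_zero_page(), pmap_page_assertzero() and the copy routines below
 * all share this pattern.  The helper itself is hypothetical.
 */
#if 0
static void *
example_map_window(struct mdglobaldata *gd, vm_paddr_t phys)
{
	*(int *)gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);	/* flush the stale translation */
	return (gd->gd_CADDR3);
}
#endif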

/*
 * pmap_page_assertzero:
 *
 *	Assert that a page is empty, panic if it isn't.
 */
void
pmap_page_assertzero(vm_paddr_t phys)
{
	struct mdglobaldata *gd = mdcpu;
	int i;

	crit_enter();
	if (*(int *)gd->gd_CMAP3)
		panic("pmap_page_assertzero: CMAP3 busy");
	*(int *)gd->gd_CMAP3 =
		PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);
	for (i = 0; i < PAGE_SIZE; i += 4) {
		if (*(int *)((char *)gd->gd_CADDR3 + i) != 0) {
			panic("pmap_page_assertzero() @ %p not zero!\n",
			      (void *)gd->gd_CADDR3);
		}
	}
	*(int *) gd->gd_CMAP3 = 0;
	crit_exit();
}

/*
 * pmap_zero_page_area:
 *
 *	Zero part of a physical page by mapping it into memory and clearing
 *	its contents with bzero.
 *
 *	off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_paddr_t phys, int off, int size)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *) gd->gd_CMAP3)
		panic("pmap_zero_page_area: CMAP3 busy");
	*(int *) gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
		i686_pagezero(gd->gd_CADDR3);
	else
#endif
		bzero((char *)gd->gd_CADDR3 + off, size);
	*(int *) gd->gd_CMAP3 = 0;
	crit_exit();
}

/*
 * pmap_copy_page:
 *
 *	Copy the physical page from the source PA to the target PA.
 *	This function may be called from an interrupt.  No locking
 *	is required.
 */
void
pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *) gd->gd_CMAP1)
		panic("pmap_copy_page: CMAP1 busy");
	if (*(int *) gd->gd_CMAP2)
		panic("pmap_copy_page: CMAP2 busy");

	*(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
	*(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

	cpu_invlpg(gd->gd_CADDR1);
	cpu_invlpg(gd->gd_CADDR2);

	bcopy(gd->gd_CADDR1, gd->gd_CADDR2, PAGE_SIZE);

	*(int *) gd->gd_CMAP1 = 0;
	*(int *) gd->gd_CMAP2 = 0;
	crit_exit();
}

/*
 * pmap_copy_page_frag:
 *
 *	Copy a portion of the physical page from the source PA to the
 *	target PA.  This function may be called from an interrupt.  No
 *	locking is required.
 */
void
pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *) gd->gd_CMAP1)
		panic("pmap_copy_page_frag: CMAP1 busy");
	if (*(int *) gd->gd_CMAP2)
		panic("pmap_copy_page_frag: CMAP2 busy");

	*(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
	*(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

	cpu_invlpg(gd->gd_CADDR1);
	cpu_invlpg(gd->gd_CADDR2);

	bcopy((char *)gd->gd_CADDR1 + (src & PAGE_MASK),
	      (char *)gd->gd_CADDR2 + (dst & PAGE_MASK),
	      bytes);

	*(int *) gd->gd_CMAP1 = 0;
	*(int *) gd->gd_CMAP2 = 0;
	crit_exit();
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	crit_enter();

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			crit_exit();
			return TRUE;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	crit_exit();
	return (FALSE);
}

/*
 * Remove all pages from the specified address space; this aids process
 * exit speeds.  Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
 */
void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct lwp *lp;
	unsigned *pte, tpte;
	pv_entry_t pv, npv;
	vm_page_t m;
	pmap_inval_info info;
	int iscurrentpmap;
	int32_t save_generation;

	lp = curthread->td_lwp;
	if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
		iscurrentpmap = 1;
	else
		iscurrentpmap = 0;

	pmap_inval_init(&info);
	crit_enter();
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
		if (pv->pv_va >= eva || pv->pv_va < sva) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}

		KKASSERT(pmap == pv->pv_pmap);

		if (iscurrentpmap)
			pte = (unsigned *)vtopte(pv->pv_va);
		else
			pte = pmap_pte_quick(pmap, pv->pv_va);
		if (pmap->pm_active)
			pmap_inval_add(&info, pmap, pv->pv_va);

		/*
		 * We cannot remove wired pages from a process' mapping
		 * at this time
		 */
		if (*pte & PG_W) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}
		tpte = loadandclear(pte);

		m = PHYS_TO_VM_PAGE(tpte);

		KASSERT(m < &vm_page_array[vm_page_array_size],
			("pmap_remove_pages: bad tpte %x", tpte));

		KKASSERT(pmap->pm_stats.resident_count > 0);
		--pmap->pm_stats.resident_count;

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M) {
			vm_page_dirty(m);
		}

		npv = TAILQ_NEXT(pv, pv_plist);
		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		save_generation = ++pmap->pm_generation;

		m->md.pv_list_count--;
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);

		pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
		free_pv_entry(pv);

		/*
		 * Restart the scan if we blocked during the unuse or free
		 * calls and other removals were made.
		 */
		if (save_generation != pmap->pm_generation) {
			kprintf("Warning: pmap_remove_pages race-A avoided\n");
			npv = TAILQ_FIRST(&pmap->pm_pvlist);
		}
	}
	pmap_inval_flush(&info);
	crit_exit();
}

/*
 * pmap_testbit tests bits in pte's.
 * Note that the testbit/clearbit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
	pv_entry_t pv;
	unsigned *pte;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		return FALSE;

	crit_enter();

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * if the bit being tested is the modified bit, then
		 * mark clean_map and ptes as never
		 * modified.
		 */
		if (bit & (PG_A|PG_M)) {
			if (!pmap_track_modified(pv->pv_va))
				continue;
		}

#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			kprintf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
			continue;
		}
#endif
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		if (*pte & bit) {
			crit_exit();
			return TRUE;
		}
	}
	crit_exit();
	return (FALSE);
}

/*
 * this routine is used to modify bits in ptes
 */
static __inline void
pmap_clearbit(vm_page_t m, int bit)
{
	struct pmap_inval_info info;
	pv_entry_t pv;
	unsigned *pte;
	unsigned pbits;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	pmap_inval_init(&info);
	crit_enter();

	/*
	 * Loop over all current mappings setting/clearing as appropriate.
	 * If setting RO do we need to clear the VAC?
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * don't write protect pager mappings
		 */
		if (bit == PG_RW) {
			if (!pmap_track_modified(pv->pv_va))
				continue;
		}

#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			kprintf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
			continue;
		}
#endif

		/*
		 * Careful here.  We can use a locked bus instruction to
		 * clear PG_A or PG_M safely but we need to synchronize
		 * with the target cpus when we mess with PG_RW.
		 *
		 * We do not have to force synchronization when clearing
		 * PG_M even for PTEs generated via virtual memory maps,
		 * because the virtual kernel will invalidate the pmap
		 * entry when/if it needs to resynchronize the Modify bit.
		 */
		if (bit & PG_RW)
			pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
again:
		pbits = *pte;
		if (pbits & bit) {
			if (bit == PG_RW) {
				if (pbits & PG_M) {
					vm_page_dirty(m);
					atomic_clear_int(pte, PG_M|PG_RW);
				} else {
					/*
					 * The cpu may be trying to set PG_M
					 * simultaneously with our clearing
					 * of PG_RW.
					 */
					if (!atomic_cmpset_int(pte, pbits,
							       pbits & ~PG_RW))
						goto again;
				}
			} else if (bit == PG_M) {
				/*
				 * We could also clear PG_RW here to force
				 * a fault on write to redetect PG_M for
				 * virtual kernels, but it isn't necessary
				 * since virtual kernels invalidate the pte
				 * when they clear the VPTE_M bit in their
				 * virtual page tables.
				 */
				atomic_clear_int(pte, PG_M);
			} else {
				atomic_clear_int(pte, bit);
			}
		}
	}
	pmap_inval_flush(&info);
	crit_exit();
}
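
/*
 * Illustrative sketch: the "again:" loop above is the standard
 * compare-and-set retry.  Written as a hypothetical stand-alone helper
 * it looks like this; the update only succeeds if no other cpu changed
 * the pte between the read and the atomic_cmpset_int().
 */
#if 0
static void
example_clear_pte_bits(volatile unsigned *pte, unsigned bits)
{
	unsigned old;

	do {
		old = *pte;
	} while (!atomic_cmpset_int(pte, old, old & ~bits));
}
#endif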

/*
 * pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
			pmap_clearbit(m, PG_RW);
			vm_page_flag_clear(m, PG_WRITEABLE);
		} else {
			pmap_remove_all(m);
		}
	}
}
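
/*
 * Illustrative sketch (hypothetical caller): downgrading and then fully
 * revoking a page.  A read-only request clears PG_RW everywhere via
 * pmap_clearbit(); VM_PROT_NONE tears all mappings down through
 * pmap_remove_all().
 */
#if 0
static void
example_revoke_page(vm_page_t m)
{
	pmap_page_protect(m, VM_PROT_READ);	/* write-protect everywhere */
	pmap_page_protect(m, VM_PROT_NONE);	/* then remove all mappings */
}
#endif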

vm_paddr_t
pmap_phys_address(vm_pindex_t ppn)
{
	return (i386_ptob(ppn));
}

/*
 * pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	pv_entry_t pv, pvf, pvn;
	unsigned *pte;
	int rtval = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (rtval);

	crit_enter();

	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pvf = pv;

		do {
			pvn = TAILQ_NEXT(pv, pv_list);

			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);

			if (!pmap_track_modified(pv->pv_va))
				continue;

			pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);

			if (pte && (*pte & PG_A)) {
#ifdef SMP
				atomic_clear_int(pte, PG_A);
#else
				atomic_clear_int_nonlocked(pte, PG_A);
#endif
				rtval++;
				if (rtval > 4) {
					break;
				}
			}
		} while ((pv = pvn) != NULL && pv != pvf);
	}
	crit_exit();

	return (rtval);
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	return pmap_testbit(m, PG_M);
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pmap_clearbit(m, PG_M);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	pmap_clearbit(m, PG_A);
}

/*
 * Miscellaneous support routines follow
 */

static void
i386_protection_init(void)
{
	int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			/*
			 * Read access is also 0. There isn't any execute bit,
			 * so just make it readable.
			 */
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = 0;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}
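
/*
 * Illustrative note: pte_prot(), used by pmap_enter(), reduces to a
 * lookup in the protection_codes[] table initialized above.  A sketch
 * of that lookup (the real definition lives in this platform's pmap
 * headers, and may differ):
 */
#if 0
#define example_pte_prot(pmap, prot)	(protection_codes[(prot) & 7])
#endif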

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * NOTE: we can't use pgeflag unless we invalidate the pages one at
 * a time.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;
	unsigned *pte;

	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(&kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pa = pa & PG_FRAME;
	for (tmpva = va; size > 0;) {
		pte = (unsigned *)vtopte(tmpva);
		*pte = pa | PG_RW | PG_V; /* | pgeflag; */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	cpu_invltlb();
	smp_invltlb();

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = va & PG_FRAME;
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);
	pmap_qremove(va, size >> PAGE_SHIFT);
	kmem_free(&kernel_map, base, size);
}
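
/*
 * Illustrative sketch (hypothetical driver): mapping a device's register
 * window through pmap_mapdev() and releasing it again.  The physical
 * address and one-page size are made up for the example.
 */
#if 0
static void
example_probe_registers(vm_paddr_t devpa)
{
	void *regs;

	regs = pmap_mapdev(devpa, PAGE_SIZE);
	/* ... poke at the device registers ... */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}
#endif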

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	unsigned *ptep, pte;
	vm_page_t m;
	int val = 0;

	ptep = pmap_pte(pmap, addr);
	if (ptep == 0) {
		return 0;
	}

	if ((pte = *ptep) != 0) {
		vm_offset_t pa;

		val = MINCORE_INCORE;
		if ((pte & PG_MANAGED) == 0)
			return val;

		pa = pte & PG_FRAME;

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte & PG_M)
			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
		/*
		 * Modified by someone
		 */
		else if (m->dirty || pmap_is_modified(m))
			val |= MINCORE_MODIFIED_OTHER;
		/*
		 * Referenced by us
		 */
		if (pte & PG_A)
			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;

		/*
		 * Referenced by someone
		 */
		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
			val |= MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
		}
	}
	return val;
}
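
/*
 * Illustrative sketch: mincore(2) ultimately reduces to one
 * pmap_mincore() call per page of the queried range.  A hypothetical
 * outline of that loop:
 */
#if 0
static void
example_mincore_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
		      char *vec)
{
	vm_offset_t va;

	for (va = sva; va < eva; va += PAGE_SIZE)
		*vec++ = (char)pmap_mincore(pmap, va);
}
#endif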

/*
 * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
 * vmspace will be ref'd and the old one will be deref'd.
 *
 * The vmspace for all lwps associated with the process will be adjusted
 * and cr3 will be reloaded if any lwp is the current lwp.
 */
void
pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
{
	struct vmspace *oldvm;
	struct lwp *lp;

	crit_enter();
	oldvm = p->p_vmspace;
	if (oldvm != newvm) {
		p->p_vmspace = newvm;
		KKASSERT(p->p_nthreads == 1);
		lp = RB_ROOT(&p->p_lwp_tree);
		pmap_setlwpvm(lp, newvm);
		if (adjrefs) {
			sysref_get(&newvm->vm_sysref);
			sysref_put(&oldvm->vm_sysref);
		}
	}
	crit_exit();
}

/*
 * Set the vmspace for a LWP.  The vmspace is almost universally set the
 * same as the process vmspace, but virtual kernels need to swap out contexts
 * on a per-lwp basis.
 */
void
pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
{
	struct vmspace *oldvm;
	struct pmap *pmap;

	crit_enter();
	oldvm = lp->lwp_vmspace;

	if (oldvm != newvm) {
		lp->lwp_vmspace = newvm;
		if (curthread->td_lwp == lp) {
			pmap = vmspace_pmap(newvm);
#if defined(SMP)
			atomic_set_int(&pmap->pm_active, 1 << mycpu->gd_cpuid);
#else
			pmap->pm_active |= 1;
#endif
#if defined(SWTCH_OPTIM_STATS)
			tlb_flush_count++;
#endif
			curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pdir);
			load_cr3(curthread->td_pcb->pcb_cr3);
			pmap = vmspace_pmap(oldvm);
#if defined(SMP)
			atomic_clear_int(&pmap->pm_active,
					 1 << mycpu->gd_cpuid);
#else
			pmap->pm_active &= ~1;
#endif
		}
	}
	crit_exit();
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
		return addr;
	}

	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	return addr;
}

#if defined(DEBUG)

static void	pads (pmap_t pm);
void		pmap_pvdump (vm_paddr_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	unsigned va, i, j;
	unsigned *ptep;

	if (pm == &kernel_pmap)
		return;
	crit_enter();
	for (i = 0; i < 1024; i++) {
		if (pm->pm_pdir[i]) {
			for (j = 0; j < 1024; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == &kernel_pmap && va < KERNBASE)
					continue;
				if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte_quick(pm, va);
				if (pmap_pte_v(ptep))
					kprintf("%x:%x ", va, *(int *) ptep);
			}
		}
	}
	crit_exit();
}