/*
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * pmap invalidation support code.  Certain hardware requirements must
 * be dealt with when manipulating page table entries and page directory
 * entries within a pmap.  In particular, we cannot safely manipulate
 * page tables which are in active use by another cpu (even if it is
 * running in userland) for two reasons: First, TLB writebacks will
 * race against our own modifications and tests.  Second, even if we
 * were to use bus-locked instructions we could still screw up the
 * target cpu's instruction pipeline due to Intel cpu errata.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>

#include <vm/vm_object.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>

#define LOOPMASK	(/* 32 * */ 16 * 128 * 1024 - 1)

#define MAX_INVAL_PAGES		128
struct pmap_inval_info {
	vm_offset_t	va;
	pt_entry_t	*ptep;
	pt_entry_t	opte;
	pt_entry_t	npte;
	enum { INVDONE, INVSTORE, INVCMPSET } mode;
	int		success;
	int		npgs;
	cpumask_t	done;
	cpumask_t	mask;
	cpumask_t	sigmask;
	long		xloops;
};

typedef struct pmap_inval_info pmap_inval_info_t;
static pmap_inval_info_t invinfo[MAXCPU];
extern cpumask_t smp_invmask;
extern cpumask_t smp_in_mask;
extern cpumask_t smp_smurf_mask;

static long pmap_inval_bulk_count;

SYSCTL_LONG(_machdep, OID_AUTO, pmap_inval_bulk_count, CTLFLAG_RW,
	    &pmap_inval_bulk_count, 0, "");
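
/*
 * Note (illustrative, not part of the original source): the bulk counter
 * above is exported to userland as machdep.pmap_inval_bulk_count, so it
 * can be inspected with, e.g.:
 *
 *	sysctl machdep.pmap_inval_bulk_count
 */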

void
pmap_inval_init(pmap_t pmap)
{
	cpulock_t olock;
	cpulock_t nlock;

	crit_enter_id("inval");

	if (pmap != &kernel_pmap) {
		for (;;) {
			olock = pmap->pm_active_lock;
			cpu_ccfence();
			nlock = olock | CPULOCK_EXCL;
			if (olock != nlock &&
			    atomic_cmpset_int(&pmap->pm_active_lock,
					      olock, nlock)) {
				break;
			}
			cpu_pause();
		}
		atomic_add_acq_long(&pmap->pm_invgen, 1);
	}
}

void
pmap_inval_done(pmap_t pmap)
{
	if (pmap != &kernel_pmap) {
		atomic_clear_int(&pmap->pm_active_lock, CPULOCK_EXCL);
		atomic_add_acq_long(&pmap->pm_invgen, 1);
	}
	crit_exit_id("inval");
}

/*
 * API function - invalidate the pte at (va) and replace *ptep with
 * npte atomically across the pmap's active cpus.
 *
 * This is a holy mess.
 *
 * Returns the previous contents of *ptep.
 */
static
void
loopdebug(const char *msg, pmap_inval_info_t *info)
{
	int p;
	int cpu = mycpu->gd_cpuid;

	cpu_lfence();
	atomic_add_long(&smp_smurf_mask.ary[0], 0);
	kprintf("%s %d mode=%d m=%08jx d=%08jx s=%08jx smurf=%08jx",
		msg, cpu, info->mode,
		info->mask.ary[0],
		info->done.ary[0],
		info->sigmask.ary[0],
		smp_smurf_mask.ary[0]);
	for (p = 0; p < ncpus; ++p)
		kprintf(" %d", CPU_prvspace[p]->mdglobaldata.gd_xinvaltlb);
	kprintf("\n");
}

#ifdef LOOPMASK

#define CHECKSIGMASK(info)	_checksigmask(info, __FILE__, __LINE__)

static
void
_checksigmask(pmap_inval_info_t *info, const char *file, int line)
{
	cpumask_t tmp;

	tmp = info->mask;
	CPUMASK_ANDMASK(tmp, info->sigmask);
	if (CPUMASK_CMPMASKNEQ(tmp, info->mask)) {
		kprintf("\"%s\" line %d: bad sig/mask %08jx %08jx\n",
			file, line, info->sigmask.ary[0], info->mask.ary[0]);
	}
}

#else

#define CHECKSIGMASK(info)

#endif

/*
 * Invalidate the specified va across all cpus associated with the pmap.
 * If va == (vm_offset_t)-1, we invltlb() instead of invlpg().  The operation
 * will be done fully synchronously with storing npte into *ptep and returning
 * the previous contents of *ptep.
 *
 * If ptep is NULL the operation will execute semi-synchronously.
 * ptep must be NULL if npgs > 1.
 */
pt_entry_t
pmap_inval_smp(pmap_t pmap, vm_offset_t va, int npgs,
	       pt_entry_t *ptep, pt_entry_t npte)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	pt_entry_t opte = 0;
	int cpu = gd->gd_cpuid;
	cpumask_t tmpmask;
	unsigned long rflags;

	/*
	 * Initialize invalidation for pmap and enter critical section.
	 */
	pmap_inval_init(pmap);
	/*
	 * Shortcut single-cpu case if possible.
	 */
	if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
		/*
		 * Convert to invltlb if there are too many pages to
		 * invlpg on.
		 */
		if (npgs > MAX_INVAL_PAGES) {
			npgs = 0;
			va = (vm_offset_t)-1;
		}

		/*
		 * Invalidate the specified pages, handle invltlb if requested.
		 */
		while (npgs) {
			--npgs;
			if (ptep) {
				opte = atomic_swap_long(ptep, npte);
				++ptep;
			}
			if (va == (vm_offset_t)-1)
				break;
			cpu_invlpg((void *)va);
			va += PAGE_SIZE;
		}
		if (va == (vm_offset_t)-1)
			cpu_invltlb();
		pmap_inval_done(pmap);

		return opte;
	}
	/*
	 * We need a critical section to prevent getting preempted while
	 * we set up our command.  A preemption might execute its own
	 * pmap_inval*() command and create confusion below.
	 */
	info = &invinfo[cpu];

	/*
	 * We must wait for other cpus which may still be finishing up a
	 * prior operation that we requested.
	 *
	 * We do not have to disable interrupts here.  An Xinvltlb can occur
	 * at any time (even within a critical section), but it will not
	 * act on our command until we set our done bits.
	 */
	while (CPUMASK_TESTNZERO(info->done)) {
		long loops;

		loops = ++info->xloops;
		if ((loops & LOOPMASK) == 0) {
			loopdebug("orig_waitA", info);
			/* XXX recover from possible bug */
			CPUMASK_ASSZERO(info->done);
		}
		cpu_pause();
	}
	KKASSERT(info->mode == INVDONE);

	/*
	 * Must set our cpu in the invalidation scan mask before
	 * any possibility of [partial] execution (remember, XINVLTLB
	 * can interrupt a critical section).
	 */
	ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);
	info->va = va;
	info->npgs = npgs;
	info->ptep = ptep;
	info->npte = npte;
	info->opte = 0;
	info->mode = INVSTORE;

	tmpmask = pmap->pm_active;	/* volatile (bits may be cleared) */
	cpu_ccfence();
	CPUMASK_ANDMASK(tmpmask, smp_active_mask);

	/*
	 * If ptep is NULL the operation can be semi-synchronous, which means
	 * we can improve performance by flagging and removing idle cpus
	 * (see the idleinvlclr function in mp_machdep.c).
	 *
	 * Typically kernel page table operation is semi-synchronous.
	 */
	if (ptep == NULL)
		smp_smurf_idleinvlclr(&tmpmask);
	CPUMASK_ORBIT(tmpmask, cpu);
	info->mask = tmpmask;

	/*
	 * Command may start executing the moment 'done' is initialized,
	 * disable current cpu interrupts to prevent the 'done' field from
	 * changing (other cpus can't clear done bits until the originating
	 * cpu clears its mask bit, but other cpus CAN start clearing their
	 * done bits).
	 */
	info->sigmask = tmpmask;
	CHECKSIGMASK(info);
	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_COPY(info->done, tmpmask);
	/* execution can begin here due to races */

	/*
	 * Pass our copy of the done bits (so they don't change out from
	 * under us) to generate the Xinvltlb interrupt on the targets.
	 */
	smp_invlpg(&tmpmask);
	opte = info->opte;
	KKASSERT(info->mode == INVDONE);

	/*
	 * Target cpus will be in their loop exiting concurrently with our
	 * cleanup.  They will not lose the bitmask they obtained before so
	 * we can safely clear this bit.
	 */
	ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
	write_rflags(rflags);
	pmap_inval_done(pmap);

	return opte;
}
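
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that wants to atomically replace a single pte and invalidate its TLB
 * entry on every cpu using the pmap might do something like the
 * following, where pmap, va, ptep and npte are hypothetical
 * caller-supplied values:
 *
 *	pt_entry_t opte;
 *
 *	opte = pmap_inval_smp(pmap, va, 1, ptep, npte);
 *	(opte now holds the previous contents of *ptep)
 *
 * Passing ptep == NULL (with npte == 0) performs only the TLB
 * invalidation in semi-synchronous mode, and va == (vm_offset_t)-1
 * requests a full invltlb instead of per-page invlpg.
 */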

/*
 * API function - invalidate the pte at (va) and replace *ptep with npte
 * atomically only if *ptep equals opte, across the pmap's active cpus.
 *
 * Returns 1 on success, 0 on failure (caller typically retries).
 */
int
pmap_inval_smp_cmpset(pmap_t pmap, vm_offset_t va, pt_entry_t *ptep,
		      pt_entry_t opte, pt_entry_t npte)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	int success;
	int cpu = gd->gd_cpuid;
	cpumask_t tmpmask;
	unsigned long rflags;

	/*
	 * Initialize invalidation for pmap and enter critical section.
	 */
	pmap_inval_init(pmap);
	/*
	 * Shortcut single-cpu case if possible.
	 */
	if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
		if (atomic_cmpset_long(ptep, opte, npte)) {
			if (va == (vm_offset_t)-1)
				cpu_invltlb();
			else
				cpu_invlpg((void *)va);
			pmap_inval_done(pmap);
			return 1;
		} else {
			pmap_inval_done(pmap);
			return 0;
		}
	}
	/*
	 * We need a critical section to prevent getting preempted while
	 * we set up our command.  A preemption might execute its own
	 * pmap_inval*() command and create confusion below.
	 */
	info = &invinfo[cpu];

	/*
	 * We must wait for other cpus which may still be finishing
	 * up a prior operation.
	 */
	while (CPUMASK_TESTNZERO(info->done)) {
		long loops;

		loops = ++info->xloops;
		if ((loops & LOOPMASK) == 0) {
			loopdebug("orig_waitB", info);
			/* XXX recover from possible bug */
			CPUMASK_ASSZERO(info->done);
		}
		cpu_pause();
	}
	KKASSERT(info->mode == INVDONE);

	/*
	 * Must set our cpu in the invalidation scan mask before
	 * any possibility of [partial] execution (remember, XINVLTLB
	 * can interrupt a critical section).
	 */
	ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);
	info->npgs = 1;			/* unused */
	info->va = va;
	info->ptep = ptep;
	info->npte = npte;
	info->opte = opte;
	info->success = 0;
	info->mode = INVCMPSET;

	tmpmask = pmap->pm_active;	/* volatile */
	cpu_ccfence();
	CPUMASK_ANDMASK(tmpmask, smp_active_mask);
	CPUMASK_ORBIT(tmpmask, cpu);
	info->mask = tmpmask;

	/*
	 * Command may start executing the moment 'done' is initialized,
	 * disable current cpu interrupts to prevent the 'done' field from
	 * changing (other cpus can't clear done bits until the originating
	 * cpu clears its mask bit).
	 */
	info->sigmask = tmpmask;
	CHECKSIGMASK(info);
	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_COPY(info->done, tmpmask);

	/*
	 * Pass our copy of the done bits (so they don't change out from
	 * under us) to generate the Xinvltlb interrupt on the targets.
	 */
	smp_invlpg(&tmpmask);
	success = info->success;
	KKASSERT(info->mode == INVDONE);

	ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
	write_rflags(rflags);
	pmap_inval_done(pmap);

	return success;
}
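
/*
 * Illustrative usage sketch (not part of the original source): because
 * pmap_inval_smp_cmpset() fails when *ptep no longer equals opte, a
 * hypothetical caller typically re-reads the pte and retries:
 *
 *	pt_entry_t opte;
 *	pt_entry_t npte;
 *
 *	do {
 *		opte = *ptep;
 *		cpu_ccfence();
 *		npte = compute_new_pte(opte);	(placeholder helper)
 *	} while (pmap_inval_smp_cmpset(pmap, va, ptep, opte, npte) == 0);
 *
 * Here pmap, va, ptep and compute_new_pte() are placeholders supplied by
 * the caller and are not part of this file.
 */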

void
pmap_inval_bulk_init(pmap_inval_bulk_t *bulk, struct pmap *pmap)
{
	bulk->pmap = pmap;
	bulk->va_beg = 0;
	bulk->va_end = 0;
	bulk->count = 0;
}

pt_entry_t
pmap_inval_bulk(pmap_inval_bulk_t *bulk, vm_offset_t va,
		pt_entry_t *ptep, pt_entry_t npte)
{
	pt_entry_t pte;

	/*
	 * Degenerate case, localized or we don't care (e.g. because we
	 * are jacking the entire page table) or the pmap is not in-use
	 * by anyone.  No invalidations are done on any cpu.
	 */
	if (bulk == NULL) {
		pte = atomic_swap_long(ptep, npte);
		return pte;
	}
	/*
	 * If it isn't the kernel pmap we execute the operation synchronously
	 * on all cpus belonging to the pmap, which avoids concurrency bugs in
	 * the hw related to changing pte's out from under threads.
	 *
	 * Eventually I would like to implement streaming pmap invalidation
	 * for user pmaps to reduce mmap/munmap overheads for heavily-loaded
	 * multi-threaded programs.
	 */
	if (bulk->pmap != &kernel_pmap) {
		pte = pmap_inval_smp(bulk->pmap, va, 1, ptep, npte);
		return pte;
	}

	/*
	 * This is the kernel_pmap.  All unmap operations presume that there
	 * are no other cpus accessing the addresses in question.  Implement
	 * the bulking algorithm.  Collect the required information and
	 * synchronize once at the end.
	 */
	pte = atomic_swap_long(ptep, npte);
	if (va == (vm_offset_t)-1) {
		bulk->va_beg = va;
	} else if (bulk->va_beg == bulk->va_end) {
		bulk->va_beg = va;
		bulk->va_end = va + PAGE_SIZE;
	} else if (va == bulk->va_end) {
		bulk->va_end = va + PAGE_SIZE;
	} else {
		/*
		 * Discontiguous va, flush what has been accumulated and
		 * start a new range.
		 */
		bulk->va_beg = (vm_offset_t)-1;
		pmap_inval_bulk_flush(bulk);
		if (va == (vm_offset_t)-1) {
			bulk->va_beg = va;
			bulk->va_end = 0;
		} else {
			bulk->va_beg = va;
			bulk->va_end = va + PAGE_SIZE;
		}
	}
	++bulk->count;

	return pte;
}

void
pmap_inval_bulk_flush(pmap_inval_bulk_t *bulk)
{
	long n;

	if (bulk == NULL)
		return;
	if (bulk->count > 0)
		pmap_inval_bulk_count += (bulk->count - 1);
	if (bulk->va_beg != bulk->va_end) {
		if (bulk->va_beg == (vm_offset_t)-1) {
			pmap_inval_smp(bulk->pmap, bulk->va_beg, 1, NULL, 0);
		} else {
			n = (bulk->va_end - bulk->va_beg) >> PAGE_SHIFT;
			pmap_inval_smp(bulk->pmap, bulk->va_beg, n, NULL, 0);
		}
	}
	bulk->va_beg = 0;
	bulk->va_end = 0;
	bulk->count = 0;
}
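
/*
 * Illustrative usage sketch (not part of the original source): the bulk
 * API is intended to be used as init / N x bulk / flush, so a
 * hypothetical kernel-map unmap loop might look like:
 *
 *	pmap_inval_bulk_t bulk;
 *
 *	pmap_inval_bulk_init(&bulk, &kernel_pmap);
 *	for (va = sva; va < eva; va += PAGE_SIZE, ++ptep)
 *		pmap_inval_bulk(&bulk, va, ptep, 0);
 *	pmap_inval_bulk_flush(&bulk);
 *
 * Contiguous va's are coalesced into a single ranged invalidation (or a
 * full invltlb if the range degenerates), so the IPI/synchronization
 * cost is paid once at flush time rather than per page.  sva, eva and
 * ptep are placeholder variables for this sketch.
 */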

/*
 * Called with a critical section held and interrupts enabled.
 */
int
pmap_inval_intr(cpumask_t *cpumaskp, int toolong)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	int loopme = 0;
	int cpu = gd->gd_cpuid;
	cpumask_t cpumask;

	/*
	 * Check all cpus for invalidations we may need to service.
	 */
	cpu_ccfence();
	cpumask = *cpumaskp;

	while (CPUMASK_TESTNZERO(cpumask)) {
		int n = BSFCPUMASK(cpumask);

		KKASSERT(n >= 0 && n < MAXCPU);

		CPUMASK_NANDBIT(cpumask, n);
		info = &invinfo[n];
		/*
		 * Due to interrupts/races we can catch a new operation
		 * in an older interrupt.  A fence is needed once we detect
		 * the (not) done bit.
		 */
		if (!CPUMASK_TESTBIT(info->done, cpu))
			continue;
		cpu_lfence();

		if (toolong) {
			kprintf("pminvl %d->%d %08jx %08jx mode=%d\n",
				cpu, n, info->done.ary[0], info->mask.ary[0],
				info->mode);
		}
		/*
		 * info->mask and info->done always contain the originating
		 * cpu until the originator is done.  Targets may still be
		 * present in info->done after the originator is done (they
		 * will be finishing up their loops).
		 *
		 * Clear info->mask bits on other cpus to indicate that they
		 * have quiesced (entered the loop).  Once the other mask bits
		 * are clear we can execute the operation on the original,
		 * then clear the mask and done bits on the originator.  The
		 * targets will then finish up their side and clear their
		 * done bits.
		 *
		 * The command is considered 100% done when all done bits have
		 * been cleared.
		 */
		if (n != cpu) {
			/*
			 * Command state machine for 'other' cpus.
			 */
			if (CPUMASK_TESTBIT(info->mask, cpu)) {
				/*
				 * Other cpus indicate to the originator that
				 * they have quiesced (entered the loop).
				 */
				ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
				loopme = 1;
			} else if (info->ptep &&
				   CPUMASK_TESTBIT(info->mask, n)) {
				/*
				 * Other cpu must wait for the originator (n)
				 * to complete its command if ptep is not
				 * NULL.
				 */
				loopme = 1;
			} else {
				/*
				 * Other cpu detects that the originator has
				 * completed its command, or there was no
				 * command.
				 *
				 * Now that the page table entry has changed,
				 * we can follow up with our own invalidation.
				 */
				vm_offset_t va = info->va;
				int npgs;

				if (va == (vm_offset_t)-1 ||
				    info->npgs > MAX_INVAL_PAGES) {
					cpu_invltlb();
				} else {
					for (npgs = info->npgs; npgs; --npgs) {
						cpu_invlpg((void *)va);
						va += PAGE_SIZE;
					}
				}

				ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
				/* info invalid now */
				/* loopme left alone */
			}
		} else if (CPUMASK_TESTBIT(info->mask, cpu)) {
			/*
			 * Originator is waiting for other cpus
			 */
			if (CPUMASK_CMPMASKNEQ(info->mask, gd->gd_cpumask)) {
				/*
				 * Originator waits for other cpus to enter
				 * their loop (aka quiesce).
				 */
				long loops;

				loopme = 1;
				loops = ++info->xloops;
				if ((loops & LOOPMASK) == 0) {
					loopdebug("orig_waitC", info);
					/* XXX recover from possible bug */
					mdcpu->gd_xinvaltlb = 0;
					smp_invlpg(&smp_active_mask);
				}
			} else {
				/*
				 * Originator executes operation and clears
				 * mask to allow other cpus to finish.
				 */
				KKASSERT(info->mode != INVDONE);
				if (info->mode == INVSTORE) {
					if (info->ptep) {
						info->opte =
						    atomic_swap_long(info->ptep,
								     info->npte);
					}
					CHECKSIGMASK(info);
					ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
					CHECKSIGMASK(info);
				} else {
					if (atomic_cmpset_long(info->ptep,
							       info->opte,
							       info->npte)) {
						info->success = 1;
					} else {
						info->success = 0;
					}
					CHECKSIGMASK(info);
					ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
					CHECKSIGMASK(info);
				}
			}
		} else {
			/*
			 * Originator does not have to wait for the other
			 * cpus to finish.  It clears its done bit.  A new
			 * command will not be initiated by the originator
			 * until the other cpus have cleared their done bits
			 * (asynchronously).
			 */
			vm_offset_t va = info->va;
			int npgs;

			if (va == (vm_offset_t)-1 ||
			    info->npgs > MAX_INVAL_PAGES) {
				cpu_invltlb();
			} else {
				for (npgs = info->npgs; npgs; --npgs) {
					cpu_invlpg((void *)va);
					va += PAGE_SIZE;
				}
			}

			/* leave loopme alone */
			/* other cpus may still be finishing up */
			/* can't race originator since that's us */
			info->mode = INVDONE;
			ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
		}
	}
	return loopme;
}