/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
 * $DragonFly: src/sys/i386/i386/Attic/mplock.s,v 1.6 2003/07/10 04:47:53 dillon Exp $
 *
 * Functions for locking between CPUs in an SMP system.
 *
 * This is an "exclusive counting semaphore".  The lock word (mp_lock) is
 * either free (0xffffffff) or holds the id of the owning CPU; the owner's
 * recursion count is kept in the owning thread's td_mpcount rather than in
 * the lock word itself.
 *
 * Contrary to most implementations around, this one is entirely atomic:
 * the attempt to seize the lock is a single locked cmpxchg and the final
 * release is a single store, so we are safe from all kinds of weird
 * reentrancy situations.
 */
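/*
 * For orientation, the same scheme sketched in C (a sketch only, assuming
 * the GCC __sync atomic builtins; the sketch_* names are made up, while
 * mp_lock, td_mpcount and MP_FREE_LOCK are the names used below):
 *
 *      #define MP_FREE_LOCK 0xffffffff                 // no owner
 *
 *      volatile unsigned int mp_lock = MP_FREE_LOCK;   // owning cpu id, or free
 *      struct thread { int td_mpcount; };              // owner's recursion count
 *
 *      static void
 *      sketch_get_mplock(struct thread *td, unsigned int cpuid)
 *      {
 *              if (td->td_mpcount++ != 0)              // recursive: count only
 *                      return;
 *              // first acquisition: atomically take the free lock word
 *              while (!__sync_bool_compare_and_swap(&mp_lock, MP_FREE_LOCK, cpuid))
 *                      ;       // the real code switches away instead of spinning
 *      }
 *
 *      static void
 *      sketch_rel_mplock(struct thread *td)
 *      {
 *              if (--td->td_mpcount == 0)              // last release
 *                      mp_lock = MP_FREE_LOCK;         // free the lock word
 *      }
 */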
#include <machine/asmacros.h>
#include <machine/smptests.h> /** GRAB_LOPRIO */
#include <machine/apic.h>
/*
 * YYY Debugging only.  Define this to be paranoid about invalidating the
 * TLB when we acquire the giant (MP) lock.
 */
#undef PARANOID_INVLTLB
        .long -1 /* initialized to not held */
/*
 * Note on cmpxchgl: it compares mem with %eax.  If they match, %ecx is
 * stored into mem and Z=1 (jz) signals success; otherwise mem is loaded
 * into %eax and Z=0.  A lock prefix is required for MP.
 */
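/*
 * The same operation modeled in C for clarity (a non-atomic model; the lock
 * prefix is what makes the real instruction one atomic step, and the
 * function name is made up):
 *
 *      static int
 *      cmpxchg_model(volatile unsigned int *mem, unsigned int *eax, unsigned int ecx)
 *      {
 *              if (*mem == *eax) {     // match: store ecx, "Z=1"
 *                      *mem = ecx;
 *                      return (1);
 *              }
 *              *eax = *mem;            // mismatch: eax gets the current value, "Z=0"
 *              return (0);
 *      }
 */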
NON_GPROF_ENTRY(cpu_get_initial_mplock)
        movl PCPU(curthread),%ecx
        movl $1,TD_MPCOUNT(%ecx) /* curthread has mpcount of 1 */
        movl $0,mp_lock /* owned by cpu 0 */
/*
 * cpu_try_mplock() returns non-zero on success, 0 on failure.  It only
 * adjusts mp_lock.  It does not touch td_mpcount, and it must be called
 * from inside a critical section.
 */
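/*
 * A sketch of a caller honoring those rules (crit_enter()/crit_exit() stand
 * in for whatever critical-section bracket the caller uses and the wrapper
 * name is made up; the td_mpcount bookkeeping is the caller's job):
 *
 *      static int
 *      sketch_try_mplock_once(void)
 *      {
 *              int gotit;
 *
 *              crit_enter();                   // required around the attempt
 *              gotit = cpu_try_mplock();       // adjusts only mp_lock
 *              if (gotit)
 *                      curthread->td_mpcount = 1;      // first acquisition assumed
 *              crit_exit();
 *              return (gotit);
 *      }
 */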
NON_GPROF_ENTRY(cpu_try_mplock)
        lock cmpxchgl %ecx,mp_lock /* ecx<->mem if eax matches */
#ifdef PARANOID_INVLTLB
        movl %cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
NON_GPROF_ENTRY(get_mplock)
        movl PCPU(curthread),%edx
        cmpl $0,TD_MPCOUNT(%edx) /* already hold it? */
        incl TD_MPCOUNT(%edx) /* already have it, just ++mpcount */
        movl PCPU(cpuid),%eax /* failure */
        movl $1,TD_MPCOUNT(%edx) /* set mpcount for the first acquisition */
        lock cmpxchgl %ecx,mp_lock /* ecx<->mem & JZ if eax matches */
#ifdef PARANOID_INVLTLB
        movl %cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
        movl PCPU(cpuid),%eax /* failure */
        addl $TDPRI_CRIT,TD_PRI(%edx) /* enter critical section */
        call lwkt_switch /* will be correct on return */
        movl PCPU(cpuid),%eax /* failure */
        movl PCPU(curthread),%edx /* reload curthread */
        subl $TDPRI_CRIT,TD_PRI(%edx) /* exit critical section */
        cmpl $0,panicstr /* don't double panic */

        cmpl $0,panicstr /* don't double panic */
NON_GPROF_ENTRY(try_mplock)
        movl PCPU(curthread),%edx
        cmpl $0,TD_MPCOUNT(%edx) /* already hold it? */
        incl TD_MPCOUNT(%edx) /* already have it, just ++mpcount */
        movl PCPU(cpuid),%eax /* failure */
        movl PCPU(cpuid),%ecx /* new owner: our cpu id */
        lock cmpxchgl %ecx,mp_lock /* ecx<->mem & JZ if eax matches */
        movl $1,TD_MPCOUNT(%edx) /* got it: mpcount 0 -> 1 */
#ifdef PARANOID_INVLTLB
        movl %cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
        movl PCPU(cpuid),%eax /* failure */
NON_GPROF_ENTRY(rel_mplock)
        movl PCPU(curthread),%edx
        movl TD_MPCOUNT(%edx),%eax /* current mpcount */
        movl %eax,TD_MPCOUNT(%edx) /* store the new mpcount */
        movl PCPU(cpuid),%ecx /* our cpu id, for the ownership check */
        movl $0,TD_MPCOUNT(%edx) /* last release: mpcount -> 0 */
        movl $MP_FREE_LOCK,mp_lock /* free the lock */
        .asciz "try/get_mplock(): already have lock! %d %p"
        .asciz "try/get_mplock(): failed on count or switch %d %p"
        .asciz "rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"
        .asciz "rel_mplock(): Releasing another cpu's MP lock! %p %p"
/* after 1st acquire of lock we grab all hardware INTs */
#define GRAB_HWI movl $ALLHWI_LEVEL, lapic_tpr

/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
#define ARB_HWI movl $LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */
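/*
 * Roughly, the two hooks pair with the td_mpcount transitions like this
 * (a C sketch; the surrounding code and the plain lapic_tpr stores are
 * assumptions, only the 1st-acquire / last-release pairing comes from the
 * comments above):
 *
 *      if (++td->td_mpcount == 1)              // first acquisition of the MP lock
 *              lapic_tpr = ALLHWI_LEVEL;       // GRAB_HWI: accept all hardware INTs
 *      // ... MP lock held ...
 *      if (--td->td_mpcount == 0)              // last release
 *              lapic_tpr = LOPRIO_LEVEL;       // ARB_HWI: back to LOPRIO so the APIC
 *                                              //   arbitrates INTs among cpus again
 */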