2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
9 * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
10 * $DragonFly: src/sys/platform/pc32/i386/mplock.s,v 1.5 2003/07/08 06:27:26 dillon Exp $
12 * Functions for locking between CPUs in a SMP system.
14 * This is an "exclusive counting semaphore". This means that it can be
15 * free (0xffffffff) or be owned by a CPU (0xXXYYYYYY where XX is CPU-id
16 * and YYYYYY is the count).
18 * Contrary to most implementations around, this one is entirely atomic:
19 * The attempt to seize/release the semaphore and the increment/decrement
20 * is done in one atomic operation. This way we are safe from all kinds
21 * of weird reentrancy situations.
24 #include <machine/asmacros.h>
25 #include <machine/smptests.h> /** GRAB_LOPRIO */
26 #include <machine/apic.h>
/*
 * Storage word for the MP lock.  Per the header comment: 0xffffffff means
 * free, otherwise 0xXXYYYYYY where XX = owning CPU id, YYYYYY = count.
 * NOTE(review): the mp_lock label and .data directive for this word are
 * not visible in this chunk — confirm against the full file.
 */
35 .long -1 /* initialized to not held */
42 * Note on cmpxchgl... exchanges ecx with mem if mem matches eax.
43 * Z=1 (jz) on success. A lock prefix is required for MP.
/*
 * void cpu_get_initial_mplock(void)
 *
 * Boot-time hand-off: unconditionally give the MP lock to the current
 * (boot) thread.  Sets the thread's td_mpcount to 1 and stores 0 into
 * mp_lock — i.e. "owned by cpu 0 with count 0" in the 0xXXYYYYYY
 * encoding.  No atomic op is needed: presumably only one CPU is running
 * at this point (TODO confirm against caller).
 * Clobbers: %ecx.
 */
45 NON_GPROF_ENTRY(cpu_get_initial_mplock)
46 movl PCPU(curthread),%ecx
47 movl $1,TD_MPCOUNT(%ecx) /* curthread has mpcount of 1 */
48 movl $0,mp_lock /* owned by cpu 0 */
52 * cpu_try_mplock() returns non-zero on success, 0 on failure. It
53 * only adjusts mp_lock. It does not touch td_mpcount, and it
54 * must be called from inside a critical section.
56 NON_GPROF_ENTRY(cpu_try_mplock)
/*
 * Atomically claim mp_lock if it is currently free.
 * NOTE(review): the setup lines are elided in this chunk — per the
 * cmpxchgl note above, %eax presumably holds the "free" value and %ecx
 * the desired owner word (cpu id) before this instruction; ZF reports
 * success.  The success/failure return paths are also not visible.
 */
59 lock cmpxchgl %ecx,mp_lock /* ecx<->mem if eax matches */
/*
 * void get_mplock(void)
 *
 * Acquire the MP lock for curthread, blocking (via lwkt_switch) until
 * it can be obtained.  Recursive acquisition just bumps td_mpcount.
 * NOTE(review): many interior lines (branch targets, critical-section
 * enter/exit, ret paths) are elided in this chunk; comments below hedge
 * accordingly.
 */
67 NON_GPROF_ENTRY(get_mplock)
68 movl PCPU(curthread),%edx
69 cmpl $0,TD_MPCOUNT(%edx) /* already holding the lock? */
71 incl TD_MPCOUNT(%edx) /* already have it, just ++mpcount */
/* First acquisition path: record a count of 1, then try the atomic swap. */
76 movl $1,TD_MPCOUNT(%edx)
79 lock cmpxchgl %ecx,mp_lock /* ecx<->mem & JZ if eax matches */
/*
 * Contended path: lock is held by another CPU.  Spin/switch until it
 * frees up.  The critical-section priority bump around lwkt_switch
 * keeps the switch itself from being preempted (presumably — the
 * elided lines between these fragments would confirm).
 */
85 movl PCPU(cpuid),%eax /* failure */
89 addl $TDPRI_CRIT,TD_PRI(%edx) /* enter critical section */
91 call lwkt_switch /* will be correct on return */
92 movl PCPU(curthread),%edx /* curthread may have changed across switch */
93 subl $TDPRI_CRIT,TD_PRI(%edx) /* leave critical section */
/* Sanity path: feeds the "already have lock!" panic below, unless panicking. */
96 cmpl $0,panicstr /* don't double panic */
/*
 * int try_mplock(void)
 *
 * Non-blocking MP-lock acquisition for curthread.  Recursive case just
 * increments td_mpcount; first acquisition does the atomic cmpxchg and,
 * on success, sets td_mpcount to 1.  Return-value convention and the
 * branch targets are in lines elided from this chunk (the function name
 * and the cpu_try_mplock contract above suggest non-zero on success,
 * 0 on failure — TODO confirm).
 */
101 NON_GPROF_ENTRY(try_mplock)
102 movl PCPU(curthread),%edx
103 cmpl $0,TD_MPCOUNT(%edx) /* already holding the lock? */
105 incl TD_MPCOUNT(%edx) /* already have it, just ++mpcount */
111 movl PCPU(cpuid),%ecx /* ecx = desired owner word (our cpu id) */
113 lock cmpxchgl %ecx,mp_lock /* ecx<->mem & JZ if eax matches */
115 movl $1,TD_MPCOUNT(%edx) /* success: first reference */
123 movl PCPU(cpuid),%eax /* failure */
/*
 * void rel_mplock(void)
 *
 * Release one reference on the MP lock held by curthread.  A recursive
 * release just writes back the decremented count; the final release
 * zeroes td_mpcount and stores MP_FREE_LOCK into mp_lock.
 * NOTE(review): the decrement of %eax, the branch between the two
 * paths, and the ownership-check feeding the "another cpu's MP lock"
 * panic below are all in lines elided from this chunk.
 */
132 NON_GPROF_ENTRY(rel_mplock)
133 movl PCPU(curthread),%edx
134 movl TD_MPCOUNT(%edx),%eax /* current reference count */
142 movl %eax,TD_MPCOUNT(%edx) /* recursive release: store new count */
148 movl PCPU(cpuid),%ecx /* presumably for the ownership check (elided) */
/* Final release: clear our count, then mark the lock free. */
152 movl $0,TD_MPCOUNT(%edx)
153 movl $MP_FREE_LOCK,mp_lock
/*
 * Panic format strings referenced by the sanity checks above.  These are
 * runtime data — do not edit the text.  Their labels are elided from
 * this chunk.
 */
172 .asciz "try/get_mplock(): already have lock! %d %p"
175 .asciz "rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"
178 .asciz "rel_mplock(): Releasing another cpu's MP lock! %p %p"
/*
 * APIC task-priority-register helpers (see GRAB_LOPRIO in smptests.h,
 * included above).  Writing lapic_tpr changes which interrupt priority
 * levels this CPU will accept.  NOTE(review): the surrounding #ifdef
 * and the definitions of ALLHWI_LEVEL/LOPRIO_LEVEL are not visible in
 * this chunk.
 */
183 /* after 1st acquire of lock we grab all hardware INTs */
185 #define GRAB_HWI movl $ALLHWI_LEVEL, lapic_tpr
187 /* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
188 #define ARB_HWI movl $LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */