/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
 * $DragonFly: src/sys/i386/i386/Attic/mplock.s,v 1.9 2003/07/20 07:46:19 dillon Exp $
 *
 * Functions for locking between CPUs in an SMP system.
 *
 * This is an "exclusive counting semaphore".  The lock word, mp_lock, is
 * either free (-1) or holds the id of the owning CPU.  In this version
 * the recursion count is kept separately, in the owning thread's
 * td_mpcount field, rather than being packed into the lock word as in
 * the older 0xXXYYYYYY (cpu-id + count) encoding.
 *
 * Contrary to most implementations around, this one is entirely atomic:
 * the attempt to seize or release the semaphore is done in one atomic
 * operation.  This way we are safe from all kinds of weird reentrancy
 * situations.
 */
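
/*
 * Worked example of the states described above (an illustrative sketch,
 * not part of the lock implementation):
 *
 *	mp_lock == -1			lock is free (MP_FREE_LOCK)
 *	mp_lock == 2			lock owned by cpu 2
 *	curthread->td_mpcount == 3	the owner has acquired it 3 deep
 */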

#include <machine/asmacros.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>

#include "assym.s"

/*
 * YYY Debugging only.  Define this to be paranoid about invalidating the
 * TLB when we get giant.
 */
#undef PARANOID_INVLTLB

	.data
	ALIGN_DATA
#ifdef SMP
	.globl	mp_lock
mp_lock:
	.long	-1			/* initialized to not held */
#endif

	.text
	SUPERALIGN_TEXT

	/*
	 * Note on cmpxchgl... exchanges ecx with mem if mem matches eax.
	 * Z=1 (jz) on success.  A lock prefix is required for MP.
	 */
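	/*
	 * Roughly, "lock cmpxchgl %ecx,mem" behaves like this C sketch,
	 * executed as a single atomic step (illustration only):
	 *
	 *	if (mem == eax) { mem = ecx; ZF = 1; }
	 *	else            { eax = mem; ZF = 0; }
	 */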
NON_GPROF_ENTRY(cpu_get_initial_mplock)
	movl	PCPU(curthread),%ecx
	movl	$1,TD_MPCOUNT(%ecx)	/* curthread has mpcount of 1 */
	movl	$0,mp_lock		/* owned by cpu 0 */
	NON_GPROF_RET

	/*
	 * cpu_try_mplock() returns non-zero on success, 0 on failure.  It
	 * only adjusts mp_lock.  It does not touch td_mpcount.
	 */
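	/*
	 * Illustrative caller-side sketch (an assumption, not from this
	 * file: a C caller that manages td_mpcount itself, such as the
	 * lwkt_switch() path, might use it like this):
	 *
	 *	if (cpu_try_mplock() == 0)
	 *		...lock unavailable, leave the thread queued...
	 */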
NON_GPROF_ENTRY(cpu_try_mplock)
	movl	PCPU(cpuid),%ecx
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock	/* ecx<->mem if eax matches */
	jnz	1f
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
	movl	$1,%eax
	NON_GPROF_RET
1:
	subl	%eax,%eax
	NON_GPROF_RET

	/*
	 * get_mplock() obtains the MP lock and may switch away if it cannot
	 * get it.  Note that td_mpcount may not be synchronized with the
	 * actual state of the MP lock.  This situation occurs when
	 * get_mplock() or try_mplock() is indirectly called from the
	 * lwkt_switch() code, or from a preemption (though, truthfully,
	 * only try_mplock() should ever be called in this fashion).  If
	 * we cannot get the MP lock we pre-dispose TD_MPCOUNT and call
	 * lwkt_switch().  The MP lock will be held on return.
	 *
	 * Note that both get_mplock() and try_mplock() must pre-dispose
	 * mpcount before attempting to get the lock, in case we get
	 * preempted.  This allows us to avoid expensive interrupt
	 * disablement instructions and allows us to be called from outside
	 * a critical section.
	 */
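	/*
	 * Equivalent C sketch of the paths below (illustrative only;
	 * "cpuid" and "td" stand for the per-cpu id and curthread):
	 *
	 *	if (mp_lock == cpuid) {
	 *		td->td_mpcount++;	already owned: just count
	 *	} else {
	 *		td->td_mpcount++;	pre-dispose before trying
	 *		if (!atomic_cmpset_int(&mp_lock, MP_FREE_LOCK, cpuid))
	 *			lwkt_switch();	returns with the lock held
	 *	}
	 */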
NON_GPROF_ENTRY(get_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	cmpl	%ecx,mp_lock
	jne	1f
	incl	TD_MPCOUNT(%edx)
	NON_GPROF_RET
1:
	incl	TD_MPCOUNT(%edx)
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock
	jnz	2f
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
	NON_GPROF_RET
2:
	call	lwkt_switch		/* will be correct on return */
#ifdef INVARIANTS
	movl	PCPU(cpuid),%eax	/* failure */
	cmpl	%eax,mp_lock
	jne	4f
#endif
	NON_GPROF_RET
#ifdef INVARIANTS
4:
	cmpl	$0,panicstr		/* don't double panic */
	je	badmp_get2
	NON_GPROF_RET
#endif

	/*
	 * try_mplock() attempts to obtain the MP lock and will not switch
	 * away if it cannot get it.  Note that td_mpcount may not be
	 * synchronized with the actual state of the MP lock.
	 */
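	/*
	 * Equivalent C sketch (illustrative only, same conventions as the
	 * get_mplock() sketch above):
	 *
	 *	td->td_mpcount++;		pre-dispose
	 *	if (mp_lock == cpuid ||
	 *	    atomic_cmpset_int(&mp_lock, MP_FREE_LOCK, cpuid))
	 *		return (1);
	 *	td->td_mpcount--;		un-dispose on failure
	 *	return (0);
	 */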
NON_GPROF_ENTRY(try_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	cmpl	%ecx,mp_lock
	jne	1f
	incl	TD_MPCOUNT(%edx)
	movl	$1,%eax
	NON_GPROF_RET
1:
	incl	TD_MPCOUNT(%edx)	/* pre-dispose */
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock
	jnz	2f
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
	movl	$1,%eax
	NON_GPROF_RET
2:
	decl	TD_MPCOUNT(%edx)	/* un-dispose */
	subl	%eax,%eax
	NON_GPROF_RET

	/*
	 * rel_mplock() releases the MP lock.  The MP lock MUST be held and
	 * td_mpcount must NOT be out of synch with the lock.  The physical
	 * lock may be released prior to setting the count to 0; preemption
	 * deals with that case (see lwkt_thread.c).
	 */
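	/*
	 * Equivalent C sketch (illustrative only).  Note the release
	 * order: the physical lock is dropped before the count is
	 * cleared, as the comment above allows:
	 *
	 *	if (td->td_mpcount == 1) {
	 *		mp_lock = MP_FREE_LOCK;
	 *		td->td_mpcount = 0;
	 *	} else {
	 *		td->td_mpcount--;
	 *	}
	 */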
NON_GPROF_ENTRY(rel_mplock)
	movl	PCPU(curthread),%edx
	movl	TD_MPCOUNT(%edx),%eax
	cmpl	$1,%eax
	je	1f
#ifdef INVARIANTS
	testl	%eax,%eax
	jz	badmp_rel
#endif
	subl	$1,%eax
	movl	%eax,TD_MPCOUNT(%edx)
	NON_GPROF_RET
1:
#ifdef INVARIANTS
	movl	PCPU(cpuid),%ecx
	cmpl	%ecx,mp_lock
	jne	badmp_rel2
#endif
	movl	$MP_FREE_LOCK,mp_lock
	movl	$0,TD_MPCOUNT(%edx)
	NON_GPROF_RET

#ifdef INVARIANTS

badmp_get:
	pushl	$bmpsw1
	call	panic
badmp_get2:
	pushl	$bmpsw1a
	call	panic
badmp_rel:
	pushl	$bmpsw2
	call	panic
badmp_rel2:
	pushl	$bmpsw2a
	call	panic

	.data

bmpsw1:
	.asciz	"try/get_mplock(): already have lock! %d %p"

bmpsw1a:
	.asciz	"try/get_mplock(): failed on count or switch %d %p"

bmpsw2:
	.asciz	"rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"

bmpsw2a:
	.asciz	"rel_mplock(): Releasing another cpu's MP lock! %p %p"

#endif

#if 0
/* after 1st acquire of lock we grab all hardware INTs */
#ifdef GRAB_LOPRIO
#define GRAB_HWI	movl	$ALLHWI_LEVEL, lapic_tpr

/* after last release of lock give up LOW PRIO (ie, arbitrate INTerrupts) */
#define ARB_HWI		movl	$LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */
#endif
#endif