2 * Copyright (c) 1997, by Steve Passe
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $FreeBSD: src/sys/i386/i386/simplelock.s,v 1.11.2.2 2003/02/04 20:55:28 jhb Exp $
26 * $DragonFly: src/sys/i386/i386/Attic/simplelock.s,v 1.3 2003/07/01 20:30:40 dillon Exp $
30 * credit to Bruce Evans <bde@zeta.org.au> for help with asm optimization.
33 #include <machine/asmacros.h> /* miscellaneous macros */
34 #include <i386/isa/intr_machdep.h>
35 #include <machine/psl.h>
37 #include <machine/smptests.h> /** FAST_HI */
40 * The following impliments the primitives described in i386/i386/param.h
41 * necessary for the Lite2 lock manager system.
42 * The major difference is that the "volatility" of the lock datum has been
43 * pushed down from the various functions to lock_data itself.
47 * The simple-lock routines are the primitives out of which the lock
48 * package is built. The machine-dependent code must implement an
49 * atomic test_and_set operation that indivisibly sets the simple lock
50 * to non-zero and returns its old value. It also assumes that the
51 * setting of the lock to zero below is indivisible. Simple locks may
52 * only be used for exclusive locks.
55 * volatile int lock_data;
61 * s_lock_init(struct simplelock *lkp)
67 movl 4(%esp), %eax /* get the address of the lock */
74 * s_lock(struct simplelock *lkp)
76 * while (test_and_set(&lkp->lock_data))
81 * If the acquire fails we do a loop of reads waiting for the lock to
82 * become free instead of continually beating on the lock with xchgl.
83 * The theory here is that the CPU will stay within its cache until
84 * a write by the other CPU updates it, instead of continually updating
85 * the local cache (and thus causing external bus writes) with repeated
91 movl 4(%esp), %eax /* get the address of the lock */
96 jz gotit /* it was clear, return */
99 cmpl $0, (%eax) /* wait to empty */
100 jne wait /* still set... */
101 jmp setlock /* empty again, try once more */
108 movl 4(%esp), %edx /* get the address of the lock */
110 movl _cpu_lockid, %ecx /* add cpu id portion */
111 incl %ecx /* add lock portion */
114 cmpxchgl %ecx, (%edx)
115 jz gotit /* it was clear, return */
116 pushl %eax /* save what we xchanged */
117 decl %eax /* remove lock portion */
118 cmpl _cpu_lockid, %eax /* do we hold it? */
119 je bad_slock /* yes, thats not good... */
120 addl $4, %esp /* clear the stack */
123 cmpl $0, (%edx) /* wait to empty */
124 jne wait /* still set... */
125 jmp setlock /* empty again, try once more */
131 /* %eax (current lock) is already on the stack */
137 bsl1: .asciz "rslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"
139 #endif /* SL_DEBUG */
144 * s_lock_try(struct simplelock *lkp)
146 * return (!test_and_set(&lkp->lock_data));
152 movl 4(%esp), %eax /* get the address of the lock */
157 setz %al /* 1 if previous value was 0 */
158 movzbl %al, %eax /* convert to an int */
165 movl 4(%esp), %edx /* get the address of the lock */
166 movl cpu_lockid, %ecx /* add cpu id portion */
167 incl %ecx /* add lock portion */
171 cmpxchgl %ecx, (%edx)
172 setz %al /* 1 if previous value was 0 */
173 movzbl %al, %eax /* convert to an int */
177 #endif /* SL_DEBUG */
182 * s_unlock(struct simplelock *lkp)
184 * lkp->lock_data = 0;
188 movl 4(%esp), %eax /* get the address of the lock */
195 * XXX CRUFTY SS_LOCK IMPLEMENTATION REMOVED XXX
197 * These versions of simple_lock block interrupts,
198 * making it suitable for regions accessed by both top and bottom levels.
199 * This is done by saving the current value of the cpu flags in a per-cpu
200 * global, and disabling interrupts when the lock is taken. When the
201 * lock is released, interrupts might be enabled, depending upon the saved
203 * Because of this, it must ONLY be used for SHORT, deterministic paths!
206 * It would appear to be "bad behaviour" to blindly store a value in
207 * ss_eflags, as this could destroy the previous contents. But since ss_eflags
208 * is a per-cpu variable, and its fatal to attempt to acquire a simplelock
209 * that you already hold, we get away with it. This needs to be cleaned
214 * void ss_lock(struct simplelock *lkp)
219 movl 4(%esp), %eax /* get the address of the lock */
220 movl $1, %ecx /* value for a held lock */
224 xchgl %ecx, (%eax) /* compete */
226 jz sgotit /* it was clear, return */
227 popfl /* previous value while waiting */
230 cmpl $0, (%eax) /* wait to empty */
231 jne swait /* still set... */
232 jmp ssetlock /* empty again, try once more */
234 popl ss_eflags /* save the old eflags */
240 movl 4(%esp), %edx /* get the address of the lock */
242 movl cpu_lockid, %ecx /* add cpu id portion */
243 incl %ecx /* add lock portion */
248 cmpxchgl %ecx, (%edx) /* compete */
249 jz sgotit /* it was clear, return */
250 pushl %eax /* save what we xchanged */
251 decl %eax /* remove lock portion */
252 cmpl cpu_lockid, %eax /* do we hold it? */
253 je sbad_slock /* yes, thats not good... */
254 addl $4, %esp /* clear the stack */
258 cmpl $0, (%edx) /* wait to empty */
259 jne swait /* still set... */
260 jmp ssetlock /* empty again, try once more */
262 popl ss_eflags /* save the old task priority */
268 /* %eax (current lock) is already on the stack */
274 sbsl1: .asciz "rsslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"
276 #endif /* SL_DEBUG */
279 * void ss_unlock(struct simplelock *lkp)
282 movl 4(%esp), %eax /* get the address of the lock */
283 movl $0, (%eax) /* clear the simple lock */
284 testl $PSL_I, ss_eflags
293 * These versions of simple_lock does not contain calls to profiling code.
294 * Thus they can be called from the profiling code.
298 * void s_lock_np(struct simplelock *lkp)
300 NON_GPROF_ENTRY(s_lock_np)
301 movl 4(%esp), %eax /* get the address of the lock */
309 cmpl $0, (%eax) /* wait to empty */
310 jne 2b /* still set... */
311 jmp 1b /* empty again, try once more */
316 * void s_unlock_np(struct simplelock *lkp)
318 NON_GPROF_ENTRY(s_unlock_np)
319 movl 4(%esp), %eax /* get the address of the lock */