Add missing required '*' in indirect jmp (fix assembler warning).
[dragonfly.git] / sys/i386/include/lock.h
/*
 * Copyright (c) 2003, Matthew Dillon, All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
 * $DragonFly: src/sys/i386/include/Attic/lock.h,v 1.4 2003/07/08 06:27:27 dillon Exp $
 */

#ifndef _MACHINE_LOCK_H_
#define _MACHINE_LOCK_H_

#ifndef _MACHINE_PSL_H_
#include <machine/psl.h>
#endif

/*
 * MP_FREE_LOCK is used by both assembly and C under SMP.
 */
#ifdef SMP
#define MP_FREE_LOCK            0xffffffff      /* value of lock when free */
#endif
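
/*
 * A minimal sketch of how the lock word is interpreted: under SMP,
 * mp_lock holds either MP_FREE_LOCK or the id of the owning cpu (see
 * MP_LOCK_HELD() below).  The helper name here is hypothetical.
 */
#if 0
static __inline int
mp_lock_is_free(u_int lockval)
{
        return (lockval == MP_FREE_LOCK); /* else lockval == owner cpuid */
}
#endif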

#ifdef LOCORE

/*
 * Spinlock assembly support.  Note: %eax and %ecx may be clobbered;
 * no other register will be.  Note that these routines are sometimes
 * called with (%edx) as the mem argument.
 *
 * Under UP the spinlock routines still serve to disable/restore
 * interrupts.
 */

#ifdef SMP

#define SPIN_INIT(mem)                                          \
        movl    $0,mem ;                                        \

#define SPIN_INIT_NOREG(mem)                                    \
        SPIN_INIT(mem) ;                                        \

#define SPIN_LOCK(mem)                                          \
        pushfl ;                                                \
        popl    %ecx ;          /* flags */                     \
        cli ;                                                   \
        orl     $PSL_C,%ecx ;   /* make sure non-zero */        \
7: ;                                                            \
        movl    $0,%eax ;       /* expected contents of lock */ \
        lock cmpxchgl %ecx,mem ; /* Z=1 (jz) on success */      \
        jnz     7b ;                                            \

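
/*
 * A minimal C sketch of what SPIN_LOCK does, for illustration only:
 * the saved EFLAGS image (forced non-zero via PSL_C) is atomically
 * swapped into the lock word, so SPIN_UNLOCK can later restore it.
 * The helper name and the read_eflags()/disable_intr()/
 * atomic_cmpset_int() calls are assumptions (FreeBSD-style cpufunc
 * and atomic helpers), not something this header provides.
 */
#if 0
static __inline void
spin_lock_sketch(volatile u_int *lk)
{
        u_int flags;

        flags = read_eflags();          /* pushfl ; popl %ecx */
        disable_intr();                 /* cli */
        flags |= PSL_C;                 /* orl $PSL_C,%ecx */
        while (atomic_cmpset_int(lk, 0, flags) == 0) /* lock cmpxchgl */
                ;                       /* jnz 7b */
}
#endif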
#define SPIN_LOCK_PUSH_REGS                                     \
        subl    $8,%esp ;                                       \
        movl    %ecx,(%esp) ;                                   \
        movl    %eax,4(%esp) ;                                  \

#define SPIN_LOCK_POP_REGS                                      \
        movl    (%esp),%ecx ;                                   \
        movl    4(%esp),%eax ;                                  \
        addl    $8,%esp ;                                       \

#define SPIN_LOCK_FRAME_SIZE    8

#define SPIN_LOCK_NOREG(mem)                                    \
        SPIN_LOCK_PUSH_REGS ;                                   \
        SPIN_LOCK(mem) ;                                        \
        SPIN_LOCK_POP_REGS ;                                    \

#define SPIN_UNLOCK(mem)                                        \
        pushl   mem ;                                           \
        movl    $0,mem ;                                        \
        popfl ;                                                 \

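
/*
 * Sketch of SPIN_UNLOCK's effect, for illustration only: the lock word
 * holds the EFLAGS image saved by SPIN_LOCK, so releasing the lock also
 * restores the caller's interrupt state.  The helper name is
 * hypothetical; write_eflags() is assumed from <machine/cpufunc.h>.
 */
#if 0
static __inline void
spin_unlock_sketch(volatile u_int *lk)
{
        u_int flags;

        flags = *lk;            /* pushl mem */
        *lk = 0;                /* movl $0,mem */
        write_eflags(flags);    /* popfl */
}
#endif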
#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

#define SPIN_UNLOCK_NOREG(mem)                                  \
        SPIN_UNLOCK(mem) ;                                      \

#else

#define SPIN_LOCK(mem)                                          \
        pushfl ;                                                \
        cli ;                                                   \
        orl     $PSL_C,(%esp) ;                                 \
        popl    mem ;                                           \

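
/*
 * A minimal sketch of the UP case, for illustration only: with a single
 * cpu there is nothing to spin on, so "locking" just records the saved
 * EFLAGS (forced non-zero via PSL_C) in the lock word with interrupts
 * disabled.  The helper name is hypothetical; read_eflags() and
 * disable_intr() are assumed from <machine/cpufunc.h>.
 */
#if 0
static __inline void
spin_lock_up_sketch(volatile u_int *lk)
{
        u_int flags;

        flags = read_eflags();  /* pushfl */
        disable_intr();         /* cli */
        *lk = flags | PSL_C;    /* orl $PSL_C,(%esp) ; popl mem */
}
#endif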
#define SPIN_LOCK_PUSH_REGS
#define SPIN_LOCK_POP_REGS
#define SPIN_LOCK_FRAME_SIZE    0

#define SPIN_UNLOCK(mem)                                        \
        pushl   mem ;                                           \
        movl    $0,mem ;                                        \
        popfl ;                                                 \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

#endif  /* SMP */

#else /* LOCORE */

/*
 * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
 * to disable/restore interrupts even if it doesn't spin.
 */
struct spinlock {
        volatile int    opaque;
};

typedef struct spinlock *spinlock_t;

void    mpintr_lock(void);      /* disables int / spinlock combo */
void    mpintr_unlock(void);
void    com_lock(void);         /* disables int / spinlock combo */
void    com_unlock(void);
void    imen_lock(void);        /* disables int / spinlock combo */
void    imen_unlock(void);
void    clock_lock(void);       /* disables int / spinlock combo */
void    clock_unlock(void);
void    cons_lock(void);        /* disables int / spinlock combo */
void    cons_unlock(void);

extern struct spinlock smp_rv_spinlock;

void    spin_lock(spinlock_t lock);
void    spin_lock_np(spinlock_t lock);
void    spin_unlock(spinlock_t lock);
void    spin_unlock_np(spinlock_t lock);
#if 0
void    spin_lock_init(spinlock_t lock);
#endif

/*
 * Inline versions of the spinlock routines -- these override the
 * assembly versions.  Only unlock and init here, please.
 */
static __inline void
spin_lock_init(spinlock_t lock)
{
        lock->opaque = 0;
}
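
/*
 * Typical usage, as a minimal sketch (the lock and function names are
 * hypothetical): spin_lock() also disables interrupts and spin_unlock()
 * restores them, on both UP and SMP.
 */
#if 0
static struct spinlock example_lock;

static void
example_spin_user(void)
{
        spin_lock_init(&example_lock);
        spin_lock(&example_lock);
        /* ... critical section ... */
        spin_unlock(&example_lock);
}
#endif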

/*
 * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
 * but we leave a few functions intact as macros for convenience.
 */
#ifdef SMP

void    get_mplock(void);
int     try_mplock(void);
void    rel_mplock(void);
int     cpu_try_mplock(void);
#if 0
void    cpu_rel_mplock(void);
#endif
void    cpu_get_initial_mplock(void);

extern u_int    mp_lock;

#define MP_LOCK_HELD()          (mp_lock == mycpu->gd_cpuid)
#define ASSERT_MP_LOCK_HELD()   KKASSERT(MP_LOCK_HELD())

static __inline void
cpu_rel_mplock(void)
{
        mp_lock = MP_FREE_LOCK;
}
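
/*
 * Typical MP lock usage, as a minimal sketch (the function name is
 * hypothetical): under UP these calls compile away via the macros
 * below.
 */
#if 0
static void
example_mplock_user(void)
{
        get_mplock();
        ASSERT_MP_LOCK_HELD();
        /* ... code that requires the MP lock ... */
        rel_mplock();
}
#endif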

#else

#define get_mplock()
#define try_mplock()            1
#define rel_mplock()
#define ASSERT_MP_LOCK_HELD()

#endif  /* SMP */
#endif  /* LOCORE */
#endif  /* !_MACHINE_LOCK_H_ */