/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
 * $DragonFly: src/sys/platform/pc32/include/lock.h,v 1.9 2004/07/16 05:48:46 dillon Exp $
 */

#ifndef _MACHINE_LOCK_H_
#define _MACHINE_LOCK_H_

#ifndef _MACHINE_PSL_H_
#include "psl.h"
#endif

/*
 * MP_FREE_LOCK is used by both assembly and C under SMP.
 */
#ifdef SMP
#define MP_FREE_LOCK            0xffffffff      /* value of lock when free */
#endif

#ifdef LOCORE

/*
 * Spinlock assembly support.  Note: %eax and %ecx may be clobbered;
 * no other registers are touched.  Note that these routines are
 * sometimes called with (%edx) as the mem argument.
 *
 * Under UP the spinlock routines still serve to disable/restore
 * interrupts even though they do not spin.
 */


#ifdef SMP

#define SPIN_INIT(mem)                                          \
        movl    $0,mem ;                                        \

#define SPIN_INIT_NOREG(mem)                                    \
        SPIN_INIT(mem) ;                                        \

#define SPIN_LOCK(mem)                                          \
        pushfl ;                                                \
        popl    %ecx ;          /* flags */                     \
        cli ;                                                   \
        orl     $PSL_C,%ecx ;   /* make sure non-zero */        \
7: ;                                                            \
        movl    $0,%eax ;       /* expected contents of lock */ \
        lock cmpxchgl %ecx,mem ; /* Z=1 (jz) on success */      \
        jnz     7b ;                                            \

#define SPIN_LOCK_PUSH_REGS                                     \
        subl    $8,%esp ;                                       \
        movl    %ecx,(%esp) ;                                   \
        movl    %eax,4(%esp) ;                                  \

#define SPIN_LOCK_POP_REGS                                      \
        movl    (%esp),%ecx ;                                   \
        movl    4(%esp),%eax ;                                  \
        addl    $8,%esp ;                                       \

#define SPIN_LOCK_FRAME_SIZE    8

#define SPIN_LOCK_NOREG(mem)                                    \
        SPIN_LOCK_PUSH_REGS ;                                   \
        SPIN_LOCK(mem) ;                                        \
        SPIN_LOCK_POP_REGS ;                                    \

#define SPIN_UNLOCK(mem)                                        \
        pushl   mem ;                                           \
        movl    $0,mem ;                                        \
        popfl ;                                                 \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

#define SPIN_UNLOCK_NOREG(mem)                                  \
        SPIN_UNLOCK(mem) ;                                      \

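/*
 * Illustrative sketch (not part of the original header): a rough C
 * rendering of the SMP SPIN_LOCK/SPIN_UNLOCK sequences above.  The
 * read_eflags(), write_eflags() and cpu_disable_intr() helpers are
 * assumed here, and GCC's __sync_bool_compare_and_swap() builtin
 * stands in for the locked cmpxchgl.
 */
#if 0
static __inline void
spin_lock_sketch(volatile int *mem)
{
        int flags;

        flags = read_eflags() | PSL_C;  /* saved flags, forced non-zero */
        cpu_disable_intr();             /* cli */
        while (__sync_bool_compare_and_swap(mem, 0, flags) == 0)
                ;                       /* spin until the lock word was 0 */
}

static __inline void
spin_unlock_sketch(volatile int *mem)
{
        int flags = *mem;               /* flags stashed by the locker */

        *mem = 0;                       /* release the lock */
        write_eflags(flags);            /* popfl, may re-enable interrupts */
}
#endif
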
#else

#define SPIN_LOCK(mem)                                          \
        pushfl ;                                                \
        cli ;                                                   \
        orl     $PSL_C,(%esp) ;                                 \
        popl    mem ;                                           \

#define SPIN_LOCK_PUSH_REGS
#define SPIN_LOCK_POP_REGS
#define SPIN_LOCK_FRAME_SIZE    0

#define SPIN_UNLOCK(mem)                                        \
        pushl   mem ;                                           \
        movl    $0,mem ;                                        \
        popfl ;                                                 \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

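/*
 * Illustrative sketch (not part of the original header): under UP the
 * lock word is simply loaded with the saved (non-zero) EFLAGS image
 * while interrupts are disabled; no atomic operation and no spinning
 * are required.  read_eflags() and cpu_disable_intr() are assumed
 * helpers; unlocking works exactly as in the SMP sketch above.
 */
#if 0
static __inline void
up_spin_lock_sketch(volatile int *mem)
{
        int flags = read_eflags() | PSL_C;      /* force non-zero */

        cpu_disable_intr();                     /* cli */
        *mem = flags;                           /* stash flags in the lock word */
}
#endif
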
#endif  /* SMP */

#else   /* !LOCORE */

#ifdef _KERNEL

/*
 * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
 * to disable/restore interrupts even if it doesn't spin.
 */
struct spinlock {
        volatile int    opaque;
};

typedef struct spinlock *spinlock_t;

void    mpintr_lock(void);      /* disables int / spinlock combo */
void    mpintr_unlock(void);
void    com_lock(void);         /* disables int / spinlock combo */
void    com_unlock(void);
void    imen_lock(void);        /* disables int / spinlock combo */
void    imen_unlock(void);
void    clock_lock(void);       /* disables int / spinlock combo */
void    clock_unlock(void);
void    cons_lock(void);        /* disables int / spinlock combo */
void    cons_unlock(void);

extern struct spinlock smp_rv_spinlock;

void    spin_lock(spinlock_t lock);
void    spin_lock_np(spinlock_t lock);
void    spin_unlock(spinlock_t lock);
void    spin_unlock_np(spinlock_t lock);
#if 0
void    spin_lock_init(spinlock_t lock);
#endif

/*
 * Inline versions of the spinlock routines override the assembly
 * versions.  Only the unlock and init operations should be inlined here.
 */
static __inline void
spin_lock_init(spinlock_t lock)
{
        lock->opaque = 0;
}

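/*
 * Illustrative sketch (not part of the original header): typical use of
 * the spinlock API declared above.  The lock provides mutual exclusion
 * under SMP and disables interrupts for the duration of the critical
 * section; my_lock and my_counter are hypothetical names used only for
 * the example.
 */
#if 0
static struct spinlock my_lock;
static int my_counter;

static void
my_counter_init(void)
{
        spin_lock_init(&my_lock);       /* lock word starts out free (0) */
}

static void
my_counter_bump(void)
{
        spin_lock(&my_lock);            /* disables interrupts, spins on SMP */
        ++my_counter;                   /* critical section */
        spin_unlock(&my_lock);          /* restores interrupt state */
}
#endif
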
#endif  /* _KERNEL */

#if defined(_KERNEL) || defined(_UTHREAD)

/*
 * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
 * but we leave a few functions intact as macros for convenience.
 */
#ifdef SMP

void    get_mplock(void);
int     try_mplock(void);
void    rel_mplock(void);
int     cpu_try_mplock(void);
void    cpu_get_initial_mplock(void);

extern u_int    mp_lock;

#define MP_LOCK_HELD()   (mp_lock == mycpu->gd_cpuid)
#define ASSERT_MP_LOCK_HELD()   KKASSERT(MP_LOCK_HELD())

static __inline void
cpu_rel_mplock(void)
{
        mp_lock = MP_FREE_LOCK;
}

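/*
 * Illustrative sketch (not part of the original header): the real MP
 * lock acquisition is implemented elsewhere in assembly, but a plausible
 * C rendering of cpu_try_mplock() is a compare-and-swap of mp_lock from
 * MP_FREE_LOCK to the caller's cpu id, mirroring cpu_rel_mplock() above.
 * GCC's __sync_bool_compare_and_swap() stands in for the locked cmpxchgl.
 */
#if 0
static __inline int
cpu_try_mplock_sketch(void)
{
        return (__sync_bool_compare_and_swap(&mp_lock, MP_FREE_LOCK,
                                             mycpu->gd_cpuid));
}
#endif
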
#else

#define get_mplock()
#define try_mplock()    1
#define rel_mplock()
#define ASSERT_MP_LOCK_HELD()

#endif  /* SMP */
#endif  /* _KERNEL || _UTHREAD */
#endif  /* LOCORE */
#endif  /* !_MACHINE_LOCK_H_ */