/*
 * Copyright (c) 2003, Matthew Dillon, All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
 * $DragonFly: src/sys/i386/include/Attic/lock.h,v 1.6 2003/08/07 21:17:22 dillon Exp $
 */

#ifndef _MACHINE_LOCK_H_
#define _MACHINE_LOCK_H_

#ifndef _MACHINE_PSL_H_
#include "psl.h"
#endif

/*
 * MP_FREE_LOCK is used by both assembly and C under SMP.
 */
#ifdef SMP
#define MP_FREE_LOCK            0xffffffff      /* value of lock when free */
#endif

#ifdef LOCORE

/*
 * Spinlock assembly support.  Note: %eax and %ecx may be clobbered;
 * no other register will be.  Note that these routines are sometimes
 * called with (%edx) as the mem argument.
 *
 * Under UP the spinlock routines still serve to disable/restore
 * interrupts even though there is nothing to spin on.
 */

#ifdef SMP

#define SPIN_INIT(mem)                                          \
        movl    $0,mem ;                                        \

#define SPIN_INIT_NOREG(mem)                                    \
        SPIN_INIT(mem) ;                                        \

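/*
 * SPIN_LOCK saves the caller's EFLAGS in %ecx (with PSL_C forced on so
 * the stored value can never equal 0, the free state), disables
 * interrupts, then spins with a locked cmpxchg until the lock word
 * changes from 0 to the saved flags.  SPIN_UNLOCK pushes the lock word,
 * marks the lock free again, and pops the saved flags back into EFLAGS
 * to restore the caller's original interrupt state.
 */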
#define SPIN_LOCK(mem)                                          \
        pushfl ;                                                \
        popl    %ecx ;          /* flags */                     \
        cli ;                                                   \
        orl     $PSL_C,%ecx ;   /* make sure non-zero */        \
7: ;                                                            \
        movl    $0,%eax ;       /* expected contents of lock */ \
        lock cmpxchgl %ecx,mem ; /* Z=1 (jz) on success */      \
        jnz     7b ;                                            \

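/*
 * The _NOREG forms preserve the scratch registers (%ecx and %eax) on
 * the stack around the lock operation, for call sites that cannot
 * afford to have them clobbered.  SPIN_LOCK_FRAME_SIZE is the stack
 * space the save area consumes.
 */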
#define SPIN_LOCK_PUSH_REGS                                     \
        subl    $8,%esp ;                                       \
        movl    %ecx,(%esp) ;                                   \
        movl    %eax,4(%esp) ;                                  \

#define SPIN_LOCK_POP_REGS                                      \
        movl    (%esp),%ecx ;                                   \
        movl    4(%esp),%eax ;                                  \
        addl    $8,%esp ;                                       \

#define SPIN_LOCK_FRAME_SIZE    8

#define SPIN_LOCK_NOREG(mem)                                    \
        SPIN_LOCK_PUSH_REGS ;                                   \
        SPIN_LOCK(mem) ;                                        \
        SPIN_LOCK_POP_REGS ;                                    \

#define SPIN_UNLOCK(mem)                                        \
        pushl   mem ;                                           \
        movl    $0,mem ;                                        \
        popfl ;                                                 \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

#define SPIN_UNLOCK_NOREG(mem)                                  \
        SPIN_UNLOCK(mem) ;                                      \

#else

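/*
 * Under UP there is nothing to spin on: SPIN_LOCK just disables
 * interrupts and records the caller's EFLAGS (with PSL_C set so the
 * word is non-zero) in the lock word, which SPIN_UNLOCK later pops
 * back into EFLAGS to restore the original interrupt state.
 */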
#define SPIN_LOCK(mem)                                          \
        pushfl ;                                                \
        cli ;                                                   \
        orl     $PSL_C,(%esp) ;                                 \
        popl    mem ;                                           \

#define SPIN_LOCK_PUSH_REGS
#define SPIN_LOCK_POP_REGS
#define SPIN_LOCK_FRAME_SIZE    0

#define SPIN_UNLOCK(mem)                                        \
        pushl   mem ;                                           \
        movl    $0,mem ;                                        \
        popfl ;                                                 \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

#endif  /* SMP */

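/*
 * Illustrative sketch only (not part of the original file): a
 * hypothetical assembly routine bracketing a critical section with the
 * macros above.  The symbols bump_counter, mylock and mycounter are
 * invented for the example; remember that %eax and %ecx may be
 * clobbered unless the _NOREG forms are used.
 *
 *              .data
 *      mylock:         .long   0
 *      mycounter:      .long   0
 *              .text
 *      ENTRY(bump_counter)
 *              SPIN_LOCK(mylock)
 *              incl    mycounter
 *              SPIN_UNLOCK(mylock)
 *              ret
 */
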
#else   /* !LOCORE */

#ifdef _KERNEL

/*
 * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
 * to disable/restore interrupts even if it doesn't spin.
 */
struct spinlock {
        volatile int    opaque;
};

typedef struct spinlock *spinlock_t;

void    mpintr_lock(void);      /* disables int / spinlock combo */
void    mpintr_unlock(void);
void    com_lock(void);         /* disables int / spinlock combo */
void    com_unlock(void);
void    imen_lock(void);        /* disables int / spinlock combo */
void    imen_unlock(void);
void    clock_lock(void);       /* disables int / spinlock combo */
void    clock_unlock(void);
void    cons_lock(void);        /* disables int / spinlock combo */
void    cons_unlock(void);

extern struct spinlock smp_rv_spinlock;

void    spin_lock(spinlock_t lock);
void    spin_lock_np(spinlock_t lock);
void    spin_unlock(spinlock_t lock);
void    spin_unlock_np(spinlock_t lock);
#if 0
void    spin_lock_init(spinlock_t lock);
#endif

/*
 * Inline version of spinlock routines -- overrides assembly.  Only unlock
 * and init here please.
 */
static __inline void
spin_lock_init(spinlock_t lock)
{
        lock->opaque = 0;
}

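/*
 * Illustrative sketch only (not part of the original file): typical use
 * of the C spinlock API declared above.  'example_lock' is an invented
 * name; critical sections should be kept short since interrupts are
 * disabled while the lock is held.
 *
 *      static struct spinlock example_lock;
 *
 *      spin_lock_init(&example_lock);
 *      spin_lock(&example_lock);
 *      ... short critical section ...
 *      spin_unlock(&example_lock);
 */
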
/*
 * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
 * but we leave a few functions intact as macros for convenience.
 */
#ifdef SMP

void    get_mplock(void);
int     try_mplock(void);
void    rel_mplock(void);
int     cpu_try_mplock(void);
#if 0
void    cpu_rel_mplock(void);
#endif
void    cpu_get_initial_mplock(void);

extern u_int    mp_lock;

#define MP_LOCK_HELD()          (mp_lock == mycpu->gd_cpuid)
#define ASSERT_MP_LOCK_HELD()   KKASSERT(MP_LOCK_HELD())

static __inline void
cpu_rel_mplock(void)
{
        mp_lock = MP_FREE_LOCK;
}

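/*
 * Illustrative sketch only (not part of the original file): the usual
 * acquire/assert/release pattern for the MP lock.  Under UP the macros
 * below turn this into a no-op.
 *
 *      get_mplock();
 *      ... code that requires the MP lock ...
 *      ASSERT_MP_LOCK_HELD();
 *      rel_mplock();
 */
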
#else

#define get_mplock()
#define try_mplock()    1
#define rel_mplock()
#define ASSERT_MP_LOCK_HELD()

#endif  /* SMP */
#endif  /* _KERNEL */
#endif  /* LOCORE */
#endif  /* !_MACHINE_LOCK_H_ */