/*
 * Copyright (c) 2003,2008 The DragonFly Project.
 * Copyright (c) 2003 Matthew Dillon.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
 * $DragonFly: src/sys/platform/pc64/include/lock.h,v 1.4 2008/08/29 17:07:17 dillon Exp $
 */

#ifndef _MACHINE_LOCK_H_
#define _MACHINE_LOCK_H_

#ifndef _CPU_PSL_H_
#include <machine/psl.h>
#endif

/*
 * MP_FREE_LOCK is used by both assembly and C under SMP.
 */
#ifdef SMP
#define MP_FREE_LOCK            0xffffffff      /* value of lock when free */
#endif

#ifdef LOCORE

/*
 * Spinlock assembly support.  Note: rax and rcx can be tromped.  No
 * other register will be.  Note that these routines are sometimes
 * called with (%rdx) as the mem argument.
 *
 * Under UP the spinlock routines still serve to disable/restore
 * interrupts.
 */

#ifdef SMP

#define SPIN_INIT(mem)                                          \
        movq    $0,mem ;                                        \

#define SPIN_INIT_NOREG(mem)                                    \
        SPIN_INIT(mem) ;                                        \

#define SPIN_LOCK(mem)                                          \
        pushfq ;                                                \
        popq    %rcx ;          /* flags */                     \
        cli ;                                                   \
        orq     $PSL_C,%rcx ;   /* make sure non-zero */        \
7: ;                                                            \
        movq    $0,%rax ;       /* expected contents of lock */ \
        lock cmpxchgq %rcx,mem ; /* Z=1 (jz) on success */      \
        jnz     7b ;                                            \

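/*
 * Rough C equivalent of the SMP SPIN_LOCK above (an illustrative sketch
 * only; spin_lock_sketch() is hypothetical and not part of this header).
 * The saved rflags value, with PSL_C forced on so it can never be zero,
 * doubles as the lock value, which lets SPIN_UNLOCK restore the caller's
 * interrupt state simply by popping the lock word back into rflags.
 *
 *	static void
 *	spin_lock_sketch(volatile u_long *mem)
 *	{
 *		u_long flags = read_rflags() | PSL_C;	// non-zero by design
 *
 *		cpu_disable_intr();			// the "cli"
 *		while (atomic_cmpset_long(mem, 0, flags) == 0)
 *			;	// spin until the lock word reads 0
 *	}
 */
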
#define SPIN_LOCK_PUSH_REGS                                     \
        subq    $2*8,%rsp ;                                     \
        movq    %rcx,(%rsp) ;                                   \
        movq    %rax,8(%rsp) ;                                  \

#define SPIN_LOCK_POP_REGS                                      \
        movq    (%rsp),%rcx ;                                   \
        movq    8(%rsp),%rax ;                                  \
        addq    $2*8,%rsp ;                                     \

#define SPIN_LOCK_FRAME_SIZE    8

#define SPIN_LOCK_NOREG(mem)                                    \
        SPIN_LOCK_PUSH_REGS ;                                   \
        SPIN_LOCK(mem) ;                                        \
        SPIN_LOCK_POP_REGS ;                                    \

#define SPIN_UNLOCK(mem)                                        \
        pushq   mem ;           /* flags saved by SPIN_LOCK */  \
        movq    $0,mem ;        /* release the lock */          \
        popfq ;                 /* restore interrupt state */   \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

#define SPIN_UNLOCK_NOREG(mem)                                  \
        SPIN_UNLOCK(mem) ;                                      \

#else /* !SMP */

#define SPIN_LOCK(mem)                                          \
        pushfq ;                                                \
        cli ;                                                   \
        orq     $PSL_C,(%rsp) ; /* make sure non-zero */        \
        popq    mem ;           /* save flags in lock word */   \

#define SPIN_LOCK_PUSH_REGS
#define SPIN_LOCK_POP_REGS
#define SPIN_LOCK_FRAME_SIZE    0

#define SPIN_UNLOCK(mem)                                        \
        pushq   mem ;           /* flags saved by SPIN_LOCK */  \
        movq    $0,mem ;                                        \
        popfq ;                 /* restore interrupt state */   \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

#endif  /* SMP */

#else   /* !LOCORE */

#ifdef _KERNEL

/*
 * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
 * to disable/restore interrupts even if it doesn't spin.
 */
struct spinlock_deprecated {
        volatile long   opaque;
};

typedef struct spinlock_deprecated *spinlock_t;

void    mpintr_lock(void);      /* disables int / spinlock combo */
void    mpintr_unlock(void);
void    com_lock(void);         /* disables int / spinlock combo */
void    com_unlock(void);
void    imen_lock(void);        /* disables int / spinlock combo */
void    imen_unlock(void);
void    clock_lock(void);       /* disables int / spinlock combo */
void    clock_unlock(void);

extern struct spinlock_deprecated smp_rv_spinlock;

void    spin_lock_deprecated(spinlock_t);
void    spin_unlock_deprecated(spinlock_t);

/*
 * Inline version of spinlock routines -- overrides assembly.  Only unlock
 * and init here please.
 */
static __inline void
spin_lock_init(spinlock_t lock)
{
        lock->opaque = 0;
}
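
/*
 * Example use of the deprecated spinlock API declared above (a sketch;
 * "mylock" and the surrounding code are hypothetical):
 *
 *	static struct spinlock_deprecated mylock;
 *
 *	spin_lock_init(&mylock);	// lock word starts out free (0)
 *	spin_lock_deprecated(&mylock);	// disables interrupts, spins on SMP
 *	// ... short critical section ...
 *	spin_unlock_deprecated(&mylock); // releases, restores interrupts
 */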

#endif  /* _KERNEL */

#if defined(_KERNEL) || defined(_UTHREAD)

/*
 * MP LOCK functions for SMP and UP.  Under UP the MP lock does not exist
 * but we leave a few functions intact as macros for convenience.
 */
#ifdef SMP

void    get_mplock(void);
int     try_mplock(void);
void    rel_mplock(void);
int     cpu_try_mplock(void);
void    cpu_get_initial_mplock(void);

extern u_int    mp_lock;

#define MP_LOCK_HELD()          (mp_lock == mycpu->gd_cpuid)
#define ASSERT_MP_LOCK_HELD(td)                                         \
        KASSERT(MP_LOCK_HELD(), ("MP_LOCK_HELD(): not held thread %p", td))

static __inline void
cpu_rel_mplock(void)
{
        mp_lock = MP_FREE_LOCK;         /* mark the MP lock free */
}
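
/*
 * Typical MP (giant) lock usage from C code (an illustrative sketch;
 * the enclosing code is hypothetical):
 *
 *	get_mplock();		// acquire the giant MP lock
 *	// ... code that is not yet MP safe ...
 *	rel_mplock();		// release it
 *
 * try_mplock() is the non-blocking variant and returns non-zero on
 * success; under UP these calls compile away via the macros below.
 */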

#else

#define get_mplock()
#define try_mplock()    1
#define rel_mplock()
#define MP_LOCK_HELD()  (!0)
#define ASSERT_MP_LOCK_HELD(td)

#endif  /* SMP */
#endif  /* _KERNEL || _UTHREAD */
#endif  /* LOCORE */
#endif  /* !_MACHINE_LOCK_H_ */