/* sys/platform/pc32/include/lock.h */
1 /*
2  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $FreeBSD: src/sys/i386/include/lock.h,v 1.11.2.2 2000/09/30 02:49:34 ps Exp $
35  */
36
37 #ifndef _MACHINE_LOCK_H_
38 #define _MACHINE_LOCK_H_
39
40 #ifndef _CPU_PSL_H_
41 #include <machine/psl.h>
42 #endif
43
44 #ifdef LOCORE
45
46 /*
47  * Spinlock assembly support.  Note: eax and ecx can be tromped.  No
48  * other register will be.   Note that these routines are sometimes
49  * called with (%edx) as the mem argument.
50  *
51  * Under UP the spinlock routines still serve to disable/restore 
52  * interrupts.
53  */
/*
 * SPIN_INIT(mem): place the lock word in the unlocked state.  Zero is
 * the "free" value SPIN_LOCK's cmpxchg expects below.
 */
#define SPIN_INIT(mem)                                          \
        movl    $0,mem ;                                        \

/*
 * SPIN_INIT_NOREG(mem): register-preserving variant.  SPIN_INIT only
 * writes memory and clobbers no registers, so no save/restore is
 * required.
 */
#define SPIN_INIT_NOREG(mem)                                    \
        SPIN_INIT(mem) ;                                        \

/*
 * SPIN_LOCK(mem): acquire the spinlock at mem, disabling interrupts.
 *
 * The caller's EFLAGS image is saved in %ecx (pushfl/popl) with PSL_C
 * forced on so the value stored into the lock word is always non-zero
 * (non-zero == held); SPIN_UNLOCK later restores that image via popfl.
 * cmpxchg stores %ecx into mem only when mem equals %eax (0 == free)
 * and sets ZF on success.  pause is a spin-wait hint and does not
 * modify flags, so the jnz still tests the cmpxchg result.
 */
#define SPIN_LOCK(mem)                                          \
        pushfl ;                                                \
        popl    %ecx ;          /* flags */                     \
        cli ;                                                   \
        orl     $PSL_C,%ecx ;   /* make sure non-zero */        \
7: ;                                                            \
        movl    $0,%eax ;       /* expected contents of lock */ \
        lock cmpxchgl %ecx,mem ; /* Z=1 (jz) on success */      \
        pause ;                                                 \
        jnz     7b ;                                            \

/*
 * Save/restore the registers SPIN_LOCK tromps (%ecx and %eax) on the
 * stack.  SPIN_LOCK_FRAME_SIZE must match the subl/addl amount used
 * here (8 bytes for the two saved registers).
 */
#define SPIN_LOCK_PUSH_REGS                                     \
        subl    $8,%esp ;                                       \
        movl    %ecx,(%esp) ;                                   \
        movl    %eax,4(%esp) ;                                  \

#define SPIN_LOCK_POP_REGS                                      \
        movl    (%esp),%ecx ;                                   \
        movl    4(%esp),%eax ;                                  \
        addl    $8,%esp ;                                       \

#define SPIN_LOCK_FRAME_SIZE    8
82
/*
 * SPIN_LOCK_NOREG(mem): acquire the spinlock without clobbering any
 * registers by bracketing SPIN_LOCK with the save/restore sequences.
 */
#define SPIN_LOCK_NOREG(mem)                                    \
        SPIN_LOCK_PUSH_REGS ;                                   \
        SPIN_LOCK(mem) ;                                        \
        SPIN_LOCK_POP_REGS ;                                    \

/*
 * SPIN_UNLOCK(mem): release the spinlock and restore the caller's
 * EFLAGS.  The lock word holds the flags image SPIN_LOCK stored;
 * push it, clear the lock word (0 == free) while interrupts are
 * still disabled, then popfl to restore the original interrupt
 * state.  No registers are modified, hence the empty PUSH/POP_REGS
 * and zero frame size below.
 */
#define SPIN_UNLOCK(mem)                                        \
        pushl   mem ;                                           \
        movl    $0,mem ;                                        \
        popfl ;                                                 \

#define SPIN_UNLOCK_PUSH_REGS
#define SPIN_UNLOCK_POP_REGS
#define SPIN_UNLOCK_FRAME_SIZE  0

/*
 * SPIN_UNLOCK_NOREG(mem): release variant matching SPIN_LOCK_NOREG;
 * SPIN_UNLOCK already preserves all registers.
 */
#define SPIN_UNLOCK_NOREG(mem)                                  \
        SPIN_UNLOCK(mem) ;                                      \

100 #else   /* !LOCORE */
101
102 #ifdef _KERNEL
103
104 /*
105  * Spinlock functions (UP and SMP).  Under UP a spinlock still serves
106  * to disable/restore interrupts even if it doesn't spin.
107  */
/*
 * Deprecated C-level spinlock.  opaque is 0 when the lock is free;
 * when held it presumably contains the owner's saved EFLAGS image
 * (the LOCORE SPIN_LOCK macro in this file stores flags|PSL_C) —
 * the asm implementations of the functions below are not visible
 * here, so confirm against the lock support code.
 */
struct spinlock_deprecated {
        volatile int    opaque;
};

typedef struct spinlock_deprecated *spinlock_t;

/* Subsystem-specific interrupt-disable/spinlock combinations. */
void    mpintr_lock(void);      /* disables int / spinlock combo */
void    mpintr_unlock(void);
void    com_lock(void);         /* disables int / spinlock combo */
void    com_unlock(void);
void    imen_lock(void);        /* disables int / spinlock combo */
void    imen_unlock(void);
void    clock_lock(void);       /* disables int / spinlock combo */
void    clock_unlock(void);

/* Generic deprecated-spinlock acquire/release. */
void    spin_lock_deprecated(spinlock_t lock);
void    spin_unlock_deprecated(spinlock_t lock);
125
126 /*
127  * Inline version of spinlock routines -- overrides assembly.  Only unlock
128  * and init here please.
129  */
130 static __inline void
131 spin_lock_init(spinlock_t lock)
132 {
133         lock->opaque = 0;
134 }
135
136 #endif  /* _KERNEL */
137
138 #endif  /* LOCORE */
139 #endif  /* !_MACHINE_LOCK_H_ */