AMD64 - Fix many compile-time warnings. int/ptr type mismatches, %llx, etc.

sys/kern/kern_upcall.c

/*
 * Copyright (c) 2003,2004,2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_upcall.c,v 1.11 2006/09/10 21:35:10 dillon Exp $
 */

/*
 * Implement upcall registration and dispatch.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/upcall.h>
#include <sys/thread2.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/lock.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/cpu.h>

MALLOC_DEFINE(M_UPCALL, "upcalls", "upcall registration structures");

#ifdef SMP

/*
 * IPI callback: deliver the upcall signal on the target cpu, but only
 * if the target lwp is still the one preempted there.
 */
static void
sigupcall_remote(void *arg)
{
        struct lwp *lp = arg;

        if (lp == lwkt_preempted_proc())
                sigupcall();
}

#endif

/*
 * upc_register:
 *
 *	Register an upcall context wrapper and procedure.  Note that the
 *	upcall context is set globally for the process, not for each upcall.
 *	(A hypothetical userland usage sketch follows this function.)
 *
 * ARGS(struct upcall *upc, upcall_func_t ctx, upcall_func_t func, void *data)
 */
int
sys_upc_register(struct upc_register_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct vmspace *vm = curproc->p_vmspace;
        struct vmupcall *vu;

        if (vm->vm_upccount >= UPCALL_MAXCOUNT)
                return(EFBIG);

        vu = kmalloc(sizeof(struct vmupcall), M_UPCALL, M_WAITOK|M_ZERO);
        vu->vu_ctx = uap->ctxfunc;
        vu->vu_func = uap->func;
        vu->vu_data = uap->data;
        vu->vu_lwp = lp;
        lp->lwp_upcall = uap->upc;

        if (vm->vm_upcalls != NULL)
                vu->vu_id = vm->vm_upcalls->vu_id + 1;
        else
                vu->vu_id = UPC_RESERVED;
        vu->vu_next = vm->vm_upcalls;
        vm->vm_upcalls = vu;
        ++vm->vm_upccount;
        uap->sysmsg_result = vu->vu_id;
        return(0);
}
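
/*
 * Illustrative userland sketch of registering an upcall via the system
 * call above.  Not compiled (#if 0): the wrapper/handler symbols are
 * hypothetical, and the userland prototype for upc_register() is
 * assumed to match the ARGS() line above, returning the new upcall id.
 */
#if 0
#include <sys/upcall.h>

static struct upcall upc;               /* process-global upcall context */

extern void ctx_wrapper(void);          /* MD context wrapper (hypothetical) */
static void handler(void *data);        /* upcall procedure (hypothetical) */

static int
register_my_upcall(void)
{
        /*
         * The returned id is unique per vmspace and starts at
         * UPC_RESERVED; the context wrapper is installed process-wide.
         */
        return (upc_register(&upc, (upcall_func_t)ctx_wrapper,
                             (upcall_func_t)handler, NULL));
}
#endif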

/*
 * upc_control:
 *
 * ARGS(int cmd, int upcid, void *data)
 */
int
sys_upc_control(struct upc_control_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct lwp *targlp;
        struct vmspace *vms = curproc->p_vmspace;
        struct vmupcall *vu;
        struct vmupcall *vu_send;
        struct vmupcall **vupp;
        int error;

        switch(uap->cmd) {
        case UPC_CONTROL_DISPATCH:
                /*
                 * Dispatch the specified upcall id, or the next pending id
                 * if -1.  The upcall will be marked pending, but an actual
                 * upcall will only occur if userland is not in a critical
                 * section and the userland pending bit is not set.
                 *
                 * You can dispatch an upcall associated with your process
                 * or another process sharing the same VM space.
                 */
                error = (uap->upcid == -1) ? 0 : ENOENT;
                for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
                        if (vu->vu_id == uap->upcid ||
                            (uap->upcid == -1 &&
                             vu->vu_pending >= (int)(intptr_t)uap->data &&
                             vu->vu_lwp == lp)
                        ) {
                                if (vu->vu_pending < (int)(intptr_t)uap->data)
                                        vu->vu_pending = (int)(intptr_t)uap->data;
                                error = 0;
                                targlp = vu->vu_lwp;
                                targlp->lwp_proc->p_flag |= P_UPCALLPEND; /* XXX lwp flags */
                                if (targlp->lwp_proc->p_flag & P_UPCALLWAIT)
                                        wakeup(&targlp->lwp_upcall);
#ifdef SMP
                                if (targlp->lwp_thread->td_gd != mycpu) {
                                        lwkt_send_ipiq(targlp->lwp_thread->td_gd,
                                                       sigupcall_remote, targlp);
                                } else {
                                        sigupcall();
                                }
#else
                                sigupcall();
#endif
                                break;
                        }
                }
                break;
        case UPC_CONTROL_NEXT:
                /*
                 * This is used by the context code to fetch the next
                 * pending upcall.  The context code has two choices:
                 * (A) it can drop upcall->crit_count and set
                 * upcall->pending, then make this call unconditionally,
                 * or (B) it can drop upcall->crit_count and then test
                 * upcall->pending and only make this call if
                 * upcall->pending is set.  If upcall->pending is clear
                 * the context code can pop the upcall stack itself and
                 * return without entering the kernel again.  (B) is more
                 * efficient but leaves a small window of opportunity
                 * where multiple upcalls can push down the stack.
                 * (A sketch of strategy (B) follows this function.)
                 *
                 * If another upcall is pending the crit_count will be
                 * bumped and the function, data, and context pointers
                 * will be returned in registers (C cannot call this
                 * routine).  If no more upcalls are pending the pending
                 * bit will be cleared and the 'data' argument is expected
                 * to be pointing at the upcall context, which we will
                 * then pop, returning to the original code that was
                 * interrupted (NOT the context code).
                 */
                vu_send = NULL;
                for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
                        if (vu->vu_lwp == lp && vu->vu_pending) {
                                if (vu_send)
                                        break;
                                vu_send = vu;
                        }
                }
                /*
                 * vu_send may be NULL, indicating that no more upcalls are
                 * pending for this cpu.  We set the userland pending bit
                 * based on whether additional upcalls are pending or not
                 * (vu is only non-NULL here if the loop above found a
                 * second pending upcall).
                 */
                error = fetchupcall(vu_send, vu != NULL, uap->data);
                break;
        case UPC_CONTROL_DELETE:
                /*
                 * Delete the specified upcall id.  If the upcall id is -1,
                 * delete all upcall ids associated with the current
                 * process.
                 */
                error = (uap->upcid == -1) ? 0 : ENOENT;
                vupp = &vms->vm_upcalls;
                while ((vu = *vupp) != NULL) {
                        if (vu->vu_id == uap->upcid ||
                            (uap->upcid == -1 && vu->vu_lwp == lp)
                        ) {
                                *vupp = vu->vu_next;
                                error = 0;
                                kfree(vu, M_UPCALL);
                        } else {
                                vupp = &vu->vu_next;
                        }
                }
                break;
        case UPC_CONTROL_POLL:
        case UPC_CONTROL_POLLANDCLEAR:
        case UPC_CONTROL_WAIT:
                /*
                 * If upcid is -1, poll for the first pending upcall and
                 * return its id, or 0 if no upcalls are pending.
                 *
                 * If upcid names a particular upcall, poll that upcall
                 * and return its pending status (0 or 1).  For
                 * POLLANDCLEAR, also clear the pending status.  The
                 * userland pending bit is not modified by this call
                 * (maybe we should modify it for poll-and-clear).
                 * (A usage sketch appears at the end of this file.)
                 */
                error = (uap->upcid == -1) ? 0 : ENOENT;
                for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
                        if (vu->vu_id == uap->upcid ||
                            (uap->upcid == -1 &&
                             vu->vu_pending >= (int)(intptr_t)uap->data &&
                             vu->vu_lwp == lp)
                        ) {
                                error = 0;
                                if (uap->upcid == -1)
                                        uap->sysmsg_result = vu->vu_id;
                                else
                                        uap->sysmsg_result = vu->vu_pending;
                                if (uap->cmd == UPC_CONTROL_POLLANDCLEAR)
                                        vu->vu_pending = 0;
                                break;
                        }
                }
                if (uap->cmd == UPC_CONTROL_WAIT && vu == NULL) {
                        lp->lwp_proc->p_flag |= P_UPCALLWAIT; /* XXX lwp flags */
                        tsleep(&lp->lwp_upcall, PCATCH, "wupcall", 0);
                        lp->lwp_proc->p_flag &= ~P_UPCALLWAIT; /* XXX lwp flags */
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return(error);
}
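
/*
 * Hypothetical sketch of the userland context code using strategy (B)
 * from the UPC_CONTROL_NEXT comment above.  Not compiled (#if 0) and
 * heavily hedged: the struct upcall field names (upc_pending,
 * upc_critoff) and the helper md_pop_upcall_frame() are assumptions,
 * and the real UPC_CONTROL_NEXT transition must be written in assembly
 * because the kernel returns the next function/data/context in
 * registers.  This shows control flow only.
 */
#if 0
void
context_code(struct upcall *upc, void *saved_ctx)
{
        /* ... the dispatched upcall function has just returned ... */

        --upc->upc_critoff;             /* leave the critical section */
        if (upc->upc_pending == 0) {
                /*
                 * Nothing else is pending: pop the upcall stack frame
                 * ourselves (machine-dependent) and resume the
                 * interrupted code without re-entering the kernel.
                 */
                md_pop_upcall_frame(saved_ctx);
        } else {
                /*
                 * More upcalls are pending; fetch the next one.  If none
                 * remains by the time the kernel looks, it pops the
                 * context pointed to by 'data' for us.
                 */
                upc_control(UPC_CONTROL_NEXT, -1, saved_ctx);
        }
}
#endif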

/*
 * Release all upcalls associated with the specified lwp.
 */
void
upc_release(struct vmspace *vm, struct lwp *lp)
{
        struct vmupcall **vupp;
        struct vmupcall *vu;

        vupp = &vm->vm_upcalls;
        while ((vu = *vupp) != NULL) {
                if (vu->vu_lwp == lp) {
                        *vupp = vu->vu_next;
                        kfree(vu, M_UPCALL);
                        --vm->vm_upccount;
                } else {
                        vupp = &vu->vu_next;
                }
        }
}

/*
 * XXX eventually we should sort by vu_pending priority and dispatch
 * the highest priority upcall first.
 */
void
postupcall(struct lwp *lp)
{
        struct vmspace *vm = lp->lwp_proc->p_vmspace;
        struct vmupcall *vu;
        struct vmupcall *vu_send = NULL;

        for (vu = vm->vm_upcalls; vu; vu = vu->vu_next) {
                if (vu->vu_lwp == lp && vu->vu_pending) {
                        if (vu_send) {
                                sendupcall(vu, 1);
                                return;
                        }
                        vu_send = vu;
                }
        }
        if (vu_send)
                sendupcall(vu_send, 0);
}
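
/*
 * Hypothetical userland sketch of the POLL/POLLANDCLEAR/WAIT controls
 * handled in sys_upc_control() above.  Not compiled (#if 0); the
 * userland upc_control() prototype, and POLL returning the upcall id
 * via sysmsg_result, are assumptions based on the kernel code above.
 */
#if 0
#include <sys/upcall.h>

static int
wait_and_clear_upcall(void)
{
        int id;

        /*
         * Block (interruptibly) until an upcall owned by this lwp is
         * marked pending; returns immediately if one already is.
         */
        upc_control(UPC_CONTROL_WAIT, -1, NULL);

        /* Fetch the id of the first pending upcall (0 if none). */
        id = upc_control(UPC_CONTROL_POLL, -1, NULL);

        /* Clear its pending count; the userland pending bit is untouched. */
        if (id > 0)
                upc_control(UPC_CONTROL_POLLANDCLEAR, id, NULL);
        return (id);
}
#endif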