2 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * $DragonFly: src/sys/kern/kern_upcall.c,v 1.4 2003/12/07 04:20:40 dillon Exp $
30 * Implement upcall registration and dispatch.
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/upcall.h>
38 #include <sys/thread2.h>
39 #include <sys/upcall.h>
40 #include <sys/malloc.h>
41 #include <sys/sysproto.h>
43 #include <sys/signalvar.h>
46 #include <vm/vm_param.h>
47 #include <vm/vm_kern.h>
49 #include <vm/vm_map.h>
51 #include <machine/cpu.h>
/*
 * Kernel malloc type used for the struct vmupcall records allocated by
 * upc_register() below.
 */
53 MALLOC_DEFINE(M_UPCALL, "upcalls", "upcall registration structures");
/*
 * sigupcall_remote - cross-cpu callback used by upc_control()'s DISPATCH
 * path (sent via lwkt_send_ipiq() when the target process is running on
 * another cpu).
 *
 * NOTE(review): this chunk is a sampled view; the return type, braces,
 * the declaration of 'p', and the if-body are elided here.  The guard
 * checks that 'p' is the process currently preempted on this cpu --
 * presumably the elided body then delivers the upcall signal; TODO
 * confirm against the full source.
 */
58 sigupcall_remote(void *arg)
61 if (p == lwkt_preempted_proc())
70 * Register an upcall context wrapper and procedure. Note that the
71 * upcall context is set globally for the process, not for each upcall.
73 * ARGS(struct upcall *upc, upcall_func_t ctx, upcall_func_t func, void *data)
/*
 * upc_register system call: register an upcall context wrapper and
 * upcall function for the current process.
 *
 * ARGS(struct upc_register_args *uap) carrying upc/ctxfunc/func/data.
 * Returns the new upcall id to userland via uap->sysmsg_result.
 *
 * NOTE(review): sampled view -- braces, the error return for the limit
 * check, the list head assignment and the vm_upccount increment are
 * elided between the visible lines.
 */
76 upc_register(struct upc_register_args *uap)
78 struct proc *p = curproc;
79 struct vmspace *vm = p->p_vmspace;
/* Refuse registration once the per-vmspace limit is reached. */
82 if (vm->vm_upccount >= UPCALL_MAXCOUNT)
/* M_ZERO: every field not explicitly assigned below starts as 0/NULL. */
85 vu = malloc(sizeof(struct vmupcall), M_UPCALL, M_WAITOK|M_ZERO);
86 vu->vu_ctx = uap->ctxfunc;
87 vu->vu_func = uap->func;
88 vu->vu_data = uap->data;
/* The upcall context is recorded per-process, not per-upcall. */
90 p->p_upcall = uap->upc;
/* New ids are derived from the current list head's id + 1. */
92 if (vm->vm_upcalls != NULL)
93 vu->vu_id = vm->vm_upcalls->vu_id + 1;
/* Link the new registration at the head of the vmspace's list. */
96 vu->vu_next = vm->vm_upcalls;
99 uap->sysmsg_result = vu->vu_id;
106 * ARGS(int cmd, int upcid, void *data)
/*
 * upc_control system call: dispatch, iterate, delete, or poll upcalls
 * registered in the current process's vmspace.
 *
 * ARGS(int cmd, int upcid, void *data)
 *
 * NOTE(review): sampled view of the original file -- the switch
 * statement open, closing braces, break statements, and several
 * assignments (e.g. where 'targp' and 'vu_send' are set) are elided
 * between the visible lines.
 */
109 upc_control(struct upc_control_args *uap)
111 struct proc *p = curproc;
113 struct vmspace *vms = p->p_vmspace;
115 struct vmupcall *vu_send;
116 struct vmupcall **vupp;
120 case UPC_CONTROL_DISPATCH:
122 * Dispatch the specified upcall id or the next pending id if -1.
123 * the upcall will be marked pending but an actual upcall will only
124 * occur if userland is not in a critical section and the userland
125 * pending bit is not set.
127 * You can dispatch an upcall associated with your process or another
128 * process sharing the same VM space.
130 error = (uap->upcid == -1) ? 0 : ENOENT;
131 for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
132 if (vu->vu_id == uap->upcid ||
133 (uap->upcid == -1 && vu->vu_pending >= (int)uap->data && vu->vu_proc == p)
/* Raise the pending priority; never lower an already-higher one. */
135 if (vu->vu_pending < (int)uap->data)
136 vu->vu_pending = (int)uap->data;
/* NOTE(review): 'targp' is presumably set from vu->vu_proc in elided code -- TODO confirm. */
139 targp->p_flag |= P_UPCALLPEND;
/* Wake a UPC_CONTROL_WAIT sleeper (see tsleep on p_upcall below). */
140 if (targp->p_flag & P_UPCALLWAIT)
141 wakeup(&targp->p_upcall);
/* Target runs on another cpu: deliver via IPI to sigupcall_remote(). */
143 if (targp->p_thread->td_gd != mycpu)
144 lwkt_send_ipiq(targp->p_thread->td_gd->gd_cpuid, sigupcall_remote, targp);
154 case UPC_CONTROL_NEXT:
156 * This is used by the context code to fetch the next pending upcall.
157 * The context code has two choices: (A) it can drop
158 * upcall->crit_count and set upcall->pending then make this call
159 * unconditionally or * (B) it can drop upcall->crit_count and then
160 * test upcall->pending and only make this call if upcall->pending
161 * is set. If upcall->pending is clear the context code can pop
162 * the upcall stack itself and return without entering into the kernel
163 * again. (B) is more efficient but leaves a small window of
164 * opportunity where multiple upcalls can pushdown the stack.
166 * If another upcall is pending the crit_count will be bumped and
167 * the function, data, and context pointers will be returned in
168 * registers (C cannot call this routine). If no more upcalls are
169 * pending the pending bit will be cleared and the 'data' argument
170 * is expected to be pointing at the upcall context which we will
171 * then pop, returning to the original code that was interrupted
172 * (NOT the context code).
/* Scan for the first pending upcall owned by this process. */
175 for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
176 if (vu->vu_proc == p && vu->vu_pending) {
183 * vu_send may be NULL, indicating that no more upcalls are pending
184 * for this cpu. We set the userland pending bit based on whether
185 * additional upcalls are pending or not.
187 error = fetchupcall(vu_send, vu != NULL, uap->data);
189 case UPC_CONTROL_DELETE:
191 * Delete the specified upcall id. If the upcall id is -1, delete
192 * all upcall id's associated with the current process.
194 error = (uap->upcid == -1) ? 0 : ENOENT;
/* Indirect-pointer walk so matches can be unlinked in place. */
195 vupp = &vms->vm_upcalls;
196 while ((vu = *vupp) != NULL) {
197 if (vu->vu_id == uap->upcid ||
198 (uap->upcid == -1 && vu->vu_proc == p)
208 case UPC_CONTROL_POLL:
209 case UPC_CONTROL_POLLANDCLEAR:
210 case UPC_CONTROL_WAIT:
212 * If upcid is -1 poll for the first pending upcall and return the
213 * id or 0 if no upcalls are pending.
215 * If upcid is a particular upcall then poll that upcall and return
216 * its pending status (0 or 1). For POLLANDCLEAR, also clear the
217 * pending status. The userland pending bit is not modified by
218 * this call (maybe we should modify it for poll-and-clear).
220 error = (uap->upcid == -1) ? 0 : ENOENT;
221 for (vu = vms->vm_upcalls; vu; vu = vu->vu_next) {
222 if (vu->vu_id == uap->upcid ||
223 (uap->upcid == -1 && vu->vu_pending >= (int)uap->data && vu->vu_proc == p)
/* upcid == -1: report which upcall matched; otherwise its status. */
226 if (uap->upcid == -1)
227 uap->sysmsg_result = vu->vu_id;
229 uap->sysmsg_result = vu->vu_pending;
230 if (uap->cmd == UPC_CONTROL_POLLANDCLEAR)
/* WAIT with nothing pending: sleep until a DISPATCH wakes us up. */
235 if (uap->cmd == UPC_CONTROL_WAIT && vu == NULL) {
236 p->p_flag |= P_UPCALLWAIT;
237 tsleep(&p->p_upcall, PCATCH, "wupcall", 0);
238 p->p_flag &= ~P_UPCALLWAIT;
/*
 * upc_release - remove every upcall registered by process p from the
 * vmspace's vm_upcalls list (called presumably at process/vmspace
 * teardown -- caller not visible in this chunk).
 *
 * NOTE(review): sampled view -- the unlink/free code inside the if,
 * the 'vu' declaration, and the loop's advance path are elided here.
 */
249 upc_release(struct vmspace *vm, struct proc *p)
251 struct vmupcall **vupp;
254 vupp = &vm->vm_upcalls;
/* Indirect-pointer walk so matching entries can be unlinked in place. */
255 while ((vu = *vupp) != NULL) {
256 if (vu->vu_proc == p) {
267 * XXX eventually we should sort by vu_pending priority and dispatch
268 * the highest priority upcall first.
/*
 * postupcall - locate a pending upcall for process p and deliver it via
 * sendupcall().
 *
 * NOTE(review): sampled view -- the assignment of vu_send inside the
 * loop and the loop/function braces are elided here.  Per the XXX on
 * the original file, selection is first-match rather than sorted by
 * vu_pending priority.
 */
271 postupcall(struct proc *p)
273 struct vmspace *vm = p->p_vmspace;
275 struct vmupcall *vu_send = NULL;
/* First pending upcall owned by this process wins. */
277 for (vu = vm->vm_upcalls; vu; vu = vu->vu_next) {
278 if (vu->vu_proc == p && vu->vu_pending) {
287 sendupcall(vu_send, 0);