1 /* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
2 /* $DragonFly: src/sys/kern/sysv_sem.c,v 1.17 2006/12/23 00:35:04 swildner Exp $ */
5 * Implementation of SVID semaphores
7 * Author: Daniel Boulet
9 * This software is provided ``AS IS'' without any warranties of any kind.
12 #include "opt_sysvipc.h"
14 #include <sys/param.h>
15 #include <sys/systm.h>
16 #include <sys/sysproto.h>
17 #include <sys/kernel.h>
20 #include <sys/sysent.h>
21 #include <sys/sysctl.h>
22 #include <sys/malloc.h>
25 static MALLOC_DEFINE(M_SEM, "sem", "SVID compatible semaphores");
27 static void seminit (void *);
29 static struct sem_undo *semu_alloc (struct proc *p);
30 static int semundo_adjust (struct proc *p, struct sem_undo **supptr,
31 int semid, int semnum, int adjval);
32 static void semundo_clear (int semid, int semnum);
34 /* XXX casting to (sy_call_t *) is bogus, as usual. */
35 static sy_call_t *semcalls[] = {
36 (sy_call_t *)sys___semctl, (sy_call_t *)sys_semget,
37 (sy_call_t *)sys_semop
40 static int semtot = 0;
41 static struct semid_ds *sema; /* semaphore id pool */
42 static struct sem *sem; /* semaphore pool */
43 static struct sem_undo *semu_list; /* list of active undo structures */
44 static int *semu; /* undo structure pool */
47 u_short semval; /* semaphore value */
48 pid_t sempid; /* pid of last operation */
49 u_short semncnt; /* # awaiting semval > cval */
50 u_short semzcnt; /* # awaiting semval = 0 */
54 * Undo structure (one per process)
57 struct sem_undo *un_next; /* ptr to next active undo structure */
58 struct proc *un_proc; /* owner of this structure */
59 short un_cnt; /* # of active entries */
61 short un_adjval; /* adjust on exit values */
62 short un_num; /* semaphore # */
63 int un_id; /* semid */
64 } un_ent[1]; /* undo entries */
68 * Configuration parameters
71 #define SEMMNI 10 /* # of semaphore identifiers */
74 #define SEMMNS 60 /* # of semaphores in system */
77 #define SEMUME 10 /* max # of undo entries per process */
80 #define SEMMNU 30 /* # of undo structures in system */
83 /* shouldn't need tuning */
85 #define SEMMAP 30 /* # of entries in semaphore map */
88 #define SEMMSL SEMMNS /* max # of semaphores per id */
91 #define SEMOPM 100 /* max # of operations per semop call */
94 #define SEMVMX 32767 /* semaphore maximum value */
95 #define SEMAEM 16384 /* adjust on exit max value */
98 * Due to the way semaphore memory is allocated, we have to ensure that
99 * SEMUSZ is properly aligned.
102 #define SEM_ALIGN(bytes) (((bytes) + (sizeof(long) - 1)) & ~(sizeof(long) - 1))
104 /* actual size of an undo structure */
105 #define SEMUSZ SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME]))
108 * Macro to find a particular sem_undo vector
110 #define SEMU(ix) ((struct sem_undo *)(((intptr_t)semu)+ix * seminfo.semusz))
113 * semaphore info struct
115 struct seminfo seminfo = {
116 SEMMAP, /* # of entries in semaphore map */
117 SEMMNI, /* # of semaphore identifiers */
118 SEMMNS, /* # of semaphores in system */
119 SEMMNU, /* # of undo structures in system */
120 SEMMSL, /* max # of semaphores per id */
121 SEMOPM, /* max # of operations per semop call */
122 SEMUME, /* max # of undo entries per process */
123 SEMUSZ, /* size in bytes of undo structure */
124 SEMVMX, /* semaphore maximum value */
125 SEMAEM /* adjust on exit max value */
128 TUNABLE_INT("kern.ipc.semmap", &seminfo.semmap);
129 TUNABLE_INT("kern.ipc.semmni", &seminfo.semmni);
130 TUNABLE_INT("kern.ipc.semmns", &seminfo.semmns);
131 TUNABLE_INT("kern.ipc.semmnu", &seminfo.semmnu);
132 TUNABLE_INT("kern.ipc.semmsl", &seminfo.semmsl);
133 TUNABLE_INT("kern.ipc.semopm", &seminfo.semopm);
134 TUNABLE_INT("kern.ipc.semume", &seminfo.semume);
135 TUNABLE_INT("kern.ipc.semusz", &seminfo.semusz);
136 TUNABLE_INT("kern.ipc.semvmx", &seminfo.semvmx);
137 TUNABLE_INT("kern.ipc.semaem", &seminfo.semaem);
139 SYSCTL_INT(_kern_ipc, OID_AUTO, semmap, CTLFLAG_RW, &seminfo.semmap, 0, "");
140 SYSCTL_INT(_kern_ipc, OID_AUTO, semmni, CTLFLAG_RD, &seminfo.semmni, 0, "");
141 SYSCTL_INT(_kern_ipc, OID_AUTO, semmns, CTLFLAG_RD, &seminfo.semmns, 0, "");
142 SYSCTL_INT(_kern_ipc, OID_AUTO, semmnu, CTLFLAG_RD, &seminfo.semmnu, 0, "");
143 SYSCTL_INT(_kern_ipc, OID_AUTO, semmsl, CTLFLAG_RW, &seminfo.semmsl, 0, "");
144 SYSCTL_INT(_kern_ipc, OID_AUTO, semopm, CTLFLAG_RD, &seminfo.semopm, 0, "");
145 SYSCTL_INT(_kern_ipc, OID_AUTO, semume, CTLFLAG_RD, &seminfo.semume, 0, "");
146 SYSCTL_INT(_kern_ipc, OID_AUTO, semusz, CTLFLAG_RD, &seminfo.semusz, 0, "");
147 SYSCTL_INT(_kern_ipc, OID_AUTO, semvmx, CTLFLAG_RW, &seminfo.semvmx, 0, "");
148 SYSCTL_INT(_kern_ipc, OID_AUTO, semaem, CTLFLAG_RW, &seminfo.semaem, 0, "");
151 RO seminfo.semmap /* SEMMAP unused */
154 RO seminfo.semmnu /* undo entries per system */
156 RO seminfo.semopm /* SEMOPM unused */
158 RO seminfo.semusz /* param - derived from SEMUME for per-proc sizeof */
159 RO seminfo.semvmx /* SEMVMX unused - user param */
160 RO seminfo.semaem /* SEMAEM unused - user param */
/*
 * seminit (fragment) -- NOTE(review): this listing is sampled; the function
 * header, error checks around the panics, and closing braces are missing.
 * Allocates the three pools sized by the (tunable) seminfo limits, then
 * marks every semaphore id and every undo structure as free.
 */
169 sem = kmalloc(sizeof(struct sem) * seminfo.semmns, M_SEM, M_WAITOK);
171 panic("sem is NULL");
172 sema = kmalloc(sizeof(struct semid_ds) * seminfo.semmni, M_SEM, M_WAITOK);
174 panic("sema is NULL");
175 semu = kmalloc(seminfo.semmnu * seminfo.semusz, M_SEM, M_WAITOK);
177 panic("semu is NULL");
/* An id with sem_perm.mode == 0 (no SEM_ALLOC bit) is free. */
179 for (i = 0; i < seminfo.semmni; i++) {
180 sema[i].sem_base = 0;
181 sema[i].sem_perm.mode = 0;
/* An undo structure with un_proc == NULL is free (see semu_alloc). */
183 for (i = 0; i < seminfo.semmnu; i++) {
184 struct sem_undo *suptr = SEMU(i);
185 suptr->un_proc = NULL;
189 SYSINIT(sysv_sem, SI_SUB_SYSV_SEM, SI_ORDER_FIRST, seminit, NULL)
192 * Entry point for all SEM calls
194 * semsys_args(int which, a2, a3, ...) (VARARGS)
/*
 * sys_semsys (fragment) -- common entry point that demultiplexes the
 * semctl/semget/semop system calls via the semcalls[] table.
 * NOTE(review): sampled listing; braces and the error returns for the two
 * guard conditions are missing from this view.
 */
197 sys_semsys(struct semsys_args *uap)
199 struct proc *p = curproc;
/* Unsigned so the single >= bound check below also rejects negatives. */
200 unsigned int which = (unsigned int)uap->which;
/* Jailed processes may not use SysV IPC unless the sysctl allows it. */
202 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
205 if (which >= sizeof(semcalls)/sizeof(semcalls[0]))
/* Shift the trailing varargs down so the callee sees its own arg layout. */
207 bcopy(&uap->a2, &uap->which,
208 sizeof(struct semsys_args) - offsetof(struct semsys_args, a2));
209 return ((*semcalls[which])(uap));
213 * Allocate a new sem_undo structure for a process
214 * (returns ptr to structure or NULL if no more room)
/*
 * semu_alloc (fragment) -- allocate a sem_undo structure for process p.
 * Returns a pointer to the structure, or NULL if the pool is exhausted
 * (per the comment at the top of the file section).
 * NOTE(review): sampled listing; the parameter list, loop bodies and
 * several statements are missing from this view.
 */
217 static struct sem_undo *
222 struct sem_undo *suptr;
223 struct sem_undo **supptr;
227 * Try twice to allocate something.
228 * (we'll purge any empty structures after the first pass so
229 * two passes are always enough)
232 for (attempt = 0; attempt < 2; attempt++) {
234 * Look for a free structure.
235 * Fill it in and return it if we find one.
/* Pass 1/2: scan the pool for an entry with no owning process. */
238 for (i = 0; i < seminfo.semmnu; i++) {
240 if (suptr->un_proc == NULL) {
/* Found one: push it onto the active list (semu_list). */
241 suptr->un_next = semu_list;
250 * We didn't find a free one, if this is the first attempt
251 * then try to free some structures.
255 /* All the structures are in use - try to free some */
256 int did_something = 0;
/* Unlink any active structure whose entry count dropped to zero. */
259 while ((suptr = *supptr) != NULL) {
260 if (suptr->un_cnt == 0) {
261 suptr->un_proc = NULL;
262 *supptr = suptr->un_next;
265 supptr = &(suptr->un_next);
268 /* If we didn't free anything then just give-up */
273 * The second pass failed even though we freed
274 * something after the first pass!
275 * This is IMPOSSIBLE!
277 panic("semu_alloc - second attempt failed");
284 * Adjust a particular entry for a particular proc
/*
 * semundo_adjust (fragment) -- record an exit-time adjustment of adjval
 * for semaphore (semid, semnum) in p's undo structure, caching the
 * structure through *supptr across calls.  NOTE(review): sampled listing;
 * this is old K&R style and several statements (return paths, the
 * un_cnt bookkeeping around the entry delete/create) are missing here.
 */
288 semundo_adjust(p, supptr, semid, semnum, adjval)
290 struct sem_undo **supptr;
294 struct sem_undo *suptr;
298 /* Look for and remember the sem_undo if the caller doesn't provide
303 for (suptr = semu_list; suptr != NULL;
304 suptr = suptr->un_next) {
305 if (suptr->un_proc == p) {
/* No undo structure for this process yet -- allocate one. */
313 suptr = semu_alloc(p);
321 * Look for the requested entry and adjust it (delete if adjval becomes
324 sunptr = &suptr->un_ent[0];
325 for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
326 if (sunptr->un_id != semid || sunptr->un_num != semnum)
/* adjval == 0 apparently means "reset"; otherwise accumulate. */
329 sunptr->un_adjval = 0;
331 sunptr->un_adjval += adjval;
/* Net adjustment cancelled out: delete the entry by swapping in the
 * last one (order within un_ent[] does not matter). */
332 if (sunptr->un_adjval == 0) {
334 if (i < suptr->un_cnt)
336 suptr->un_ent[suptr->un_cnt];
341 /* Didn't find the right entry - create it */
/* Only if there is still room (semume is the per-process entry limit). */
344 if (suptr->un_cnt != seminfo.semume) {
345 sunptr = &suptr->un_ent[suptr->un_cnt];
347 sunptr->un_adjval = adjval;
348 sunptr->un_id = semid; sunptr->un_num = semnum;
/*
 * semundo_clear (fragment) -- drop undo entries that refer to semid;
 * semnum == -1 clears every semaphore of that id (used when the set is
 * removed), otherwise only the matching semaphore number.
 * NOTE(review): sampled listing; return type, loop increments and the
 * entry-compaction statements are missing from this view.
 */
355 semundo_clear(semid, semnum)
358 struct sem_undo *suptr;
/* Walk every active undo structure in the system. */
360 for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
361 struct undo *sunptr = &suptr->un_ent[0];
364 while (i < suptr->un_cnt) {
365 if (sunptr->un_id == semid) {
366 if (semnum == -1 || sunptr->un_num == semnum) {
/* Delete by moving the last entry into this slot. */
368 if (i < suptr->un_cnt) {
370 suptr->un_ent[suptr->un_cnt];
383 * Note that the user-mode half of this passes a union, not a pointer
/*
 * sys___semctl (fragment) -- semctl(2) backend.  NOTE(review): this listing
 * is sampled; the switch statement and its case labels, error returns and
 * braces are missing, so the command attributions below are inferred from
 * the known command ordering -- confirm against the full source.
 */
387 sys___semctl(struct __semctl_args *uap)
389 struct proc *p = curproc;
390 int semid = uap->semid;
391 int semnum = uap->semnum;
393 union semun *arg = uap->arg;
394 union semun real_arg;
395 struct ucred *cred = p->p_ucred;
397 struct semid_ds sbuf;
398 struct semid_ds *semaptr;
401 kprintf("call to semctl(%d, %d, %d, 0x%x)\n", semid, semnum, cmd, arg);
404 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
/* Convert the user-visible id to a pool index and validate it. */
407 semid = IPCID_TO_IX(semid);
408 if (semid < 0 || semid >= seminfo.semmni)
411 semaptr = &sema[semid];
/* Reject ids that are not allocated or whose sequence number is stale. */
412 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
413 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid))
/* Presumably IPC_RMID: remove the set and compact the sem[] pool. */
421 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_M)))
423 semaptr->sem_perm.cuid = cred->cr_uid;
424 semaptr->sem_perm.uid = cred->cr_uid;
425 semtot -= semaptr->sem_nsems;
/* Slide the following semaphores down over the freed range... */
426 for (i = semaptr->sem_base - sem; i < semtot; i++)
427 sem[i] = sem[i + semaptr->sem_nsems];
/* ...and fix up every other set's base pointer accordingly. */
428 for (i = 0; i < seminfo.semmni; i++) {
429 if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
430 sema[i].sem_base > semaptr->sem_base)
431 sema[i].sem_base -= semaptr->sem_nsems;
433 semaptr->sem_perm.mode = 0;
434 semundo_clear(semid, -1);
/* Wake sleepers in semop() so they notice the set is gone. */
435 wakeup((caddr_t)semaptr);
/* Presumably IPC_SET: copy in a semid_ds and update owner/mode. */
439 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_M)))
441 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
443 if ((eval = copyin(real_arg.buf, (caddr_t)&sbuf,
446 semaptr->sem_perm.uid = sbuf.sem_perm.uid;
447 semaptr->sem_perm.gid = sbuf.sem_perm.gid;
/* Only the low 9 permission bits are settable from userland. */
448 semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
449 (sbuf.sem_perm.mode & 0777);
450 semaptr->sem_ctime = time_second;
/* Presumably IPC_STAT: copy the semid_ds out to the user. */
454 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
456 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
458 eval = copyout((caddr_t)semaptr, real_arg.buf,
459 sizeof(struct semid_ds));
/* Presumably GETNCNT: # of threads waiting for the value to increase. */
463 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
465 if (semnum < 0 || semnum >= semaptr->sem_nsems)
467 rval = semaptr->sem_base[semnum].semncnt;
/* Presumably GETPID: pid of the last process to operate on semnum. */
471 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
473 if (semnum < 0 || semnum >= semaptr->sem_nsems)
475 rval = semaptr->sem_base[semnum].sempid;
/* Presumably GETVAL: current value of semaphore semnum. */
479 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
481 if (semnum < 0 || semnum >= semaptr->sem_nsems)
483 rval = semaptr->sem_base[semnum].semval;
/* Presumably GETALL: copy every semval out to real_arg.array. */
487 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
489 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
491 for (i = 0; i < semaptr->sem_nsems; i++) {
492 eval = copyout((caddr_t)&semaptr->sem_base[i].semval,
493 &real_arg.array[i], sizeof(real_arg.array[0]));
/* Presumably GETZCNT: # of threads waiting for the value to reach 0. */
500 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
502 if (semnum < 0 || semnum >= semaptr->sem_nsems)
504 rval = semaptr->sem_base[semnum].semzcnt;
/* Presumably SETVAL: set one semaphore, void its undos, wake sleepers. */
508 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_W)))
510 if (semnum < 0 || semnum >= semaptr->sem_nsems)
512 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
514 semaptr->sem_base[semnum].semval = real_arg.val;
515 semundo_clear(semid, semnum);
516 wakeup((caddr_t)semaptr);
/* Presumably SETALL: copy all values in, void undos, wake sleepers. */
520 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_W)))
522 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
524 for (i = 0; i < semaptr->sem_nsems; i++) {
525 eval = copyin(&real_arg.array[i],
526 (caddr_t)&semaptr->sem_base[i].semval,
527 sizeof(real_arg.array[0]));
531 semundo_clear(semid, -1);
532 wakeup((caddr_t)semaptr);
/* On success, rval (command-dependent) is the syscall's return value. */
540 uap->sysmsg_result = rval;
/*
 * sys_semget (fragment) -- semget(2): look up an existing semaphore set
 * by key, or create one when key == IPC_PRIVATE / IPC_CREAT is given.
 * NOTE(review): sampled listing; error returns, braces and several
 * statements (e.g. semtot += nsems) are missing from this view.
 */
545 sys_semget(struct semget_args *uap)
547 struct proc *p = curproc;
550 int nsems = uap->nsems;
551 int semflg = uap->semflg;
552 struct ucred *cred = p->p_ucred;
555 kprintf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
558 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
/* Non-private keys: linear search of the allocated ids for this key. */
561 if (key != IPC_PRIVATE) {
562 for (semid = 0; semid < seminfo.semmni; semid++) {
563 if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
564 sema[semid].sem_perm.key == key)
567 if (semid < seminfo.semmni) {
569 kprintf("found public key\n");
/* Existing set: check permissions, size, and IPC_CREAT|IPC_EXCL. */
571 if ((eval = ipcperm(p, &sema[semid].sem_perm,
574 if (nsems > 0 && sema[semid].sem_nsems < nsems) {
576 kprintf("too small\n");
580 if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
582 kprintf("not exclusive\n");
591 kprintf("need to allocate the semid_ds\n");
/* Creation path: validate nsems against per-set and global limits. */
593 if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
594 if (nsems <= 0 || nsems > seminfo.semmsl) {
596 kprintf("nsems out of range (0<%d<=%d)\n", nsems,
601 if (nsems > seminfo.semmns - semtot) {
603 kprintf("not enough semaphores left (need %d, got %d)\n",
604 nsems, seminfo.semmns - semtot);
/* Find a free id slot (mode without SEM_ALLOC set). */
608 for (semid = 0; semid < seminfo.semmni; semid++) {
609 if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
612 if (semid == seminfo.semmni) {
614 kprintf("no more semid_ds's available\n");
619 kprintf("semid %d is available\n", semid);
/* Initialize the new set; bump seq so stale ids are detectable. */
621 sema[semid].sem_perm.key = key;
622 sema[semid].sem_perm.cuid = cred->cr_uid;
623 sema[semid].sem_perm.uid = cred->cr_uid;
624 sema[semid].sem_perm.cgid = cred->cr_gid;
625 sema[semid].sem_perm.gid = cred->cr_gid;
626 sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
627 sema[semid].sem_perm.seq =
628 (sema[semid].sem_perm.seq + 1) & 0x7fff;
629 sema[semid].sem_nsems = nsems;
630 sema[semid].sem_otime = 0;
631 sema[semid].sem_ctime = time_second;
/* Semaphores are carved contiguously off the end of the sem[] pool. */
632 sema[semid].sem_base = &sem[semtot];
634 bzero(sema[semid].sem_base,
635 sizeof(sema[semid].sem_base[0])*nsems);
637 kprintf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base,
642 kprintf("didn't find it and wasn't asked to create it\n");
/* Return the user-visible id built from index + sequence number. */
648 uap->sysmsg_result = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
/*
 * sys_semop (fragment) -- semop(2): atomically apply a vector of up to
 * MAX_SOPS operations to one semaphore set, sleeping and retrying when an
 * operation cannot be satisfied.  NOTE(review): sampled listing; many
 * statements (error returns, braces, semncnt/semzcnt increments, the
 * retry goto/loop head) are missing from this view.
 */
653 sys_semop(struct semop_args *uap)
655 struct proc *p = curproc;
656 int semid = uap->semid;
657 u_int nsops = uap->nsops;
658 struct sembuf sops[MAX_SOPS];
659 struct semid_ds *semaptr;
660 struct sembuf *sopptr;
662 struct sem_undo *suptr = NULL;
664 int do_wakeup, do_undos;
667 kprintf("call to semop(%d, 0x%x, %u)\n", semid, sops, nsops);
670 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
673 semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
675 if (semid < 0 || semid >= seminfo.semmni)
678 semaptr = &sema[semid];
/* Validate allocation and the id's sequence number (stale-id check). */
679 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
681 if (semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid))
684 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_W))) {
686 kprintf("eval = %d from ipaccess\n", eval);
/* The op vector is copied into a fixed on-stack array, hence the cap. */
691 if (nsops > MAX_SOPS) {
693 kprintf("too many sops (max=%d, nsops=%u)\n", MAX_SOPS, nsops);
698 if ((eval = copyin(uap->sops, &sops, nsops * sizeof(sops[0]))) != 0) {
700 kprintf("eval = %d from copyin(%08x, %08x, %u)\n", eval,
701 uap->sops, &sops, nsops * sizeof(sops[0]));
707 * Loop trying to satisfy the vector of requests.
708 * If we reach a point where we must wait, any requests already
709 * performed are rolled back and we go to sleep until some other
710 * process wakes us up. At this point, we start all over again.
712 * This ensures that from the perspective of other tasks, a set
713 * of requests is atomic (never partially satisfied).
720 for (i = 0; i < nsops; i++) {
723 if (sopptr->sem_num >= semaptr->sem_nsems)
726 semptr = &semaptr->sem_base[sopptr->sem_num];
729 kprintf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
730 semaptr, semaptr->sem_base, semptr,
731 sopptr->sem_num, semptr->semval, sopptr->sem_op,
732 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
/* Negative op: "down" -- blocks while it would drive semval below 0. */
735 if (sopptr->sem_op < 0) {
736 if (semptr->semval + sopptr->sem_op < 0) {
738 kprintf("semop: can't do it now\n");
742 semptr->semval += sopptr->sem_op;
/* Hitting zero may release zero-waiters -> remember to wakeup. */
743 if (semptr->semval == 0 &&
747 if (sopptr->sem_flg & SEM_UNDO)
/* Zero op: wait-for-zero -- blocks while semval is positive. */
749 } else if (sopptr->sem_op == 0) {
750 if (semptr->semval > 0) {
752 kprintf("semop: not zero now\n");
/* Positive op: "up" -- always succeeds; may release down-waiters. */
757 if (semptr->semncnt > 0)
759 semptr->semval += sopptr->sem_op;
760 if (sopptr->sem_flg & SEM_UNDO)
766 * Did we get through the entire vector?
772 * No ... rollback anything that we've already done
775 kprintf("semop: rollback 0 through %d\n", i-1);
777 for (j = 0; j < i; j++)
778 semaptr->sem_base[sops[j].sem_num].semval -=
782 * If the request that we couldn't satisfy has the
783 * NOWAIT flag set then return with EAGAIN.
785 if (sopptr->sem_flg & IPC_NOWAIT)
/* Bump semzcnt (zero-wait) or semncnt (down-wait) before sleeping. */
788 if (sopptr->sem_op == 0)
794 kprintf("semop: good night!\n");
/* PCATCH: the sleep is interruptible by signals. */
796 eval = tsleep((caddr_t)semaptr, PCATCH, "semwait", 0);
798 kprintf("semop: good morning (eval=%d)!\n", eval);
801 suptr = NULL; /* sem_undo may have been reallocated */
803 /* return code is checked below, after sem[nz]cnt-- */
806 * Make sure that the semaphore still exists
/* The set may have been removed (IPC_RMID) while we slept. */
808 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
809 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid))
813 * The semaphore is still alive. Readjust the count of
816 if (sopptr->sem_op == 0)
822 * Is it really morning, or was our sleep interrupted?
823 * (Delayed check of msleep() return code because we
824 * need to decrement sem[nz]cnt either way.)
829 kprintf("semop: good morning!\n");
835 * Process any SEM_UNDO requests.
838 for (i = 0; i < nsops; i++) {
840 * We only need to deal with SEM_UNDO's for non-zero
845 if ((sops[i].sem_flg & SEM_UNDO) == 0)
847 adjval = sops[i].sem_op;
/* Record the inverse of the op so exit restores the old value. */
850 eval = semundo_adjust(p, &suptr, semid,
851 sops[i].sem_num, -adjval);
856 * Oh-Oh! We ran out of either sem_undo's or undo's.
857 * Rollback the adjustments to this point and then
858 * rollback the semaphore ups and down so we can return
859 * with an error with all structures restored. We
860 * rollback the undo's in the exact reverse order that
861 * we applied them. This guarantees that we won't run
862 * out of space as we roll things back out.
864 for (j = i - 1; j >= 0; j--) {
865 if ((sops[j].sem_flg & SEM_UNDO) == 0)
867 adjval = sops[j].sem_op;
870 if (semundo_adjust(p, &suptr, semid,
871 sops[j].sem_num, adjval) != 0)
872 panic("semop - can't undo undos");
875 for (j = 0; j < nsops; j++)
876 semaptr->sem_base[sops[j].sem_num].semval -=
880 kprintf("eval = %d from semundo_adjust\n", eval);
883 } /* loop through the sops */
884 } /* if (do_undos) */
886 /* We're definitely done - set the sempid's */
887 for (i = 0; i < nsops; i++) {
889 semptr = &semaptr->sem_base[sopptr->sem_num];
890 semptr->sempid = p->p_pid;
893 /* Do a wakeup if any semaphore was up'd. */
896 kprintf("semop: doing wakeup\n");
898 wakeup((caddr_t)semaptr);
900 kprintf("semop: back from wakeup\n");
904 kprintf("semop: done\n");
906 uap->sysmsg_result = 0;
911 * Go through the undo structures for this process and apply the adjustments to
918 struct sem_undo *suptr;
919 struct sem_undo **supptr;
925 * Go through the chain of undo vectors looking for one
926 * associated with this process.
929 for (supptr = &semu_list; (suptr = *supptr) != NULL;
930 supptr = &suptr->un_next) {
931 if (suptr->un_proc == p)
939 kprintf("proc @%08x has undo structure with %d entries\n", p,
944 * If there are any active undo elements then process them.
946 if (suptr->un_cnt > 0) {
949 for (ix = 0; ix < suptr->un_cnt; ix++) {
950 int semid = suptr->un_ent[ix].un_id;
951 int semnum = suptr->un_ent[ix].un_num;
952 int adjval = suptr->un_ent[ix].un_adjval;
953 struct semid_ds *semaptr;
955 semaptr = &sema[semid];
956 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
957 panic("semexit - semid not allocated");
958 if (semnum >= semaptr->sem_nsems)
959 panic("semexit - semnum out of range");
962 kprintf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
963 suptr->un_proc, suptr->un_ent[ix].un_id,
964 suptr->un_ent[ix].un_num,
965 suptr->un_ent[ix].un_adjval,
966 semaptr->sem_base[semnum].semval);
970 if (semaptr->sem_base[semnum].semval < -adjval)
971 semaptr->sem_base[semnum].semval = 0;
973 semaptr->sem_base[semnum].semval +=
976 semaptr->sem_base[semnum].semval += adjval;
978 wakeup((caddr_t)semaptr);
980 kprintf("semexit: back from wakeup\n");
986 * Deallocate the undo vector.
989 kprintf("removing vector\n");
991 suptr->un_proc = NULL;
992 *supptr = suptr->un_next;