1 /* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
2 /* $DragonFly: src/sys/kern/sysv_sem.c,v 1.13 2004/03/19 18:00:25 hmp Exp $ */
5 * Implementation of SVID semaphores
7 * Author: Daniel Boulet
9 * This software is provided ``AS IS'' without any warranties of any kind.
12 #include "opt_sysvipc.h"
14 #include <sys/param.h>
15 #include <sys/systm.h>
16 #include <sys/sysproto.h>
17 #include <sys/kernel.h>
20 #include <sys/sysent.h>
21 #include <sys/sysctl.h>
22 #include <sys/malloc.h>
/* Malloc type under which all SysV semaphore pools are allocated. */
25 static MALLOC_DEFINE(M_SEM, "sem", "SVID compatible semaphores");
/* Boot-time initialization hook; registered via SYSINIT below. */
27 static void seminit (void *);
/*
 * Syscall prototypes when <sys/sysproto.h> has not already provided them.
 * NOTE(review): the matching #endif is not visible in this excerpt.
 */
29 #ifndef _SYS_SYSPROTO_H_
31 int __semctl (struct proc *p, struct __semctl_args *uap);
33 int semget (struct proc *p, struct semget_args *uap);
35 int semop (struct proc *p, struct semop_args *uap);
/* Internal helpers for the per-process undo machinery. */
38 static struct sem_undo *semu_alloc (struct proc *p);
39 static int semundo_adjust (struct proc *p, struct sem_undo **supptr,
40 int semid, int semnum, int adjval);
41 static void semundo_clear (int semid, int semnum);
/* Dispatch table used by semsys() to route the multiplexed semsys(2) call. */
43 /* XXX casting to (sy_call_t *) is bogus, as usual. */
44 static sy_call_t *semcalls[] = {
45 (sy_call_t *)__semctl, (sy_call_t *)semget,
/* Global state; the three pools are allocated once in seminit(). */
49 static int semtot = 0; /* total semaphores currently in use */
50 static struct semid_ds *sema; /* semaphore id pool */
51 static struct sem *sem; /* semaphore pool */
52 static struct sem_undo *semu_list; /* list of active undo structures */
53 static int *semu; /* undo structure pool */
/*
 * Per-semaphore state.  NOTE(review): the opening "struct sem {" line is
 * not visible in this sampled excerpt; these members belong to it.
 */
56 u_short semval; /* semaphore value */
57 pid_t sempid; /* pid of last operation */
58 u_short semncnt; /* # awaiting semval > cval */
59 u_short semzcnt; /* # awaiting semval = 0 */
63 * Undo structure (one per process)
/*
 * NOTE(review): the "struct sem_undo {" opening line is not visible here.
 * A structure is live while un_proc != NULL (see semu_alloc/semexit).
 */
66 struct sem_undo *un_next; /* ptr to next active undo structure */
67 struct proc *un_proc; /* owner of this structure */
68 short un_cnt; /* # of active entries */
70 short un_adjval; /* adjust on exit values */
71 short un_num; /* semaphore # */
72 int un_id; /* semid */
73 } un_ent[1]; /* undo entries */
77 * Configuration parameters
/* Compile-time defaults; these seed the runtime seminfo struct below. */
80 #define SEMMNI 10 /* # of semaphore identifiers */
83 #define SEMMNS 60 /* # of semaphores in system */
86 #define SEMUME 10 /* max # of undo entries per process */
89 #define SEMMNU 30 /* # of undo structures in system */
92 /* shouldn't need tuning */
94 #define SEMMAP 30 /* # of entries in semaphore map */
97 #define SEMMSL SEMMNS /* max # of semaphores per id */
100 #define SEMOPM 100 /* max # of operations per semop call */
103 #define SEMVMX 32767 /* semaphore maximum value */
104 #define SEMAEM 16384 /* adjust on exit max value */
107 * Due to the way semaphore memory is allocated, we have to ensure that
108 * SEMUSZ is properly aligned.
/* Round "bytes" up to the next multiple of sizeof(long). */
111 #define SEM_ALIGN(bytes) (((bytes) + (sizeof(long) - 1)) & ~(sizeof(long) - 1))
113 /* actual size of an undo structure */
114 #define SEMUSZ SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME]))
117 * Macro to find a particular sem_undo vector
/* Index the semu pool by byte offset: each entry is seminfo.semusz bytes. */
119 #define SEMU(ix) ((struct sem_undo *)(((intptr_t)semu)+ix * seminfo.semusz))
122 * semaphore info struct
/* Runtime copies of the limits above; overridable via tunables/sysctls. */
124 struct seminfo seminfo = {
125 SEMMAP, /* # of entries in semaphore map */
126 SEMMNI, /* # of semaphore identifiers */
127 SEMMNS, /* # of semaphores in system */
128 SEMMNU, /* # of undo structures in system */
129 SEMMSL, /* max # of semaphores per id */
130 SEMOPM, /* max # of operations per semop call */
131 SEMUME, /* max # of undo entries per process */
132 SEMUSZ, /* size in bytes of undo structure */
133 SEMVMX, /* semaphore maximum value */
134 SEMAEM /* adjust on exit max value */
/* Loader tunables: kern.ipc.* may override the compiled-in defaults. */
137 TUNABLE_INT("kern.ipc.semmap", &seminfo.semmap);
138 TUNABLE_INT("kern.ipc.semmni", &seminfo.semmni);
139 TUNABLE_INT("kern.ipc.semmns", &seminfo.semmns);
140 TUNABLE_INT("kern.ipc.semmnu", &seminfo.semmnu);
141 TUNABLE_INT("kern.ipc.semmsl", &seminfo.semmsl);
142 TUNABLE_INT("kern.ipc.semopm", &seminfo.semopm);
143 TUNABLE_INT("kern.ipc.semume", &seminfo.semume);
144 TUNABLE_INT("kern.ipc.semusz", &seminfo.semusz);
145 TUNABLE_INT("kern.ipc.semvmx", &seminfo.semvmx);
146 TUNABLE_INT("kern.ipc.semaem", &seminfo.semaem);
/* Sysctl exports: RD entries are fixed after boot, RW are live-tunable. */
148 SYSCTL_DECL(_kern_ipc);
149 SYSCTL_INT(_kern_ipc, OID_AUTO, semmap, CTLFLAG_RW, &seminfo.semmap, 0, "");
150 SYSCTL_INT(_kern_ipc, OID_AUTO, semmni, CTLFLAG_RD, &seminfo.semmni, 0, "");
151 SYSCTL_INT(_kern_ipc, OID_AUTO, semmns, CTLFLAG_RD, &seminfo.semmns, 0, "");
152 SYSCTL_INT(_kern_ipc, OID_AUTO, semmnu, CTLFLAG_RD, &seminfo.semmnu, 0, "");
153 SYSCTL_INT(_kern_ipc, OID_AUTO, semmsl, CTLFLAG_RW, &seminfo.semmsl, 0, "");
154 SYSCTL_INT(_kern_ipc, OID_AUTO, semopm, CTLFLAG_RD, &seminfo.semopm, 0, "");
155 SYSCTL_INT(_kern_ipc, OID_AUTO, semume, CTLFLAG_RD, &seminfo.semume, 0, "");
156 SYSCTL_INT(_kern_ipc, OID_AUTO, semusz, CTLFLAG_RD, &seminfo.semusz, 0, "");
157 SYSCTL_INT(_kern_ipc, OID_AUTO, semvmx, CTLFLAG_RW, &seminfo.semvmx, 0, "");
158 SYSCTL_INT(_kern_ipc, OID_AUTO, semaem, CTLFLAG_RW, &seminfo.semaem, 0, "");
/*
 * NOTE(review): the lines below look like the interior of a commented-out
 * table documenting which fields are read-only; the comment delimiters are
 * not visible in this excerpt.
 */
161 RO seminfo.semmap /* SEMMAP unused */
164 RO seminfo.semmnu /* undo entries per system */
166 RO seminfo.semopm /* SEMOPM unused */
168 RO seminfo.semusz /* param - derived from SEMUME for per-proc sizeof */
169 RO seminfo.semvmx /* SEMVMX unused - user param */
170 RO seminfo.semaem /* SEMAEM unused - user param */
/*
 * seminit - allocate and initialize the three global pools.
 * NOTE(review): the function header line is not visible in this excerpt.
 * The malloc calls use M_WAITOK, which presumably cannot return NULL, so
 * the panic checks look like defensive leftovers - TODO confirm.
 */
179 sem = malloc(sizeof(struct sem) * seminfo.semmns, M_SEM, M_WAITOK);
181 panic("sem is NULL");
182 sema = malloc(sizeof(struct semid_ds) * seminfo.semmni, M_SEM, M_WAITOK);
184 panic("sema is NULL");
185 semu = malloc(seminfo.semmnu * seminfo.semusz, M_SEM, M_WAITOK);
187 panic("semu is NULL");
/* Mark every semaphore id unallocated (mode 0 => SEM_ALLOC clear). */
189 for (i = 0; i < seminfo.semmni; i++) {
190 sema[i].sem_base = 0;
191 sema[i].sem_perm.mode = 0;
/* Mark every undo structure free; un_proc == NULL means "unused". */
193 for (i = 0; i < seminfo.semmnu; i++) {
194 struct sem_undo *suptr = SEMU(i);
195 suptr->un_proc = NULL;
/* Run seminit once at boot during the SysV semaphore init phase. */
199 SYSINIT(sysv_sem, SI_SUB_SYSV_SEM, SI_ORDER_FIRST, seminit, NULL)
202 * Entry point for all SEM calls
204 * semsys_args(int which, a2, a3, ...) (VARARGS)
207 semsys(struct semsys_args *uap)
209 struct proc *p = curproc;
210 unsigned int which = (unsigned int)uap->which;
/* SysV IPC can be administratively disabled inside jails. */
212 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
/* Bounds-check the dispatch index against the semcalls table. */
215 if (which >= sizeof(semcalls)/sizeof(semcalls[0]))
/*
 * Shift the trailing arguments (a2 onward) down over "which" so the
 * selected handler sees its own argument layout, then dispatch.
 */
217 bcopy(&uap->a2, &uap->which,
218 sizeof(struct semsys_args) - offsetof(struct semsys_args, a2));
219 return ((*semcalls[which])(uap));
223 * Allocate a new sem_undo structure for a process
224 * (returns ptr to structure or NULL if no more room)
227 static struct sem_undo *
232 struct sem_undo *suptr;
233 struct sem_undo **supptr;
237 * Try twice to allocate something.
238 * (we'll purge any empty structures after the first pass so
239 * two passes are always enough)
242 for (attempt = 0; attempt < 2; attempt++) {
244 * Look for a free structure.
245 * Fill it in and return it if we find one.
/* A NULL un_proc marks a free slot (initialized that way in seminit). */
248 for (i = 0; i < seminfo.semmnu; i++) {
250 if (suptr->un_proc == NULL) {
/* Claim it: push onto the head of the active list. */
251 suptr->un_next = semu_list;
260 * We didn't find a free one, if this is the first attempt
261 * then try to free some structures.
265 /* All the structures are in use - try to free some */
266 int did_something = 0;
/* Unlink and free any structure whose entry count dropped to zero. */
269 while ((suptr = *supptr) != NULL) {
270 if (suptr->un_cnt == 0) {
271 suptr->un_proc = NULL;
272 *supptr = suptr->un_next;
275 supptr = &(suptr->un_next);
278 /* If we didn't free anything then just give-up */
283 * The second pass failed even though we freed
284 * something after the first pass!
285 * This is IMPOSSIBLE!
287 panic("semu_alloc - second attempt failed");
294 * Adjust a particular entry for a particular proc
/*
 * K&R-style definition.  Folds adjval into the undo entry for
 * (semid, semnum) belonging to process p, caching the process's
 * sem_undo in *supptr across calls.
 */
298 semundo_adjust(p, supptr, semid, semnum, adjval)
300 struct sem_undo **supptr;
304 struct sem_undo *suptr;
308 /* Look for and remember the sem_undo if the caller doesn't provide
/* Linear scan of the active undo list for this process's structure. */
313 for (suptr = semu_list; suptr != NULL;
314 suptr = suptr->un_next) {
315 if (suptr->un_proc == p) {
/* No undo structure yet: allocate a fresh one for this process. */
323 suptr = semu_alloc(p);
331 * Look for the requested entry and adjust it (delete if adjval becomes
334 sunptr = &suptr->un_ent[0];
335 for (i = 0; i < suptr->un_cnt; i++, sunptr++) {
336 if (sunptr->un_id != semid || sunptr->un_num != semnum)
/* An adjval of 0 is a request to clear the entry outright. */
339 sunptr->un_adjval = 0;
341 sunptr->un_adjval += adjval;
/* Adjustment cancelled out: delete by copying the last entry down. */
342 if (sunptr->un_adjval == 0) {
344 if (i < suptr->un_cnt)
346 suptr->un_ent[suptr->un_cnt];
351 /* Didn't find the right entry - create it */
/* Only if there is still room (un_cnt below the per-process limit). */
354 if (suptr->un_cnt != seminfo.semume) {
355 sunptr = &suptr->un_ent[suptr->un_cnt];
357 sunptr->un_adjval = adjval;
358 sunptr->un_id = semid; sunptr->un_num = semnum;
/*
 * Drop undo entries referring to semid from every process's undo
 * structure; semnum == -1 matches all semaphores in the set.
 */
365 semundo_clear(semid, semnum)
368 struct sem_undo *suptr;
/* Walk every active undo structure in the system. */
370 for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) {
371 struct undo *sunptr = &suptr->un_ent[0];
374 while (i < suptr->un_cnt) {
375 if (sunptr->un_id == semid) {
376 if (semnum == -1 || sunptr->un_num == semnum) {
/* Delete by copying the last entry down over this slot. */
378 if (i < suptr->un_cnt) {
380 suptr->un_ent[suptr->un_cnt];
393 * Note that the user-mode half of this passes a union, not a pointer
397 __semctl(struct __semctl_args *uap)
399 struct proc *p = curproc;
400 int semid = uap->semid;
401 int semnum = uap->semnum;
403 union semun *arg = uap->arg;
404 union semun real_arg;
405 struct ucred *cred = p->p_ucred;
407 struct semid_ds sbuf;
408 struct semid_ds *semaptr;
411 printf("call to semctl(%d, %d, %d, 0x%x)\n", semid, semnum, cmd, arg);
/* SysV IPC can be administratively disabled inside jails. */
414 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
/* Convert the user-visible IPC id back to a table index and validate. */
417 semid = IPCID_TO_IX(semid);
418 if (semid < 0 || semid >= seminfo.semmni)
421 semaptr = &sema[semid];
/* The set must be allocated and the sequence number must still match. */
422 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
423 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid))
/*
 * Removal path (case label not visible in this excerpt; presumably
 * IPC_RMID - TODO confirm): requires IPC_M, then compacts the global
 * sem[] pool and rebases sem_base of every surviving set.
 */
431 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_M)))
433 semaptr->sem_perm.cuid = cred->cr_uid;
434 semaptr->sem_perm.uid = cred->cr_uid;
435 semtot -= semaptr->sem_nsems;
436 for (i = semaptr->sem_base - sem; i < semtot; i++)
437 sem[i] = sem[i + semaptr->sem_nsems];
438 for (i = 0; i < seminfo.semmni; i++) {
439 if ((sema[i].sem_perm.mode & SEM_ALLOC) &&
440 sema[i].sem_base > semaptr->sem_base)
441 sema[i].sem_base -= semaptr->sem_nsems;
/* Mark the id free, discard all undo entries, wake any sleepers. */
443 semaptr->sem_perm.mode = 0;
444 semundo_clear(semid, -1);
445 wakeup((caddr_t)semaptr);
/* Set path (presumably IPC_SET): copy in a semid_ds, update owner/mode. */
449 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_M)))
451 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
453 if ((eval = copyin(real_arg.buf, (caddr_t)&sbuf,
456 semaptr->sem_perm.uid = sbuf.sem_perm.uid;
457 semaptr->sem_perm.gid = sbuf.sem_perm.gid;
/* Only the low 9 permission bits are taken from userland. */
458 semaptr->sem_perm.mode = (semaptr->sem_perm.mode & ~0777) |
459 (sbuf.sem_perm.mode & 0777);
460 semaptr->sem_ctime = time_second;
/* Stat path (presumably IPC_STAT): copy the semid_ds out to userland. */
464 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
466 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
468 eval = copyout((caddr_t)semaptr, real_arg.buf,
469 sizeof(struct semid_ds));
/* Read semncnt of one semaphore (presumably GETNCNT). */
473 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
475 if (semnum < 0 || semnum >= semaptr->sem_nsems)
477 rval = semaptr->sem_base[semnum].semncnt;
/* Read pid of last operation (presumably GETPID). */
481 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
483 if (semnum < 0 || semnum >= semaptr->sem_nsems)
485 rval = semaptr->sem_base[semnum].sempid;
/* Read one semaphore's value (presumably GETVAL). */
489 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
491 if (semnum < 0 || semnum >= semaptr->sem_nsems)
493 rval = semaptr->sem_base[semnum].semval;
/* Copy all values out, one element at a time (presumably GETALL). */
497 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
499 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
501 for (i = 0; i < semaptr->sem_nsems; i++) {
502 eval = copyout((caddr_t)&semaptr->sem_base[i].semval,
503 &real_arg.array[i], sizeof(real_arg.array[0]));
/* Read semzcnt of one semaphore (presumably GETZCNT). */
510 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_R)))
512 if (semnum < 0 || semnum >= semaptr->sem_nsems)
514 rval = semaptr->sem_base[semnum].semzcnt;
/*
 * Write one value (presumably SETVAL): clears that semaphore's undo
 * entries and wakes sleepers so they re-evaluate their operations.
 */
518 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_W)))
520 if (semnum < 0 || semnum >= semaptr->sem_nsems)
522 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
524 semaptr->sem_base[semnum].semval = real_arg.val;
525 semundo_clear(semid, semnum);
526 wakeup((caddr_t)semaptr);
/* Write all values (presumably SETALL): clears all undo entries. */
530 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_W)))
532 if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0)
534 for (i = 0; i < semaptr->sem_nsems; i++) {
535 eval = copyin(&real_arg.array[i],
536 (caddr_t)&semaptr->sem_base[i].semval,
537 sizeof(real_arg.array[0]));
541 semundo_clear(semid, -1);
542 wakeup((caddr_t)semaptr);
/* Hand the per-command result back to the syscall layer. */
550 uap->sysmsg_result = rval;
/* semget(2): look up a semaphore set by key, or create one. */
555 semget(struct semget_args *uap)
557 struct proc *p = curproc;
560 int nsems = uap->nsems;
561 int semflg = uap->semflg;
562 struct ucred *cred = p->p_ucred;
565 printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg);
/* SysV IPC can be administratively disabled inside jails. */
568 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
/* For non-private keys, first search for an existing allocated set. */
571 if (key != IPC_PRIVATE) {
572 for (semid = 0; semid < seminfo.semmni; semid++) {
573 if ((sema[semid].sem_perm.mode & SEM_ALLOC) &&
574 sema[semid].sem_perm.key == key)
577 if (semid < seminfo.semmni) {
579 printf("found public key\n");
/* Found: check permissions and that it is big enough. */
581 if ((eval = ipcperm(p, &sema[semid].sem_perm,
584 if (nsems > 0 && sema[semid].sem_nsems < nsems) {
586 printf("too small\n");
/* IPC_CREAT|IPC_EXCL on an existing key is an error. */
590 if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) {
592 printf("not exclusive\n");
601 printf("need to allocate the semid_ds\n");
/* Creation path: private key, or not found and IPC_CREAT requested. */
603 if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) {
604 if (nsems <= 0 || nsems > seminfo.semmsl) {
606 printf("nsems out of range (0<%d<=%d)\n", nsems,
/* Make sure the global pool still has room for nsems semaphores. */
611 if (nsems > seminfo.semmns - semtot) {
613 printf("not enough semaphores left (need %d, got %d)\n",
614 nsems, seminfo.semmns - semtot);
/* Find a free id slot. */
618 for (semid = 0; semid < seminfo.semmni; semid++) {
619 if ((sema[semid].sem_perm.mode & SEM_ALLOC) == 0)
622 if (semid == seminfo.semmni) {
624 printf("no more semid_ds's available\n");
629 printf("semid %d is available\n", semid);
/* Initialize the new set; bumping seq invalidates stale IPC ids. */
631 sema[semid].sem_perm.key = key;
632 sema[semid].sem_perm.cuid = cred->cr_uid;
633 sema[semid].sem_perm.uid = cred->cr_uid;
634 sema[semid].sem_perm.cgid = cred->cr_gid;
635 sema[semid].sem_perm.gid = cred->cr_gid;
636 sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC;
637 sema[semid].sem_perm.seq =
638 (sema[semid].sem_perm.seq + 1) & 0x7fff;
639 sema[semid].sem_nsems = nsems;
640 sema[semid].sem_otime = 0;
641 sema[semid].sem_ctime = time_second;
/* Carve nsems zeroed semaphores off the end of the global pool. */
642 sema[semid].sem_base = &sem[semtot];
644 bzero(sema[semid].sem_base,
645 sizeof(sema[semid].sem_base[0])*nsems);
647 printf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base,
652 printf("didn't find it and wasn't asked to create it\n");
/* Return the user-visible IPC id (index combined with sequence). */
658 uap->sysmsg_result = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm);
/* semop(2): atomically apply a vector of semaphore operations. */
663 semop(struct semop_args *uap)
665 struct proc *p = curproc;
666 int semid = uap->semid;
667 u_int nsops = uap->nsops;
/* Operations are copied into this fixed on-stack array (max MAX_SOPS). */
668 struct sembuf sops[MAX_SOPS];
669 struct semid_ds *semaptr;
670 struct sembuf *sopptr;
672 struct sem_undo *suptr = NULL;
674 int do_wakeup, do_undos;
677 printf("call to semop(%d, 0x%x, %u)\n", semid, sops, nsops);
/* SysV IPC can be administratively disabled inside jails. */
680 if (!jail_sysvipc_allowed && p->p_ucred->cr_prison != NULL)
683 semid = IPCID_TO_IX(semid); /* Convert back to zero origin */
685 if (semid < 0 || semid >= seminfo.semmni)
/* Validate that the set exists and the caller's id is not stale. */
688 semaptr = &sema[semid];
689 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
691 if (semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid))
694 if ((eval = ipcperm(p, &semaptr->sem_perm, IPC_W))) {
696 printf("eval = %d from ipaccess\n", eval);
/* Reject vectors larger than the on-stack sops[] buffer. */
701 if (nsops > MAX_SOPS) {
703 printf("too many sops (max=%d, nsops=%u)\n", MAX_SOPS, nsops);
708 if ((eval = copyin(uap->sops, &sops, nsops * sizeof(sops[0]))) != 0) {
710 printf("eval = %d from copyin(%08x, %08x, %u)\n", eval,
711 uap->sops, &sops, nsops * sizeof(sops[0]));
717 * Loop trying to satisfy the vector of requests.
718 * If we reach a point where we must wait, any requests already
719 * performed are rolled back and we go to sleep until some other
720 * process wakes us up. At this point, we start all over again.
722 * This ensures that from the perspective of other tasks, a set
723 * of requests is atomic (never partially satisfied).
/* Attempt pass: apply each operation, stopping at the first blocker. */
730 for (i = 0; i < nsops; i++) {
/* Each operation must name a semaphore inside this set. */
733 if (sopptr->sem_num >= semaptr->sem_nsems)
736 semptr = &semaptr->sem_base[sopptr->sem_num];
739 printf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n",
740 semaptr, semaptr->sem_base, semptr,
741 sopptr->sem_num, semptr->semval, sopptr->sem_op,
742 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
/* Negative op: decrement, blocking if the value would go negative. */
745 if (sopptr->sem_op < 0) {
746 if (semptr->semval + sopptr->sem_op < 0) {
748 printf("semop: can't do it now\n");
752 semptr->semval += sopptr->sem_op;
753 if (semptr->semval == 0 &&
757 if (sopptr->sem_flg & SEM_UNDO)
/* Zero op: wait for the semaphore to reach zero. */
759 } else if (sopptr->sem_op == 0) {
760 if (semptr->semval > 0) {
762 printf("semop: not zero now\n");
/* Positive op: increment; a waiter on semncnt means wakeup needed. */
767 if (semptr->semncnt > 0)
769 semptr->semval += sopptr->sem_op;
770 if (sopptr->sem_flg & SEM_UNDO)
776 * Did we get through the entire vector?
782 * No ... rollback anything that we've already done
785 printf("semop: rollback 0 through %d\n", i-1);
787 for (j = 0; j < i; j++)
788 semaptr->sem_base[sops[j].sem_num].semval -=
792 * If the request that we couldn't satisfy has the
793 * NOWAIT flag set then return with EAGAIN.
795 if (sopptr->sem_flg & IPC_NOWAIT)
/* Record which count we are sleeping on (semzcnt vs semncnt). */
798 if (sopptr->sem_op == 0)
804 printf("semop: good night!\n");
/* Sleep until another process's wakeup() on this set, catch signals. */
806 eval = tsleep((caddr_t)semaptr, PCATCH, "semwait", 0);
808 printf("semop: good morning (eval=%d)!\n", eval);
811 suptr = NULL; /* sem_undo may have been reallocated */
813 /* return code is checked below, after sem[nz]cnt-- */
816 * Make sure that the semaphore still exists
818 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
819 semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid))
823 * The semaphore is still alive. Readjust the count of
826 if (sopptr->sem_op == 0)
832 * Is it really morning, or was our sleep interrupted?
833 * (Delayed check of msleep() return code because we
834 * need to decrement sem[nz]cnt either way.)
839 printf("semop: good morning!\n");
845 * Process any SEM_UNDO requests.
848 for (i = 0; i < nsops; i++) {
850 * We only need to deal with SEM_UNDO's for non-zero
855 if ((sops[i].sem_flg & SEM_UNDO) == 0)
857 adjval = sops[i].sem_op;
/* Record the inverse adjustment so semexit() can undo the op. */
860 eval = semundo_adjust(p, &suptr, semid,
861 sops[i].sem_num, -adjval);
866 * Oh-Oh! We ran out of either sem_undo's or undo's.
867 * Rollback the adjustments to this point and then
868 * rollback the semaphore ups and down so we can return
869 * with an error with all structures restored. We
870 * rollback the undo's in the exact reverse order that
871 * we applied them. This guarantees that we won't run
872 * out of space as we roll things back out.
874 for (j = i - 1; j >= 0; j--) {
875 if ((sops[j].sem_flg & SEM_UNDO) == 0)
877 adjval = sops[j].sem_op;
880 if (semundo_adjust(p, &suptr, semid,
881 sops[j].sem_num, adjval) != 0)
882 panic("semop - can't undo undos");
/* Also undo the semaphore value changes from the attempt pass. */
885 for (j = 0; j < nsops; j++)
886 semaptr->sem_base[sops[j].sem_num].semval -=
890 printf("eval = %d from semundo_adjust\n", eval);
893 } /* loop through the sops */
894 } /* if (do_undos) */
896 /* We're definitely done - set the sempid's */
897 for (i = 0; i < nsops; i++) {
899 semptr = &semaptr->sem_base[sopptr->sem_num];
900 semptr->sempid = p->p_pid;
903 /* Do a wakeup if any semaphore was up'd. */
906 printf("semop: doing wakeup\n");
908 wakeup((caddr_t)semaptr);
910 printf("semop: back from wakeup\n");
914 printf("semop: done\n");
916 uap->sysmsg_result = 0;
921 * Go through the undo structures for this process and apply the adjustments to
/*
 * NOTE(review): the function header (semexit, called at process exit -
 * TODO confirm name) is not visible in this excerpt.
 */
928 struct sem_undo *suptr;
929 struct sem_undo **supptr;
935 * Go through the chain of undo vectors looking for one
936 * associated with this process.
/* Keep supptr pointing at the link so the node can be unlinked later. */
939 for (supptr = &semu_list; (suptr = *supptr) != NULL;
940 supptr = &suptr->un_next) {
941 if (suptr->un_proc == p)
949 printf("proc @%08x has undo structure with %d entries\n", p,
954 * If there are any active undo elements then process them.
956 if (suptr->un_cnt > 0) {
959 for (ix = 0; ix < suptr->un_cnt; ix++) {
960 int semid = suptr->un_ent[ix].un_id;
961 int semnum = suptr->un_ent[ix].un_num;
962 int adjval = suptr->un_ent[ix].un_adjval;
963 struct semid_ds *semaptr;
/* The recorded set and semaphore must still be valid, else panic. */
965 semaptr = &sema[semid];
966 if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
967 panic("semexit - semid not allocated");
968 if (semnum >= semaptr->sem_nsems)
969 panic("semexit - semnum out of range");
972 printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
973 suptr->un_proc, suptr->un_ent[ix].un_id,
974 suptr->un_ent[ix].un_num,
975 suptr->un_ent[ix].un_adjval,
976 semaptr->sem_base[semnum].semval);
/* Apply the adjustment, clamping the value so it never goes negative. */
980 if (semaptr->sem_base[semnum].semval < -adjval)
981 semaptr->sem_base[semnum].semval = 0;
983 semaptr->sem_base[semnum].semval +=
986 semaptr->sem_base[semnum].semval += adjval;
/* Wake anyone sleeping on this set so they recheck their operations. */
988 wakeup((caddr_t)semaptr);
990 printf("semexit: back from wakeup\n");
996 * Deallocate the undo vector.
999 printf("removing vector\n");
/* Mark the structure free and unlink it from the active list. */
1001 suptr->un_proc = NULL;
1002 *supptr = suptr->un_next;