Scrap DEC Alpha support.
[dragonfly.git] / sys / opencrypto / crypto.c
1 /*      $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.7 2003/06/03 00:09:02 sam Exp $   */
2 /*      $DragonFly: src/sys/opencrypto/crypto.c,v 1.6 2003/07/19 21:14:47 dillon Exp $  */
3 /*      $OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $        */
4 /*
5  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  *
13  * Permission to use, copy, and modify this software with or without fee
14  * is hereby granted, provided that this entire notice is included in
15  * all source code copies of any software which is or includes a copy or
16  * modification of this software.
17  *
18  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
19  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
20  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
21  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
22  * PURPOSE.
23  */
24
25 #define CRYPTO_TIMING                   /* enable cryptop timing stuff */
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/eventhandler.h>
30 #include <sys/kernel.h>
31 #include <sys/kthread.h>
32 #include <sys/malloc.h>
33 #include <sys/proc.h>
34 #include <sys/sysctl.h>
35
36 #include <sys/interrupt.h>
37 #include <machine/ipl.h>
38
39 #include <vm/vm_zone.h>
40 #include <opencrypto/cryptodev.h>
41 #include <opencrypto/xform.h>                   /* XXX for M_XDATA */
42
43 #define SESID2HID(sid)  (((sid) >> 32) & 0xffffffff)
44
45 /*
46  * Crypto drivers register themselves by allocating a slot in the
47  * crypto_drivers table with crypto_get_driverid() and then registering
48  * each algorithm they support with crypto_register() and crypto_kregister().
49  */
50 static  struct cryptocap *crypto_drivers = NULL;
51 static  int crypto_drivers_num = 0;
52
53 /*
54  * There are two queues for crypto requests; one for symmetric (e.g.
55  * cipher) operations and one for asymmetric (e.g. MOD) operations.
56  * See below for how synchronization is handled.
57  */
58 static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
59 static  TAILQ_HEAD(,cryptkop) crp_kq;
60
61 /*
62  * There are two queues for processing completed crypto requests; one
63  * for the symmetric and one for the asymmetric ops.  We only need one
64  * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
65  * for how synchronization is handled.
66  */
67 static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
68 static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
69
70 /*
 71  * Crypto op and descriptor data structures are allocated
72  * from separate private zones.
73  */
74 static  vm_zone_t cryptop_zone;
75 static  vm_zone_t cryptodesc_zone;
76
77 int     crypto_usercrypto = 1;          /* userland may open /dev/crypto */
78 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
79            &crypto_usercrypto, 0,
80            "Enable/disable user-mode access to crypto support");
81 int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
82 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
83            &crypto_userasymcrypto, 0,
84            "Enable/disable user-mode access to asymmetric crypto support");
85 int     crypto_devallowsoft = 0;        /* only use hardware crypto for asym */
86 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
87            &crypto_devallowsoft, 0,
88            "Enable/disable use of software asym crypto support");
89
90 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
91
92 /*
93  * Synchronization: read carefully, this is non-trivial.
94  *
95  * Crypto requests are submitted via crypto_dispatch.  Typically
96  * these come in from network protocols at spl0 (output path) or
97  * splnet (input path).
98  *
99  * Requests are typically passed on the driver directly, but they
100  * may also be queued for processing by a software interrupt thread,
101  * cryptointr, that runs at splsoftcrypto.  This thread dispatches 
102  * the requests to crypto drivers (h/w or s/w) who call crypto_done
103  * when a request is complete.  Hardware crypto drivers are assumed
104  * to register their IRQ's as network devices so their interrupt handlers
105  * and subsequent "done callbacks" happen at splimp.
106  *
107  * Completed crypto ops are queued for a separate kernel thread that
 108  * handles the callbacks at spl0.  This decoupling ensures the crypto
109  * driver interrupt service routine is not delayed while the callback
110  * takes place and that callbacks are delivered after a context switch
111  * (as opposed to a software interrupt that clients must block).
112  *
113  * This scheme is not intended for SMP machines.
114  */ 
115 static  void cryptointr(void *dummy);   /* swi thread to dispatch ops */
116 static  void cryptoret(void);           /* kernel thread for callbacks*/
117 static  struct thread *cryptothread;
118 static  void crypto_destroy(void);
119 static  int crypto_invoke(struct cryptop *crp, int hint);
120 static  int crypto_kinvoke(struct cryptkop *krp, int hint);
121
122 static struct cryptostats cryptostats;
123 SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
124             cryptostats, "Crypto system statistics");
125
126 #ifdef CRYPTO_TIMING
127 static  int crypto_timing = 0;
128 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
129            &crypto_timing, 0, "Enable/disable crypto timing support");
130 #endif
131
132 static int
133 crypto_init(void)
134 {
135         int error;
136
137         cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
138         cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
139                                 0, 0, 1);
140         if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
141                 printf("crypto_init: cannot setup crypto zones\n");
142                 return ENOMEM;
143         }
144
145         crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
146         crypto_drivers = malloc(crypto_drivers_num *
147             sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
148         if (crypto_drivers == NULL) {
149                 printf("crypto_init: cannot malloc driver table\n");
150                 return ENOMEM;
151         }
152
153         TAILQ_INIT(&crp_q);
154         TAILQ_INIT(&crp_kq);
155
156         TAILQ_INIT(&crp_ret_q);
157         TAILQ_INIT(&crp_ret_kq);
158
159         register_swi(SWI_CRYPTO, cryptointr, NULL, "swi_crypto");
160         error = kthread_create((void (*)(void *)) cryptoret, NULL,
161                     &cryptothread, "cryptoret");
162         if (error) {
163                 printf("crypto_init: cannot start cryptoret thread; error %d",
164                         error);
165                 crypto_destroy();
166         }
167         return error;
168 }
169
170 static void
171 crypto_destroy(void)
172 {
173         /* XXX no wait to reclaim zones */
174         if (crypto_drivers != NULL)
175                 free(crypto_drivers, M_CRYPTO_DATA);
176         unregister_swi(SWI_CRYPTO, cryptointr);
177 }
178
179 /*
180  * Initialization code, both for static and dynamic loading.
181  */
182 static int
183 crypto_modevent(module_t mod, int type, void *unused)
184 {
185         int error = EINVAL;
186
187         switch (type) {
188         case MOD_LOAD:
189                 error = crypto_init();
190                 if (error == 0 && bootverbose)
191                         printf("crypto: <crypto core>\n");
192                 break;
193         case MOD_UNLOAD:
194                 /*XXX disallow if active sessions */
195                 error = 0;
196                 crypto_destroy();
197                 break;
198         }
199         return error;
200 }
201
202 static moduledata_t crypto_mod = {
203         "crypto",
204         crypto_modevent,
205         0
206 };
207 MODULE_VERSION(crypto, 1);
208 DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
209
210 /*
211  * Create a new session.
212  */
213 int
214 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
215 {
216         struct cryptoini *cr;
217         u_int32_t hid, lid;
218         int err = EINVAL;
219         int s;
220
221         s = splcrypto();
222
223         if (crypto_drivers == NULL)
224                 goto done;
225
226         /*
227          * The algorithm we use here is pretty stupid; just use the
228          * first driver that supports all the algorithms we need.
229          *
230          * XXX We need more smarts here (in real life too, but that's
231          * XXX another story altogether).
232          */
233
234         for (hid = 0; hid < crypto_drivers_num; hid++) {
235                 /*
236                  * If it's not initialized or has remaining sessions
237                  * referencing it, skip.
238                  */
239                 if (crypto_drivers[hid].cc_newsession == NULL ||
240                     (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
241                         continue;
242
243                 /* Hardware required -- ignore software drivers. */
244                 if (hard > 0 &&
245                     (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
246                         continue;
247                 /* Software required -- ignore hardware drivers. */
248                 if (hard < 0 &&
249                     (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
250                         continue;
251
252                 /* See if all the algorithms are supported. */
253                 for (cr = cri; cr; cr = cr->cri_next)
254                         if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
255                                 break;
256
257                 if (cr == NULL) {
258                         /* Ok, all algorithms are supported. */
259
260                         /*
261                          * Can't do everything in one session.
262                          *
263                          * XXX Fix this. We need to inject a "virtual" session layer right
264                          * XXX about here.
265                          */
266
267                         /* Call the driver initialization routine. */
268                         lid = hid;              /* Pass the driver ID. */
269                         err = crypto_drivers[hid].cc_newsession(
270                                         crypto_drivers[hid].cc_arg, &lid, cri);
271                         if (err == 0) {
272                                 (*sid) = hid;
273                                 (*sid) <<= 32;
274                                 (*sid) |= (lid & 0xffffffff);
275                                 crypto_drivers[hid].cc_sessions++;
276                         }
277                         break;
278                 }
279         }
280 done:
281         splx(s);
282         return err;
283 }
284
285 /*
286  * Delete an existing session (or a reserved session on an unregistered
287  * driver).
288  */
289 int
290 crypto_freesession(u_int64_t sid)
291 {
292         u_int32_t hid;
293         int err, s;
294
295         s = splcrypto();
296
297         if (crypto_drivers == NULL) {
298                 err = EINVAL;
299                 goto done;
300         }
301
302         /* Determine two IDs. */
303         hid = SESID2HID(sid);
304
305         if (hid >= crypto_drivers_num) {
306                 err = ENOENT;
307                 goto done;
308         }
309
310         if (crypto_drivers[hid].cc_sessions)
311                 crypto_drivers[hid].cc_sessions--;
312
313         /* Call the driver cleanup routine, if available. */
314         if (crypto_drivers[hid].cc_freesession)
315                 err = crypto_drivers[hid].cc_freesession(
316                                 crypto_drivers[hid].cc_arg, sid);
317         else
318                 err = 0;
319
320         /*
321          * If this was the last session of a driver marked as invalid,
322          * make the entry available for reuse.
323          */
324         if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
325             crypto_drivers[hid].cc_sessions == 0)
326                 bzero(&crypto_drivers[hid], sizeof(struct cryptocap));
327
328 done:
329         splx(s);
330         return err;
331 }
332
333 /*
334  * Return an unused driver id.  Used by drivers prior to registering
335  * support for the algorithms they handle.
336  */
337 int32_t
338 crypto_get_driverid(u_int32_t flags)
339 {
340         struct cryptocap *newdrv;
341         int i, s;
342
343         s = splcrypto();
344         for (i = 0; i < crypto_drivers_num; i++)
345                 if (crypto_drivers[i].cc_process == NULL &&
346                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
347                     crypto_drivers[i].cc_sessions == 0)
348                         break;
349
350         /* Out of entries, allocate some more. */
351         if (i == crypto_drivers_num) {
352                 /* Be careful about wrap-around. */
353                 if (2 * crypto_drivers_num <= crypto_drivers_num) {
354                         splx(s);
355                         printf("crypto: driver count wraparound!\n");
356                         return -1;
357                 }
358
359                 newdrv = malloc(2 * crypto_drivers_num *
360                     sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
361                 if (newdrv == NULL) {
362                         splx(s);
363                         printf("crypto: no space to expand driver table!\n");
364                         return -1;
365                 }
366
367                 bcopy(crypto_drivers, newdrv,
368                     crypto_drivers_num * sizeof(struct cryptocap));
369
370                 crypto_drivers_num *= 2;
371
372                 free(crypto_drivers, M_CRYPTO_DATA);
373                 crypto_drivers = newdrv;
374         }
375
376         /* NB: state is zero'd on free */
377         crypto_drivers[i].cc_sessions = 1;      /* Mark */
378         crypto_drivers[i].cc_flags = flags;
379         if (bootverbose)
380                 printf("crypto: assign driver %u, flags %u\n", i, flags);
381
382         splx(s);
383
384         return i;
385 }
386
387 static struct cryptocap *
388 crypto_checkdriver(u_int32_t hid)
389 {
390         if (crypto_drivers == NULL)
391                 return NULL;
392         return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
393 }
394
395 /*
396  * Register support for a key-related algorithm.  This routine
 397  * is called once for each algorithm supported by a driver.
398  */
399 int
400 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
401     int (*kprocess)(void*, struct cryptkop *, int),
402     void *karg)
403 {
404         int s;
405         struct cryptocap *cap;
406         int err;
407
408         s = splcrypto();
409
410         cap = crypto_checkdriver(driverid);
411         if (cap != NULL &&
412             (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
413                 /*
414                  * XXX Do some performance testing to determine placing.
415                  * XXX We probably need an auxiliary data structure that
416                  * XXX describes relative performances.
417                  */
418
419                 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
420                 if (bootverbose)
421                         printf("crypto: driver %u registers key alg %u flags %u\n"
422                                 , driverid
423                                 , kalg
424                                 , flags
425                         );
426
427                 if (cap->cc_kprocess == NULL) {
428                         cap->cc_karg = karg;
429                         cap->cc_kprocess = kprocess;
430                 }
431                 err = 0;
432         } else
433                 err = EINVAL;
434
435         splx(s);
436         return err;
437 }
438
439 /*
440  * Register support for a non-key-related algorithm.  This routine
441  * is called once for each such algorithm supported by a driver.
442  */
443 int
444 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
445     u_int32_t flags,
446     int (*newses)(void*, u_int32_t*, struct cryptoini*),
447     int (*freeses)(void*, u_int64_t),
448     int (*process)(void*, struct cryptop *, int),
449     void *arg)
450 {
451         struct cryptocap *cap;
452         int s, err;
453
454         s = splcrypto();
455
456         cap = crypto_checkdriver(driverid);
457         /* NB: algorithms are in the range [1..max] */
458         if (cap != NULL &&
459             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
460                 /*
461                  * XXX Do some performance testing to determine placing.
462                  * XXX We probably need an auxiliary data structure that
463                  * XXX describes relative performances.
464                  */
465
466                 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
467                 cap->cc_max_op_len[alg] = maxoplen;
468                 if (bootverbose)
469                         printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
470                                 , driverid
471                                 , alg
472                                 , flags
473                                 , maxoplen
474                         );
475
476                 if (cap->cc_process == NULL) {
477                         cap->cc_arg = arg;
478                         cap->cc_newsession = newses;
479                         cap->cc_process = process;
480                         cap->cc_freesession = freeses;
481                         cap->cc_sessions = 0;           /* Unmark */
482                 }
483                 err = 0;
484         } else
485                 err = EINVAL;
486
487         splx(s);
488         return err;
489 }
490
491 /*
492  * Unregister a crypto driver. If there are pending sessions using it,
493  * leave enough information around so that subsequent calls using those
494  * sessions will correctly detect the driver has been unregistered and
495  * reroute requests.
496  */
497 int
498 crypto_unregister(u_int32_t driverid, int alg)
499 {
500         int i, err, s = splcrypto();
501         u_int32_t ses;
502         struct cryptocap *cap;
503
504         cap = crypto_checkdriver(driverid);
505         if (cap != NULL &&
506             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
507             cap->cc_alg[alg] != 0) {
508                 cap->cc_alg[alg] = 0;
509                 cap->cc_max_op_len[alg] = 0;
510
511                 /* Was this the last algorithm ? */
512                 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
513                         if (cap->cc_alg[i] != 0)
514                                 break;
515
516                 if (i == CRYPTO_ALGORITHM_MAX + 1) {
517                         ses = cap->cc_sessions;
518                         bzero(cap, sizeof(struct cryptocap));
519                         if (ses != 0) {
520                                 /*
521                                  * If there are pending sessions, just mark as invalid.
522                                  */
523                                 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
524                                 cap->cc_sessions = ses;
525                         }
526                 }
527                 err = 0;
528         } else
529                 err = EINVAL;
530
531         splx(s);
532         return err;
533 }
534
535 /*
536  * Unregister all algorithms associated with a crypto driver.
537  * If there are pending sessions using it, leave enough information
538  * around so that subsequent calls using those sessions will
539  * correctly detect the driver has been unregistered and reroute
540  * requests.
541  */
542 int
543 crypto_unregister_all(u_int32_t driverid)
544 {
545         int i, err, s = splcrypto();
546         u_int32_t ses;
547         struct cryptocap *cap;
548
549         cap = crypto_checkdriver(driverid);
550         if (cap != NULL) {
551                 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
552                         cap->cc_alg[i] = 0;
553                         cap->cc_max_op_len[i] = 0;
554                 }
555                 ses = cap->cc_sessions;
556                 bzero(cap, sizeof(struct cryptocap));
557                 if (ses != 0) {
558                         /*
559                          * If there are pending sessions, just mark as invalid.
560                          */
561                         cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
562                         cap->cc_sessions = ses;
563                 }
564                 err = 0;
565         } else
566                 err = EINVAL;
567
568         splx(s);
569         return err;
570 }
571
572 /*
573  * Clear blockage on a driver.  The what parameter indicates whether
574  * the driver is now ready for cryptop's and/or cryptokop's.
575  */
576 int
577 crypto_unblock(u_int32_t driverid, int what)
578 {
579         struct cryptocap *cap;
580         int needwakeup, err, s;
581
582         s = splcrypto();
583         cap = crypto_checkdriver(driverid);
584         if (cap != NULL) {
585                 needwakeup = 0;
586                 if (what & CRYPTO_SYMQ) {
587                         needwakeup |= cap->cc_qblocked;
588                         cap->cc_qblocked = 0;
589                 }
590                 if (what & CRYPTO_ASYMQ) {
591                         needwakeup |= cap->cc_kqblocked;
592                         cap->cc_kqblocked = 0;
593                 }
594                 if (needwakeup)
595                         setsoftcrypto();
596                 err = 0;
597         } else
598                 err = EINVAL;
599         splx(s);
600
601         return err;
602 }
603
604 /*
605  * Dispatch a crypto request to a driver or queue
606  * it, to be processed by the kernel thread.
607  */
608 int
609 crypto_dispatch(struct cryptop *crp)
610 {
611         u_int32_t hid = SESID2HID(crp->crp_sid);
612         int s, result;
613
614         cryptostats.cs_ops++;
615
616 #ifdef CRYPTO_TIMING
617         if (crypto_timing)
618                 nanouptime(&crp->crp_tstamp);
619 #endif
620         s = splcrypto();
621         if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
622                 struct cryptocap *cap;
623                 /*
624                  * Caller marked the request to be processed
625                  * immediately; dispatch it directly to the
626                  * driver unless the driver is currently blocked.
627                  */
628                 cap = crypto_checkdriver(hid);
629                 if (cap && !cap->cc_qblocked) {
630                         result = crypto_invoke(crp, 0);
631                         if (result == ERESTART) {
632                                 /*
633                                  * The driver ran out of resources, mark the
634                                  * driver ``blocked'' for cryptop's and put
635                                  * the op on the queue.
636                                  */
637                                 crypto_drivers[hid].cc_qblocked = 1;
638                                 TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
639                                 cryptostats.cs_blocks++;
640                                 result = 0;
641                         }
642                 } else {
643                         /*
644                          * The driver is blocked, just queue the op until
645                          * it unblocks and the swi thread gets kicked.
646                          */
647                         TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
648                         result = 0;
649                 }
650         } else {
651                 int wasempty = TAILQ_EMPTY(&crp_q);
652                 /*
653                  * Caller marked the request as ``ok to delay'';
654                  * queue it for the swi thread.  This is desirable
655                  * when the operation is low priority and/or suitable
656                  * for batching.
657                  */
658                 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
659                 if (wasempty)
660                         setsoftcrypto();
661                 result = 0;
662         }
663         splx(s);
664
665         return result;
666 }
667
668 /*
 669  * Add an asymmetric crypto request to a queue,
670  * to be processed by the kernel thread.
671  */
672 int
673 crypto_kdispatch(struct cryptkop *krp)
674 {
675         struct cryptocap *cap;
676         int s, result;
677
678         cryptostats.cs_kops++;
679
680         s = splcrypto();
681         cap = crypto_checkdriver(krp->krp_hid);
682         if (cap && !cap->cc_kqblocked) {
683                 result = crypto_kinvoke(krp, 0);
684                 if (result == ERESTART) {
685                         /*
686                          * The driver ran out of resources, mark the
687                          * driver ``blocked'' for cryptop's and put
688                          * the op on the queue.
689                          */
690                         crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
691                         TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
692                         cryptostats.cs_kblocks++;
693                 }
694         } else {
695                 /*
696                  * The driver is blocked, just queue the op until
697                  * it unblocks and the swi thread gets kicked.
698                  */
699                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
700                 result = 0;
701         }
702         splx(s);
703
704         return result;
705 }
706
707 /*
 708  * Dispatch an asymmetric crypto request to the appropriate crypto devices.
709  */
710 static int
711 crypto_kinvoke(struct cryptkop *krp, int hint)
712 {
713         u_int32_t hid;
714         int error;
715
716         /* Sanity checks. */
717         if (krp == NULL)
718                 return EINVAL;
719         if (krp->krp_callback == NULL) {
720                 free(krp, M_XDATA);             /* XXX allocated in cryptodev */
721                 return EINVAL;
722         }
723
724         for (hid = 0; hid < crypto_drivers_num; hid++) {
725                 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
726                     !crypto_devallowsoft)
727                         continue;
728                 if (crypto_drivers[hid].cc_kprocess == NULL)
729                         continue;
730                 if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
731                     CRYPTO_ALG_FLAG_SUPPORTED) == 0)
732                         continue;
733                 break;
734         }
735         if (hid < crypto_drivers_num) {
736                 krp->krp_hid = hid;
737                 error = crypto_drivers[hid].cc_kprocess(
738                                 crypto_drivers[hid].cc_karg, krp, hint);
739         } else
740                 error = ENODEV;
741
742         if (error) {
743                 krp->krp_status = error;
744                 crypto_kdone(krp);
745         }
746         return 0;
747 }
748
749 #ifdef CRYPTO_TIMING
750 static void
751 crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
752 {
753         struct timespec now, t;
754
755         nanouptime(&now);
756         t.tv_sec = now.tv_sec - tv->tv_sec;
757         t.tv_nsec = now.tv_nsec - tv->tv_nsec;
758         if (t.tv_nsec < 0) {
759                 t.tv_sec--;
760                 t.tv_nsec += 1000000000;
761         }
762         timespecadd(&ts->acc, &t);
763         if (timespeccmp(&t, &ts->min, <))
764                 ts->min = t;
765         if (timespeccmp(&t, &ts->max, >))
766                 ts->max = t;
767         ts->count++;
768
769         *tv = now;
770 }
771 #endif
772
773 /*
774  * Dispatch a crypto request to the appropriate crypto devices.
775  */
776 static int
777 crypto_invoke(struct cryptop *crp, int hint)
778 {
779         u_int32_t hid;
780         int (*process)(void*, struct cryptop *, int);
781
782 #ifdef CRYPTO_TIMING
783         if (crypto_timing)
784                 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
785 #endif
786         /* Sanity checks. */
787         if (crp == NULL)
788                 return EINVAL;
789         if (crp->crp_callback == NULL) {
790                 crypto_freereq(crp);
791                 return EINVAL;
792         }
793         if (crp->crp_desc == NULL) {
794                 crp->crp_etype = EINVAL;
795                 crypto_done(crp);
796                 return 0;
797         }
798
799         hid = SESID2HID(crp->crp_sid);
800         if (hid < crypto_drivers_num) {
801                 if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
802                         crypto_freesession(crp->crp_sid);
803                 process = crypto_drivers[hid].cc_process;
804         } else {
805                 process = NULL;
806         }
807
808         if (process == NULL) {
809                 struct cryptodesc *crd;
810                 u_int64_t nid;
811
812                 /*
813                  * Driver has unregistered; migrate the session and return
814                  * an error to the caller so they'll resubmit the op.
815                  */
816                 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
817                         crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
818
819                 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
820                         crp->crp_sid = nid;
821
822                 crp->crp_etype = EAGAIN;
823                 crypto_done(crp);
824                 return 0;
825         } else {
826                 /*
827                  * Invoke the driver to process the request.
828                  */
829                 return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
830         }
831 }
832
833 /*
834  * Release a set of crypto descriptors.
835  */
836 void
837 crypto_freereq(struct cryptop *crp)
838 {
839         struct cryptodesc *crd;
840
841         if (crp) {
842                 while ((crd = crp->crp_desc) != NULL) {
843                         crp->crp_desc = crd->crd_next;
844                         zfree(cryptodesc_zone, crd);
845                 }
846                 zfree(cryptop_zone, crp);
847         }
848 }
849
850 /*
851  * Acquire a set of crypto descriptors.  The descriptors are self contained
852  * so no special spl protection is necessary.
853  */
854 struct cryptop *
855 crypto_getreq(int num)
856 {
857         struct cryptodesc *crd;
858         struct cryptop *crp;
859
860         crp = zalloc(cryptop_zone);
861         if (crp != NULL) {
862                 bzero(crp, sizeof (*crp));
863                 while (num--) {
864                         crd = zalloc(cryptodesc_zone);
865                         if (crd == NULL) {
866                                 crypto_freereq(crp);
867                                 crp = NULL;
868                                 break;
869                         }
870                         bzero(crd, sizeof (*crd));
871                         crd->crd_next = crp->crp_desc;
872                         crp->crp_desc = crd;
873                 }
874         }
875         return crp;
876 }
877
/*
 * Invoke the callback on behalf of the driver.
 *
 * Drivers call this when a symmetric request completes (crp_etype holds
 * any error).  The op is marked done and either the callback runs here
 * directly (CRYPTO_F_CBIMM) or the op is queued on crp_ret_q for the
 * cryptoret kernel thread to deliver.
 */
void
crypto_done(struct cryptop *crp)
{
	/* Each request may be completed exactly once. */
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed.  Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		/* Wake cryptoret only if it can be asleep (queue was empty). */
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}
931
/*
 * Invoke the callback on behalf of the driver — asymmetric-key analogue
 * of crypto_done().  The completed cryptkop is queued on crp_ret_kq and
 * its callback is delivered later by the cryptoret kernel thread.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	/*
	 * NB: the wakeup channel is &crp_ret_q, not &crp_ret_kq — this is
	 * intentional: cryptoret() tsleeps on &crp_ret_q and services both
	 * return queues on each wakeup.
	 */
	if (wasempty)
		wakeup_one(&crp_ret_q);
	splx(s);
}
955
956 int
957 crypto_getfeat(int *featp)
958 {
959         int hid, kalg, feat = 0;
960         int s = splcrypto();
961
962         if (!crypto_userasymcrypto)
963                 goto out;         
964
965         for (hid = 0; hid < crypto_drivers_num; hid++) {
966                 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
967                     !crypto_devallowsoft) {
968                         continue;
969                 }
970                 if (crypto_drivers[hid].cc_kprocess == NULL)
971                         continue;
972                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
973                         if ((crypto_drivers[hid].cc_kalg[kalg] &
974                             CRYPTO_ALG_FLAG_SUPPORTED) != 0)
975                                 feat |=  1 << kalg;
976         }
977 out:
978         splx(s);
979         *featp = feat;
980         return (0);
981 }
982
/*
 * Software interrupt thread to dispatch crypto requests.
 *
 * Runs at splcrypto and drains both the symmetric queue (crp_q) and the
 * key-op queue (crp_kq), handing each dispatchable request to its driver
 * via crypto_invoke()/crypto_kinvoke().  A driver returning ERESTART is
 * marked blocked and the request is requeued at the head; crypto_unblock
 * (elsewhere in this file) is expected to clear the blocked state later.
 */
static void
cryptointr(void *dummy)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint, s;

	cryptostats.cs_intrs++;
	s = splcrypto();
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			u_int32_t hid = SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless whether its for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					/* Non-batch ops dispatch immediately. */
					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning for more are q'd */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}
		/* Repeat until neither queue yielded a dispatchable op. */
	} while (submit != NULL || krp != NULL);
	splx(s);
}
1086
/*
 * Kernel thread to do callbacks.
 *
 * Loops forever at splcrypto, pulling completed ops off crp_ret_q and
 * crp_ret_kq.  Callbacks are run with the ipl lowered so they may do
 * real work; both queues are rechecked after each pair of callbacks.
 * Sleeps on &crp_ret_q when both return queues are empty (crypto_done
 * and crypto_kdone both wake this channel).
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;
	int s;

	s = splcrypto();
	for (;;) {
		/* Take at most one entry from each return queue per pass. */
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			splx(s);		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			s  = splcrypto();
		} else {
			/* Both queues empty: sleep until a completion wakes us. */
			(void) tsleep(&crp_ret_q, 0, "crypto_wait", 0);
			cryptostats.cs_rets++;
		}
	}
}