1 /*      $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.7 2003/06/03 00:09:02 sam Exp $   */
2 /*      $DragonFly: src/sys/opencrypto/crypto.c,v 1.9 2005/06/16 21:12:49 dillon Exp $  */
3 /*      $OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $        */
4 /*
5  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  *
13  * Permission to use, copy, and modify this software with or without fee
14  * is hereby granted, provided that this entire notice is included in
15  * all source code copies of any software which is or includes a copy or
16  * modification of this software.
17  *
18  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
19  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
20  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
21  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
22  * PURPOSE.
23  */
24
25 #define CRYPTO_TIMING                   /* enable cryptop timing stuff */
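/*
 * NB: compiling with CRYPTO_TIMING only enables the timing machinery;
 * collection is still gated at run time by the debug.crypto_timing
 * sysctl defined below, which defaults to off.
 */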
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/eventhandler.h>
30 #include <sys/kernel.h>
31 #include <sys/kthread.h>
32 #include <sys/malloc.h>
33 #include <sys/proc.h>
34 #include <sys/sysctl.h>
35 #include <sys/interrupt.h>
36 #include <sys/thread2.h>
37 #include <machine/ipl.h>
38
39 #include <vm/vm_zone.h>
40 #include <opencrypto/cryptodev.h>
41 #include <opencrypto/xform.h>                   /* XXX for M_XDATA */
42
43 #define SESID2HID(sid)  (((sid) >> 32) & 0xffffffff)
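/*
 * A session id is a 64-bit cookie built in crypto_newsession(): the
 * driver table index (hid) lives in the upper 32 bits and the driver's
 * own session id (lid) in the lower 32 bits, roughly
 *
 *      sid = ((u_int64_t)hid << 32) | (lid & 0xffffffff);
 *
 * SESID2HID() recovers the driver index from such a cookie.
 */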
44
45 /*
46  * Crypto drivers register themselves by allocating a slot in the
47  * crypto_drivers table with crypto_get_driverid() and then registering
48  * each algorithm they support with crypto_register() and crypto_kregister().
49  */
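/*
 * Illustrative only: a hypothetical hardware driver "xxx" with softc sc
 * would typically attach itself along the lines of
 *
 *      int32_t cid = crypto_get_driverid(0);
 *      if (cid >= 0) {
 *              crypto_register(cid, CRYPTO_DES_CBC, 0, 0,
 *                  xxx_newsession, xxx_freesession, xxx_process, sc);
 *              crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0,
 *                  xxx_newsession, xxx_freesession, xxx_process, sc);
 *      }
 *
 * registering each algorithm it implements; the xxx_* callbacks and sc
 * are driver-specific and not part of this file.
 */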
50 static  struct cryptocap *crypto_drivers = NULL;
51 static  int crypto_drivers_num = 0;
52
53 /*
54  * There are two queues for crypto requests; one for symmetric (e.g.
55  * cipher) operations and one for asymmetric (e.g. MOD) operations.
56  * See below for how synchronization is handled.
57  */
58 static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
59 static  TAILQ_HEAD(,cryptkop) crp_kq;
60
61 /*
62  * There are two queues for processing completed crypto requests; one
63  * for the symmetric and one for the asymmetric ops.  We only need one
64  * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
65  * for how synchronization is handled.
66  */
67 static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
68 static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
69
70 /*
71  * Crypto op and descriptor data structures are allocated
72  * from separate private zones.
73  */
74 static  vm_zone_t cryptop_zone;
75 static  vm_zone_t cryptodesc_zone;
76
77 int     crypto_usercrypto = 1;          /* userland may open /dev/crypto */
78 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
79            &crypto_usercrypto, 0,
80            "Enable/disable user-mode access to crypto support");
81 int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
82 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
83            &crypto_userasymcrypto, 0,
84            "Enable/disable user-mode access to asymmetric crypto support");
85 int     crypto_devallowsoft = 0;        /* only use hardware crypto for asym */
86 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
87            &crypto_devallowsoft, 0,
88            "Enable/disable use of software asym crypto support");
89
90 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
91
92 /*
93  * Synchronization: read carefully, this is non-trivial.
94  *
95  * Crypto requests are submitted via crypto_dispatch.  No critical
96  * section or lock/interlock guarantees are made on entry.
97  *
98  * Requests are typically passed on to the driver directly, but they
99  * may also be queued for processing by a software interrupt thread,
100  * cryptointr, that runs in a critical section.  This thread dispatches 
101  * the requests to crypto drivers (h/w or s/w) who call crypto_done
102  * when a request is complete.  Hardware crypto drivers are assumed
103  * to register their IRQs as network devices so their interrupt handlers
104  * and subsequent "done callbacks" happen at appropriate protection levels.
105  *
106  * Completed crypto ops are queued for a separate kernel thread that
107  * handles the callbacks with no critical section or lock/interlock
108  * guarantees.  This decoupling ensures that the crypto driver interrupt service
109  * routine is not delayed while the callback takes place and that callbacks
110  * are delivered after a context switch (as opposed to a software interrupt
111  * that clients must block).
112  *
113  * This scheme is not intended for SMP machines.
114  */ 
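/*
 * In short, a symmetric request normally flows as
 *
 *      crypto_dispatch() -> [crp_q -> cryptointr()] -> crypto_invoke()
 *          -> driver cc_process() -> crypto_done()
 *          -> [crp_ret_q -> cryptoret()] -> crp->crp_callback()
 *
 * where the bracketed queue/thread stages are bypassed for requests
 * dispatched immediately (no CRYPTO_F_BATCH) or called back directly
 * (CRYPTO_F_CBIMM).
 */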
115 static  void cryptointr(void *dummy);   /* swi thread to dispatch ops */
116 static  void cryptoret(void);           /* kernel thread for callbacks*/
117 static  struct thread *cryptothread;
118 static  void crypto_destroy(void);
119 static  int crypto_invoke(struct cryptop *crp, int hint);
120 static  int crypto_kinvoke(struct cryptkop *krp, int hint);
121
122 static struct cryptostats cryptostats;
123 SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
124             cryptostats, "Crypto system statistics");
125
126 #ifdef CRYPTO_TIMING
127 static  int crypto_timing = 0;
128 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
129            &crypto_timing, 0, "Enable/disable crypto timing support");
130 #endif
131
132 static int
133 crypto_init(void)
134 {
135         int error;
136
137         cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
138         cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
139                                 0, 0, 1);
140         if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
141                 printf("crypto_init: cannot setup crypto zones\n");
142                 return ENOMEM;
143         }
144
145         crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
146         crypto_drivers = malloc(crypto_drivers_num *
147             sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
148         if (crypto_drivers == NULL) {
149                 printf("crypto_init: cannot malloc driver table\n");
150                 return ENOMEM;
151         }
152
153         TAILQ_INIT(&crp_q);
154         TAILQ_INIT(&crp_kq);
155
156         TAILQ_INIT(&crp_ret_q);
157         TAILQ_INIT(&crp_ret_kq);
158
159         register_swi(SWI_CRYPTO, cryptointr, NULL, "swi_crypto");
160         error = kthread_create((void (*)(void *)) cryptoret, NULL,
161                     &cryptothread, "cryptoret");
162         if (error) {
163                 printf("crypto_init: cannot start cryptoret thread; error %d",
164                         error);
165                 crypto_destroy();
166         }
167         return error;
168 }
169
170 static void
171 crypto_destroy(void)
172 {
173         /* XXX no wait to reclaim zones */
174         if (crypto_drivers != NULL)
175                 free(crypto_drivers, M_CRYPTO_DATA);
176         unregister_swi(SWI_CRYPTO, cryptointr);
177 }
178
179 /*
180  * Initialization code, both for static and dynamic loading.
181  */
182 static int
183 crypto_modevent(module_t mod, int type, void *unused)
184 {
185         int error = EINVAL;
186
187         switch (type) {
188         case MOD_LOAD:
189                 error = crypto_init();
190                 if (error == 0 && bootverbose)
191                         printf("crypto: <crypto core>\n");
192                 break;
193         case MOD_UNLOAD:
194                 /*XXX disallow if active sessions */
195                 error = 0;
196                 crypto_destroy();
197                 break;
198         }
199         return error;
200 }
201
202 static moduledata_t crypto_mod = {
203         "crypto",
204         crypto_modevent,
205         0
206 };
207 MODULE_VERSION(crypto, 1);
208 DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
209
210 /*
211  * Create a new session.
212  */
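/*
 * Illustrative only (hypothetical key and values): a consumer such as
 * /dev/crypto or IPsec builds a chain of cryptoini structures and asks
 * for a session roughly as follows
 *
 *      struct cryptoini cri;
 *      u_int64_t sid;
 *
 *      bzero(&cri, sizeof(cri));
 *      cri.cri_alg = CRYPTO_DES_CBC;
 *      cri.cri_klen = 64;
 *      cri.cri_key = key;
 *      if (crypto_newsession(&sid, &cri, 0) == 0)
 *              ... use sid with crypto_getreq()/crypto_dispatch() ...
 *
 * hard > 0 demands a hardware driver, hard < 0 a software one, and
 * hard == 0 accepts either.
 */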
213 int
214 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
215 {
216         struct cryptoini *cr;
217         u_int32_t hid, lid;
218         int err = EINVAL;
219
220         crit_enter();
221
222         if (crypto_drivers == NULL)
223                 goto done;
224
225         /*
226          * The algorithm we use here is pretty stupid; just use the
227          * first driver that supports all the algorithms we need.
228          *
229          * XXX We need more smarts here (in real life too, but that's
230          * XXX another story altogether).
231          */
232
233         for (hid = 0; hid < crypto_drivers_num; hid++) {
234                 /*
235                  * If it's not initialized or has remaining sessions
236                  * referencing it, skip.
237                  */
238                 if (crypto_drivers[hid].cc_newsession == NULL ||
239                     (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
240                         continue;
241
242                 /* Hardware required -- ignore software drivers. */
243                 if (hard > 0 &&
244                     (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
245                         continue;
246                 /* Software required -- ignore hardware drivers. */
247                 if (hard < 0 &&
248                     (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
249                         continue;
250
251                 /* See if all the algorithms are supported. */
252                 for (cr = cri; cr; cr = cr->cri_next)
253                         if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
254                                 break;
255
256                 if (cr == NULL) {
257                         /* Ok, all algorithms are supported. */
258
259                         /*
260                          * Can't do everything in one session.
261                          *
262                          * XXX Fix this. We need to inject a "virtual" session layer right
263                          * XXX about here.
264                          */
265
266                         /* Call the driver initialization routine. */
267                         lid = hid;              /* Pass the driver ID. */
268                         err = crypto_drivers[hid].cc_newsession(
269                                         crypto_drivers[hid].cc_arg, &lid, cri);
270                         if (err == 0) {
271                                 (*sid) = hid;
272                                 (*sid) <<= 32;
273                                 (*sid) |= (lid & 0xffffffff);
274                                 crypto_drivers[hid].cc_sessions++;
275                         }
276                         break;
277                 }
278         }
279 done:
280         crit_exit();
281         return err;
282 }
283
284 /*
285  * Delete an existing session (or a reserved session on an unregistered
286  * driver).
287  */
288 int
289 crypto_freesession(u_int64_t sid)
290 {
291         u_int32_t hid;
292         int err;
293
294         crit_enter();
295
296         if (crypto_drivers == NULL) {
297                 err = EINVAL;
298                 goto done;
299         }
300
301         /* Extract the driver ID from the session ID. */
302         hid = SESID2HID(sid);
303
304         if (hid >= crypto_drivers_num) {
305                 err = ENOENT;
306                 goto done;
307         }
308
309         if (crypto_drivers[hid].cc_sessions)
310                 crypto_drivers[hid].cc_sessions--;
311
312         /* Call the driver cleanup routine, if available. */
313         if (crypto_drivers[hid].cc_freesession)
314                 err = crypto_drivers[hid].cc_freesession(
315                                 crypto_drivers[hid].cc_arg, sid);
316         else
317                 err = 0;
318
319         /*
320          * If this was the last session of a driver marked as invalid,
321          * make the entry available for reuse.
322          */
323         if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
324             crypto_drivers[hid].cc_sessions == 0)
325                 bzero(&crypto_drivers[hid], sizeof(struct cryptocap));
326
327 done:
328         crit_exit();
329         return err;
330 }
331
332 /*
333  * Return an unused driver id.  Used by drivers prior to registering
334  * support for the algorithms they handle.
335  */
336 int32_t
337 crypto_get_driverid(u_int32_t flags)
338 {
339         struct cryptocap *newdrv;
340         int i;
341
342         crit_enter();
343         for (i = 0; i < crypto_drivers_num; i++)
344                 if (crypto_drivers[i].cc_process == NULL &&
345                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
346                     crypto_drivers[i].cc_sessions == 0)
347                         break;
348
349         /* Out of entries, allocate some more. */
350         if (i == crypto_drivers_num) {
351                 /* Be careful about wrap-around. */
352                 if (2 * crypto_drivers_num <= crypto_drivers_num) {
353                         crit_exit();
354                         printf("crypto: driver count wraparound!\n");
355                         return -1;
356                 }
357
358                 newdrv = malloc(2 * crypto_drivers_num *
359                     sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
360                 if (newdrv == NULL) {
361                         crit_exit();
362                         printf("crypto: no space to expand driver table!\n");
363                         return -1;
364                 }
365
366                 bcopy(crypto_drivers, newdrv,
367                     crypto_drivers_num * sizeof(struct cryptocap));
368
369                 crypto_drivers_num *= 2;
370
371                 free(crypto_drivers, M_CRYPTO_DATA);
372                 crypto_drivers = newdrv;
373         }
374
375         /* NB: state is zero'd on free */
376         crypto_drivers[i].cc_sessions = 1;      /* Mark */
377         crypto_drivers[i].cc_flags = flags;
378         if (bootverbose)
379                 printf("crypto: assign driver %u, flags %u\n", i, flags);
380
381         crit_exit();
382
383         return i;
384 }
385
386 static struct cryptocap *
387 crypto_checkdriver(u_int32_t hid)
388 {
389         if (crypto_drivers == NULL)
390                 return NULL;
391         return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
392 }
393
394 /*
395  * Register support for a key-related algorithm.  This routine
396  * is called once for each algorithm supported by a driver.
397  */
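/*
 * Illustrative only: a driver with asymmetric support would typically
 * call, e.g., crypto_kregister(cid, CRK_MOD_EXP, 0, xxx_kprocess, sc)
 * once for every CRK_* operation it implements (xxx_kprocess and sc
 * being hypothetical driver-specific arguments).
 */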
398 int
399 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
400     int (*kprocess)(void*, struct cryptkop *, int),
401     void *karg)
402 {
403         struct cryptocap *cap;
404         int err;
405
406         crit_enter();
407
408         cap = crypto_checkdriver(driverid);
409         if (cap != NULL &&
410             (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
411                 /*
412                  * XXX Do some performance testing to determine placing.
413                  * XXX We probably need an auxiliary data structure that
414                  * XXX describes relative performances.
415                  */
416
417                 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
418                 if (bootverbose)
419                         printf("crypto: driver %u registers key alg %u flags %u\n"
420                                 , driverid
421                                 , kalg
422                                 , flags
423                         );
424
425                 if (cap->cc_kprocess == NULL) {
426                         cap->cc_karg = karg;
427                         cap->cc_kprocess = kprocess;
428                 }
429                 err = 0;
430         } else
431                 err = EINVAL;
432
433         crit_exit();
434         return err;
435 }
436
437 /*
438  * Register support for a non-key-related algorithm.  This routine
439  * is called once for each such algorithm supported by a driver.
440  */
441 int
442 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
443     u_int32_t flags,
444     int (*newses)(void*, u_int32_t*, struct cryptoini*),
445     int (*freeses)(void*, u_int64_t),
446     int (*process)(void*, struct cryptop *, int),
447     void *arg)
448 {
449         struct cryptocap *cap;
450         int err;
451
452         crit_enter();
453
454         cap = crypto_checkdriver(driverid);
455         /* NB: algorithms are in the range [1..max] */
456         if (cap != NULL &&
457             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
458                 /*
459                  * XXX Do some performance testing to determine placing.
460                  * XXX We probably need an auxiliary data structure that
461                  * XXX describes relative performances.
462                  */
463
464                 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
465                 cap->cc_max_op_len[alg] = maxoplen;
466                 if (bootverbose)
467                         printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
468                                 , driverid
469                                 , alg
470                                 , flags
471                                 , maxoplen
472                         );
473
474                 if (cap->cc_process == NULL) {
475                         cap->cc_arg = arg;
476                         cap->cc_newsession = newses;
477                         cap->cc_process = process;
478                         cap->cc_freesession = freeses;
479                         cap->cc_sessions = 0;           /* Unmark */
480                 }
481                 err = 0;
482         } else
483                 err = EINVAL;
484
485         crit_exit();
486         return err;
487 }
488
489 /*
490  * Unregister a crypto driver. If there are pending sessions using it,
491  * leave enough information around so that subsequent calls using those
492  * sessions will correctly detect the driver has been unregistered and
493  * reroute requests.
494  */
495 int
496 crypto_unregister(u_int32_t driverid, int alg)
497 {
498         int i, err;
499         u_int32_t ses;
500         struct cryptocap *cap;
501
502         crit_enter();
503
504         cap = crypto_checkdriver(driverid);
505         if (cap != NULL &&
506             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
507             cap->cc_alg[alg] != 0) {
508                 cap->cc_alg[alg] = 0;
509                 cap->cc_max_op_len[alg] = 0;
510
511                 /* Was this the last algorithm ? */
512                 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
513                         if (cap->cc_alg[i] != 0)
514                                 break;
515
516                 if (i == CRYPTO_ALGORITHM_MAX + 1) {
517                         ses = cap->cc_sessions;
518                         bzero(cap, sizeof(struct cryptocap));
519                         if (ses != 0) {
520                                 /*
521                                  * If there are pending sessions, just mark as invalid.
522                                  */
523                                 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
524                                 cap->cc_sessions = ses;
525                         }
526                 }
527                 err = 0;
528         } else
529                 err = EINVAL;
530
531         crit_exit();
532         return err;
533 }
534
535 /*
536  * Unregister all algorithms associated with a crypto driver.
537  * If there are pending sessions using it, leave enough information
538  * around so that subsequent calls using those sessions will
539  * correctly detect the driver has been unregistered and reroute
540  * requests.
541  */
542 int
543 crypto_unregister_all(u_int32_t driverid)
544 {
545         int i, err;
546         u_int32_t ses;
547         struct cryptocap *cap;
548
549         crit_enter();
550         cap = crypto_checkdriver(driverid);
551         if (cap != NULL) {
552                 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
553                         cap->cc_alg[i] = 0;
554                         cap->cc_max_op_len[i] = 0;
555                 }
556                 ses = cap->cc_sessions;
557                 bzero(cap, sizeof(struct cryptocap));
558                 if (ses != 0) {
559                         /*
560                          * If there are pending sessions, just mark as invalid.
561                          */
562                         cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
563                         cap->cc_sessions = ses;
564                 }
565                 err = 0;
566         } else
567                 err = EINVAL;
568
569         crit_exit();
570         return err;
571 }
572
573 /*
574  * Clear blockage on a driver.  The what parameter indicates whether
575  * the driver is now ready for cryptop's and/or cryptokop's.
576  */
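/*
 * Illustrative only: a driver whose process routine returned ERESTART
 * would, once resources become available again (e.g. from its interrupt
 * handler), call something like
 *
 *      crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *
 * (sc->sc_cid being its hypothetical saved driver id) so that queued
 * requests are retried.
 */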
577 int
578 crypto_unblock(u_int32_t driverid, int what)
579 {
580         struct cryptocap *cap;
581         int needwakeup, err;
582
583         crit_enter();
584         cap = crypto_checkdriver(driverid);
585         if (cap != NULL) {
586                 needwakeup = 0;
587                 if (what & CRYPTO_SYMQ) {
588                         needwakeup |= cap->cc_qblocked;
589                         cap->cc_qblocked = 0;
590                 }
591                 if (what & CRYPTO_ASYMQ) {
592                         needwakeup |= cap->cc_kqblocked;
593                         cap->cc_kqblocked = 0;
594                 }
595                 if (needwakeup)
596                         setsoftcrypto();
597                 err = 0;
598         } else
599                 err = EINVAL;
600         crit_exit();
601
602         return err;
603 }
604
605 /*
606  * Dispatch a crypto request to a driver or queue
607  * it, to be processed by the kernel thread.
608  */
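/*
 * By default the request is handed to the driver right away; callers
 * that can tolerate latency may set CRYPTO_F_BATCH in crp_flags before
 * dispatching to have the request queued for the swi thread instead.
 */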
609 int
610 crypto_dispatch(struct cryptop *crp)
611 {
612         u_int32_t hid = SESID2HID(crp->crp_sid);
613         int result;
614
615         cryptostats.cs_ops++;
616
617 #ifdef CRYPTO_TIMING
618         if (crypto_timing)
619                 nanouptime(&crp->crp_tstamp);
620 #endif
621         crit_enter();
622         if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
623                 struct cryptocap *cap;
624                 /*
625                  * Caller marked the request to be processed
626                  * immediately; dispatch it directly to the
627                  * driver unless the driver is currently blocked.
628                  */
629                 cap = crypto_checkdriver(hid);
630                 if (cap && !cap->cc_qblocked) {
631                         result = crypto_invoke(crp, 0);
632                         if (result == ERESTART) {
633                                 /*
634                                  * The driver ran out of resources, mark the
635                                  * driver ``blocked'' for cryptop's and put
636                                  * the op on the queue.
637                                  */
638                                 crypto_drivers[hid].cc_qblocked = 1;
639                                 TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
640                                 cryptostats.cs_blocks++;
641                                 result = 0;
642                         }
643                 } else {
644                         /*
645                          * The driver is blocked, just queue the op until
646                          * it unblocks and the swi thread gets kicked.
647                          */
648                         TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
649                         result = 0;
650                 }
651         } else {
652                 int wasempty = TAILQ_EMPTY(&crp_q);
653                 /*
654                  * Caller marked the request as ``ok to delay'';
655                  * queue it for the swi thread.  This is desirable
656                  * when the operation is low priority and/or suitable
657                  * for batching.
658                  */
659                 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
660                 if (wasempty)
661                         setsoftcrypto();
662                 result = 0;
663         }
664         crit_exit();
665
666         return result;
667 }
668
669 /*
670  * Add an asymmetric crypto request to a queue,
671  * to be processed by the kernel thread.
672  */
673 int
674 crypto_kdispatch(struct cryptkop *krp)
675 {
676         struct cryptocap *cap;
677         int result;
678
679         cryptostats.cs_kops++;
680
681         crit_enter();
682         cap = crypto_checkdriver(krp->krp_hid);
683         if (cap && !cap->cc_kqblocked) {
684                 result = crypto_kinvoke(krp, 0);
685                 if (result == ERESTART) {
686                         /*
687                          * The driver ran out of resources, mark the
688                          * driver ``blocked'' for cryptkop's and put
689                          * the op on the queue.
690                          */
691                         crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
692                         TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
693                         cryptostats.cs_kblocks++;
694                 }
695         } else {
696                 /*
697                  * The driver is blocked, just queue the op until
698                  * it unblocks and the swi thread gets kicked.
699                  */
700                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
701                 result = 0;
702         }
703         crit_exit();
704
705         return result;
706 }
707
708 /*
709  * Dispatch an asymmetric crypto request to the appropriate crypto devices.
710  */
711 static int
712 crypto_kinvoke(struct cryptkop *krp, int hint)
713 {
714         u_int32_t hid;
715         int error;
716
717         /* Sanity checks. */
718         if (krp == NULL)
719                 return EINVAL;
720         if (krp->krp_callback == NULL) {
721                 free(krp, M_XDATA);             /* XXX allocated in cryptodev */
722                 return EINVAL;
723         }
724
725         for (hid = 0; hid < crypto_drivers_num; hid++) {
726                 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
727                     !crypto_devallowsoft)
728                         continue;
729                 if (crypto_drivers[hid].cc_kprocess == NULL)
730                         continue;
731                 if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
732                     CRYPTO_ALG_FLAG_SUPPORTED) == 0)
733                         continue;
734                 break;
735         }
736         if (hid < crypto_drivers_num) {
737                 krp->krp_hid = hid;
738                 error = crypto_drivers[hid].cc_kprocess(
739                                 crypto_drivers[hid].cc_karg, krp, hint);
740         } else
741                 error = ENODEV;
742
743         if (error) {
744                 krp->krp_status = error;
745                 crypto_kdone(krp);
746         }
747         return 0;
748 }
749
750 #ifdef CRYPTO_TIMING
751 static void
752 crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
753 {
754         struct timespec now, t;
755
756         nanouptime(&now);
757         t.tv_sec = now.tv_sec - tv->tv_sec;
758         t.tv_nsec = now.tv_nsec - tv->tv_nsec;
759         if (t.tv_nsec < 0) {
760                 t.tv_sec--;
761                 t.tv_nsec += 1000000000;
762         }
763         timespecadd(&ts->acc, &t);
764         if (timespeccmp(&t, &ts->min, <))
765                 ts->min = t;
766         if (timespeccmp(&t, &ts->max, >))
767                 ts->max = t;
768         ts->count++;
769
770         *tv = now;
771 }
772 #endif
773
774 /*
775  * Dispatch a crypto request to the appropriate crypto devices.
776  */
777 static int
778 crypto_invoke(struct cryptop *crp, int hint)
779 {
780         u_int32_t hid;
781         int (*process)(void*, struct cryptop *, int);
782
783 #ifdef CRYPTO_TIMING
784         if (crypto_timing)
785                 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
786 #endif
787         /* Sanity checks. */
788         if (crp == NULL)
789                 return EINVAL;
790         if (crp->crp_callback == NULL) {
791                 crypto_freereq(crp);
792                 return EINVAL;
793         }
794         if (crp->crp_desc == NULL) {
795                 crp->crp_etype = EINVAL;
796                 crypto_done(crp);
797                 return 0;
798         }
799
800         hid = SESID2HID(crp->crp_sid);
801         if (hid < crypto_drivers_num) {
802                 if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
803                         crypto_freesession(crp->crp_sid);
804                 process = crypto_drivers[hid].cc_process;
805         } else {
806                 process = NULL;
807         }
808
809         if (process == NULL) {
810                 struct cryptodesc *crd;
811                 u_int64_t nid;
812
813                 /*
814                  * Driver has unregistered; migrate the session and return
815                  * an error to the caller so they'll resubmit the op.
816                  */
817                 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
818                         crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
819
820                 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
821                         crp->crp_sid = nid;
822
823                 crp->crp_etype = EAGAIN;
824                 crypto_done(crp);
825                 return 0;
826         } else {
827                 /*
828                  * Invoke the driver to process the request.
829                  */
830                 return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
831         }
832 }
833
834 /*
835  * Release a set of crypto descriptors.
836  */
837 void
838 crypto_freereq(struct cryptop *crp)
839 {
840         struct cryptodesc *crd;
841
842         if (crp) {
843                 while ((crd = crp->crp_desc) != NULL) {
844                         crp->crp_desc = crd->crd_next;
845                         zfree(cryptodesc_zone, crd);
846                 }
847                 zfree(cryptop_zone, crp);
848         }
849 }
850
851 /*
852  * Acquire a set of crypto descriptors.  The descriptors are self contained
853  * so no special lock/interlock protection is necessary.
854  */
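/*
 * Illustrative only (hypothetical names): a caller wanting, say, a
 * cipher plus a MAC in one pass might do
 *
 *      struct cryptop *crp = crypto_getreq(2);
 *      if (crp != NULL) {
 *              crp->crp_sid = sid;
 *              crp->crp_callback = xxx_done;
 *              ... fill in the two crp_desc descriptors ...
 *              crypto_dispatch(crp);
 *      }
 *
 * and release the request with crypto_freereq(), typically from the
 * completion callback.
 */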
855 struct cryptop *
856 crypto_getreq(int num)
857 {
858         struct cryptodesc *crd;
859         struct cryptop *crp;
860
861         crp = zalloc(cryptop_zone);
862         if (crp != NULL) {
863                 bzero(crp, sizeof (*crp));
864                 while (num--) {
865                         crd = zalloc(cryptodesc_zone);
866                         if (crd == NULL) {
867                                 crypto_freereq(crp);
868                                 crp = NULL;
869                                 break;
870                         }
871                         bzero(crd, sizeof (*crd));
872                         crd->crd_next = crp->crp_desc;
873                         crp->crp_desc = crd;
874                 }
875         }
876         return crp;
877 }
878
879 /*
880  * Invoke the callback on behalf of the driver.
881  */
882 void
883 crypto_done(struct cryptop *crp)
884 {
885         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
886                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
887         crp->crp_flags |= CRYPTO_F_DONE;
888         if (crp->crp_etype != 0)
889                 cryptostats.cs_errs++;
890 #ifdef CRYPTO_TIMING
891         if (crypto_timing)
892                 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
893 #endif
894         if (crp->crp_flags & CRYPTO_F_CBIMM) {
895                 /*
896                  * Do the callback directly.  This is ok when the
897                  * callback routine does very little (e.g. the
898                  * /dev/crypto callback method just does a wakeup).
899                  */
900 #ifdef CRYPTO_TIMING
901                 if (crypto_timing) {
902                         /*
903                          * NB: We must copy the timestamp before
904                          * doing the callback as the cryptop is
905                          * likely to be reclaimed.
906                          */
907                         struct timespec t = crp->crp_tstamp;
908                         crypto_tstat(&cryptostats.cs_cb, &t);
909                         crp->crp_callback(crp);
910                         crypto_tstat(&cryptostats.cs_finis, &t);
911                 } else
912 #endif
913                         crp->crp_callback(crp);
914         } else {
915                 int wasempty;
916                 /*
917                  * Normal case; queue the callback for the thread.
918                  *
919                  * The return queue is manipulated by the swi thread
920                  * and, potentially, by crypto device drivers calling
921                  * back to mark operations completed.  Thus we need
922                  * to mask both while manipulating the return queue.
923                  */
924                 crit_enter();
925                 wasempty = TAILQ_EMPTY(&crp_ret_q);
926                 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
927                 if (wasempty)
928                         wakeup_one(&crp_ret_q);
929                 crit_exit();
930         }
931 }
932
933 /*
934  * Invoke the callback on behalf of the driver.
935  */
936 void
937 crypto_kdone(struct cryptkop *krp)
938 {
939         int wasempty;
940
941         if (krp->krp_status != 0)
942                 cryptostats.cs_kerrs++;
943         /*
944          * The return queue is manipulated by the swi thread
945          * and, potentially, by crypto device drivers calling
946          * back to mark operations completed.  Thus we need
947          * to mask both while manipulating the return queue.
948          */
949         crit_enter();
950         wasempty = TAILQ_EMPTY(&crp_ret_kq);
951         TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
952         if (wasempty)
953                 wakeup_one(&crp_ret_q);
954         crit_exit();
955 }
956
957 int
958 crypto_getfeat(int *featp)
959 {
960         int hid, kalg, feat = 0;
961
962         crit_enter();
963         if (!crypto_userasymcrypto)
964                 goto out;         
965
966         for (hid = 0; hid < crypto_drivers_num; hid++) {
967                 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
968                     !crypto_devallowsoft) {
969                         continue;
970                 }
971                 if (crypto_drivers[hid].cc_kprocess == NULL)
972                         continue;
973                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
974                         if ((crypto_drivers[hid].cc_kalg[kalg] &
975                             CRYPTO_ALG_FLAG_SUPPORTED) != 0)
976                                 feat |=  1 << kalg;
977         }
978 out:
979         crit_exit();
980         *featp = feat;
981         return (0);
982 }
983
984 /*
985  * Software interrupt thread to dispatch crypto requests.
986  */
987 static void
988 cryptointr(void *dummy)
989 {
990         struct cryptop *crp, *submit;
991         struct cryptkop *krp;
992         struct cryptocap *cap;
993         int result, hint;
994
995         cryptostats.cs_intrs++;
996         crit_enter();
997         do {
998                 /*
999                  * Find the first element in the queue that can be
1000                  * processed and look-ahead to see if multiple ops
1001                  * are ready for the same driver.
1002                  */
1003                 submit = NULL;
1004                 hint = 0;
1005                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1006                         u_int32_t hid = SESID2HID(crp->crp_sid);
1007                         cap = crypto_checkdriver(hid);
1008                         if (cap == NULL || cap->cc_process == NULL) {
1009                                 /* Op needs to be migrated, process it. */
1010                                 if (submit == NULL)
1011                                         submit = crp;
1012                                 break;
1013                         }
1014                         if (!cap->cc_qblocked) {
1015                                 if (submit != NULL) {
1016                                         /*
1017                                          * We stop on finding another op,
1018                                          * regardless of whether it's for the same
1019                                          * driver or not.  We could keep
1020                                          * searching the queue but it might be
1021                                          * better to just use a per-driver
1022                                          * queue instead.
1023                                          */
1024                                         if (SESID2HID(submit->crp_sid) == hid)
1025                                                 hint = CRYPTO_HINT_MORE;
1026                                         break;
1027                                 } else {
1028                                         submit = crp;
1029                                         if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1030                                                 break;
1031                                         /* keep scanning in case more are queued */
1032                                 }
1033                         }
1034                 }
1035                 if (submit != NULL) {
1036                         TAILQ_REMOVE(&crp_q, submit, crp_next);
1037                         result = crypto_invoke(submit, hint);
1038                         if (result == ERESTART) {
1039                                 /*
1040                                  * The driver ran out of resources, mark the
1041                                  * driver ``blocked'' for cryptop's and put
1042                                  * the request back in the queue.  It would
1043                                  * be best to put the request back where we got
1044                                  * it but that's hard so for now we put it
1045                                  * at the front.  This should be ok; putting
1046                                  * it at the end does not work.
1047                                  */
1048                                 /* XXX validate sid again? */
1049                                 crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1050                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1051                                 cryptostats.cs_blocks++;
1052                         }
1053                 }
1054
1055                 /* As above, but for key ops */
1056                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1057                         cap = crypto_checkdriver(krp->krp_hid);
1058                         if (cap == NULL || cap->cc_kprocess == NULL) {
1059                                 /* Op needs to be migrated, process it. */
1060                                 break;
1061                         }
1062                         if (!cap->cc_kqblocked)
1063                                 break;
1064                 }
1065                 if (krp != NULL) {
1066                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
1067                         result = crypto_kinvoke(krp, 0);
1068                         if (result == ERESTART) {
1069                                 /*
1070                                  * The driver ran out of resources, mark the
1071                                  * driver ``blocked'' for cryptkop's and put
1072                                  * the request back in the queue.  It would
1073                                  * be best to put the request back where we got
1074                                  * it but that's hard so for now we put it
1075                                  * at the front.  This should be ok; putting
1076                                  * it at the end does not work.
1077                                  */
1078                                 /* XXX validate sid again? */
1079                                 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1080                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1081                                 cryptostats.cs_kblocks++;
1082                         }
1083                 }
1084         } while (submit != NULL || krp != NULL);
1085         crit_exit();
1086 }
1087
1088 /*
1089  * Kernel thread to do callbacks.
1090  */
1091 static void
1092 cryptoret(void)
1093 {
1094         struct cryptop *crp;
1095         struct cryptkop *krp;
1096
1097         crit_enter();
1098         for (;;) {
1099                 crp = TAILQ_FIRST(&crp_ret_q);
1100                 if (crp != NULL)
1101                         TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
1102                 krp = TAILQ_FIRST(&crp_ret_kq);
1103                 if (krp != NULL)
1104                         TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
1105
1106                 if (crp != NULL || krp != NULL) {
1107                         crit_exit();            /* lower ipl for callbacks */
1108                         if (crp != NULL) {
1109 #ifdef CRYPTO_TIMING
1110                                 if (crypto_timing) {
1111                                         /*
1112                                          * NB: We must copy the timestamp before
1113                                          * doing the callback as the cryptop is
1114                                          * likely to be reclaimed.
1115                                          */
1116                                         struct timespec t = crp->crp_tstamp;
1117                                         crypto_tstat(&cryptostats.cs_cb, &t);
1118                                         crp->crp_callback(crp);
1119                                         crypto_tstat(&cryptostats.cs_finis, &t);
1120                                 } else
1121 #endif
1122                                         crp->crp_callback(crp);
1123                         }
1124                         if (krp != NULL)
1125                                 krp->krp_callback(krp);
1126                         crit_enter();
1127                 } else {
1128                         (void) tsleep(&crp_ret_q, 0, "crypto_wait", 0);
1129                         cryptostats.cs_rets++;
1130                 }
1131         }
1132         /* CODE NOT REACHED (crit_exit() would go here otherwise) */ 
1133 }