kernel: Remove some unneeded NULL checks after kmalloc() with M_WAITOK.
[dragonfly.git] / sys / opencrypto / crypto.c
1/* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.28 2007/10/20 23:23:22 julian Exp $ */
2/*-
3 * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
 26/*
27 * Cryptographic Subsystem.
28 *
 29 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
30 * that has the copyright shown below. Very little of the original
31 * code remains.
32 */
33
34/*-
35 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
36 *
37 * This code was written by Angelos D. Keromytis in Athens, Greece, in
38 * February 2000. Network Security Technologies Inc. (NSTI) kindly
39 * supported the development of this code.
40 *
41 * Copyright (c) 2000, 2001 Angelos D. Keromytis
42 *
43 * Permission to use, copy, and modify this software with or without fee
44 * is hereby granted, provided that this entire notice is included in
45 * all source code copies of any software which is or includes a copy or
46 * modification of this software.
47 *
48 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
49 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
50 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
51 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
52 * PURPOSE.
53 */
54
55#define CRYPTO_TIMING /* enable timing support */
56
57#include "opt_ddb.h"
58
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/eventhandler.h>
62#include <sys/kernel.h>
63#include <sys/kthread.h>
64#include <sys/lock.h>
65#include <sys/module.h>
66#include <sys/malloc.h>
67#include <sys/proc.h>
68#include <sys/sysctl.h>
 69#include <sys/objcache.h>
 70
 71#include <sys/thread2.h>
 72#include <sys/mplock2.h>
 73
74#include <ddb/ddb.h>
75
76#include <opencrypto/cryptodev.h>
77#include <opencrypto/xform.h> /* XXX for M_XDATA */
78
79#include <sys/kobj.h>
80#include <sys/bus.h>
81#include "cryptodev_if.h"
82
83/*
84 * Crypto drivers register themselves by allocating a slot in the
85 * crypto_drivers table with crypto_get_driverid() and then registering
86 * each algorithm they support with crypto_register() and crypto_kregister().
87 */
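 88/*
 89 * Illustrative sketch only (the softc fields and algorithm list below
 90 * are assumptions, not part of this file): a hypothetical hardware
 91 * driver would typically do the following from its attach method
 92 *
 93 *	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
 94 *	if (sc->sc_cid < 0)
 95 *		return (ENXIO);
 96 *	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
 97 *	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
 98 *
 99 * and undo it with crypto_unregister_all(sc->sc_cid) on detach.
 100 */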
88static struct lock crypto_drivers_lock; /* lock on driver table */
89#define CRYPTO_DRIVER_LOCK() lockmgr(&crypto_drivers_lock, LK_EXCLUSIVE)
90#define CRYPTO_DRIVER_UNLOCK() lockmgr(&crypto_drivers_lock, LK_RELEASE)
91#define CRYPTO_DRIVER_ASSERT() KKASSERT(lockstatus(&crypto_drivers_lock, curthread) != 0)
92
93/*
94 * Crypto device/driver capabilities structure.
95 *
96 * Synchronization:
97 * (d) - protected by CRYPTO_DRIVER_LOCK()
98 * (q) - protected by CRYPTO_Q_LOCK()
 99 * Untagged fields are read-only.
100 */
101struct cryptocap {
102 device_t cc_dev; /* (d) device/driver */
103 u_int32_t cc_sessions; /* (d) # of sessions */
 104 u_int32_t cc_koperations; /* (d) # of asym operations */
105 /*
106 * Largest possible operator length (in bits) for each type of
107 * encryption algorithm. XXX not used
108 */
109 u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
110 u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
111 u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
112
113 int cc_flags; /* (d) flags */
114#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
115 int cc_qblocked; /* (q) symmetric q blocked */
116 int cc_kqblocked; /* (q) asymmetric q blocked */
117};
118static struct cryptocap *crypto_drivers = NULL;
119static int crypto_drivers_num = 0;
120
121typedef struct crypto_tdinfo {
122 TAILQ_HEAD(,cryptop) crp_q; /* request queues */
123 TAILQ_HEAD(,cryptkop) crp_kq;
124 thread_t crp_td;
125 struct lock crp_lock;
126 int crp_sleep;
127} *crypto_tdinfo_t;
128
129/*
130 * There are two queues for crypto requests; one for symmetric (e.g.
131 * cipher) operations and one for asymmetric (e.g. MOD) operations.
132 * See below for how synchronization is handled.
133 * A single lock is used to lock access to both queues. We could
134 * have one per-queue but having one simplifies handling of block/unblock
135 * operations.
 136 */
137static struct crypto_tdinfo tdinfo_array[MAXCPU];
138
139#define CRYPTO_Q_LOCK(tdinfo) lockmgr(&tdinfo->crp_lock, LK_EXCLUSIVE)
140#define CRYPTO_Q_UNLOCK(tdinfo) lockmgr(&tdinfo->crp_lock, LK_RELEASE)
141
142/*
143 * There are two queues for processing completed crypto requests; one
144 * for the symmetric and one for the asymmetric ops. We only need one
145 * but have two to avoid type futzing (cryptop vs. cryptkop). A single
146 * lock is used to lock access to both queues. Note that this lock
147 * must be separate from the lock on request queues to insure driver
148 * callbacks don't generate lock order reversals.
149 */
150static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */
151static TAILQ_HEAD(,cryptkop) crp_ret_kq;
152static struct lock crypto_ret_q_lock;
153#define CRYPTO_RETQ_LOCK() lockmgr(&crypto_ret_q_lock, LK_EXCLUSIVE)
154#define CRYPTO_RETQ_UNLOCK() lockmgr(&crypto_ret_q_lock, LK_RELEASE)
155#define CRYPTO_RETQ_EMPTY() (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
156
157/*
 158 * Crypto op and descriptor data structures are allocated
 159 * from separate object caches.
 160 */
161static struct objcache *cryptop_oc, *cryptodesc_oc;
162
163static MALLOC_DEFINE(M_CRYPTO_OP, "crypto op", "crypto op");
164static MALLOC_DEFINE(M_CRYPTO_DESC, "crypto desc", "crypto desc");
 165
166int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
167SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
168 &crypto_userasymcrypto, 0,
169 "Enable/disable user-mode access to asymmetric crypto support");
170int crypto_devallowsoft = 0; /* only use hardware crypto for asym */
171SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
172 &crypto_devallowsoft, 0,
173 "Enable/disable use of software asym crypto support");
174int crypto_altdispatch = 0; /* dispatch to alternative cpu */
175SYSCTL_INT(_kern, OID_AUTO, cryptoaltdispatch, CTLFLAG_RW,
176 &crypto_altdispatch, 0,
177 "Do not queue crypto op on current cpu");
178
179MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
180
181static void crypto_proc(void *dummy);
182static void crypto_ret_proc(void *dummy);
 183static struct thread *cryptoretthread;
 184static void crypto_destroy(void);
185static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
186static int crypto_kinvoke(struct cryptkop *krp, int flags);
187
188static struct cryptostats cryptostats;
189SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
190 cryptostats, "Crypto system statistics");
191
192#ifdef CRYPTO_TIMING
193static int crypto_timing = 0;
194SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
195 &crypto_timing, 0, "Enable/disable crypto timing support");
196#endif
197
198static int
199crypto_init(void)
200{
a0419b33 201 crypto_tdinfo_t tdinfo;
984263bc 202 int error;
a0419b33 203 int n;
984263bc 204
42ee1e6b
SW
205 lockinit(&crypto_drivers_lock, "crypto driver table", 0, LK_CANRECURSE);
206
42ee1e6b
SW
207 TAILQ_INIT(&crp_ret_q);
208 TAILQ_INIT(&crp_ret_kq);
209 lockinit(&crypto_ret_q_lock, "crypto return queues", 0, LK_CANRECURSE);
210
0bb408f6
SG
211 cryptop_oc = objcache_create_simple(M_CRYPTO_OP, sizeof(struct cryptop));
212 cryptodesc_oc = objcache_create_simple(M_CRYPTO_DESC,
213 sizeof(struct cryptodesc));
214 if (cryptodesc_oc == NULL || cryptop_oc == NULL) {
215 kprintf("crypto_init: cannot setup crypto caches\n");
42ee1e6b
SW
216 error = ENOMEM;
217 goto bad;
984263bc
MD
218 }
219
220 crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
a0419b33
MD
221 crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
222 M_CRYPTO_DATA, M_WAITOK | M_ZERO);
984263bc 223
a0419b33
MD
224 for (n = 0; n < ncpus; ++n) {
225 tdinfo = &tdinfo_array[n];
226 TAILQ_INIT(&tdinfo->crp_q);
227 TAILQ_INIT(&tdinfo->crp_kq);
228 lockinit(&tdinfo->crp_lock, "crypto op queues",
229 0, LK_CANRECURSE);
230 kthread_create_cpu(crypto_proc, tdinfo, &tdinfo->crp_td,
231 n, "crypto %d", n);
984263bc 232 }
a0419b33
MD
233 kthread_create(crypto_ret_proc, NULL,
234 &cryptoretthread, "crypto returns");
42ee1e6b
SW
235 return 0;
236bad:
237 crypto_destroy();
984263bc
MD
238 return error;
239}
240
42ee1e6b
SW
241/*
242 * Signal a crypto thread to terminate. We use the driver
243 * table lock to synchronize the sleep/wakeups so that we
244 * are sure the threads have terminated before we release
245 * the data structures they use. See crypto_finis below
246 * for the other half of this song-and-dance.
247 */
248static void
249crypto_terminate(struct thread **tp, void *q)
250{
251 struct thread *t;
252
253 KKASSERT(lockstatus(&crypto_drivers_lock, curthread) != 0);
254 t = *tp;
255 *tp = NULL;
256 if (t) {
54734da1 257 kprintf("crypto_terminate: start\n");
42ee1e6b 258 wakeup_one(q);
54734da1 259 crit_enter();
42ee1e6b
SW
260 tsleep_interlock(t, 0);
261 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
42ee1e6b 262 crit_exit();
54734da1 263 tsleep(t, PINTERLOCKED, "crypto_destroy", 0);
42ee1e6b 264 CRYPTO_DRIVER_LOCK();
54734da1 265 kprintf("crypto_terminate: end\n");
42ee1e6b
SW
266 }
267}
268
984263bc
MD
269static void
270crypto_destroy(void)
271{
a0419b33
MD
272 crypto_tdinfo_t tdinfo;
273 int n;
274
42ee1e6b
SW
275 /*
276 * Terminate any crypto threads.
277 */
278 CRYPTO_DRIVER_LOCK();
a0419b33
MD
279 for (n = 0; n < ncpus; ++n) {
280 tdinfo = &tdinfo_array[n];
281 crypto_terminate(&tdinfo->crp_td, &tdinfo->crp_q);
282 lockuninit(&tdinfo->crp_lock);
283 }
54734da1 284 crypto_terminate(&cryptoretthread, &crp_ret_q);
42ee1e6b
SW
285 CRYPTO_DRIVER_UNLOCK();
286
287 /* XXX flush queues??? */
288
289 /*
290 * Reclaim dynamically allocated resources.
291 */
984263bc 292 if (crypto_drivers != NULL)
efda3bd0 293 kfree(crypto_drivers, M_CRYPTO_DATA);
42ee1e6b 294
0bb408f6
SG
295 if (cryptodesc_oc != NULL)
296 objcache_destroy(cryptodesc_oc);
297 if (cryptop_oc != NULL)
298 objcache_destroy(cryptop_oc);
54734da1
AH
299 lockuninit(&crypto_ret_q_lock);
300 lockuninit(&crypto_drivers_lock);
42ee1e6b
SW
301}
302
303static struct cryptocap *
304crypto_checkdriver(u_int32_t hid)
305{
306 if (crypto_drivers == NULL)
307 return NULL;
308 return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
984263bc
MD
309}
310
311/*
42ee1e6b
SW
312 * Compare a driver's list of supported algorithms against another
313 * list; return non-zero if all algorithms are supported.
984263bc
MD
314 */
315static int
42ee1e6b 316driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
984263bc 317{
42ee1e6b 318 const struct cryptoini *cr;
984263bc 319
42ee1e6b
SW
320 /* See if all the algorithms are supported. */
321 for (cr = cri; cr; cr = cr->cri_next)
322 if (cap->cc_alg[cr->cri_alg] == 0)
323 return 0;
324 return 1;
984263bc
MD
325}
326
984263bc 327/*
42ee1e6b
SW
328 * Select a driver for a new session that supports the specified
329 * algorithms and, optionally, is constrained according to the flags.
330 * The algorithm we use here is pretty stupid; just use the
331 * first driver that supports all the algorithms we need. If there
332 * are multiple drivers we choose the driver with the fewest active
333 * sessions. We prefer hardware-backed drivers to software ones.
334 *
335 * XXX We need more smarts here (in real life too, but that's
336 * XXX another story altogether).
984263bc 337 */
42ee1e6b
SW
338static struct cryptocap *
339crypto_select_driver(const struct cryptoini *cri, int flags)
984263bc 340{
42ee1e6b
SW
341 struct cryptocap *cap, *best;
342 int match, hid;
984263bc 343
42ee1e6b 344 CRYPTO_DRIVER_ASSERT();
984263bc
MD
345
346 /*
42ee1e6b 347 * Look first for hardware crypto devices if permitted.
984263bc 348 */
42ee1e6b
SW
349 if (flags & CRYPTOCAP_F_HARDWARE)
350 match = CRYPTOCAP_F_HARDWARE;
351 else
352 match = CRYPTOCAP_F_SOFTWARE;
353 best = NULL;
354again:
984263bc 355 for (hid = 0; hid < crypto_drivers_num; hid++) {
42ee1e6b 356 cap = &crypto_drivers[hid];
984263bc 357 /*
42ee1e6b
SW
358 * If it's not initialized, is in the process of
359 * going away, or is not appropriate (hardware
360 * or software based on match), then skip.
984263bc 361 */
42ee1e6b
SW
362 if (cap->cc_dev == NULL ||
363 (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
364 (cap->cc_flags & match) == 0)
984263bc
MD
365 continue;
366
42ee1e6b
SW
367 /* verify all the algorithms are supported. */
368 if (driver_suitable(cap, cri)) {
369 if (best == NULL ||
370 cap->cc_sessions < best->cc_sessions)
371 best = cap;
372 }
373 }
374 if (best != NULL)
375 return best;
376 if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
377 /* sort of an Algol 68-style for loop */
378 match = CRYPTOCAP_F_SOFTWARE;
379 goto again;
380 }
381 return best;
382}
984263bc 383
42ee1e6b
SW
384/*
385 * Create a new session. The crid argument specifies a crypto
386 * driver to use or constraints on a driver to select (hardware
387 * only, software only, either). Whatever driver is selected
388 * must be capable of the requested crypto algorithms.
389 */
390int
391crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
392{
393 struct cryptocap *cap;
394 u_int32_t hid, lid;
395 int err;
984263bc 396
42ee1e6b
SW
397 CRYPTO_DRIVER_LOCK();
398 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
399 /*
400 * Use specified driver; verify it is capable.
401 */
402 cap = crypto_checkdriver(crid);
403 if (cap != NULL && !driver_suitable(cap, cri))
404 cap = NULL;
405 } else {
406 /*
407 * No requested driver; select based on crid flags.
408 */
409 cap = crypto_select_driver(cri, crid);
410 /*
411 * if NULL then can't do everything in one session.
412 * XXX Fix this. We need to inject a "virtual" session
413 * XXX layer right about here.
414 */
984263bc 415 }
42ee1e6b
SW
416 if (cap != NULL) {
417 /* Call the driver initialization routine. */
418 hid = cap - crypto_drivers;
419 lid = hid; /* Pass the driver ID. */
420 err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
421 if (err == 0) {
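			/*
			 * Compose the 64-bit SID: the driver's capability
			 * flags and table index (hid) go in the upper 32
			 * bits, the driver-local session id (lid) in the
			 * lower 32 bits.
			 */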
422 (*sid) = (cap->cc_flags & 0xff000000)
423 | (hid & 0x00ffffff);
424 (*sid) <<= 32;
425 (*sid) |= (lid & 0xffffffff);
426 cap->cc_sessions++;
427 }
428 } else
429 err = EINVAL;
430 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
431 return err;
432}
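/*
 * Illustrative sketch only (key material, lengths and algorithm choice
 * below are assumptions): a consumer building an encrypt+authenticate
 * session chains two cryptoini records and lets the framework pick a
 * driver
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *
 *	bzero(&crie, sizeof(crie));
 *	bzero(&cria, sizeof(cria));
 *	crie.cri_alg = CRYPTO_AES_CBC;
 *	crie.cri_klen = 128;			(in bits)
 *	crie.cri_key = enc_key;
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = 160;
 *	cria.cri_key = auth_key;
 *	crie.cri_next = &cria;
 *	error = crypto_newsession(&sid, &crie,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *
 * and later releases it with crypto_freesession(sid).
 */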
433
42ee1e6b
SW
434static void
435crypto_remove(struct cryptocap *cap)
436{
437
438 KKASSERT(lockstatus(&crypto_drivers_lock, curthread) != 0);
439 if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
440 bzero(cap, sizeof(*cap));
441}
442
984263bc
MD
443/*
444 * Delete an existing session (or a reserved session on an unregistered
445 * driver).
446 */
447int
448crypto_freesession(u_int64_t sid)
449{
42ee1e6b 450 struct cryptocap *cap;
984263bc 451 u_int32_t hid;
a7f45447 452 int err;
984263bc 453
42ee1e6b 454 CRYPTO_DRIVER_LOCK();
984263bc
MD
455
456 if (crypto_drivers == NULL) {
457 err = EINVAL;
458 goto done;
459 }
460
461 /* Determine two IDs. */
42ee1e6b 462 hid = CRYPTO_SESID2HID(sid);
984263bc
MD
463
464 if (hid >= crypto_drivers_num) {
465 err = ENOENT;
466 goto done;
467 }
42ee1e6b 468 cap = &crypto_drivers[hid];
984263bc 469
42ee1e6b
SW
470 if (cap->cc_sessions)
471 cap->cc_sessions--;
984263bc
MD
472
473 /* Call the driver cleanup routine, if available. */
42ee1e6b 474 err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
984263bc 475
42ee1e6b
SW
476 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
477 crypto_remove(cap);
984263bc
MD
478
479done:
42ee1e6b 480 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
481 return err;
482}
483
484/*
485 * Return an unused driver id. Used by drivers prior to registering
486 * support for the algorithms they handle.
487 */
488int32_t
42ee1e6b 489crypto_get_driverid(device_t dev, int flags)
984263bc
MD
490{
491 struct cryptocap *newdrv;
a7f45447 492 int i;
984263bc 493
42ee1e6b
SW
494 if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
495 kprintf("%s: no flags specified when registering driver\n",
496 device_get_nameunit(dev));
497 return -1;
498 }
499
500 CRYPTO_DRIVER_LOCK();
501
502 for (i = 0; i < crypto_drivers_num; i++) {
503 if (crypto_drivers[i].cc_dev == NULL &&
504 (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
984263bc 505 break;
42ee1e6b
SW
506 }
507 }
984263bc
MD
508
509 /* Out of entries, allocate some more. */
510 if (i == crypto_drivers_num) {
511 /* Be careful about wrap-around. */
512 if (2 * crypto_drivers_num <= crypto_drivers_num) {
42ee1e6b 513 CRYPTO_DRIVER_UNLOCK();
26be20a0 514 kprintf("crypto: driver count wraparound!\n");
984263bc
MD
515 return -1;
516 }
517
77652cad 518 newdrv = kmalloc(2 * crypto_drivers_num *
a0419b33
MD
519 sizeof(struct cryptocap),
520 M_CRYPTO_DATA, M_WAITOK|M_ZERO);
984263bc
MD
521
522 bcopy(crypto_drivers, newdrv,
523 crypto_drivers_num * sizeof(struct cryptocap));
524
525 crypto_drivers_num *= 2;
526
efda3bd0 527 kfree(crypto_drivers, M_CRYPTO_DATA);
984263bc
MD
528 crypto_drivers = newdrv;
529 }
530
531 /* NB: state is zero'd on free */
532 crypto_drivers[i].cc_sessions = 1; /* Mark */
42ee1e6b 533 crypto_drivers[i].cc_dev = dev;
984263bc
MD
534 crypto_drivers[i].cc_flags = flags;
535 if (bootverbose)
42ee1e6b
SW
536 kprintf("crypto: assign %s driver id %u, flags %u\n",
537 device_get_nameunit(dev), i, flags);
984263bc 538
42ee1e6b 539 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
540
541 return i;
542}
543
42ee1e6b
SW
544/*
545 * Lookup a driver by name. We match against the full device
546 * name and unit, and against just the name. The latter gives
 547 * us simple wildcarding by device name. On success return the
548 * driver/hardware identifier; otherwise return -1.
549 */
550int
551crypto_find_driver(const char *match)
984263bc 552{
42ee1e6b
SW
553 int i, len = strlen(match);
554
555 CRYPTO_DRIVER_LOCK();
556 for (i = 0; i < crypto_drivers_num; i++) {
557 device_t dev = crypto_drivers[i].cc_dev;
558 if (dev == NULL ||
559 (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
560 continue;
561 if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
562 strncmp(match, device_get_name(dev), len) == 0)
563 break;
564 }
565 CRYPTO_DRIVER_UNLOCK();
566 return i < crypto_drivers_num ? i : -1;
567}
568
569/*
570 * Return the device_t for the specified driver or NULL
571 * if the driver identifier is invalid.
572 */
573device_t
574crypto_find_device_byhid(int hid)
575{
576 struct cryptocap *cap = crypto_checkdriver(hid);
577 return cap != NULL ? cap->cc_dev : NULL;
578}
579
580/*
581 * Return the device/driver capabilities.
582 */
583int
584crypto_getcaps(int hid)
585{
586 struct cryptocap *cap = crypto_checkdriver(hid);
587 return cap != NULL ? cap->cc_flags : 0;
984263bc
MD
588}
589
590/*
591 * Register support for a key-related algorithm. This routine
 592 * is called once for each algorithm supported by a driver.
593 */
594int
42ee1e6b 595crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
984263bc 596{
984263bc
MD
597 struct cryptocap *cap;
598 int err;
599
42ee1e6b 600 CRYPTO_DRIVER_LOCK();
984263bc
MD
601
602 cap = crypto_checkdriver(driverid);
603 if (cap != NULL &&
604 (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
605 /*
606 * XXX Do some performance testing to determine placing.
607 * XXX We probably need an auxiliary data structure that
608 * XXX describes relative performances.
609 */
610
611 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
612 if (bootverbose)
42ee1e6b
SW
613 kprintf("crypto: %s registers key alg %u flags %u\n"
614 , device_get_nameunit(cap->cc_dev)
984263bc
MD
615 , kalg
616 , flags
617 );
618
984263bc
MD
619 err = 0;
620 } else
621 err = EINVAL;
622
42ee1e6b 623 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
624 return err;
625}
626
627/*
628 * Register support for a non-key-related algorithm. This routine
629 * is called once for each such algorithm supported by a driver.
630 */
631int
632crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
a0419b33 633 u_int32_t flags)
984263bc
MD
634{
635 struct cryptocap *cap;
a7f45447 636 int err;
984263bc 637
42ee1e6b 638 CRYPTO_DRIVER_LOCK();
984263bc
MD
639
640 cap = crypto_checkdriver(driverid);
641 /* NB: algorithms are in the range [1..max] */
642 if (cap != NULL &&
643 (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
644 /*
645 * XXX Do some performance testing to determine placing.
646 * XXX We probably need an auxiliary data structure that
647 * XXX describes relative performances.
648 */
649
650 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
651 cap->cc_max_op_len[alg] = maxoplen;
652 if (bootverbose)
42ee1e6b
SW
653 kprintf("crypto: %s registers alg %u flags %u maxoplen %u\n"
654 , device_get_nameunit(cap->cc_dev)
984263bc
MD
655 , alg
656 , flags
657 , maxoplen
658 );
42ee1e6b 659 cap->cc_sessions = 0; /* Unmark */
984263bc
MD
660 err = 0;
661 } else
662 err = EINVAL;
663
42ee1e6b 664 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
665 return err;
666}
667
42ee1e6b
SW
668static void
669driver_finis(struct cryptocap *cap)
670{
671 u_int32_t ses, kops;
672
673 CRYPTO_DRIVER_ASSERT();
674
675 ses = cap->cc_sessions;
676 kops = cap->cc_koperations;
677 bzero(cap, sizeof(*cap));
678 if (ses != 0 || kops != 0) {
679 /*
680 * If there are pending sessions,
681 * just mark as invalid.
682 */
683 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
684 cap->cc_sessions = ses;
685 cap->cc_koperations = kops;
686 }
687}
688
984263bc
MD
689/*
690 * Unregister a crypto driver. If there are pending sessions using it,
691 * leave enough information around so that subsequent calls using those
692 * sessions will correctly detect the driver has been unregistered and
693 * reroute requests.
694 */
695int
696crypto_unregister(u_int32_t driverid, int alg)
697{
984263bc 698 struct cryptocap *cap;
42ee1e6b 699 int i, err;
984263bc 700
42ee1e6b 701 CRYPTO_DRIVER_LOCK();
984263bc
MD
702 cap = crypto_checkdriver(driverid);
703 if (cap != NULL &&
704 (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
705 cap->cc_alg[alg] != 0) {
706 cap->cc_alg[alg] = 0;
707 cap->cc_max_op_len[alg] = 0;
708
709 /* Was this the last algorithm ? */
a0419b33 710 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) {
984263bc
MD
711 if (cap->cc_alg[i] != 0)
712 break;
a0419b33 713 }
984263bc 714
42ee1e6b
SW
715 if (i == CRYPTO_ALGORITHM_MAX + 1)
716 driver_finis(cap);
984263bc 717 err = 0;
a0419b33 718 } else {
984263bc 719 err = EINVAL;
a0419b33 720 }
42ee1e6b 721 CRYPTO_DRIVER_UNLOCK();
984263bc 722
984263bc
MD
723 return err;
724}
725
726/*
727 * Unregister all algorithms associated with a crypto driver.
728 * If there are pending sessions using it, leave enough information
729 * around so that subsequent calls using those sessions will
730 * correctly detect the driver has been unregistered and reroute
731 * requests.
732 */
733int
734crypto_unregister_all(u_int32_t driverid)
735{
984263bc 736 struct cryptocap *cap;
42ee1e6b 737 int err;
984263bc 738
42ee1e6b 739 CRYPTO_DRIVER_LOCK();
984263bc
MD
740 cap = crypto_checkdriver(driverid);
741 if (cap != NULL) {
42ee1e6b 742 driver_finis(cap);
984263bc 743 err = 0;
a0419b33 744 } else {
984263bc 745 err = EINVAL;
a0419b33 746 }
42ee1e6b 747 CRYPTO_DRIVER_UNLOCK();
984263bc 748
984263bc
MD
749 return err;
750}
751
752/*
753 * Clear blockage on a driver. The what parameter indicates whether
754 * the driver is now ready for cryptop's and/or cryptokop's.
755 */
756int
757crypto_unblock(u_int32_t driverid, int what)
758{
a0419b33 759 crypto_tdinfo_t tdinfo;
984263bc 760 struct cryptocap *cap;
42ee1e6b 761 int err;
a0419b33 762 int n;
984263bc 763
a0419b33 764 CRYPTO_DRIVER_LOCK();
984263bc
MD
765 cap = crypto_checkdriver(driverid);
766 if (cap != NULL) {
42ee1e6b 767 if (what & CRYPTO_SYMQ)
984263bc 768 cap->cc_qblocked = 0;
42ee1e6b 769 if (what & CRYPTO_ASYMQ)
984263bc 770 cap->cc_kqblocked = 0;
a0419b33
MD
771 for (n = 0; n < ncpus; ++n) {
772 tdinfo = &tdinfo_array[n];
773 CRYPTO_Q_LOCK(tdinfo);
 774 if (tdinfo->crp_sleep)
775 wakeup_one(&tdinfo->crp_q);
776 CRYPTO_Q_UNLOCK(tdinfo);
777 }
984263bc 778 err = 0;
a0419b33 779 } else {
984263bc 780 err = EINVAL;
a0419b33
MD
781 }
782 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
783
784 return err;
785}
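/*
 * Illustrative sketch only (the driver state names are made up): a
 * driver whose process method returned ERESTART calls crypto_unblock()
 * once resources free up again, typically from its completion interrupt
 *
 *	if (sc->sc_needwakeup) {
 *		sc->sc_needwakeup = 0;
 *		crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *	}
 */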
786
a0419b33
MD
787static volatile int dispatch_rover;
788
984263bc 789/*
42ee1e6b 790 * Add a crypto request to a queue, to be processed by the kernel thread.
984263bc
MD
791 */
792int
793crypto_dispatch(struct cryptop *crp)
794{
a0419b33 795 crypto_tdinfo_t tdinfo;
42ee1e6b
SW
796 struct cryptocap *cap;
797 u_int32_t hid;
a7f45447 798 int result;
a0419b33 799 int n;
984263bc
MD
800
801 cryptostats.cs_ops++;
802
803#ifdef CRYPTO_TIMING
804 if (crypto_timing)
805 nanouptime(&crp->crp_tstamp);
806#endif
42ee1e6b
SW
807
808 hid = CRYPTO_SESID2HID(crp->crp_sid);
809
345ee1fb
MD
810 /*
811 * Dispatch the crypto op directly to the driver if the caller
812 * marked the request to be processed immediately or this is
 813 * a synchronous callback chain occurring from within a crypto
814 * processing thread.
815 *
 816 * Fall through to queueing if the driver is blocked.
817 */
818 if ((crp->crp_flags & CRYPTO_F_BATCH) == 0 ||
819 (curthread->td_flags & TDF_CRYPTO)) {
984263bc 820 cap = crypto_checkdriver(hid);
42ee1e6b
SW
 821 /* The driver cannot disappear while there is an active session. */
822 KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
823 if (!cap->cc_qblocked) {
824 result = crypto_invoke(cap, crp, 0);
825 if (result != ERESTART)
826 return (result);
984263bc 827 /*
42ee1e6b
SW
828 * The driver ran out of resources, put the request on
829 * the queue.
984263bc 830 */
984263bc 831 }
984263bc 832 }
a0419b33
MD
833
834 /*
345ee1fb
MD
835 * Dispatch to a cpu for action if possible. Dispatch to a different
836 * cpu than the current cpu.
a0419b33
MD
837 */
838 if (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SMP) {
839 n = atomic_fetchadd_int(&dispatch_rover, 1) & 255;
345ee1fb
MD
840 if (crypto_altdispatch && mycpu->gd_cpuid == n)
841 ++n;
a0419b33
MD
842 n = n % ncpus;
843 } else {
844 n = 0;
845 }
846 tdinfo = &tdinfo_array[n];
847
848 CRYPTO_Q_LOCK(tdinfo);
849 TAILQ_INSERT_TAIL(&tdinfo->crp_q, crp, crp_next);
850 if (tdinfo->crp_sleep)
851 wakeup_one(&tdinfo->crp_q);
852 CRYPTO_Q_UNLOCK(tdinfo);
42ee1e6b 853 return 0;
984263bc
MD
854}
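/*
 * Illustrative sketch only (buffer, session id and callback names are
 * assumptions): a typical caller allocates a request with one
 * descriptor, fills it in and hands it to crypto_dispatch()
 *
 *	crp = crypto_getreq(1);
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_flags = CRYPTO_F_IMBUF;
 *	crp->crp_buf = (caddr_t)m;
 *	crp->crp_callback = my_callback;
 *	crd = crp->crp_desc;
 *	crd->crd_alg = CRYPTO_AES_CBC;
 *	crd->crd_skip = 0;
 *	crd->crd_len = len;
 *	crd->crd_flags = CRD_F_ENCRYPT;
 *	error = crypto_dispatch(crp);
 *
 * The callback normally frees the request with crypto_freereq() once
 * crp_etype has been checked.
 */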
855
856/*
 857 * Add an asymmetric crypto request to a queue,
858 * to be processed by the kernel thread.
859 */
860int
861crypto_kdispatch(struct cryptkop *krp)
862{
a0419b33 863 crypto_tdinfo_t tdinfo;
42ee1e6b 864 int error;
a0419b33 865 int n;
984263bc
MD
866
867 cryptostats.cs_kops++;
868
a0419b33
MD
869#if 0
870 /* not sure how to test F_SMP here */
871 n = atomic_fetchadd_int(&dispatch_rover, 1) & 255;
872 n = n % ncpus;
873#endif
874 n = 0;
875 tdinfo = &tdinfo_array[n];
876
42ee1e6b 877 error = crypto_kinvoke(krp, krp->krp_crid);
a0419b33 878
42ee1e6b 879 if (error == ERESTART) {
a0419b33
MD
880 CRYPTO_Q_LOCK(tdinfo);
881 TAILQ_INSERT_TAIL(&tdinfo->crp_kq, krp, krp_next);
882 if (tdinfo->crp_sleep)
883 wakeup_one(&tdinfo->crp_q);
884 CRYPTO_Q_UNLOCK(tdinfo);
42ee1e6b 885 error = 0;
984263bc 886 }
42ee1e6b
SW
887 return error;
888}
984263bc 889
42ee1e6b
SW
890/*
891 * Verify a driver is suitable for the specified operation.
892 */
893static __inline int
894kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
895{
896 return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
984263bc
MD
897}
898
899/*
42ee1e6b
SW
900 * Select a driver for an asym operation. The driver must
901 * support the necessary algorithm. The caller can constrain
902 * which device is selected with the flags parameter. The
903 * algorithm we use here is pretty stupid; just use the first
904 * driver that supports the algorithms we need. If there are
905 * multiple suitable drivers we choose the driver with the
906 * fewest active operations. We prefer hardware-backed
907 * drivers to software ones when either may be used.
984263bc 908 */
42ee1e6b
SW
909static struct cryptocap *
910crypto_select_kdriver(const struct cryptkop *krp, int flags)
984263bc 911{
42ee1e6b
SW
912 struct cryptocap *cap, *best, *blocked;
913 int match, hid;
984263bc 914
42ee1e6b 915 CRYPTO_DRIVER_ASSERT();
984263bc 916
42ee1e6b
SW
917 /*
918 * Look first for hardware crypto devices if permitted.
919 */
920 if (flags & CRYPTOCAP_F_HARDWARE)
921 match = CRYPTOCAP_F_HARDWARE;
922 else
923 match = CRYPTOCAP_F_SOFTWARE;
924 best = NULL;
925 blocked = NULL;
926again:
984263bc 927 for (hid = 0; hid < crypto_drivers_num; hid++) {
42ee1e6b
SW
928 cap = &crypto_drivers[hid];
929 /*
930 * If it's not initialized, is in the process of
931 * going away, or is not appropriate (hardware
932 * or software based on match), then skip.
933 */
934 if (cap->cc_dev == NULL ||
935 (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
936 (cap->cc_flags & match) == 0)
984263bc 937 continue;
42ee1e6b
SW
938
939 /* verify all the algorithms are supported. */
940 if (kdriver_suitable(cap, krp)) {
941 if (best == NULL ||
942 cap->cc_koperations < best->cc_koperations)
943 best = cap;
944 }
984263bc 945 }
42ee1e6b
SW
946 if (best != NULL)
947 return best;
948 if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
949 /* sort of an Algol 68-style for loop */
950 match = CRYPTOCAP_F_SOFTWARE;
951 goto again;
952 }
953 return best;
954}
955
956/*
 957 * Dispatch an asymmetric crypto request.
958 */
959static int
960crypto_kinvoke(struct cryptkop *krp, int crid)
961{
962 struct cryptocap *cap = NULL;
963 int error;
964
965 KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
966 KASSERT(krp->krp_callback != NULL,
967 ("%s: krp->crp_callback == NULL", __func__));
968
969 CRYPTO_DRIVER_LOCK();
970 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
971 cap = crypto_checkdriver(crid);
972 if (cap != NULL) {
973 /*
974 * Driver present, it must support the necessary
975 * algorithm and, if s/w drivers are excluded,
976 * it must be registered as hardware-backed.
977 */
978 if (!kdriver_suitable(cap, krp) ||
979 (!crypto_devallowsoft &&
980 (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
981 cap = NULL;
982 }
983 } else {
984 /*
985 * No requested driver; select based on crid flags.
986 */
987 if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
988 crid &= ~CRYPTOCAP_F_SOFTWARE;
989 cap = crypto_select_kdriver(krp, crid);
990 }
991 if (cap != NULL && !cap->cc_kqblocked) {
992 krp->krp_hid = cap - crypto_drivers;
993 cap->cc_koperations++;
994 CRYPTO_DRIVER_UNLOCK();
995 error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
996 CRYPTO_DRIVER_LOCK();
997 if (error == ERESTART) {
998 cap->cc_koperations--;
999 CRYPTO_DRIVER_UNLOCK();
1000 return (error);
1001 }
1002 } else {
1003 /*
1004 * NB: cap is !NULL if device is blocked; in
1005 * that case return ERESTART so the operation
1006 * is resubmitted if possible.
1007 */
1008 error = (cap == NULL) ? ENODEV : ERESTART;
1009 }
1010 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
1011
1012 if (error) {
1013 krp->krp_status = error;
1014 crypto_kdone(krp);
1015 }
1016 return 0;
1017}
1018
1019#ifdef CRYPTO_TIMING
1020static void
1021crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
1022{
1023 struct timespec now, t;
1024
1025 nanouptime(&now);
1026 t.tv_sec = now.tv_sec - tv->tv_sec;
1027 t.tv_nsec = now.tv_nsec - tv->tv_nsec;
1028 if (t.tv_nsec < 0) {
1029 t.tv_sec--;
1030 t.tv_nsec += 1000000000;
1031 }
1032 timespecadd(&ts->acc, &t);
1033 if (timespeccmp(&t, &ts->min, <))
1034 ts->min = t;
1035 if (timespeccmp(&t, &ts->max, >))
1036 ts->max = t;
1037 ts->count++;
1038
1039 *tv = now;
1040}
1041#endif
1042
1043/*
1044 * Dispatch a crypto request to the appropriate crypto devices.
1045 */
1046static int
42ee1e6b 1047crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
984263bc 1048{
42ee1e6b
SW
1049
1050 KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1051 KASSERT(crp->crp_callback != NULL,
1052 ("%s: crp->crp_callback == NULL", __func__));
1053 KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
984263bc
MD
1054
1055#ifdef CRYPTO_TIMING
1056 if (crypto_timing)
1057 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
1058#endif
42ee1e6b 1059 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
984263bc
MD
1060 struct cryptodesc *crd;
1061 u_int64_t nid;
1062
1063 /*
1064 * Driver has unregistered; migrate the session and return
1065 * an error to the caller so they'll resubmit the op.
42ee1e6b
SW
1066 *
1067 * XXX: What if there are more already queued requests for this
1068 * session?
984263bc 1069 */
42ee1e6b
SW
1070 crypto_freesession(crp->crp_sid);
1071
984263bc
MD
1072 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
1073 crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
1074
42ee1e6b
SW
1075 /* XXX propagate flags from initial session? */
1076 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
1077 CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
984263bc
MD
1078 crp->crp_sid = nid;
1079
1080 crp->crp_etype = EAGAIN;
1081 crypto_done(crp);
1082 return 0;
1083 } else {
1084 /*
1085 * Invoke the driver to process the request.
1086 */
42ee1e6b 1087 return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
984263bc
MD
1088 }
1089}
1090
1091/*
1092 * Release a set of crypto descriptors.
1093 */
1094void
1095crypto_freereq(struct cryptop *crp)
1096{
1097 struct cryptodesc *crd;
a0419b33
MD
1098#ifdef DIAGNOSTIC
1099 crypto_tdinfo_t tdinfo;
401c1428 1100 struct cryptop *crp2;
a0419b33
MD
1101 int n;
1102#endif
984263bc 1103
42ee1e6b
SW
1104 if (crp == NULL)
1105 return;
1106
1107#ifdef DIAGNOSTIC
a0419b33 1108 for (n = 0; n < ncpus; ++n) {
a0419b33
MD
1109 tdinfo = &tdinfo_array[n];
1110
1111 CRYPTO_Q_LOCK(tdinfo);
1112 TAILQ_FOREACH(crp2, &tdinfo->crp_q, crp_next) {
42ee1e6b
SW
1113 KASSERT(crp2 != crp,
1114 ("Freeing cryptop from the crypto queue (%p).",
1115 crp));
8a8d5d85 1116 }
a0419b33 1117 CRYPTO_Q_UNLOCK(tdinfo);
984263bc 1118 }
a0419b33
MD
1119 CRYPTO_RETQ_LOCK();
1120 TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
1121 KASSERT(crp2 != crp,
1122 ("Freeing cryptop from the return queue (%p).",
1123 crp));
1124 }
1125 CRYPTO_RETQ_UNLOCK();
42ee1e6b
SW
1126#endif
1127
1128 while ((crd = crp->crp_desc) != NULL) {
1129 crp->crp_desc = crd->crd_next;
0bb408f6 1130 objcache_put(cryptodesc_oc, crd);
42ee1e6b 1131 }
0bb408f6 1132 objcache_put(cryptop_oc, crp);
984263bc
MD
1133}
1134
1135/*
42ee1e6b 1136 * Acquire a set of crypto descriptors.
984263bc
MD
1137 */
1138struct cryptop *
1139crypto_getreq(int num)
1140{
1141 struct cryptodesc *crd;
1142 struct cryptop *crp;
984263bc 1143
0bb408f6 1144 crp = objcache_get(cryptop_oc, M_WAITOK);
984263bc
MD
1145 if (crp != NULL) {
1146 bzero(crp, sizeof (*crp));
1147 while (num--) {
0bb408f6 1148 crd = objcache_get(cryptodesc_oc, M_WAITOK);
984263bc
MD
1149 if (crd == NULL) {
1150 crypto_freereq(crp);
42ee1e6b 1151 return NULL;
984263bc 1152 }
984263bc 1153 bzero(crd, sizeof (*crd));
42ee1e6b 1154
984263bc
MD
1155 crd->crd_next = crp->crp_desc;
1156 crp->crp_desc = crd;
1157 }
1158 }
984263bc
MD
1159 return crp;
1160}
1161
1162/*
1163 * Invoke the callback on behalf of the driver.
1164 */
1165void
1166crypto_done(struct cryptop *crp)
1167{
1168 KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
1169 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
1170 crp->crp_flags |= CRYPTO_F_DONE;
1171 if (crp->crp_etype != 0)
1172 cryptostats.cs_errs++;
1173#ifdef CRYPTO_TIMING
1174 if (crypto_timing)
1175 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
1176#endif
42ee1e6b
SW
1177 /*
1178 * CBIMM means unconditionally do the callback immediately;
1179 * CBIFSYNC means do the callback immediately only if the
1180 * operation was done synchronously. Both are used to avoid
1181 * doing extraneous context switches; the latter is mostly
1182 * used with the software crypto driver.
1183 */
1184 if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1185 ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1186 (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
984263bc
MD
1187 /*
1188 * Do the callback directly. This is ok when the
1189 * callback routine does very little (e.g. the
1190 * /dev/crypto callback method just does a wakeup).
1191 */
1192#ifdef CRYPTO_TIMING
1193 if (crypto_timing) {
1194 /*
1195 * NB: We must copy the timestamp before
1196 * doing the callback as the cryptop is
1197 * likely to be reclaimed.
1198 */
1199 struct timespec t = crp->crp_tstamp;
1200 crypto_tstat(&cryptostats.cs_cb, &t);
1201 crp->crp_callback(crp);
1202 crypto_tstat(&cryptostats.cs_finis, &t);
1203 } else
1204#endif
1205 crp->crp_callback(crp);
1206 } else {
984263bc
MD
1207 /*
1208 * Normal case; queue the callback for the thread.
984263bc 1209 */
42ee1e6b
SW
1210 CRYPTO_RETQ_LOCK();
1211 if (CRYPTO_RETQ_EMPTY())
1212 wakeup_one(&crp_ret_q); /* shared wait channel */
984263bc 1213 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
42ee1e6b 1214 CRYPTO_RETQ_UNLOCK();
984263bc
MD
1215 }
1216}
1217
1218/*
1219 * Invoke the callback on behalf of the driver.
1220 */
1221void
1222crypto_kdone(struct cryptkop *krp)
1223{
42ee1e6b 1224 struct cryptocap *cap;
984263bc
MD
1225
1226 if (krp->krp_status != 0)
1227 cryptostats.cs_kerrs++;
42ee1e6b
SW
1228 CRYPTO_DRIVER_LOCK();
1229 /* XXX: What if driver is loaded in the meantime? */
1230 if (krp->krp_hid < crypto_drivers_num) {
1231 cap = &crypto_drivers[krp->krp_hid];
1232 cap->cc_koperations--;
1233 KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
1234 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1235 crypto_remove(cap);
1236 }
1237 CRYPTO_DRIVER_UNLOCK();
1238 CRYPTO_RETQ_LOCK();
1239 if (CRYPTO_RETQ_EMPTY())
1240 wakeup_one(&crp_ret_q); /* shared wait channel */
984263bc 1241 TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
42ee1e6b 1242 CRYPTO_RETQ_UNLOCK();
984263bc
MD
1243}
1244
1245int
1246crypto_getfeat(int *featp)
1247{
1248 int hid, kalg, feat = 0;
984263bc 1249
42ee1e6b 1250 CRYPTO_DRIVER_LOCK();
984263bc 1251 for (hid = 0; hid < crypto_drivers_num; hid++) {
42ee1e6b
SW
1252 const struct cryptocap *cap = &crypto_drivers[hid];
1253
1254 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
984263bc
MD
1255 !crypto_devallowsoft) {
1256 continue;
1257 }
c34d71fb 1258 for (kalg = 0; kalg <= CRK_ALGORITHM_MAX; kalg++)
42ee1e6b 1259 if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
984263bc
MD
1260 feat |= 1 << kalg;
1261 }
42ee1e6b 1262 CRYPTO_DRIVER_UNLOCK();
984263bc
MD
1263 *featp = feat;
1264 return (0);
1265}
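/*
 * Illustrative sketch only (the consumer code is hypothetical): a
 * caller can test for asymmetric support like this
 *
 *	int feat;
 *
 *	if (crypto_getfeat(&feat) == 0 && (feat & (1 << CRK_MOD_EXP)))
 *		...use kernel modular exponentiation...
 */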
1266
1267/*
42ee1e6b
SW
1268 * Terminate a thread at module unload. The process that
1269 * initiated this is waiting for us to signal that we're gone;
1270 * wake it up and exit. We use the driver table lock to insure
1271 * we don't do the wakeup before they're waiting. There is no
1272 * race here because the waiter sleeps on the proc lock for the
1273 * thread so it gets notified at the right time because of an
1274 * extra wakeup that's done in exit1().
984263bc
MD
1275 */
1276static void
42ee1e6b
SW
1277crypto_finis(void *chan)
1278{
1279 CRYPTO_DRIVER_LOCK();
1280 wakeup_one(chan);
1281 CRYPTO_DRIVER_UNLOCK();
1282 kthread_exit();
1283}
1284
1285/*
1286 * Crypto thread, dispatches crypto requests.
cd8ab232
MD
1287 *
1288 * MPSAFE
42ee1e6b
SW
1289 */
1290static void
a0419b33 1291crypto_proc(void *arg)
984263bc 1292{
a0419b33 1293 crypto_tdinfo_t tdinfo = arg;
984263bc
MD
1294 struct cryptop *crp, *submit;
1295 struct cryptkop *krp;
1296 struct cryptocap *cap;
42ee1e6b 1297 u_int32_t hid;
a7f45447 1298 int result, hint;
984263bc 1299
a0419b33
MD
1300 CRYPTO_Q_LOCK(tdinfo);
1301
345ee1fb
MD
1302 curthread->td_flags |= TDF_CRYPTO;
1303
42ee1e6b 1304 for (;;) {
984263bc
MD
1305 /*
1306 * Find the first element in the queue that can be
1307 * processed and look-ahead to see if multiple ops
1308 * are ready for the same driver.
1309 */
1310 submit = NULL;
1311 hint = 0;
a0419b33 1312 TAILQ_FOREACH(crp, &tdinfo->crp_q, crp_next) {
42ee1e6b 1313 hid = CRYPTO_SESID2HID(crp->crp_sid);
984263bc 1314 cap = crypto_checkdriver(hid);
42ee1e6b
SW
1315 /*
 1316 * The driver cannot disappear while there is an active
1317 * session.
1318 */
1319 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1320 __func__, __LINE__));
1321 if (cap == NULL || cap->cc_dev == NULL) {
984263bc
MD
1322 /* Op needs to be migrated, process it. */
1323 if (submit == NULL)
1324 submit = crp;
1325 break;
1326 }
1327 if (!cap->cc_qblocked) {
1328 if (submit != NULL) {
1329 /*
1330 * We stop on finding another op,
1331 * regardless whether its for the same
1332 * driver or not. We could keep
1333 * searching the queue but it might be
1334 * better to just use a per-driver
1335 * queue instead.
1336 */
42ee1e6b 1337 if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
984263bc
MD
1338 hint = CRYPTO_HINT_MORE;
1339 break;
1340 } else {
1341 submit = crp;
1342 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1343 break;
1344 /* keep scanning for more are q'd */
1345 }
1346 }
1347 }
1348 if (submit != NULL) {
a0419b33 1349 TAILQ_REMOVE(&tdinfo->crp_q, submit, crp_next);
42ee1e6b
SW
1350 hid = CRYPTO_SESID2HID(submit->crp_sid);
1351 cap = crypto_checkdriver(hid);
1352 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1353 __func__, __LINE__));
a0419b33
MD
1354
1355 CRYPTO_Q_UNLOCK(tdinfo);
42ee1e6b 1356 result = crypto_invoke(cap, submit, hint);
a0419b33
MD
1357 CRYPTO_Q_LOCK(tdinfo);
1358
984263bc
MD
1359 if (result == ERESTART) {
1360 /*
1361 * The driver ran out of resources, mark the
1362 * driver ``blocked'' for cryptop's and put
1363 * the request back in the queue. It would
 1364 * be best to put the request back where we got
1365 * it but that's hard so for now we put it
1366 * at the front. This should be ok; putting
1367 * it at the end does not work.
1368 */
1369 /* XXX validate sid again? */
42ee1e6b 1370 crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
a0419b33
MD
1371 TAILQ_INSERT_HEAD(&tdinfo->crp_q,
1372 submit, crp_next);
984263bc
MD
1373 cryptostats.cs_blocks++;
1374 }
1375 }
1376
1377 /* As above, but for key ops */
a0419b33 1378 TAILQ_FOREACH(krp, &tdinfo->crp_kq, krp_next) {
984263bc 1379 cap = crypto_checkdriver(krp->krp_hid);
42ee1e6b
SW
1380 if (cap == NULL || cap->cc_dev == NULL) {
1381 /*
1382 * Operation needs to be migrated, invalidate
1383 * the assigned device so it will reselect a
1384 * new one below. Propagate the original
1385 * crid selection flags if supplied.
1386 */
1387 krp->krp_hid = krp->krp_crid &
1388 (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
1389 if (krp->krp_hid == 0)
1390 krp->krp_hid =
1391 CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
984263bc
MD
1392 break;
1393 }
1394 if (!cap->cc_kqblocked)
1395 break;
1396 }
1397 if (krp != NULL) {
a0419b33
MD
1398 TAILQ_REMOVE(&tdinfo->crp_kq, krp, krp_next);
1399
1400 CRYPTO_Q_UNLOCK(tdinfo);
42ee1e6b 1401 result = crypto_kinvoke(krp, krp->krp_hid);
a0419b33
MD
1402 CRYPTO_Q_LOCK(tdinfo);
1403
984263bc
MD
1404 if (result == ERESTART) {
1405 /*
1406 * The driver ran out of resources, mark the
1407 * driver ``blocked'' for cryptkop's and put
1408 * the request back in the queue. It would
1409 * best to put the request back where we got
1410 * it but that's hard so for now we put it
1411 * at the front. This should be ok; putting
1412 * it at the end does not work.
1413 */
1414 /* XXX validate sid again? */
1415 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
a0419b33
MD
1416 TAILQ_INSERT_HEAD(&tdinfo->crp_kq,
1417 krp, krp_next);
984263bc
MD
1418 cryptostats.cs_kblocks++;
1419 }
1420 }
42ee1e6b
SW
1421
1422 if (submit == NULL && krp == NULL) {
1423 /*
1424 * Nothing more to be processed. Sleep until we're
1425 * woken because there are more ops to process.
1426 * This happens either by submission or by a driver
1427 * becoming unblocked and notifying us through
1428 * crypto_unblock. Note that when we wakeup we
1429 * start processing each queue again from the
1430 * front. It's not clear that it's important to
1431 * preserve this ordering since ops may finish
1432 * out of order if dispatched to different devices
1433 * and some become blocked while others do not.
1434 */
a0419b33
MD
1435 tdinfo->crp_sleep = 1;
1436 lksleep (&tdinfo->crp_q, &tdinfo->crp_lock,
1437 0, "crypto_wait", 0);
1438 tdinfo->crp_sleep = 0;
1439 if (tdinfo->crp_td == NULL)
42ee1e6b
SW
1440 break;
1441 cryptostats.cs_intrs++;
1442 }
1443 }
a0419b33 1444 CRYPTO_Q_UNLOCK(tdinfo);
42ee1e6b 1445
a0419b33 1446 crypto_finis(&tdinfo->crp_q);
984263bc
MD
1447}
1448
1449/*
42ee1e6b
SW
1450 * Crypto returns thread, does callbacks for processed crypto requests.
1451 * Callbacks are done here, rather than in the crypto drivers, because
1452 * callbacks typically are expensive and would slow interrupt handling.
cd8ab232
MD
1453 *
1454 * MPSAFE
984263bc
MD
1455 */
1456static void
a0419b33 1457crypto_ret_proc(void *dummy __unused)
984263bc 1458{
42ee1e6b
SW
1459 struct cryptop *crpt;
1460 struct cryptkop *krpt;
984263bc 1461
cd8ab232 1462 get_mplock();
42ee1e6b 1463 CRYPTO_RETQ_LOCK();
984263bc 1464 for (;;) {
42ee1e6b
SW
1465 /* Harvest return q's for completed ops */
1466 crpt = TAILQ_FIRST(&crp_ret_q);
1467 if (crpt != NULL)
1468 TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
1469
1470 krpt = TAILQ_FIRST(&crp_ret_kq);
1471 if (krpt != NULL)
1472 TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
1473
1474 if (crpt != NULL || krpt != NULL) {
1475 CRYPTO_RETQ_UNLOCK();
1476 /*
1477 * Run callbacks unlocked.
1478 */
1479 if (crpt != NULL) {
984263bc
MD
1480#ifdef CRYPTO_TIMING
1481 if (crypto_timing) {
1482 /*
1483 * NB: We must copy the timestamp before
1484 * doing the callback as the cryptop is
1485 * likely to be reclaimed.
1486 */
54734da1 1487 struct timespec t = crpt->crp_tstamp;
984263bc 1488 crypto_tstat(&cryptostats.cs_cb, &t);
42ee1e6b 1489 crpt->crp_callback(crpt);
984263bc
MD
1490 crypto_tstat(&cryptostats.cs_finis, &t);
1491 } else
1492#endif
42ee1e6b 1493 crpt->crp_callback(crpt);
984263bc 1494 }
42ee1e6b
SW
1495 if (krpt != NULL)
1496 krpt->krp_callback(krpt);
1497 CRYPTO_RETQ_LOCK();
984263bc 1498 } else {
42ee1e6b
SW
1499 /*
1500 * Nothing more to be processed. Sleep until we're
1501 * woken because there are more returns to process.
1502 */
a0419b33
MD
1503 lksleep (&crp_ret_q, &crypto_ret_q_lock,
1504 0, "crypto_ret_wait", 0);
1505 if (cryptoretthread == NULL)
42ee1e6b 1506 break;
984263bc
MD
1507 cryptostats.cs_rets++;
1508 }
1509 }
42ee1e6b
SW
1510 CRYPTO_RETQ_UNLOCK();
1511
1512 crypto_finis(&crp_ret_q);
984263bc 1513}
42ee1e6b
SW
1514
1515#ifdef DDB
1516static void
1517db_show_drivers(void)
1518{
1519 int hid;
1520
1521 db_printf("%12s %4s %4s %8s %2s %2s\n"
1522 , "Device"
1523 , "Ses"
1524 , "Kops"
1525 , "Flags"
1526 , "QB"
1527 , "KB"
1528 );
1529 for (hid = 0; hid < crypto_drivers_num; hid++) {
1530 const struct cryptocap *cap = &crypto_drivers[hid];
1531 if (cap->cc_dev == NULL)
1532 continue;
1533 db_printf("%-12s %4u %4u %08x %2u %2u\n"
1534 , device_get_nameunit(cap->cc_dev)
1535 , cap->cc_sessions
1536 , cap->cc_koperations
1537 , cap->cc_flags
1538 , cap->cc_qblocked
1539 , cap->cc_kqblocked
1540 );
1541 }
1542}
1543
1544DB_SHOW_COMMAND(crypto, db_show_crypto)
1545{
a0419b33 1546 crypto_tdinfo_t tdinfo;
42ee1e6b 1547 struct cryptop *crp;
a0419b33 1548 int n;
42ee1e6b
SW
1549
1550 db_show_drivers();
1551 db_printf("\n");
1552
1553 db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
1554 "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
1555 "Desc", "Callback");
a0419b33
MD
1556
1557 for (n = 0; n < ncpus; ++n) {
1558 tdinfo = &tdinfo_array[n];
1559
1560 TAILQ_FOREACH(crp, &tdinfo->crp_q, crp_next) {
1561 db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
1562 , (int) CRYPTO_SESID2HID(crp->crp_sid)
1563 , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
1564 , crp->crp_ilen, crp->crp_olen
1565 , crp->crp_etype
1566 , crp->crp_flags
1567 , crp->crp_desc
1568 , crp->crp_callback
1569 );
1570 }
42ee1e6b
SW
1571 }
1572 if (!TAILQ_EMPTY(&crp_ret_q)) {
1573 db_printf("\n%4s %4s %4s %8s\n",
1574 "HID", "Etype", "Flags", "Callback");
1575 TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
1576 db_printf("%4u %4u %04x %8p\n"
1577 , (int) CRYPTO_SESID2HID(crp->crp_sid)
1578 , crp->crp_etype
1579 , crp->crp_flags
1580 , crp->crp_callback
1581 );
1582 }
1583 }
1584}
1585
1586DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
1587{
a0419b33 1588 crypto_tdinfo_t tdinfo;
42ee1e6b 1589 struct cryptkop *krp;
a0419b33 1590 int n;
42ee1e6b
SW
1591
1592 db_show_drivers();
1593 db_printf("\n");
1594
1595 db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
1596 "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
a0419b33
MD
1597
1598 for (n = 0; n < ncpus; ++n) {
1599 tdinfo = &tdinfo_array[n];
1600
1601 TAILQ_FOREACH(krp, &tdinfo->crp_kq, krp_next) {
1602 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
1603 , krp->krp_op
1604 , krp->krp_status
1605 , krp->krp_iparams, krp->krp_oparams
1606 , krp->krp_crid, krp->krp_hid
1607 , krp->krp_callback
1608 );
1609 }
42ee1e6b
SW
1610 }
1611 if (!TAILQ_EMPTY(&crp_ret_q)) {
1612 db_printf("%4s %5s %8s %4s %8s\n",
1613 "Op", "Status", "CRID", "HID", "Callback");
1614 TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
1615 db_printf("%4u %5u %08x %4u %8p\n"
1616 , krp->krp_op
1617 , krp->krp_status
1618 , krp->krp_crid, krp->krp_hid
1619 , krp->krp_callback
1620 );
1621 }
1622 }
1623}
1624#endif
1625
1626int crypto_modevent(module_t mod, int type, void *unused);
1627
1628/*
1629 * Initialization code, both for static and dynamic loading.
1630 * Note this is not invoked with the usual MODULE_DECLARE
1631 * mechanism but instead is listed as a dependency by the
1632 * cryptosoft driver. This guarantees proper ordering of
1633 * calls on module load/unload.
1634 */
1635int
1636crypto_modevent(module_t mod, int type, void *unused)
1637{
1638 int error = EINVAL;
1639
1640 switch (type) {
1641 case MOD_LOAD:
1642 error = crypto_init();
1643 if (error == 0 && bootverbose)
54734da1 1644 kprintf("crypto: <crypto core>\n");
42ee1e6b
SW
1645 break;
1646 case MOD_UNLOAD:
1647 /*XXX disallow if active sessions */
1648 error = 0;
1649 crypto_destroy();
1650 return 0;
1651 }
1652 return error;
1653}
1654MODULE_VERSION(crypto, 1);
1655MODULE_DEPEND(crypto, zlib, 1, 1, 1);