/* $OpenBSD: glxsb.c,v 1.7 2007/02/12 14:31:45 tom Exp $ */

/*
 * Copyright (c) 2006 Tom Cosgrove <tom@openbsd.org>
 * Copyright (c) 2003, 2004 Theo de Raadt
 * Copyright (c) 2003 Jason Wright
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/glxsb/glxsb.c,v 1.3 2008/11/17 07:09:40 philip Exp $
 */

/*
 * Driver for the security block on the AMD Geode LX processors
 * http://www.amd.com/files/connectivitysolutions/geode/geode_lx/33234d_lx_ds.pdf
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/spinlock2.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <sys/bus_dma.h>
#include <machine/cpufunc.h>
#include <sys/resource.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"
#include "glxsb.h"

#define PCI_VENDOR_AMD                  0x1022  /* AMD */
#define PCI_PRODUCT_AMD_GEODE_LX_CRYPTO 0x2082  /* Geode LX Crypto */

#define SB_GLD_MSR_CAP          0x58002000      /* RO - Capabilities */
#define SB_GLD_MSR_CONFIG       0x58002001      /* RW - Master Config */
#define SB_GLD_MSR_SMI          0x58002002      /* RW - SMI */
#define SB_GLD_MSR_ERROR        0x58002003      /* RW - Error */
#define SB_GLD_MSR_PM           0x58002004      /* RW - Power Mgmt */
#define SB_GLD_MSR_DIAG         0x58002005      /* RW - Diagnostic */
#define SB_GLD_MSR_CTRL         0x58002006      /* RW - Security Block Cntrl */

                                                /* For GLD_MSR_CTRL: */
#define SB_GMC_DIV0             0x0000          /* AES update divisor values */
#define SB_GMC_DIV1             0x0001
#define SB_GMC_DIV2             0x0002
#define SB_GMC_DIV3             0x0003
#define SB_GMC_DIV_MASK         0x0003
#define SB_GMC_SBI              0x0004          /* AES swap bits */
#define SB_GMC_SBY              0x0008          /* AES swap bytes */
#define SB_GMC_TW               0x0010          /* Time write (EEPROM) */
#define SB_GMC_T_SEL0           0x0000          /* RNG post-proc: none */
#define SB_GMC_T_SEL1           0x0100          /* RNG post-proc: LFSR */
#define SB_GMC_T_SEL2           0x0200          /* RNG post-proc: whitener */
#define SB_GMC_T_SEL3           0x0300          /* RNG LFSR+whitener */
#define SB_GMC_T_SEL_MASK       0x0300
#define SB_GMC_T_NE             0x0400          /* Noise (generator) Enable */
#define SB_GMC_T_TM             0x0800          /* RNG test mode */
                                                /*     (deterministic) */

/* Security Block configuration/control registers (offsets from base) */
#define SB_CTL_A                0x0000          /* RW - SB Control A */
#define SB_CTL_B                0x0004          /* RW - SB Control B */
#define SB_AES_INT              0x0008          /* RW - SB AES Interrupt */
#define SB_SOURCE_A             0x0010          /* RW - Source A */
#define SB_DEST_A               0x0014          /* RW - Destination A */
#define SB_LENGTH_A             0x0018          /* RW - Length A */
#define SB_SOURCE_B             0x0020          /* RW - Source B */
#define SB_DEST_B               0x0024          /* RW - Destination B */
#define SB_LENGTH_B             0x0028          /* RW - Length B */
#define SB_WKEY                 0x0030          /* WO - Writable Key 0-3 */
#define SB_WKEY_0               0x0030          /* WO - Writable Key 0 */
#define SB_WKEY_1               0x0034          /* WO - Writable Key 1 */
#define SB_WKEY_2               0x0038          /* WO - Writable Key 2 */
#define SB_WKEY_3               0x003C          /* WO - Writable Key 3 */
#define SB_CBC_IV               0x0040          /* RW - CBC IV 0-3 */
#define SB_CBC_IV_0             0x0040          /* RW - CBC IV 0 */
#define SB_CBC_IV_1             0x0044          /* RW - CBC IV 1 */
#define SB_CBC_IV_2             0x0048          /* RW - CBC IV 2 */
#define SB_CBC_IV_3             0x004C          /* RW - CBC IV 3 */
#define SB_RANDOM_NUM           0x0050          /* RW - Random Number */
#define SB_RANDOM_NUM_STATUS    0x0054          /* RW - Random Number Status */
#define SB_EEPROM_COMM          0x0800          /* RW - EEPROM Command */
#define SB_EEPROM_ADDR          0x0804          /* RW - EEPROM Address */
#define SB_EEPROM_DATA          0x0808          /* RW - EEPROM Data */
#define SB_EEPROM_SEC_STATE     0x080C          /* RW - EEPROM Security State */

                                                /* For SB_CTL_A and _B */
#define SB_CTL_ST               0x0001          /* Start operation (enc/dec) */
#define SB_CTL_ENC              0x0002          /* Encrypt (0 is decrypt) */
#define SB_CTL_DEC              0x0000          /* Decrypt */
#define SB_CTL_WK               0x0004          /* Use writable key (we set) */
#define SB_CTL_DC               0x0008          /* Destination coherent */
#define SB_CTL_SC               0x0010          /* Source coherent */
#define SB_CTL_CBC              0x0020          /* CBC (0 is ECB) */

                                                /* For SB_AES_INT */
#define SB_AI_DISABLE_AES_A     0x0001          /* Disable AES A compl int */
#define SB_AI_ENABLE_AES_A      0x0000          /* Enable AES A compl int */
#define SB_AI_DISABLE_AES_B     0x0002          /* Disable AES B compl int */
#define SB_AI_ENABLE_AES_B      0x0000          /* Enable AES B compl int */
#define SB_AI_DISABLE_EEPROM    0x0004          /* Disable EEPROM op comp int */
#define SB_AI_ENABLE_EEPROM     0x0000          /* Enable EEPROM op compl int */
#define SB_AI_AES_A_COMPLETE   0x10000          /* AES A operation complete */
#define SB_AI_AES_B_COMPLETE   0x20000          /* AES B operation complete */
#define SB_AI_EEPROM_COMPLETE  0x40000          /* EEPROM operation complete */

#define SB_AI_CLEAR_INTR \
        (SB_AI_DISABLE_AES_A | SB_AI_DISABLE_AES_B |\
        SB_AI_DISABLE_EEPROM | SB_AI_AES_A_COMPLETE |\
        SB_AI_AES_B_COMPLETE | SB_AI_EEPROM_COMPLETE)

#define SB_RNS_TRNG_VALID       0x0001          /* in SB_RANDOM_NUM_STATUS */

#define SB_MEM_SIZE             0x0810          /* Size of memory block */

#define SB_AES_ALIGN            0x0010          /* Source and dest buffers */
                                                /* must be 16-byte aligned */
#define SB_AES_BLOCK_SIZE       0x0010

/*
 * The Geode LX security block AES acceleration doesn't perform scatter-
 * gather: it just takes source and destination addresses.  Therefore the
 * plain- and ciphertexts need to be contiguous.  To this end, we allocate
 * a buffer for both, and accept the overhead of copying in and out.  If
 * the number of bytes in one operation is bigger than allowed for by the
 * buffer (buffer is twice the size of the max length, as it has both input
 * and output) then we have to perform multiple encryptions/decryptions.
 */

#define GLXSB_MAX_AES_LEN       16384
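
/*
 * Editor's note: an illustrative sketch, not driver code.  A request
 * larger than GLXSB_MAX_AES_LEN is simply processed in several passes
 * over the shared buffer: a 40000-byte request, for example, becomes
 * three operations of 16384, 16384 and 7232 bytes.  The loop in
 * glxsb_crypto_encdec() below has this shape:
 *
 *      tlen = crd->crd_len;
 *      while (tlen > 0) {
 *              len = (tlen > GLXSB_MAX_AES_LEN) ? GLXSB_MAX_AES_LEN : tlen;
 *              ... copy in, run the AES engine, copy out ...
 *              tlen -= len;
 *      }
 */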

MALLOC_DEFINE(M_GLXSB, "glxsb_data", "Glxsb Data");

struct glxsb_dma_map {
        bus_dmamap_t            dma_map;        /* DMA map */
        bus_dma_segment_t       dma_seg;        /* segments */
        int                     dma_nsegs;      /* #segments */
        int                     dma_size;       /* size */
        caddr_t                 dma_vaddr;      /* virtual address */
        bus_addr_t              dma_paddr;      /* physical address */
};

struct glxsb_taskop {
        struct glxsb_session    *to_ses;        /* crypto session */
        struct cryptop          *to_crp;        /* cryptop to perform */
        struct cryptodesc       *to_enccrd;     /* enccrd to perform */
        struct cryptodesc       *to_maccrd;     /* maccrd to perform */
};

struct glxsb_softc {
        device_t                sc_dev;         /* device backpointer */
        struct resource         *sc_sr;         /* resource */
        int                     sc_rid;         /* resource rid */
        struct callout          sc_rngco;       /* RNG callout */
        int                     sc_rnghz;       /* RNG callout ticks */
        bus_dma_tag_t           sc_dmat;        /* DMA tag */
        struct glxsb_dma_map    sc_dma;         /* DMA map */
        int32_t                 sc_cid;         /* crypto tag */
        uint32_t                sc_sid;         /* session id */
        TAILQ_HEAD(ses_head, glxsb_session)
                                sc_sessions;    /* crypto sessions */
        struct spinlock         sc_sessions_lock;/* sessions lock */
        struct spinlock         sc_task_mtx;    /* task mutex */
        struct taskqueue        *sc_tq;         /* task queue */
        struct task             sc_cryptotask;  /* task */
        struct glxsb_taskop     sc_to;          /* task's crypto operation */
        int                     sc_task_count;  /* tasks count */
};

static int glxsb_probe(device_t);
static int glxsb_attach(device_t);
static int glxsb_detach(device_t);

static void glxsb_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int  glxsb_dma_alloc(struct glxsb_softc *);
static void glxsb_dma_pre_op(struct glxsb_softc *, struct glxsb_dma_map *);
static void glxsb_dma_post_op(struct glxsb_softc *, struct glxsb_dma_map *);
static void glxsb_dma_free(struct glxsb_softc *, struct glxsb_dma_map *);

static void glxsb_rnd(void *);
static int  glxsb_crypto_setup(struct glxsb_softc *);
static int  glxsb_crypto_newsession(device_t, uint32_t *, struct cryptoini *);
static int  glxsb_crypto_freesession(device_t, uint64_t);
static int  glxsb_aes(struct glxsb_softc *, uint32_t, uint32_t,
        uint32_t, void *, int, void *);

static int  glxsb_crypto_encdec(struct cryptop *, struct cryptodesc *,
        struct glxsb_session *, struct glxsb_softc *);

static void glxsb_crypto_task(void *, int);
static int  glxsb_crypto_process(device_t, struct cryptop *, int);

static device_method_t glxsb_methods[] = {
        /* device interface */
        DEVMETHOD(device_probe,         glxsb_probe),
        DEVMETHOD(device_attach,        glxsb_attach),
        DEVMETHOD(device_detach,        glxsb_detach),

        /* crypto device methods */
        DEVMETHOD(cryptodev_newsession,         glxsb_crypto_newsession),
        DEVMETHOD(cryptodev_freesession,        glxsb_crypto_freesession),
        DEVMETHOD(cryptodev_process,            glxsb_crypto_process),

        {0,0}
};

static driver_t glxsb_driver = {
        "glxsb",
        glxsb_methods,
        sizeof(struct glxsb_softc)
};

static devclass_t glxsb_devclass;

DRIVER_MODULE(glxsb, pci, glxsb_driver, glxsb_devclass, NULL, NULL);
MODULE_VERSION(glxsb, 1);
MODULE_DEPEND(glxsb, crypto, 1, 1, 1);

static int
glxsb_probe(device_t dev)
{

        if (pci_get_vendor(dev) == PCI_VENDOR_AMD &&
            pci_get_device(dev) == PCI_PRODUCT_AMD_GEODE_LX_CRYPTO) {
                device_set_desc(dev,
                    "AMD Geode LX Security Block (AES-128-CBC, RNG)");
                return (BUS_PROBE_DEFAULT);
        }

        return (ENXIO);
}

static int
glxsb_attach(device_t dev)
{
        struct glxsb_softc *sc = device_get_softc(dev);
        uint64_t msr;

        sc->sc_dev = dev;
        msr = rdmsr(SB_GLD_MSR_CAP);

        /* The ID field occupies bits 23:8 of the MSR; we expect 0x1304 */
        if ((msr & 0xFFFF00) != 0x130400) {
                device_printf(dev, "unknown ID 0x%x\n",
                    (int)((msr & 0xFFFF00) >> 8));
                return (ENXIO);
        }

        pci_enable_busmaster(dev);

        /* Map in the security block configuration/control registers */
        sc->sc_rid = PCIR_BAR(0);
        sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
            RF_ACTIVE);
        if (sc->sc_sr == NULL) {
                device_printf(dev, "cannot map register space\n");
                return (ENXIO);
        }

        /*
         * Configure the Security Block.
         *
         * We want to enable the noise generator (T_NE), and enable the
         * linear feedback shift register and whitener post-processing
         * (T_SEL = 3).  Also ensure that test mode (deterministic values)
         * is disabled.
         */
        msr = rdmsr(SB_GLD_MSR_CTRL);
        msr &= ~(SB_GMC_T_TM | SB_GMC_T_SEL_MASK);
        msr |= SB_GMC_T_NE | SB_GMC_T_SEL3;
#if 0
        msr |= SB_GMC_SBI | SB_GMC_SBY;         /* for AES, if necessary */
#endif
        wrmsr(SB_GLD_MSR_CTRL, msr);

        /* Disable interrupts */
        bus_write_4(sc->sc_sr, SB_AES_INT, SB_AI_CLEAR_INTR);

        /* Allocate a contiguous DMA-able buffer to work in */
        if (glxsb_dma_alloc(sc) != 0)
                goto fail0;

        /* XXX: thread taskqueues ? */
        /* Initialize our task queue */
        sc->sc_tq = taskqueue_create("glxsb_taskq", M_NOWAIT | M_ZERO,
            taskqueue_thread_enqueue, &sc->sc_tq);
        if (sc->sc_tq == NULL) {
                device_printf(dev, "cannot create task queue\n");
                goto fail0;
        }
        if (taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
            "%s taskq", device_get_nameunit(dev)) != 0) {
                device_printf(dev, "cannot start task queue\n");
                goto fail1;
        }

        TASK_INIT(&sc->sc_cryptotask, 0, glxsb_crypto_task, sc);

        /* Initialize crypto */
        if (glxsb_crypto_setup(sc) != 0)
                goto fail1;

        /* Install a periodic collector for the "true" (AMD's word) RNG */
        if (hz > 100)
                sc->sc_rnghz = hz / 100;
        else
                sc->sc_rnghz = 1;
        callout_init_mp(&sc->sc_rngco);
        glxsb_rnd(sc);

        return (0);

fail1:
        /* XXX: thread taskqueues ? */
        taskqueue_free(sc->sc_tq);
fail0:
        bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_sr);
        return (ENXIO);
}

static int
glxsb_detach(device_t dev)
{
        struct glxsb_softc *sc = device_get_softc(dev);
        struct glxsb_session *ses;

        spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH(ses, &sc->sc_sessions, ses_next) {
                if (ses->ses_used) {
                        spin_unlock(&sc->sc_sessions_lock);
                        device_printf(dev,
                            "cannot detach, sessions still active.\n");
                        return (EBUSY);
                }
        }
        while (!TAILQ_EMPTY(&sc->sc_sessions)) {
                ses = TAILQ_FIRST(&sc->sc_sessions);
                TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
                kfree(ses, M_GLXSB);
        }
        spin_unlock(&sc->sc_sessions_lock);
        crypto_unregister_all(sc->sc_cid);
#if 0
        /* XXX: need implementation of callout_drain or workaround */
        callout_drain(&sc->sc_rngco);
#endif

        /* XXX: thread taskqueues ? */
        /* XXX: need implementation of taskqueue_drain or workaround */
        taskqueue_drain(sc->sc_tq, &sc->sc_cryptotask);

        bus_generic_detach(dev);
        glxsb_dma_free(sc, &sc->sc_dma);
        bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_sr);

        /* XXX: thread taskqueues ? */
        taskqueue_free(sc->sc_tq);

        spin_uninit(&sc->sc_sessions_lock);
        spin_uninit(&sc->sc_task_mtx);
        return (0);
}

/*
 *      callback for bus_dmamap_load()
 */
static void
glxsb_dmamap_cb(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        bus_addr_t *paddr = (bus_addr_t *)arg;

        *paddr = seg[0].ds_addr;
}

static int
glxsb_dma_alloc(struct glxsb_softc *sc)
{
        struct glxsb_dma_map *dma = &sc->sc_dma;
        int rc;

        dma->dma_nsegs = 1;
        dma->dma_size = GLXSB_MAX_AES_LEN * 2;

        /* Setup DMA descriptor area */
        rc = bus_dma_tag_create(NULL,                   /* parent */
                                SB_AES_ALIGN, 0,        /* alignments, bounds */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                dma->dma_size,          /* maxsize */
                                dma->dma_nsegs,         /* nsegments */
                                dma->dma_size,          /* maxsegsize */
                                BUS_DMA_ALLOCNOW,       /* flags */
                                &sc->sc_dmat);
        if (rc != 0) {
                device_printf(sc->sc_dev,
                    "cannot allocate DMA tag (%d)\n", rc);
                return (rc);
        }

        rc = bus_dmamem_alloc(sc->sc_dmat, (void **)&dma->dma_vaddr,
            BUS_DMA_NOWAIT, &dma->dma_map);
        if (rc != 0) {
                device_printf(sc->sc_dev,
                    "cannot allocate DMA memory of %d bytes (%d)\n",
                    dma->dma_size, rc);
                goto fail0;
        }

        rc = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
            dma->dma_size, glxsb_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
        if (rc != 0) {
                device_printf(sc->sc_dev,
                    "cannot load DMA memory for %d bytes (%d)\n",
                    dma->dma_size, rc);
                goto fail1;
        }

        return (0);

fail1:
        bus_dmamem_free(sc->sc_dmat, dma->dma_vaddr, dma->dma_map);
fail0:
        bus_dma_tag_destroy(sc->sc_dmat);
        return (rc);
}

static void
glxsb_dma_pre_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

        bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
glxsb_dma_post_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

        bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

static void
glxsb_dma_free(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

        bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
        bus_dmamem_free(sc->sc_dmat, dma->dma_vaddr, dma->dma_map);
        bus_dma_tag_destroy(sc->sc_dmat);
}

static void
glxsb_rnd(void *v)
{
        struct glxsb_softc *sc = v;
        uint32_t status;
        int32_t value;

        status = bus_read_4(sc->sc_sr, SB_RANDOM_NUM_STATUS);
        if (status & SB_RNS_TRNG_VALID) {
                value = bus_read_4(sc->sc_sr, SB_RANDOM_NUM);
                /* feed with one uint32 */
                add_true_randomness(value);
        }

        callout_reset(&sc->sc_rngco, sc->sc_rnghz, glxsb_rnd, sc);
}

static int
glxsb_crypto_setup(struct glxsb_softc *sc)
{

        sc->sc_cid = crypto_get_driverid(sc->sc_dev, CRYPTOCAP_F_HARDWARE);

        if (sc->sc_cid < 0) {
                device_printf(sc->sc_dev, "cannot get crypto driver id\n");
                return (ENOMEM);
        }

        TAILQ_INIT(&sc->sc_sessions);
        sc->sc_sid = 1;
        spin_init(&sc->sc_sessions_lock);
        spin_init(&sc->sc_task_mtx);

        if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0) != 0)
                goto crypto_fail;
        if (crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0) != 0)
                goto crypto_fail;
        if (crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0) != 0)
                goto crypto_fail;
        if (crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0) != 0)
                goto crypto_fail;
        if (crypto_register(sc->sc_cid, CRYPTO_RIPEMD160_HMAC, 0, 0) != 0)
                goto crypto_fail;
        if (crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0) != 0)
                goto crypto_fail;
        if (crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0) != 0)
                goto crypto_fail;
        if (crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0) != 0)
                goto crypto_fail;

        return (0);

crypto_fail:
        device_printf(sc->sc_dev, "cannot register crypto\n");
        crypto_unregister_all(sc->sc_cid);
        spin_uninit(&sc->sc_sessions_lock);
        spin_uninit(&sc->sc_task_mtx);
        return (ENOMEM);
}

static int
glxsb_crypto_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
        struct glxsb_softc *sc = device_get_softc(dev);
        struct glxsb_session *ses = NULL;
        struct cryptoini *encini, *macini;
        int error;

        if (sc == NULL || sidp == NULL || cri == NULL)
                return (EINVAL);

        encini = macini = NULL;
        for (; cri != NULL; cri = cri->cri_next) {
                switch (cri->cri_alg) {
                case CRYPTO_NULL_HMAC:
                case CRYPTO_MD5_HMAC:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_RIPEMD160_HMAC:
                case CRYPTO_SHA2_256_HMAC:
                case CRYPTO_SHA2_384_HMAC:
                case CRYPTO_SHA2_512_HMAC:
                        if (macini != NULL)
                                return (EINVAL);
                        macini = cri;
                        break;
                case CRYPTO_AES_CBC:
                        if (encini != NULL)
                                return (EINVAL);
                        encini = cri;
                        break;
                default:
                        return (EINVAL);
                }
        }

        /*
         * We only support HMAC algorithms to be able to work with
         * ipsec(4), so if we are asked only for authentication without
         * encryption, don't pretend we can accelerate it.
         */

        if (encini == NULL)
                return (EINVAL);

        /*
         * Look for a free session.
         *
         * Free sessions go first, so if the first session is in use,
         * we need to allocate a new one.
         */
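        /*
         * Editor's illustration of the intended invariant (not driver
         * code): free sessions are kept at the head of the list and
         * in-use sessions at the tail,
         *
         *      head -> [free] ... [free] [used] ... [used] <- tail
         *
         * so finding a used session at the head (checked below with
         * TAILQ_FIRST()) means no free session remains and a new one
         * must be allocated.
         */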

        spin_lock(&sc->sc_sessions_lock);
        ses = TAILQ_FIRST(&sc->sc_sessions);
        if (ses == NULL || ses->ses_used) {
                ses = kmalloc(sizeof(*ses), M_GLXSB, M_NOWAIT | M_ZERO);
                if (ses == NULL) {
                        spin_unlock(&sc->sc_sessions_lock);
                        return (ENOMEM);
                }
                ses->ses_id = sc->sc_sid++;
        } else {
                TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
        }
        ses->ses_used = 1;
        TAILQ_INSERT_TAIL(&sc->sc_sessions, ses, ses_next);
        spin_unlock(&sc->sc_sessions_lock);

        if (encini->cri_alg == CRYPTO_AES_CBC) {
                if (encini->cri_klen != 128) {
                        glxsb_crypto_freesession(sc->sc_dev, ses->ses_id);
                        return (EINVAL);
                }

                karc4rand(ses->ses_iv, sizeof(ses->ses_iv));
                ses->ses_klen = encini->cri_klen;

                /* Copy the key (Geode LX wants the primary key only) */
                bcopy(encini->cri_key, ses->ses_key, sizeof(ses->ses_key));
        }

        if (macini != NULL) {
                error = glxsb_hash_setup(ses, macini);
                if (error != 0) {
                        glxsb_crypto_freesession(sc->sc_dev, ses->ses_id);
                        return (error);
                }
        }

        *sidp = ses->ses_id;
        return (0);
}

static int
glxsb_crypto_freesession(device_t dev, uint64_t tid)
{
        struct glxsb_softc *sc = device_get_softc(dev);
        struct glxsb_session *ses = NULL;
        uint32_t sid = ((uint32_t)tid) & 0xffffffff;

        if (sc == NULL)
                return (EINVAL);

        spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, ses_head, ses_next) {
                if (ses->ses_id == sid)
                        break;
        }
        if (ses == NULL) {
                spin_unlock(&sc->sc_sessions_lock);
                return (EINVAL);
        }
        TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
        glxsb_hash_free(ses);
        bzero(ses, sizeof(*ses));
        ses->ses_used = 0;
        ses->ses_id = sid;
        TAILQ_INSERT_HEAD(&sc->sc_sessions, ses, ses_next);
        spin_unlock(&sc->sc_sessions_lock);

        return (0);
}

static int
glxsb_aes(struct glxsb_softc *sc, uint32_t control, uint32_t psrc,
    uint32_t pdst, void *key, int len, void *iv)
{
        uint32_t status;
        int i;

        if (len & 0xF) {
                device_printf(sc->sc_dev,
                    "len must be a multiple of 16 (not %d)\n", len);
                return (EINVAL);
        }

        /* Set the source */
        bus_write_4(sc->sc_sr, SB_SOURCE_A, psrc);

        /* Set the destination address */
        bus_write_4(sc->sc_sr, SB_DEST_A, pdst);

        /* Set the data length */
        bus_write_4(sc->sc_sr, SB_LENGTH_A, len);

        /* Set the IV */
        if (iv != NULL) {
                bus_write_region_4(sc->sc_sr, SB_CBC_IV, iv, 4);
                control |= SB_CTL_CBC;
        }

        /* Set the key */
        bus_write_region_4(sc->sc_sr, SB_WKEY, key, 4);

        /* Ask the security block to do it */
        bus_write_4(sc->sc_sr, SB_CTL_A,
            control | SB_CTL_WK | SB_CTL_DC | SB_CTL_SC | SB_CTL_ST);

        /*
         * Now wait until it is done.
         *
         * We do a busy wait.  Obviously the number of iterations of
         * the loop required to perform the AES operation depends upon
         * the number of bytes to process.
         *
         * On a 500 MHz Geode LX we see
         *
         *      length (bytes)  typical max iterations
         *          16             12
         *          64             22
         *         256             59
         *        1024            212
         *        8192          1,537
         *
         * Since we have a maximum size of operation defined in
         * GLXSB_MAX_AES_LEN, we use this constant to decide how long
         * to wait.  Allow an order of magnitude longer than it should
         * really take, just in case.
         */
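        /*
         * Editor's note: with GLXSB_MAX_AES_LEN = 16384 the loop below
         * bounds the wait at 163,840 iterations.  Extrapolating from the
         * table above (~1,537 iterations for 8,192 bytes, so roughly
         * 3,100 for a maximum-sized operation), that is a safety margin
         * of about 50x.
         */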

        for (i = 0; i < GLXSB_MAX_AES_LEN * 10; i++) {
                status = bus_read_4(sc->sc_sr, SB_CTL_A);
                if ((status & SB_CTL_ST) == 0)          /* Done */
                        return (0);
        }

        device_printf(sc->sc_dev, "operation failed to complete\n");
        return (EIO);
}

static int
glxsb_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct glxsb_session *ses, struct glxsb_softc *sc)
{
        char *op_src, *op_dst;
        uint32_t op_psrc, op_pdst;
        uint8_t op_iv[SB_AES_BLOCK_SIZE], *piv;
        int error;
        int len, tlen, xlen;
        int offset;
        uint32_t control;

        if (crd == NULL || (crd->crd_len % SB_AES_BLOCK_SIZE) != 0)
                return (EINVAL);

        /* How much of our buffer will we need to use? */
        xlen = crd->crd_len > GLXSB_MAX_AES_LEN ?
            GLXSB_MAX_AES_LEN : crd->crd_len;

        /*
         * XXX Check if we can have input == output on Geode LX.
         * XXX In the meantime, use two separate (adjacent) buffers.
         */
        op_src = sc->sc_dma.dma_vaddr;
        op_dst = (char *)sc->sc_dma.dma_vaddr + xlen;

        op_psrc = sc->sc_dma.dma_paddr;
        op_pdst = sc->sc_dma.dma_paddr + xlen;

        if (crd->crd_flags & CRD_F_ENCRYPT) {
                control = SB_CTL_ENC;
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
                else
                        bcopy(ses->ses_iv, op_iv, sizeof(op_iv));

                if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                        crypto_copyback(crp->crp_flags, crp->crp_buf,
                            crd->crd_inject, sizeof(op_iv), op_iv);
                }
        } else {
                control = SB_CTL_DEC;
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
                else {
                        crypto_copydata(crp->crp_flags, crp->crp_buf,
                            crd->crd_inject, sizeof(op_iv), op_iv);
                }
        }

        offset = 0;
        tlen = crd->crd_len;
        piv = op_iv;

        /* Process the data in GLXSB_MAX_AES_LEN chunks */
        while (tlen > 0) {
                len = (tlen > GLXSB_MAX_AES_LEN) ? GLXSB_MAX_AES_LEN : tlen;
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                    crd->crd_skip + offset, len, op_src);

                glxsb_dma_pre_op(sc, &sc->sc_dma);

                error = glxsb_aes(sc, control, op_psrc, op_pdst, ses->ses_key,
                    len, op_iv);

                glxsb_dma_post_op(sc, &sc->sc_dma);
                if (error != 0)
                        return (error);

                crypto_copyback(crp->crp_flags, crp->crp_buf,
                    crd->crd_skip + offset, len, op_dst);

                offset += len;
                tlen -= len;

                if (tlen <= 0) {        /* Ideally, just == 0 */
                        /* Finished - put the IV in session IV */
                        piv = ses->ses_iv;
                }

                /*
                 * Copy out last block for use as next iteration/session IV.
                 *
                 * piv is set to op_iv[] before the loop starts, but is
                 * set to ses->ses_iv if we're going to exit the loop this
                 * time.
                 */
                if (crd->crd_flags & CRD_F_ENCRYPT)
                        bcopy(op_dst + len - sizeof(op_iv), piv, sizeof(op_iv));
                else {
                        /* Decryption, only need this if another iteration */
                        if (tlen > 0) {
                                bcopy(op_src + len - sizeof(op_iv), piv,
                                    sizeof(op_iv));
                        }
                }
        } /* while */

        /* All AES processing has now been done. */
        bzero(sc->sc_dma.dma_vaddr, xlen * 2);

        return (0);
}

static void
glxsb_crypto_task(void *arg, int pending)
{
        struct glxsb_softc *sc = arg;
        struct glxsb_session *ses;
        struct cryptop *crp;
        struct cryptodesc *enccrd, *maccrd;
        int error;

        maccrd = sc->sc_to.to_maccrd;
        enccrd = sc->sc_to.to_enccrd;
        crp = sc->sc_to.to_crp;
        ses = sc->sc_to.to_ses;

        /* Perform data authentication if requested before encryption */
        if (maccrd != NULL && maccrd->crd_next == enccrd) {
                error = glxsb_hash_process(ses, maccrd, crp);
                if (error != 0)
                        goto out;
        }

        error = glxsb_crypto_encdec(crp, enccrd, ses, sc);
        if (error != 0)
                goto out;

        /* Perform data authentication if requested after encryption */
        if (maccrd != NULL && enccrd->crd_next == maccrd) {
                error = glxsb_hash_process(ses, maccrd, crp);
                if (error != 0)
                        goto out;
        }
out:
        spin_lock(&sc->sc_task_mtx);
        sc->sc_task_count--;
        spin_unlock(&sc->sc_task_mtx);

        crp->crp_etype = error;
        crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
        crypto_done(crp);
}

static int
glxsb_crypto_process(device_t dev, struct cryptop *crp, int hint)
{
        struct glxsb_softc *sc = device_get_softc(dev);
        struct glxsb_session *ses;
        struct cryptodesc *crd, *enccrd, *maccrd;
        uint32_t sid;
        int error = 0;

        enccrd = maccrd = NULL;

        /* Sanity check. */
        if (crp == NULL)
                return (EINVAL);

        if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
                error = EINVAL;
                goto fail;
        }

        for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
                switch (crd->crd_alg) {
                case CRYPTO_NULL_HMAC:
                case CRYPTO_MD5_HMAC:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_RIPEMD160_HMAC:
                case CRYPTO_SHA2_256_HMAC:
                case CRYPTO_SHA2_384_HMAC:
                case CRYPTO_SHA2_512_HMAC:
                        if (maccrd != NULL) {
                                error = EINVAL;
                                goto fail;
                        }
                        maccrd = crd;
                        break;
                case CRYPTO_AES_CBC:
                        if (enccrd != NULL) {
                                error = EINVAL;
                                goto fail;
                        }
                        enccrd = crd;
                        break;
                default:
                        error = EINVAL;
                        goto fail;
                }
        }

        if (enccrd == NULL || enccrd->crd_len % AES_BLOCK_LEN != 0) {
                error = EINVAL;
                goto fail;
        }

        sid = crp->crp_sid & 0xffffffff;
        spin_lock(&sc->sc_sessions_lock);
        TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, ses_head, ses_next) {
                if (ses->ses_id == sid)
                        break;
        }
        spin_unlock(&sc->sc_sessions_lock);
        if (ses == NULL || !ses->ses_used) {
                error = EINVAL;
                goto fail;
        }

        spin_lock(&sc->sc_task_mtx);
        if (sc->sc_task_count != 0) {
                spin_unlock(&sc->sc_task_mtx);
                return (ERESTART);
        }
        sc->sc_task_count++;

        sc->sc_to.to_maccrd = maccrd;
        sc->sc_to.to_enccrd = enccrd;
        sc->sc_to.to_crp = crp;
        sc->sc_to.to_ses = ses;
        spin_unlock(&sc->sc_task_mtx);
        /* XXX: thread taskqueues ? */
        taskqueue_enqueue(sc->sc_tq, &sc->sc_cryptotask);
        return (0);

fail:
        crp->crp_etype = error;
        crypto_done(crp);
        return (error);
}