/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/iwl/if_iwl.c,v 1.1 2008/03/05 14:10:39 sephe Exp $
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/msgport2.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/netmsg2.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include <bus/pci/pcidevs.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_iwlreg.h"
#include "if_iwlvar.h"
#include "iwl2100var.h"

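/*
 * Glue between the PCI bus front-end in this file and the chip specific
 * back-ends (currently only iwl2100): each back-end is described by an
 * iwl_devinfo with its register BAR and its attach/detach/shutdown hooks,
 * and its softc shares storage with the common iwlcom state through the
 * union in iwl_softc.
 */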
struct iwl_devinfo {
        const char      *desc;
        int             bar;
        int             (*attach)(device_t);
        void            (*detach)(device_t);
        int             (*shutdown)(device_t);
};

struct iwl_softc {
        union {
                struct iwlcom common;
                struct iwl2100_softc sc2100;
        } u;
        const struct iwl_devinfo *sc_info;
};

static int      iwl_probe(device_t);
static int      iwl_attach(device_t);
static int      iwl_detach(device_t);
static int      iwl_shutdown(device_t);

static void     iwl_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void     iwl_service_loop(void *);
static int      iwl_put_port(struct lwkt_port *, struct lwkt_msg *);
static void     iwl_destroy_thread_dispatch(struct netmsg *);

static const struct iwl_devinfo iwl2100_devinfo = {
        .desc =         IWL2100_DESC,
        .bar =          IWL2100_PCIR_BAR,
        .attach =       iwl2100_attach,
        .detach =       iwl2100_detach,
        .shutdown =     iwl2100_shutdown
};

static const struct iwl_dev {
        uint16_t        vid;
        uint16_t        did;
        const struct iwl_devinfo *info;
} iwl_devices[] = {
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PRO_WL_2100, &iwl2100_devinfo },
        { 0, 0, NULL }
};

static device_method_t iwl_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwl_probe),
        DEVMETHOD(device_attach,        iwl_attach),
        DEVMETHOD(device_detach,        iwl_detach),
        DEVMETHOD(device_shutdown,      iwl_shutdown),

        { 0, 0 }
};

static driver_t iwl_driver = {
        "iwl",
        iwl_methods,
        sizeof(struct iwl_softc)
};

static devclass_t iwl_devclass;

DRIVER_MODULE(iwl, pci, iwl_driver, iwl_devclass, 0, 0);

MODULE_DEPEND(iwl, wlan, 1, 1, 1);
MODULE_DEPEND(iwl, pci, 1, 1, 1);

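/* 802.11b rate set; rates are in units of 500Kbps (1, 2, 5.5 and 11Mbps) */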
const struct ieee80211_rateset iwl_rateset_11b = { 4, { 2, 4, 11, 22 } };

static int
iwl_probe(device_t dev)
{
        const struct iwl_dev *d;
        uint16_t did, vid;

        vid = pci_get_vendor(dev);
        did = pci_get_device(dev);

        for (d = iwl_devices; d->info != NULL; ++d) {
                if (d->did == did && d->vid == vid) {
                        struct iwl_softc *sc = device_get_softc(dev);

                        device_set_desc(dev, d->info->desc);
                        sc->sc_info = d->info;
                        return 0;
                }
        }
        return ENXIO;
}

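/*
 * Common attach path: make sure the chip is powered up (D0), enable bus
 * mastering, map the register BAR, allocate the interrupt and a per-device
 * sysctl tree, then hand control to the chip specific attach routine.
 * Any failure after the memory mapping funnels through iwl_detach() for
 * cleanup.
 */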
static int
iwl_attach(device_t dev)
{
        struct iwl_softc *sc = device_get_softc(dev);
        struct iwlcom *iwl = &sc->u.common;
        struct ifnet *ifp = &iwl->iwl_ic.ic_if;
        int error, bar;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        bar = sc->sc_info->bar;

#ifndef BURN_BRIDGES
        if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
                uint32_t irq, mem;

                irq = pci_read_config(dev, PCIR_INTLINE, 4);
                mem = pci_read_config(dev, bar, 4);

                device_printf(dev, "chip is in D%d power mode "
                    "-- setting to D0\n", pci_get_powerstate(dev));

                pci_set_powerstate(dev, PCI_POWERSTATE_D0);

                pci_write_config(dev, PCIR_INTLINE, irq, 4);
                pci_write_config(dev, bar, mem, 4);
        }
#endif  /* !BURN_BRIDGES */

        /* Enable bus mastering */
        pci_enable_busmaster(dev);

        /*
         * Allocate IO memory
         */
        iwl->iwl_mem_rid = bar;
        iwl->iwl_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                                  &iwl->iwl_mem_rid, RF_ACTIVE);
        if (iwl->iwl_mem_res == NULL) {
                device_printf(dev, "can't allocate IO memory\n");
                return ENXIO;
        }
        iwl->iwl_mem_bt = rman_get_bustag(iwl->iwl_mem_res);
        iwl->iwl_mem_bh = rman_get_bushandle(iwl->iwl_mem_res);

        /*
         * Allocate IRQ
         */
        iwl->iwl_irq_rid = 0;
        iwl->iwl_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                                  &iwl->iwl_irq_rid,
                                                  RF_SHAREABLE | RF_ACTIVE);
        if (iwl->iwl_irq_res == NULL) {
                device_printf(dev, "can't allocate irq\n");
                error = ENXIO;
                goto back;
        }

        /*
         * Create sysctl tree
         */
        sysctl_ctx_init(&iwl->iwl_sysctl_ctx);
        iwl->iwl_sysctl_tree = SYSCTL_ADD_NODE(&iwl->iwl_sysctl_ctx,
                                               SYSCTL_STATIC_CHILDREN(_hw),
                                               OID_AUTO,
                                               device_get_nameunit(dev),
                                               CTLFLAG_RD, 0, "");
        if (iwl->iwl_sysctl_tree == NULL) {
                device_printf(dev, "can't add sysctl node\n");
                error = ENXIO;
                goto back;
        }

        /*
         * Device specific attach
         */
        error = sc->sc_info->attach(dev);
back:
        if (error)
                iwl_detach(dev);
        return error;
}

static int
iwl_detach(device_t dev)
{
        struct iwl_softc *sc = device_get_softc(dev);
        struct iwlcom *iwl = &sc->u.common;

        sc->sc_info->detach(dev);

        if (iwl->iwl_sysctl_tree != NULL)
                sysctl_ctx_free(&iwl->iwl_sysctl_ctx);

        if (iwl->iwl_irq_res != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, iwl->iwl_irq_rid,
                                     iwl->iwl_irq_res);
        }

        if (iwl->iwl_mem_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, iwl->iwl_mem_rid,
                                     iwl->iwl_mem_res);
        }
        return 0;
}

static int
iwl_shutdown(device_t dev)
{
        struct iwl_softc *sc = device_get_softc(dev);

        return sc->sc_info->shutdown(dev);
}

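/*
 * Indirect register access: the target address is latched into
 * IWL_IND_ADDR and the data is then read from or written to IWL_IND_DATA.
 */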
void
iwl_ind_write_4(struct iwlcom *iwl, uint32_t addr, uint32_t data)
{
        IWL_WRITE_4(iwl, IWL_IND_ADDR, addr);
        IWL_WRITE_4(iwl, IWL_IND_DATA, data);
}

void
iwl_ind_write_2(struct iwlcom *iwl, uint32_t addr, uint16_t data)
{
        IWL_WRITE_4(iwl, IWL_IND_ADDR, addr);
        IWL_WRITE_2(iwl, IWL_IND_DATA, data);
}

void
iwl_ind_write_1(struct iwlcom *iwl, uint32_t addr, uint8_t data)
{
        IWL_WRITE_4(iwl, IWL_IND_ADDR, addr);
        IWL_WRITE_1(iwl, IWL_IND_DATA, data);
}

uint32_t
iwl_ind_read_4(struct iwlcom *iwl, uint32_t addr)
{
        IWL_WRITE_4(iwl, IWL_IND_ADDR, addr);
        return IWL_READ_4(iwl, IWL_IND_DATA);
}

uint16_t
iwl_ind_read_2(struct iwlcom *iwl, uint32_t addr)
{
        IWL_WRITE_4(iwl, IWL_IND_ADDR, addr);
        return IWL_READ_2(iwl, IWL_IND_DATA);
}

uint8_t
iwl_ind_read_1(struct iwlcom *iwl, uint32_t addr)
{
        IWL_WRITE_4(iwl, IWL_IND_ADDR, addr);
        return IWL_READ_1(iwl, IWL_IND_DATA);
}

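/*
 * Helpers for bit-banging the serial EEPROM through the indirect
 * IWL_EEPROM_IND_CSR register: EEPROM_WRITE latches one CSR value,
 * while EEPROM_SET_BIT/EEPROM_CLR_BIT clock a 1 or 0 data bit out on DI
 * by toggling SK with CS asserted.
 */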
#define EEPROM_WRITE(iwl, data) \
do { \
        iwl_ind_write_4((iwl), IWL_EEPROM_IND_CSR, (data)); \
        DELAY(1); \
} while (0)

#define EEPROM_SET_BIT(iwl) \
do { \
        EEPROM_WRITE((iwl), IWL_EEBIT_CS | IWL_EEBIT_DI); \
        EEPROM_WRITE((iwl), IWL_EEBIT_CS | IWL_EEBIT_DI | IWL_EEBIT_SK); \
} while (0)

#define EEPROM_CLR_BIT(iwl) \
do { \
        EEPROM_WRITE((iwl), IWL_EEBIT_CS); \
        EEPROM_WRITE((iwl), IWL_EEBIT_CS | IWL_EEBIT_SK); \
} while (0)

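/*
 * Read one 16 bit word from the serial EEPROM: select the chip, clock out
 * the READ opcode and the 8 bit word offset MSB first, then clock in the
 * 16 data bits from DO and deselect the chip.
 */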
uint16_t
iwl_read_eeprom(struct iwlcom *iwl, uint8_t ofs)
{
        uint16_t ret;
        int i;

        /* Chip select */
        EEPROM_WRITE(iwl, 0);
        EEPROM_WRITE(iwl, IWL_EEBIT_CS);
        EEPROM_WRITE(iwl, IWL_EEBIT_CS | IWL_EEBIT_SK);
        EEPROM_WRITE(iwl, IWL_EEBIT_CS);

        /* Send READ opcode (0x2) */
        EEPROM_SET_BIT(iwl);
        EEPROM_SET_BIT(iwl); /* READ opcode */
        EEPROM_CLR_BIT(iwl); /* READ opcode */

        /* Send offset */
        for (i = NBBY - 1; i >= 0; --i) {
                if (ofs & (1 << i))
                        EEPROM_SET_BIT(iwl);
                else
                        EEPROM_CLR_BIT(iwl);
        }

        /* Kick start */
        EEPROM_WRITE(iwl, IWL_EEBIT_CS);

        /* Read data */
        ret = 0;
        for (i = 0; i < (sizeof(ret) * NBBY); ++i) {
                EEPROM_WRITE(iwl, IWL_EEBIT_CS | IWL_EEBIT_SK);
                EEPROM_WRITE(iwl, IWL_EEBIT_CS);

                ret <<= 1;
                if (iwl_ind_read_4(iwl, IWL_EEPROM_IND_CSR) & IWL_EEBIT_DO)
                        ret |= 1;
        }

        /* Stop */
        EEPROM_WRITE(iwl, 0);

        /* Chip de-select */
        EEPROM_WRITE(iwl, IWL_EEBIT_CS);
        EEPROM_WRITE(iwl, 0);
        EEPROM_WRITE(iwl, IWL_EEBIT_SK);

        return le16toh(ret);
}

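/*
 * bus_dmamap_load() callback for ring memory; the ring must fit in a
 * single segment, whose bus address is returned through the argument.
 */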
static void
iwl_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        KASSERT(nseg == 1, ("too many segments\n"));
        *((bus_addr_t *)arg) = seg->ds_addr;
}

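/*
 * Allocate a physically contiguous, IWL_ALIGN aligned DMA memory block:
 * create the tag, allocate zeroed memory and load the map, returning the
 * tag, KVA, bus address and map to the caller.  On failure everything
 * created so far is torn down and *dtag is reset to NULL, so
 * iwl_dma_mem_destroy() may be called unconditionally afterwards.
 */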
int
iwl_dma_mem_create(device_t dev, bus_dma_tag_t parent, bus_size_t size,
                   bus_dma_tag_t *dtag, void **addr, bus_addr_t *paddr,
                   bus_dmamap_t *dmap)
{
        int error;

        error = bus_dma_tag_create(parent, IWL_ALIGN, 0,
                                   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   size, 1, BUS_SPACE_MAXSIZE_32BIT,
                                   0, dtag);
        if (error) {
                device_printf(dev, "can't create DMA tag\n");
                return error;
        }

        error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
                                 dmap);
        if (error) {
                device_printf(dev, "can't allocate DMA mem\n");
                bus_dma_tag_destroy(*dtag);
                *dtag = NULL;
                return error;
        }

        error = bus_dmamap_load(*dtag, *dmap, *addr, size,
                                iwl_dma_ring_addr, paddr, BUS_DMA_WAITOK);
        if (error) {
                device_printf(dev, "can't load DMA mem\n");
                bus_dmamem_free(*dtag, *addr, *dmap);
                bus_dma_tag_destroy(*dtag);
                *dtag = NULL;
                return error;
        }
        return 0;
}

void
iwl_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
        if (dtag != NULL) {
                bus_dmamap_unload(dtag, dmap);
                bus_dmamem_free(dtag, addr, dmap);
                bus_dma_tag_destroy(dtag);
        }
}

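/*
 * bus_dmamap_load() callback for buffer loads: copy up to ctx->nsegs
 * segments into ctx->segs and report the actual segment count, or report
 * zero segments if the mapping needed more than the caller allowed.
 */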
void
iwl_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
                 bus_size_t mapsz __unused, int error)
{
        struct iwl_dmamap_ctx *ctx = xctx;
        int i;

        if (error)
                return;

        if (nsegs > ctx->nsegs) {
                ctx->nsegs = 0;
                return;
        }

        ctx->nsegs = nsegs;
        for (i = 0; i < nsegs; ++i)
                ctx->segs[i] = segs[i];
}

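/*
 * Custom putport function for the driver thread's message port.  A
 * synchronous message sent by the driver thread to its own port could
 * never be waited on without deadlocking, so it is dispatched directly
 * here; everything else is forwarded to the original serialized putport
 * function saved in iwl_fwd_port.
 */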
static int
iwl_put_port(struct lwkt_port *port, struct lwkt_msg *lmsg)
{
        struct iwlmsg *msg = (struct iwlmsg *)lmsg;
        struct iwlcom *iwl = msg->iwlm_softc;

        ASSERT_SERIALIZED(port->mpu_serialize);

        if ((lmsg->ms_flags & MSGF_SYNC) && curthread == &iwl->iwl_thread) {
                msg->iwlm_nmsg.nm_dispatch(&msg->iwlm_nmsg);
                if ((lmsg->ms_flags & MSGF_DONE) == 0) {
                        panic("%s: self-referential deadlock on "
                              "iwl thread port\n", __func__);
                }
                return EASYNC;
        } else {
                return iwl->iwl_fwd_port(port, lmsg);
        }
}

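/*
 * Per-device service thread: dispatch messages arriving on the thread
 * port under the interface serializer until a handler (see
 * iwl_destroy_thread_dispatch()) sets iwl_end.
 */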
static void
iwl_service_loop(void *arg)
{
        struct iwlcom *iwl = arg;
        struct ifnet *ifp = &iwl->iwl_ic.ic_if;
        struct netmsg *nmsg;

        lwkt_serialize_enter(ifp->if_serializer);
        while ((nmsg = lwkt_waitport(&iwl->iwl_thread_port, 0))) {
                nmsg->nm_dispatch(nmsg);
                if (iwl->iwl_end)
                        break;
        }
        lwkt_serialize_exit(ifp->if_serializer);

        lwkt_exit();
}

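/*
 * Set up the reply and thread message ports, interpose iwl_put_port() on
 * the thread port to avoid self-deadlock, and start the per-device service
 * thread on a CPU derived from the unit number.
 */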
void
iwl_create_thread(struct iwlcom *iwl, int unit)
{
        struct ifnet *ifp = &iwl->iwl_ic.ic_if;

        lwkt_initport_serialize(&iwl->iwl_reply_port, ifp->if_serializer);
        lwkt_initport_serialize(&iwl->iwl_thread_port, ifp->if_serializer);

        /* NB: avoid self-reference domsg */
        iwl->iwl_fwd_port = iwl->iwl_thread_port.mp_putport;
        iwl->iwl_thread_port.mp_putport = iwl_put_port;

        lwkt_create(iwl_service_loop, iwl, NULL, &iwl->iwl_thread,
                    0, unit % ncpus, "iwl%d", unit);
}

static void
iwl_destroy_thread_dispatch(struct netmsg *nmsg)
{
        struct iwlmsg *msg = (struct iwlmsg *)nmsg;
        struct iwlcom *iwl = msg->iwlm_softc;

        ASSERT_SERIALIZED(iwl->iwl_ic.ic_if.if_serializer);

        iwl->iwl_end = 1;
        lwkt_replymsg(&nmsg->nm_lmsg, 0);
}

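/*
 * Tear down the service thread: send a synchronous message whose handler
 * sets iwl_end, which makes iwl_service_loop() break out and exit the
 * thread.
 */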
void
iwl_destroy_thread(struct iwlcom *iwl)
{
        struct iwlmsg msg;

        ASSERT_SERIALIZED(iwl->iwl_ic.ic_if.if_serializer);

        iwlmsg_init(&msg, &iwl->iwl_reply_port,
                    iwl_destroy_thread_dispatch, iwl);
        lwkt_domsg(&iwl->iwl_thread_port, &msg.iwlm_nmsg.nm_lmsg, 0);
}

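/*
 * Initialize an iwlmsg: a netmsg bound to the given reply port and
 * dispatch function, carrying the softc pointer for the handler.
 */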
void
iwlmsg_init(struct iwlmsg *msg, struct lwkt_port *rport, netisr_fn_t dispatch,
            void *sc)
{
        netmsg_init(&msg->iwlm_nmsg, rport, 0, dispatch);
        msg->iwlm_softc = sc;
}

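/*
 * Queue an iwlmsg asynchronously to the given port, but only if the
 * message is not already in flight (MSGF_DONE still set), so a
 * preallocated message cannot be sent twice.
 */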
void
iwlmsg_send(struct iwlmsg *msg, struct lwkt_port *port)
{
        struct lwkt_msg *lmsg;

        lmsg = &msg->iwlm_nmsg.nm_lmsg;
        if (lmsg->ms_flags & MSGF_DONE)
                lwkt_sendmsg(port, lmsg);
}