2 * Copyright (c) 2000, 2001 Michael Smith
3 * Copyright (c) 2000 BSDi
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/mly/mly_pci.c,v 1.1.2.2 2001/03/05 20:17:24 msmith Exp $
28 * $DragonFly: src/sys/dev/raid/mly/Attic/mly_pci.c,v 1.4 2005/05/24 20:59:04 dillon Exp $
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
38 #include <sys/devicestat.h>
41 #include <machine/bus_memio.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
46 #include <bus/pci/pcireg.h>
47 #include <bus/pci/pcivar.h>
/*
 * Forward declarations.
 *
 * First group: the newbus/PCI device interface entry points wired into
 * mly_methods below.  Second group: bus_dma setup routines and their
 * bus_dmamap_load() callbacks for the s/g table and the memory mailbox.
 *
 * NOTE(review): this extract is elided and each line carries a stray
 * original-line-number prefix; verify against the pristine source.
 */
53 static int mly_pci_probe(device_t dev);
54 static int mly_pci_attach(device_t dev);
55 static int mly_pci_detach(device_t dev);
56 static int mly_pci_shutdown(device_t dev);
57 static int mly_pci_suspend(device_t dev);
58 static int mly_pci_resume(device_t dev);
59 static void mly_pci_intr(void *arg);
/* bus_dma resource mapping helpers */
61 static int mly_sg_map(struct mly_softc *sc);
62 static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
63 static int mly_mmbox_map(struct mly_softc *sc);
64 static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error);
/*
 * Newbus device method table: binds the generic device interface
 * operations to this driver's mly_pci_* handlers.
 * NOTE(review): the table's terminator and closing brace are elided
 * from this extract.
 */
66 static device_method_t mly_methods[] = {
67 /* Device interface */
68 DEVMETHOD(device_probe, mly_pci_probe),
69 DEVMETHOD(device_attach, mly_pci_attach),
70 DEVMETHOD(device_detach, mly_pci_detach),
71 DEVMETHOD(device_shutdown, mly_pci_shutdown),
72 DEVMETHOD(device_suspend, mly_pci_suspend),
73 DEVMETHOD(device_resume, mly_pci_resume),
/*
 * Driver description (name/methods fields elided in this extract) and
 * registration of the driver with the PCI bus via DRIVER_MODULE().
 * The softc size tells newbus how much per-device state to allocate.
 */
77 static driver_t mly_pci_driver = {
80 sizeof(struct mly_softc)
83 static devclass_t mly_devclass;
84 DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0);
/*
 * Table of supported controllers, matched on PCI vendor/device and
 * subvendor/subdevice IDs; also selects the hardware interface flavour
 * (StrongARM vs. i960RX register layout) and a human-readable name.
 * NOTE(review): the struct declaration head and the terminating
 * all-zero sentinel entry are elided from this extract.
 */
94 } mly_identifiers[] = {
95 {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"},
96 {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"},
97 {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX, "Mylex AcceleRAID 352"},
98 {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX, "Mylex AcceleRAID 170"},
99 {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX, "Mylex AcceleRAID 160"},
103 /********************************************************************************
104 ********************************************************************************
106 ********************************************************************************
107 ********************************************************************************/
/*
 * PCI probe: walk mly_identifiers looking for a vendor/device match;
 * subvendor 0 in the table acts as a wildcard, otherwise the
 * subvendor/subdevice pair must also match.  On a hit, set the device
 * description and return a negative priority so a more specific driver
 * can still claim the device.
 */
110 mly_pci_probe(device_t dev)
116 for (m = mly_identifiers; m->vendor != 0; m++) {
117 if ((m->vendor == pci_get_vendor(dev)) &&
118 (m->device == pci_get_device(dev)) &&
119 ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) &&
120 (m->subdevice == pci_get_subdevice(dev))))) {
122 device_set_desc(dev, m->desc);
123 return(-10); /* allow room to be overridden */
/*
 * PCI attach: bring the controller to an operational state.
 *
 * Sequence visible in this (elided) extract:
 *   1. zero the softc;
 *   2. enable PCI busmastering and verify busmaster + memory decode;
 *   3. map the register window (first BAR) and cache bus tag/handle;
 *   4. allocate and hook up a shareable interrupt;
 *   5. create the parent, data-buffer and command-packet DMA tags;
 *   6. select register offsets/doorbell polarity per hardware flavour;
 *   7. map the s/g tables and memory mailbox, then call the
 *      bus-independent mly_attach().
 *
 * NOTE(review): error-exit paths, the out: label and return statements
 * are elided here — confirm against the pristine source.
 */
130 mly_pci_attach(device_t dev)
132 struct mly_softc *sc;
141 sc = device_get_softc(dev);
142 bzero(sc, sizeof(*sc));
146 if (device_get_unit(sc->mly_dev) == 0)
150 /* assume failure is 'not configured' */
/*
 * Enable busmastering, then read the command register back to make
 * sure the write stuck (some bridges refuse it).
 */
154 * Verify that the adapter is correctly set up in PCI space.
156 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
157 command |= PCIM_CMD_BUSMASTEREN;
158 pci_write_config(dev, PCIR_COMMAND, command, 2);
159 command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2);
160 if (!(command & PCIM_CMD_BUSMASTEREN)) {
161 mly_printf(sc, "can't enable busmaster feature\n");
164 if ((command & PCIM_CMD_MEMEN) == 0) {
165 mly_printf(sc, "memory window not available\n");
170 * Allocate the PCI register window.
172 sc->mly_regs_rid = PCIR_MAPS; /* first base address register */
173 if ((sc->mly_regs_resource = bus_alloc_resource(sc->mly_dev, SYS_RES_MEMORY, &sc->mly_regs_rid,
174 0, ~0, 1, RF_ACTIVE)) == NULL) {
175 mly_printf(sc, "can't allocate register window\n");
178 sc->mly_btag = rman_get_bustag(sc->mly_regs_resource);
179 sc->mly_bhandle = rman_get_bushandle(sc->mly_regs_resource);
182 * Allocate and connect our interrupt.
185 if ((sc->mly_irq = bus_alloc_resource(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid,
186 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
187 mly_printf(sc, "can't allocate interrupt\n");
190 error = bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM,
191 mly_pci_intr, sc, &sc->mly_intr, NULL);
193 mly_printf(sc, "can't set up interrupt\n");
197 /* assume failure is 'out of memory' */
201 * Allocate the parent bus DMA tag appropriate for our PCI interface.
203 * Note that all of these controllers are 64-bit capable.
205 if (bus_dma_tag_create(NULL, /* parent */
206 1, 0, /* alignment, boundary */
207 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
208 BUS_SPACE_MAXADDR, /* highaddr */
209 NULL, NULL, /* filter, filterarg */
210 MAXBSIZE, MLY_MAXSGENTRIES, /* maxsize, nsegments */
211 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
212 BUS_DMA_ALLOCNOW, /* flags */
213 &sc->mly_parent_dmat)) {
214 mly_printf(sc, "can't allocate parent DMA tag\n");
219 * Create DMA tag for mapping buffers into controller-addressable space.
221 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
222 1, 0, /* alignment, boundary */
223 BUS_SPACE_MAXADDR, /* lowaddr */
224 BUS_SPACE_MAXADDR, /* highaddr */
225 NULL, NULL, /* filter, filterarg */
226 MAXBSIZE, MLY_MAXSGENTRIES, /* maxsize, nsegments */
227 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
229 &sc->mly_buffer_dmat)) {
230 mly_printf(sc, "can't allocate buffer DMA tag\n");
235 * Initialise the DMA tag for command packets.
237 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
238 1, 0, /* alignment, boundary */
239 BUS_SPACE_MAXADDR, /* lowaddr */
240 BUS_SPACE_MAXADDR, /* highaddr */
241 NULL, NULL, /* filter, filterarg */
242 sizeof(union mly_command_packet) * MLY_MAXCOMMANDS, 1, /* maxsize, nsegments */
243 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
245 &sc->mly_packet_dmat)) {
246 mly_printf(sc, "can't allocate command packet DMA tag\n");
/*
 * Re-scan the identifier table to pick the register layout for this
 * board.  i960RX doorbell 'true' is active-high (0x00); StrongARM
 * doorbell sense is inverted (0xff).
 */
251 * Detect the hardware interface version
253 for (i = 0; mly_identifiers[i].vendor != 0; i++) {
254 if ((mly_identifiers[i].vendor == pci_get_vendor(dev)) &&
255 (mly_identifiers[i].device == pci_get_device(dev))) {
256 sc->mly_hwif = mly_identifiers[i].hwif;
257 switch(sc->mly_hwif) {
258 case MLY_HWIF_I960RX:
259 debug(2, "set hardware up for i960RX");
260 sc->mly_doorbell_true = 0x00;
261 sc->mly_command_mailbox = MLY_I960RX_COMMAND_MAILBOX;
262 sc->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
263 sc->mly_idbr = MLY_I960RX_IDBR;
264 sc->mly_odbr = MLY_I960RX_ODBR;
265 sc->mly_error_status = MLY_I960RX_ERROR_STATUS;
266 sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
267 sc->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
269 case MLY_HWIF_STRONGARM:
270 debug(2, "set hardware up for StrongARM");
271 sc->mly_doorbell_true = 0xff; /* doorbell 'true' is 0 */
272 sc->mly_command_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
273 sc->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
274 sc->mly_idbr = MLY_STRONGARM_IDBR;
275 sc->mly_odbr = MLY_STRONGARM_ODBR;
276 sc->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
277 sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
278 sc->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
286 * Create the scatter/gather mappings.
288 if ((error = mly_sg_map(sc)))
292 * Allocate and map the memory mailbox
294 if ((error = mly_mmbox_map(sc)))
298 * Do bus-independent initialisation.
300 if ((error = mly_attach(sc)))
310 /********************************************************************************
311 * Disconnect from the controller completely, in preparation for unload.
/*
 * Refuses to detach while the device node is open (MLY_STATE_OPEN),
 * then quiesces the hardware via mly_pci_shutdown() before releasing
 * resources (remainder elided from this extract).
 */
314 mly_pci_detach(device_t dev)
316 struct mly_softc *sc = device_get_softc(dev);
321 if (sc->mly_state & MLY_STATE_OPEN)
324 if ((error = mly_pci_shutdown(dev)))
332 /********************************************************************************
333 * Bring the controller down to a dormant state and detach all child devices.
335 * This function is called before detach or system shutdown.
337 * Note that we can assume that the camq on the controller is empty, as we won't
338 * allow shutdown if any device is open.
/* Body elided from this extract beyond fetching the softc. */
341 mly_pci_shutdown(device_t dev)
343 struct mly_softc *sc = device_get_softc(dev);
351 /********************************************************************************
352 * Bring the controller to a quiescent state, ready for system suspend.
354 * We can't assume that the controller is not active at this point, so we need
355 * to mask interrupts.
/* Body elided from this extract beyond fetching the softc. */
358 mly_pci_suspend(device_t dev)
360 struct mly_softc *sc = device_get_softc(dev);
370 /********************************************************************************
371 * Bring the controller back to a state ready for operation.
/* Clear the suspend flag and re-enable controller interrupts. */
374 mly_pci_resume(device_t dev)
376 struct mly_softc *sc = device_get_softc(dev);
379 sc->mly_state &= ~MLY_STATE_SUSPEND;
380 MLY_UNMASK_INTERRUPTS(sc);
384 /*******************************************************************************
385 * Take an interrupt, or be poked by other code to look for interrupt-worthy
/*
 * Interrupt handler; arg is the softc.  Completion/queue processing is
 * elided from this extract.
 */
389 mly_pci_intr(void *arg)
391 struct mly_softc *sc = (struct mly_softc *)arg;
395 /* collect finished commands, queue anything waiting */
399 /********************************************************************************
400 ********************************************************************************
401 Bus-dependant Resource Management
402 ********************************************************************************
403 ********************************************************************************/
405 /********************************************************************************
406 * Allocate memory for the scatter/gather tables
/*
 * Creates one DMA tag/allocation sized to hold the s/g lists for every
 * command (MLY_MAXCOMMANDS * MLY_MAXSGENTRIES entries), then loads the
 * map; mly_sg_map_helper() records the resulting bus address.
 * NOTE(review): error returns and closing brace elided from this extract.
 */
409 mly_sg_map(struct mly_softc *sc)
416 * Create a single tag describing a region large enough to hold all of
417 * the s/g lists we will need.
419 segsize = sizeof(struct mly_sg_entry) * MLY_MAXCOMMANDS * MLY_MAXSGENTRIES;
420 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
421 1, 0, /* alignment, boundary */
422 BUS_SPACE_MAXADDR, /* lowaddr */
423 BUS_SPACE_MAXADDR, /* highaddr */
424 NULL, NULL, /* filter, filterarg */
425 segsize, 1, /* maxsize, nsegments */
426 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
429 mly_printf(sc, "can't allocate scatter/gather DMA tag\n");
434 * Allocate enough s/g maps for all commands and permanently map them into
435 * controller-visible space.
437 * XXX this assumes we can get enough space for all the s/g maps in one
440 if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table, BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) {
441 mly_printf(sc, "can't allocate s/g table\n");
444 bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table, segsize, mly_sg_map_helper, sc, 0);
448 /********************************************************************************
449 * Save the physical address of the base of the s/g table.
/*
 * bus_dmamap_load() callback: arg is the softc; stores the first
 * segment's bus address as the s/g table base.  The single-segment
 * assertion (if any) is elided from this extract.
 */
452 mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
454 struct mly_softc *sc = (struct mly_softc *)arg;
458 /* save base of s/g table's address in bus space */
459 sc->mly_sg_busaddr = segs->ds_addr;
462 /********************************************************************************
463 * Allocate memory for the memory-mailbox interface
/*
 * Creates a single-segment DMA tag/allocation for struct mly_mmbox,
 * loads the map (mly_mmbox_map_helper() records the bus address) and
 * zeroes the mailbox.  NOTE(review): error returns and closing brace
 * elided from this extract.
 */
466 mly_mmbox_map(struct mly_softc *sc)
470 * Create a DMA tag for a single contiguous region large enough for the
471 * memory mailbox structure.
473 if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */
474 1, 0, /* alignment, boundary */
475 BUS_SPACE_MAXADDR, /* lowaddr */
476 BUS_SPACE_MAXADDR, /* highaddr */
477 NULL, NULL, /* filter, filterarg */
478 sizeof(struct mly_mmbox), 1, /* maxsize, nsegments */
479 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
481 &sc->mly_mmbox_dmat)) {
482 mly_printf(sc, "can't allocate memory mailbox DMA tag\n");
487 * Allocate the buffer
489 if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) {
490 mly_printf(sc, "can't allocate memory mailbox\n");
493 bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox, sizeof(struct mly_mmbox),
494 mly_mmbox_map_helper, sc, 0);
495 bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox));
500 /********************************************************************************
501 * Save the physical address of the memory mailbox
/*
 * bus_dmamap_load() callback: arg is the softc; stores the first
 * segment's bus address for the memory mailbox.
 */
504 mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
506 struct mly_softc *sc = (struct mly_softc *)arg;
510 sc->mly_mmbox_busaddr = segs->ds_addr;
513 /********************************************************************************
514 * Free all of the resources associated with (sc)
516 * Should not be called if the controller is active.
/*
 * Teardown mirrors attach in reverse: per-command DMA maps, packet
 * storage, cached controller info/param buffers, data-transfer tag,
 * s/g table, memory mailbox, interrupt hookup, parent DMA tag, and
 * finally the register window.  Each release is guarded so this is
 * safe to call after a partially-failed attach.
 * NOTE(review): some guard conditions and closing braces are elided
 * from this extract.
 */
519 mly_free(struct mly_softc *sc)
521 struct mly_command *mc;
525 /* detach from CAM */
528 /* throw away command buffer DMA maps */
529 while (mly_alloc_command(sc, &mc) == 0)
530 bus_dmamap_destroy(sc->mly_buffer_dmat, mc->mc_datamap);
532 /* release the packet storage */
533 if (sc->mly_packet != NULL) {
534 bus_dmamap_unload(sc->mly_packet_dmat, sc->mly_packetmap);
535 bus_dmamem_free(sc->mly_packet_dmat, sc->mly_packet, sc->mly_packetmap);
538 /* throw away the controllerinfo structure */
539 if (sc->mly_controllerinfo != NULL)
540 free(sc->mly_controllerinfo, M_DEVBUF);
542 /* throw away the controllerparam structure */
543 if (sc->mly_controllerparam != NULL)
544 free(sc->mly_controllerparam, M_DEVBUF);
546 /* destroy data-transfer DMA tag */
547 if (sc->mly_buffer_dmat)
548 bus_dma_tag_destroy(sc->mly_buffer_dmat);
550 /* free and destroy DMA memory and tag for s/g lists */
551 if (sc->mly_sg_table) {
552 bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap);
553 bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap);
556 bus_dma_tag_destroy(sc->mly_sg_dmat);
558 /* free and destroy DMA memory and tag for memory mailbox */
560 bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap);
561 bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap);
563 if (sc->mly_mmbox_dmat)
564 bus_dma_tag_destroy(sc->mly_mmbox_dmat);
566 /* disconnect the interrupt handler */
568 bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr);
569 if (sc->mly_irq != NULL)
570 bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq);
572 /* destroy the parent DMA tag */
573 if (sc->mly_parent_dmat)
574 bus_dma_tag_destroy(sc->mly_parent_dmat);
576 /* release the register window mapping */
577 if (sc->mly_regs_resource != NULL)
578 bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource);